Merge branch 'master' into process-free-attestation
commit d11839c392
@@ -11,6 +11,7 @@ before_install:
   - sudo chown -R $USER /usr/local/include/google
 script:
   - cargo build --verbose --all --release
+  - cargo fmt --all -- --check
 rust:
   - beta
   - nightly

@@ -5,14 +5,13 @@ members = [
     "eth2/state_processing",
     "eth2/types",
     "eth2/utils/bls",
-    "eth2/utils/boolean-bitfield",
     "eth2/utils/cached_tree_hash",
     "eth2/utils/compare_fields",
     "eth2/utils/compare_fields_derive",
     "eth2/utils/eth2_config",
-    "eth2/utils/fixed_len_vec",
+    "eth2/utils/eth2_interop_keypairs",
     "eth2/utils/hashing",
-    "eth2/utils/honey-badger-split",
+    "eth2/utils/logging",
     "eth2/utils/merkle_proof",
     "eth2/utils/int_to_bytes",
     "eth2/utils/serde_hex",
@@ -28,12 +27,14 @@ members = [
     "beacon_node/store",
     "beacon_node/client",
     "beacon_node/http_server",
+    "beacon_node/rest_api",
     "beacon_node/network",
     "beacon_node/eth2-libp2p",
     "beacon_node/rpc",
     "beacon_node/version",
     "beacon_node/beacon_chain",
     "tests/ef_tests",
+    "tests/cli_util",
     "protos",
     "validator_client",
     "account_manager",

README.md
@@ -2,12 +2,12 @@

 An open-source Ethereum 2.0 client, written in Rust and maintained by Sigma Prime.

-[![Build Status]][Build Link] [![Doc Status]][Doc Link] [![Gitter Badge]][Gitter Link]
+[![Build Status]][Build Link] [![Doc Status]][Doc Link] [![Chat Badge]][Chat Link]

 [Build Status]: https://gitlab.sigmaprime.io/sigp/lighthouse/badges/master/build.svg
 [Build Link]: https://gitlab.sigmaprime.io/sigp/lighthouse/pipelines
-[Gitter Badge]: https://badges.gitter.im/Join%20Chat.svg
-[Gitter Link]: https://gitter.im/sigp/lighthouse
+[Chat Badge]: https://img.shields.io/badge/chat-discord-%237289da
+[Chat Link]: https://discord.gg/cyAszAh
 [Doc Status]: https://img.shields.io/badge/docs-master-blue.svg
 [Doc Link]: http://lighthouse-docs.sigmaprime.io/

@@ -16,12 +16,12 @@ An open-source Ethereum 2.0 client, written in Rust and maintained by Sigma Prim
 Lighthouse is:

 - Fully open-source, licensed under Apache 2.0.
-- Security-focussed, fuzzing has begun and security reviews are planned
+- Security-focused, fuzzing has begun and security reviews are planned
   for late-2019.
 - Built in [Rust](https://www.rust-lang.org/), a modern language providing unique safety guarantees and
   excellent performance (comparable to C++).
 - Funded by various organisations, including Sigma Prime, the
-  Ethereum Foundation, Consensys and private individuals.
+  Ethereum Foundation, ConsenSys and private individuals.
 - Actively working to promote an inter-operable, multi-client Ethereum 2.0.


@@ -34,16 +34,15 @@ user-facing functionality.

 Current development overview:

-- Specification `v0.6.3` implemented, optimized and passing test vectors.
-- Rust-native libp2p integrated, with Gossipsub.
-- Discv5 (P2P discovery mechanism) integration started.
+- Specification `v0.8.1` implemented, optimized and passing test vectors.
+- Rust-native libp2p with Gossipsub and Discv5.
 - Metrics via Prometheus.
 - Basic gRPC API, soon to be replaced with RESTful HTTP/JSON.

 ### Roadmap

-- **July 2019**: `lighthouse-0.0.1` release: A stable testnet for developers with a useful
-  HTTP API.
+- **Early-September 2019**: `lighthouse-0.0.1` release: A stable testnet for
+  developers with a useful HTTP API.
 - **September 2019**: Inter-operability with other Ethereum 2.0 clients.
 - **October 2019**: Public, multi-client testnet with user-facing functionality.
 - **January 2020**: Production Beacon Chain testnet.
@@ -122,7 +121,7 @@ Note that all future created nodes can use the same boot-node ENR. Once connecte
 In a third terminal window, start a validator client:

 ```
-$ ./validator-client
+$ ./validator_client
 ```

 You should be able to observe the validator signing blocks, the boot node
@@ -153,6 +152,8 @@ If you'd like some background on Sigma Prime, please see the [Lighthouse Update
 - [`protos/`](protos/): protobuf/gRPC definitions that are common across the Lighthouse project.
 - [`validator_client/`](validator_client/): the "Validator Client" binary and crates exclusively
   associated with it.
+- [`tests/`](tests/): code specific to testing, most notably contains the
+  Ethereum Foundation test vectors.

 ## Contributing

@@ -170,7 +171,9 @@ your support!

 ## Contact

-The best place for discussion is the [sigp/lighthouse gitter](https://gitter.im/sigp/lighthouse).
+The best place for discussion is the [Lighthouse Discord
+server](https://discord.gg/cyAszAh). Alternatively, you may use the
+[sigp/lighthouse gitter](https://gitter.im/sigp/lighthouse).

 ## Donations

@@ -12,5 +12,4 @@ slog-term = "^2.4.0"
 slog-async = "^2.3.0"
 validator_client = { path = "../validator_client" }
 types = { path = "../eth2/types" }
-eth2_config = { path = "../eth2/utils/eth2_config" }
 dirs = "2.0.1"

@@ -83,7 +83,7 @@ fn main() {
             }
         };
         default_dir.push(DEFAULT_DATA_DIR);
-        PathBuf::from(default_dir)
+        default_dir
     }
 };

|
@ -7,12 +7,10 @@ edition = "2018"
|
|||||||
[dependencies]
|
[dependencies]
|
||||||
eth2_config = { path = "../eth2/utils/eth2_config" }
|
eth2_config = { path = "../eth2/utils/eth2_config" }
|
||||||
types = { path = "../eth2/types" }
|
types = { path = "../eth2/types" }
|
||||||
toml = "^0.5"
|
|
||||||
store = { path = "./store" }
|
store = { path = "./store" }
|
||||||
client = { path = "client" }
|
client = { path = "client" }
|
||||||
version = { path = "version" }
|
version = { path = "version" }
|
||||||
clap = "2.32.0"
|
clap = "2.32.0"
|
||||||
serde = "1.0"
|
|
||||||
slog = { version = "^2.2.3" , features = ["max_level_trace"] }
|
slog = { version = "^2.2.3" , features = ["max_level_trace"] }
|
||||||
slog-term = "^2.4.0"
|
slog-term = "^2.4.0"
|
||||||
slog-async = "^2.3.0"
|
slog-async = "^2.3.0"
|
||||||
@ -21,6 +19,6 @@ tokio = "0.1.15"
|
|||||||
tokio-timer = "0.2.10"
|
tokio-timer = "0.2.10"
|
||||||
futures = "0.1.25"
|
futures = "0.1.25"
|
||||||
exit-future = "0.1.3"
|
exit-future = "0.1.3"
|
||||||
state_processing = { path = "../eth2/state_processing" }
|
|
||||||
env_logger = "0.6.1"
|
env_logger = "0.6.1"
|
||||||
dirs = "2.0.1"
|
dirs = "2.0.1"
|
||||||
|
logging = { path = "../eth2/utils/logging" }
|
||||||
|
@@ -5,20 +5,15 @@ authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com
 edition = "2018"

 [dependencies]
-bls = { path = "../../eth2/utils/bls" }
-boolean-bitfield = { path = "../../eth2/utils/boolean-bitfield" }
 store = { path = "../store" }
-failure = "0.1"
-failure_derive = "0.1"
-hashing = { path = "../../eth2/utils/hashing" }
 parking_lot = "0.7"
 prometheus = "^0.6"
 log = "0.4"
 operation_pool = { path = "../../eth2/operation_pool" }
-env_logger = "0.6"
 serde = "1.0"
 serde_derive = "1.0"
-serde_json = "1.0"
+slog = { version = "^2.2.3" , features = ["max_level_trace"] }
+sloggers = { version = "^0.3" }
 slot_clock = { path = "../../eth2/utils/slot_clock" }
 eth2_ssz = { path = "../../eth2/utils/ssz" }
 eth2_ssz_derive = { path = "../../eth2/utils/ssz_derive" }

@@ -8,6 +8,7 @@ use log::trace;
 use operation_pool::DepositInsertStatus;
 use operation_pool::{OperationPool, PersistedOperationPool};
 use parking_lot::{RwLock, RwLockReadGuard};
+use slog::{error, info, warn, Logger};
 use slot_clock::SlotClock;
 use state_processing::per_block_processing::errors::{
     AttesterSlashingValidationError, DepositValidationError,
@@ -71,19 +72,21 @@ pub struct BeaconChain<T: BeaconChainTypes> {
     /// Stores all operations (e.g., `Attestation`, `Deposit`, etc) that are candidates for
     /// inclusion in a block.
     pub op_pool: OperationPool<T::EthSpec>,
-    /// Stores a "snapshot" of the chain at the time the head-of-the-chain block was recieved.
+    /// Stores a "snapshot" of the chain at the time the head-of-the-chain block was received.
     canonical_head: RwLock<CheckPoint<T::EthSpec>>,
     /// The same state from `self.canonical_head`, but updated at the start of each slot with a
-    /// skip slot if no block is recieved. This is effectively a cache that avoids repeating calls
+    /// skip slot if no block is received. This is effectively a cache that avoids repeating calls
     /// to `per_slot_processing`.
     state: RwLock<BeaconState<T::EthSpec>>,
     /// The root of the genesis block.
-    genesis_block_root: Hash256,
+    pub genesis_block_root: Hash256,
     /// A state-machine that is updated with information from the network and chooses a canonical
     /// head block.
     pub fork_choice: ForkChoice<T>,
     /// Stores metrics about this `BeaconChain`.
     pub metrics: Metrics,
+    /// Logging to CLI, etc.
+    log: Logger,
 }

 impl<T: BeaconChainTypes> BeaconChain<T> {
@@ -92,28 +95,37 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         store: Arc<T::Store>,
         slot_clock: T::SlotClock,
         mut genesis_state: BeaconState<T::EthSpec>,
-        genesis_block: BeaconBlock,
+        mut genesis_block: BeaconBlock<T::EthSpec>,
         spec: ChainSpec,
+        log: Logger,
     ) -> Result<Self, Error> {
         genesis_state.build_all_caches(&spec)?;

-        let state_root = genesis_state.canonical_root();
-        store.put(&state_root, &genesis_state)?;
+        let genesis_state_root = genesis_state.canonical_root();
+        store.put(&genesis_state_root, &genesis_state)?;

+        genesis_block.state_root = genesis_state_root;
+
         let genesis_block_root = genesis_block.block_header().canonical_root();
         store.put(&genesis_block_root, &genesis_block)?;

         // Also store the genesis block under the `ZERO_HASH` key.
-        let genesis_block_root = genesis_block.block_header().canonical_root();
-        store.put(&spec.zero_hash, &genesis_block)?;
+        let genesis_block_root = genesis_block.canonical_root();
+        store.put(&Hash256::zero(), &genesis_block)?;

         let canonical_head = RwLock::new(CheckPoint::new(
             genesis_block.clone(),
             genesis_block_root,
             genesis_state.clone(),
-            state_root,
+            genesis_state_root,
         ));

+        info!(log, "BeaconChain init";
+            "genesis_validator_count" => genesis_state.validators.len(),
+            "genesis_state_root" => format!("{}", genesis_state_root),
+            "genesis_block_root" => format!("{}", genesis_block_root),
+        );
+
         Ok(Self {
             spec,
             slot_clock,
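
The new `log: Logger` field threads a `slog` logger through the chain, so events such as the `BeaconChain init` message above carry structured key-value pairs. A minimal, self-contained sketch of the pattern, assuming the `slog`, `slog-term` and `slog-async` crates already listed in the dependency hunks (the drain setup here is illustrative; Lighthouse wires its own drains via the new `logging` crate):

```rust
use slog::{info, o, Drain, Logger};

fn main() {
    // A terminal drain, wrapped in an async drain so logging never blocks.
    let decorator = slog_term::TermDecorator::new().build();
    let drain = slog_term::FullFormat::new(decorator).build().fuse();
    let drain = slog_async::Async::new(drain).build().fuse();
    let log = Logger::root(drain, o!("service" => "beacon_chain"));

    // Key-value pairs follow the `;`, mirroring the `BeaconChain init` call.
    info!(log, "BeaconChain init"; "genesis_validator_count" => 16_384);
}
```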
@@ -124,6 +136,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             fork_choice: ForkChoice::new(store.clone(), &genesis_block, genesis_block_root),
             metrics: Metrics::new()?,
             store,
+            log,
         })
     }

@@ -131,6 +144,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     pub fn from_store(
         store: Arc<T::Store>,
         spec: ChainSpec,
+        log: Logger,
     ) -> Result<Option<BeaconChain<T>>, Error> {
         let key = Hash256::from_slice(&BEACON_CHAIN_DB_KEY.as_bytes());
         let p: PersistedBeaconChain<T> = match store.get(&key) {
@@ -145,7 +159,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             spec.seconds_per_slot,
         );

-        let last_finalized_root = p.canonical_head.beacon_state.finalized_root;
+        let last_finalized_root = p.canonical_head.beacon_state.finalized_checkpoint.root;
         let last_finalized_block = &p.canonical_head.beacon_block;

         let op_pool = p.op_pool.into_operation_pool(&p.state, &spec);
@@ -160,6 +174,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             genesis_block_root: p.genesis_block_root,
             metrics: Metrics::new()?,
             store,
+            log,
         }))
     }

@@ -181,8 +196,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     /// Returns the beacon block body for each beacon block root in `roots`.
     ///
     /// Fails if any root in `roots` does not have a corresponding block.
-    pub fn get_block_bodies(&self, roots: &[Hash256]) -> Result<Vec<BeaconBlockBody>, Error> {
-        let bodies: Result<Vec<BeaconBlockBody>, _> = roots
+    pub fn get_block_bodies(
+        &self,
+        roots: &[Hash256],
+    ) -> Result<Vec<BeaconBlockBody<T::EthSpec>>, Error> {
+        let bodies: Result<Vec<_>, _> = roots
             .iter()
             .map(|root| match self.get_block(root)? {
                 Some(block) => Ok(block.body),
@@ -253,7 +271,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     /// ## Errors
     ///
     /// May return a database error.
-    pub fn get_block(&self, block_root: &Hash256) -> Result<Option<BeaconBlock>, Error> {
+    pub fn get_block(
+        &self,
+        block_root: &Hash256,
+    ) -> Result<Option<BeaconBlock<T::EthSpec>>, Error> {
         Ok(self.store.get(block_root)?)
     }

@@ -315,15 +336,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

     /// Returns the validator index (if any) for the given public key.
     ///
-    /// Information is retrieved from the present `beacon_state.validator_registry`.
+    /// Information is retrieved from the present `beacon_state.validators`.
     pub fn validator_index(&self, pubkey: &PublicKey) -> Option<usize> {
-        for (i, validator) in self
-            .head()
-            .beacon_state
-            .validator_registry
-            .iter()
-            .enumerate()
-        {
+        for (i, validator) in self.head().beacon_state.validators.iter().enumerate() {
             if validator.pubkey == *pubkey {
                 return Some(i);
             }
@@ -392,12 +407,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     ///
     /// Information is read from the current state, so only information from the present and prior
     /// epoch is available.
-    pub fn validator_attestion_slot_and_shard(
+    pub fn validator_attestation_slot_and_shard(
         &self,
         validator_index: usize,
     ) -> Result<Option<(Slot, u64)>, BeaconStateError> {
         trace!(
-            "BeaconChain::validator_attestion_slot_and_shard: validator_index: {}",
+            "BeaconChain::validator_attestation_slot_and_shard: validator_index: {}",
             validator_index
         );
         if let Some(attestation_duty) = self
@@ -463,9 +478,22 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         } else {
             *state.get_block_root(current_epoch_start_slot)?
         };
+        let target = Checkpoint {
+            epoch: state.current_epoch(),
+            root: target_root,
+        };

-        let previous_crosslink_root =
-            Hash256::from_slice(&state.get_current_crosslink(shard)?.tree_hash_root());
+        let parent_crosslink = state.get_current_crosslink(shard)?;
+        let crosslink = Crosslink {
+            shard,
+            parent_root: Hash256::from_slice(&parent_crosslink.tree_hash_root()),
+            start_epoch: parent_crosslink.end_epoch,
+            end_epoch: std::cmp::min(
+                target.epoch,
+                parent_crosslink.end_epoch + self.spec.max_epochs_per_crosslink,
+            ),
+            data_root: Hash256::zero(),
+        };

         // Collect some metrics.
         self.metrics.attestation_production_successes.inc();
@@ -473,13 +501,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

         Ok(AttestationData {
             beacon_block_root: head_block_root,
-            source_epoch: state.current_justified_epoch,
-            source_root: state.current_justified_root,
-            target_epoch: state.current_epoch(),
-            target_root,
-            shard,
-            previous_crosslink_root,
-            crosslink_data_root: Hash256::zero(),
+            source: state.current_justified_checkpoint.clone(),
+            target,
+            crosslink,
         })
     }

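
These two hunks track the spec v0.8 change that folds the flat `(epoch, root)` field pairs into `Checkpoint` values and replaces the per-field crosslink data with a full `Crosslink` container. A rough, self-contained sketch of the before/after shape, using simplified stand-ins rather than the real `eth2/types` definitions:

```rust
type Hash256 = [u8; 32];

// v0.8-style checkpoint: an (epoch, root) pair used for both source and target.
#[derive(Clone, Debug)]
struct Checkpoint {
    epoch: u64,
    root: Hash256,
}

#[derive(Clone, Debug)]
struct Crosslink {
    shard: u64,
    parent_root: Hash256,
    start_epoch: u64,
    end_epoch: u64,
    data_root: Hash256,
}

// Before: source_epoch/source_root/target_epoch/target_root/shard/... as
// flat fields. After: three nested containers.
#[derive(Clone, Debug)]
struct AttestationData {
    beacon_block_root: Hash256,
    source: Checkpoint,
    target: Checkpoint,
    crosslink: Crosslink,
}

fn main() {
    let zero = [0u8; 32];
    let data = AttestationData {
        beacon_block_root: zero,
        source: Checkpoint { epoch: 4, root: zero },
        target: Checkpoint { epoch: 5, root: zero },
        crosslink: Crosslink {
            shard: 3,
            parent_root: zero,
            start_epoch: 4,
            // Capped at `parent.end_epoch + max_epochs_per_crosslink`, as above.
            end_epoch: 5,
            data_root: zero,
        },
    };
    println!("{:?}", data);
}
```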
@@ -489,7 +513,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     /// if possible.
     pub fn process_attestation(
         &self,
-        attestation: Attestation,
+        attestation: Attestation<T::EthSpec>,
     ) -> Result<(), Error> {
         self.metrics.attestation_processing_requests.inc();
         let timer = self.metrics.attestation_processing_times.start_timer();
@@ -545,9 +569,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     /// Accept some deposit and queue it for inclusion in an appropriate block.
     pub fn process_deposit(
         &self,
+        index: u64,
         deposit: Deposit,
     ) -> Result<DepositInsertStatus, DepositValidationError> {
-        self.op_pool.insert_deposit(deposit)
+        self.op_pool.insert_deposit(index, deposit)
     }

     /// Accept some exit and queue it for inclusion in an appropriate block.
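
Deposits now arrive with their on-chain index, which lets the operation pool key deposits by index instead of inferring ordering. A hypothetical sketch of why that helps, using a `BTreeMap`-keyed pool (the real `operation_pool` types differ):

```rust
use std::collections::BTreeMap;

#[derive(Clone, Debug, PartialEq)]
struct Deposit {
    amount: u64,
}

#[derive(Debug, PartialEq)]
enum DepositInsertStatus {
    Fresh,
    Duplicate,
    Replaced(Box<Deposit>),
}

#[derive(Default)]
struct DepositPool {
    deposits: BTreeMap<u64, Deposit>,
}

impl DepositPool {
    // Keying by index makes duplicates and conflicting re-submissions
    // detectable, and iteration yields deposits in inclusion order.
    fn insert_deposit(&mut self, index: u64, deposit: Deposit) -> DepositInsertStatus {
        match self.deposits.insert(index, deposit.clone()) {
            None => DepositInsertStatus::Fresh,
            Some(prev) if prev == deposit => DepositInsertStatus::Duplicate,
            Some(prev) => DepositInsertStatus::Replaced(Box::new(prev)),
        }
    }
}

fn main() {
    let mut pool = DepositPool::default();
    assert_eq!(pool.insert_deposit(0, Deposit { amount: 32 }), DepositInsertStatus::Fresh);
    assert_eq!(pool.insert_deposit(0, Deposit { amount: 32 }), DepositInsertStatus::Duplicate);
}
```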
@@ -574,7 +599,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     /// Accept some attester slashing and queue it for inclusion in an appropriate block.
     pub fn process_attester_slashing(
         &self,
-        attester_slashing: AttesterSlashing,
+        attester_slashing: AttesterSlashing<T::EthSpec>,
     ) -> Result<(), AttesterSlashingValidationError> {
         self.op_pool
             .insert_attester_slashing(attester_slashing, &*self.state.read(), &self.spec)
@@ -583,14 +608,18 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     /// Accept some block and attempt to add it to block DAG.
     ///
     /// Will accept blocks from prior slots, however it will reject any block from a future slot.
-    pub fn process_block(&self, block: BeaconBlock) -> Result<BlockProcessingOutcome, Error> {
+    pub fn process_block(
+        &self,
+        block: BeaconBlock<T::EthSpec>,
+    ) -> Result<BlockProcessingOutcome, Error> {
         self.metrics.block_processing_requests.inc();
         let timer = self.metrics.block_processing_times.start_timer();

         let finalized_slot = self
             .state
             .read()
-            .finalized_epoch
+            .finalized_checkpoint
+            .epoch
             .start_slot(T::EthSpec::slots_per_epoch());

         if block.slot <= finalized_slot {
@@ -618,18 +647,17 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             });
         }

-        if self.store.exists::<BeaconBlock>(&block_root)? {
+        if self.store.exists::<BeaconBlock<T::EthSpec>>(&block_root)? {
             return Ok(BlockProcessingOutcome::BlockIsAlreadyKnown);
         }

         // Load the blocks parent block from the database, returning invalid if that block is not
         // found.
-        let parent_block_root = block.previous_block_root;
-        let parent_block: BeaconBlock = match self.store.get(&parent_block_root)? {
-            Some(previous_block_root) => previous_block_root,
+        let parent_block: BeaconBlock<T::EthSpec> = match self.store.get(&block.parent_root)? {
+            Some(block) => block,
             None => {
                 return Ok(BlockProcessingOutcome::ParentUnknown {
-                    parent: parent_block_root,
+                    parent: block.parent_root,
                 });
             }
         };
@@ -671,13 +699,27 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         self.store.put(&state_root, &state)?;

         // Register the new block with the fork choice service.
-        self.fork_choice.process_block(&state, &block, block_root)?;
+        if let Err(e) = self.fork_choice.process_block(&state, &block, block_root) {
+            error!(
+                self.log,
+                "fork choice failed to process_block";
+                "error" => format!("{:?}", e),
+                "block_root" => format!("{}", block_root),
+                "block_slot" => format!("{}", block.slot)
+            )
+        }

         // Execute the fork choice algorithm, enthroning a new head if discovered.
         //
         // Note: in the future we may choose to run fork-choice less often, potentially based upon
         // some heuristic around number of attestations seen for the block.
-        self.fork_choice()?;
+        if let Err(e) = self.fork_choice() {
+            error!(
+                self.log,
+                "fork choice failed to find head";
+                "error" => format!("{:?}", e)
+            )
+        };

         self.metrics.block_processing_successes.inc();
         self.metrics
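
Both fork-choice calls switch from `?` (abort block import on error) to log-and-continue, so a fork-choice failure no longer discards an otherwise-valid block. A minimal sketch of just that control-flow difference, with plain `eprintln!` standing in for the slog `error!` macro:

```rust
fn fork_choice() -> Result<(), String> {
    Err("simulated fork choice failure".to_string())
}

fn process_block() -> Result<&'static str, String> {
    // Before: `fork_choice()?;` would return early here, failing the import.
    // After: the error is reported and the block import still succeeds.
    if let Err(e) = fork_choice() {
        eprintln!("fork choice failed to find head: {:?}", e);
    }
    Ok("block imported")
}

fn main() {
    assert_eq!(process_block(), Ok("block imported"));
}
```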
@@ -695,7 +737,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     pub fn produce_block(
         &self,
         randao_reveal: Signature,
-    ) -> Result<(BeaconBlock, BeaconState<T::EthSpec>), BlockProductionError> {
+    ) -> Result<(BeaconBlock<T::EthSpec>, BeaconState<T::EthSpec>), BlockProductionError> {
         let state = self.state.read().clone();
         let slot = self
             .read_slot_clock()
@@ -717,7 +759,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         mut state: BeaconState<T::EthSpec>,
         produce_at_slot: Slot,
         randao_reveal: Signature,
-    ) -> Result<(BeaconBlock, BeaconState<T::EthSpec>), BlockProductionError> {
+    ) -> Result<(BeaconBlock<T::EthSpec>, BeaconState<T::EthSpec>), BlockProductionError> {
         self.metrics.block_production_requests.inc();
         let timer = self.metrics.block_production_times.start_timer();

@@ -728,7 +770,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

         state.build_committee_cache(RelativeEpoch::Current, &self.spec)?;

-        let previous_block_root = if state.slot > 0 {
+        let parent_root = if state.slot > 0 {
             *state
                 .get_block_root(state.slot - 1)
                 .map_err(|_| BlockProductionError::UnableToGetBlockRootFromState)?
@@ -744,24 +786,24 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

         let mut block = BeaconBlock {
             slot: state.slot,
-            previous_block_root,
+            parent_root,
             state_root: Hash256::zero(), // Updated after the state is calculated.
             signature: Signature::empty_signature(), // To be completed by a validator.
             body: BeaconBlockBody {
                 randao_reveal,
                 // TODO: replace with real data.
                 eth1_data: Eth1Data {
-                    deposit_count: 0,
+                    deposit_count: state.eth1_data.deposit_count,
                     deposit_root: Hash256::zero(),
                     block_hash: Hash256::zero(),
                 },
                 graffiti,
-                proposer_slashings,
-                attester_slashings,
-                attestations: self.op_pool.get_attestations(&state, &self.spec),
-                deposits: self.op_pool.get_deposits(&state, &self.spec),
-                voluntary_exits: self.op_pool.get_voluntary_exits(&state, &self.spec),
-                transfers: self.op_pool.get_transfers(&state, &self.spec),
+                proposer_slashings: proposer_slashings.into(),
+                attester_slashings: attester_slashings.into(),
+                attestations: self.op_pool.get_attestations(&state, &self.spec).into(),
+                deposits: self.op_pool.get_deposits(&state).into(),
+                voluntary_exits: self.op_pool.get_voluntary_exits(&state, &self.spec).into(),
+                transfers: self.op_pool.get_transfers(&state, &self.spec).into(),
             },
         };

@@ -794,7 +836,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         if beacon_block_root != self.head().beacon_block_root {
             self.metrics.fork_choice_changed_head.inc();

-            let beacon_block: BeaconBlock = self
+            let beacon_block: BeaconBlock<T::EthSpec> = self
                 .store
                 .get(&beacon_block_root)?
                 .ok_or_else(|| Error::MissingBeaconBlock(beacon_block_root))?;
@@ -805,14 +847,32 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 .get(&beacon_state_root)?
                 .ok_or_else(|| Error::MissingBeaconState(beacon_state_root))?;

+            let previous_slot = self.head().beacon_block.slot;
+            let new_slot = beacon_block.slot;
+
             // If we switched to a new chain (instead of building atop the present chain).
-            if self.head().beacon_block_root != beacon_block.previous_block_root {
+            if self.head().beacon_block_root != beacon_block.parent_root {
                 self.metrics.fork_choice_reorg_count.inc();
+                warn!(
+                    self.log,
+                    "Beacon chain re-org";
+                    "previous_slot" => previous_slot,
+                    "new_slot" => new_slot
+                );
+            } else {
+                info!(
+                    self.log,
+                    "new head block";
+                    "justified_root" => format!("{}", beacon_state.current_justified_checkpoint.root),
+                    "finalized_root" => format!("{}", beacon_state.finalized_checkpoint.root),
+                    "root" => format!("{}", beacon_block_root),
+                    "slot" => new_slot,
+                );
             };

-            let old_finalized_epoch = self.head().beacon_state.finalized_epoch;
-            let new_finalized_epoch = beacon_state.finalized_epoch;
-            let finalized_root = beacon_state.finalized_root;
+            let old_finalized_epoch = self.head().beacon_state.finalized_checkpoint.epoch;
+            let new_finalized_epoch = beacon_state.finalized_checkpoint.epoch;
+            let finalized_root = beacon_state.finalized_checkpoint.root;

             // Never revert back past a finalized epoch.
             if new_finalized_epoch < old_finalized_epoch {
@@ -822,7 +882,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             })
         } else {
             self.update_canonical_head(CheckPoint {
-                beacon_block: beacon_block,
+                beacon_block,
                 beacon_block_root,
                 beacon_state,
                 beacon_state_root,
@@ -880,7 +940,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     ) -> Result<(), Error> {
         let finalized_block = self
             .store
-            .get::<BeaconBlock>(&finalized_block_root)?
+            .get::<BeaconBlock<T::EthSpec>>(&finalized_block_root)?
             .ok_or_else(|| Error::MissingBeaconBlock(finalized_block_root))?;

         let new_finalized_epoch = finalized_block.slot.epoch(T::EthSpec::slots_per_epoch());
@@ -900,7 +960,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

     /// Returns `true` if the given block root has not been processed.
     pub fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result<bool, Error> {
-        Ok(!self.store.exists::<BeaconBlock>(beacon_block_root)?)
+        Ok(!self
+            .store
+            .exists::<BeaconBlock<T::EthSpec>>(beacon_block_root)?)
     }

     /// Dumps the entire canonical chain, from the head to genesis to a vector for analysis.
@@ -920,13 +982,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         dump.push(last_slot.clone());

         loop {
-            let beacon_block_root = last_slot.beacon_block.previous_block_root;
+            let beacon_block_root = last_slot.beacon_block.parent_root;

-            if beacon_block_root == self.spec.zero_hash {
+            if beacon_block_root == Hash256::zero() {
                 break; // Genesis has been reached.
            }

-            let beacon_block: BeaconBlock =
+            let beacon_block: BeaconBlock<T::EthSpec> =
                 self.store.get(&beacon_block_root)?.ok_or_else(|| {
                     Error::DBInconsistent(format!("Missing block {}", beacon_block_root))
                 })?;

@@ -6,7 +6,7 @@ use types::{BeaconBlock, BeaconState, EthSpec, Hash256};
 /// head, justified head and finalized head.
 #[derive(Clone, Serialize, PartialEq, Debug, Encode, Decode)]
 pub struct CheckPoint<E: EthSpec> {
-    pub beacon_block: BeaconBlock,
+    pub beacon_block: BeaconBlock<E>,
     pub beacon_block_root: Hash256,
     pub beacon_state: BeaconState<E>,
     pub beacon_state_root: Hash256,
@@ -15,7 +15,7 @@ pub struct CheckPoint<E: EthSpec> {
 impl<E: EthSpec> CheckPoint<E> {
     /// Create a new checkpoint.
     pub fn new(
-        beacon_block: BeaconBlock,
+        beacon_block: BeaconBlock<E>,
         beacon_block_root: Hash256,
         beacon_state: BeaconState<E>,
         beacon_state_root: Hash256,
@@ -31,7 +31,7 @@ impl<E: EthSpec> CheckPoint<E> {
     /// Update all fields of the checkpoint.
     pub fn update(
         &mut self,
-        beacon_block: BeaconBlock,
+        beacon_block: BeaconBlock<E>,
         beacon_block_root: Hash256,
         beacon_state: BeaconState<E>,
         beacon_state_root: Hash256,

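`BeaconBlock` picking up an `<E: EthSpec>` parameter here, and throughout this diff, follows the v0.8 move to length-bounded SSZ lists whose maximum sizes are compile-time constants of the spec. A simplified sketch of the pattern, with stand-in types and example constants rather than the real `eth2/types` definitions:

```rust
use std::marker::PhantomData;

// Stand-in for the real `EthSpec` trait: per-spec constants become
// associated items, so any container depending on them must be generic.
trait EthSpec {
    const MAX_ATTESTATIONS: usize;
    fn slots_per_epoch() -> u64;
}

struct MainnetEthSpec;

impl EthSpec for MainnetEthSpec {
    const MAX_ATTESTATIONS: usize = 128;
    fn slots_per_epoch() -> u64 {
        64
    }
}

struct BeaconBlock<E: EthSpec> {
    slot: u64,
    // The real field is a length-bounded SSZ list, e.g.
    // `VariableList<Attestation<E>, E::MaxAttestations>`.
    attestations: Vec<u64>,
    _spec: PhantomData<E>,
}

impl<E: EthSpec> BeaconBlock<E> {
    fn epoch(&self) -> u64 {
        self.slot / E::slots_per_epoch()
    }
}

fn main() {
    let block: BeaconBlock<MainnetEthSpec> = BeaconBlock {
        slot: 129,
        attestations: Vec::new(),
        _spec: PhantomData,
    };
    assert_eq!(block.epoch(), 2);
    assert!(block.attestations.len() <= MainnetEthSpec::MAX_ATTESTATIONS);
}
```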
@@ -1,6 +1,6 @@
 use crate::{BeaconChain, BeaconChainTypes};
 use lmd_ghost::LmdGhost;
-use state_processing::common::get_attesting_indices_unsorted;
+use state_processing::common::get_attesting_indices;
 use std::sync::Arc;
 use store::{Error as StoreError, Store};
 use types::{Attestation, BeaconBlock, BeaconState, BeaconStateError, Epoch, EthSpec, Hash256, Slot};
@@ -19,6 +19,7 @@ pub enum Error {

 pub struct ForkChoice<T: BeaconChainTypes> {
     backend: T::LmdGhost,
+    store: Arc<T::Store>,
     /// Used for resolving the `0x00..00` alias back to genesis.
     ///
     /// Does not necessarily need to be the _actual_ genesis, it suffices to be the finalized root
@@ -33,10 +34,11 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
     /// block.
     pub fn new(
         store: Arc<T::Store>,
-        genesis_block: &BeaconBlock,
+        genesis_block: &BeaconBlock<T::EthSpec>,
         genesis_block_root: Hash256,
     ) -> Self {
         Self {
+            store: store.clone(),
             backend: T::LmdGhost::new(store, genesis_block, genesis_block_root),
             genesis_block_root,
         }
@@ -54,18 +56,21 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
         let state = chain.current_state();

         let (block_root, block_slot) =
-            if state.current_epoch() + 1 > state.current_justified_epoch {
+            if state.current_epoch() + 1 > state.current_justified_checkpoint.epoch {
                 (
-                    state.current_justified_root,
-                    start_slot(state.current_justified_epoch),
+                    state.current_justified_checkpoint.root,
+                    start_slot(state.current_justified_checkpoint.epoch),
                 )
             } else {
-                (state.finalized_root, start_slot(state.finalized_epoch))
+                (
+                    state.finalized_checkpoint.root,
+                    start_slot(state.finalized_checkpoint.epoch),
+                )
             };

         let block = chain
             .store
-            .get::<BeaconBlock>(&block_root)?
+            .get::<BeaconBlock<T::EthSpec>>(&block_root)?
             .ok_or_else(|| Error::MissingBlock(block_root))?;

         // Resolve the `0x00.. 00` alias back to genesis
@@ -86,7 +91,7 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
         // A function that returns the weight for some validator index.
         let weight = |validator_index: usize| -> Option<u64> {
             start_state
-                .validator_registry
+                .validators
                 .get(validator_index)
                 .map(|v| v.effective_balance)
         };
@@ -103,7 +108,7 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
     pub fn process_block(
         &self,
         state: &BeaconState<T::EthSpec>,
-        block: &BeaconBlock,
+        block: &BeaconBlock<T::EthSpec>,
         block_root: Hash256,
     ) -> Result<()> {
         // Note: we never count the block as a latest message, only attestations.
@@ -127,16 +132,9 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
     pub fn process_attestation(
         &self,
         state: &BeaconState<T::EthSpec>,
-        attestation: &Attestation,
+        attestation: &Attestation<T::EthSpec>,
     ) -> Result<()> {
-        // Note: `get_attesting_indices_unsorted` requires that the beacon state caches be built.
-        let validator_indices = get_attesting_indices_unsorted(
-            state,
-            &attestation.data,
-            &attestation.aggregation_bitfield,
-        )?;
-
-        let block_hash = attestation.data.target_root;
+        let block_hash = attestation.data.beacon_block_root;

         // Ignore any attestations to the zero hash.
         //
@@ -149,13 +147,20 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
         // 2. Ignore all attestations to the zero hash.
         //
         // (1) becomes weird once we hit finality and fork choice drops the genesis block. (2) is
-        // fine becuase votes to the genesis block are not useful; all validators implicitly attest
+        // fine because votes to the genesis block are not useful; all validators implicitly attest
         // to genesis just by being present in the chain.
-        if block_hash != Hash256::zero() {
-            let block_slot = attestation
-                .data
-                .target_epoch
-                .start_slot(T::EthSpec::slots_per_epoch());
+        //
+        // Additionally, don't add any block hash to fork choice unless we have imported the block.
+        if block_hash != Hash256::zero()
+            && self
+                .store
+                .exists::<BeaconBlock<T::EthSpec>>(&block_hash)
+                .unwrap_or(false)
+        {
+            let validator_indices =
+                get_attesting_indices(state, &attestation.data, &attestation.aggregation_bits)?;
+
+            let block_slot = state.get_attestation_data_slot(&attestation.data)?;

             for validator_index in validator_indices {
                 self.backend
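
The rewritten guard only feeds an attestation's vote into LMD-GHOST when the target block has actually been imported, and treats a store read error the same as "not imported". A condensed sketch of just that predicate, with a hypothetical `Store` trait standing in for the real store API:

```rust
struct Hash256([u8; 32]);

impl Hash256 {
    fn zero() -> Self {
        Hash256([0u8; 32])
    }
    fn is_zero(&self) -> bool {
        self.0 == [0u8; 32]
    }
}

// Hypothetical stand-in for the block store.
trait Store {
    fn exists(&self, root: &Hash256) -> Result<bool, String>;
}

// Votes for the zero hash, for unknown blocks, or for blocks we cannot
// confirm we hold (store error) are all ignored rather than counted.
fn should_add_to_fork_choice<S: Store>(store: &S, block_hash: &Hash256) -> bool {
    !block_hash.is_zero() && store.exists(block_hash).unwrap_or(false)
}

struct EmptyStore;

impl Store for EmptyStore {
    fn exists(&self, _root: &Hash256) -> Result<bool, String> {
        Ok(false)
    }
}

fn main() {
    assert!(!should_add_to_fork_choice(&EmptyStore, &Hash256::zero()));
    assert!(!should_add_to_fork_choice(&EmptyStore, &Hash256([1u8; 32])));
}
```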
@@ -197,7 +202,7 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
     /// `finalized_block_root` must be the root of `finalized_block`.
     pub fn process_finalization(
         &self,
-        finalized_block: &BeaconBlock,
+        finalized_block: &BeaconBlock<T::EthSpec>,
         finalized_block_root: Hash256,
     ) -> Result<()> {
         self.backend

@@ -11,7 +11,7 @@ pub const BEACON_CHAIN_DB_KEY: &str = "PERSISTEDBEACONCHAINPERSISTEDBEA";
 #[derive(Encode, Decode)]
 pub struct PersistedBeaconChain<T: BeaconChainTypes> {
     pub canonical_head: CheckPoint<T::EthSpec>,
-    pub op_pool: PersistedOperationPool,
+    pub op_pool: PersistedOperationPool<T::EthSpec>,
     pub genesis_block_root: Hash256,
     pub state: BeaconState<T::EthSpec>,
 }

@@ -1,5 +1,6 @@
 use crate::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome};
 use lmd_ghost::LmdGhost;
+use sloggers::{null::NullLoggerBuilder, Build};
 use slot_clock::SlotClock;
 use slot_clock::TestingSlotClock;
 use state_processing::per_slot_processing;
@@ -10,7 +11,7 @@ use store::Store;
 use tree_hash::{SignedRoot, TreeHash};
 use types::{
     test_utils::TestingBeaconStateBuilder, AggregateSignature, Attestation,
-    AttestationDataAndCustodyBit, BeaconBlock, BeaconState, Bitfield, ChainSpec, Domain, EthSpec,
+    AttestationDataAndCustodyBit, BeaconBlock, BeaconState, BitList, ChainSpec, Domain, EthSpec,
     Hash256, Keypair, RelativeEpoch, SecretKey, Signature, Slot,
 };

@@ -64,6 +65,8 @@ where

 /// A testing harness which can instantiate a `BeaconChain` and populate it with blocks and
 /// attestations.
+///
+/// Used for testing.
 pub struct BeaconChainHarness<L, E>
 where
     L: LmdGhost<MemoryStore, E>,
@@ -92,6 +95,9 @@ where
         let mut genesis_block = BeaconBlock::empty(&spec);
         genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root());

+        let builder = NullLoggerBuilder;
+        let log = builder.build().expect("logger should build");
+
         // Slot clock
         let slot_clock = TestingSlotClock::new(
             spec.genesis_slot,
@@ -105,6 +111,7 @@ where
             genesis_state,
             genesis_block,
             spec.clone(),
+            log,
         )
         .expect("Terminate if beacon chain generation fails");

@@ -209,7 +216,7 @@ where
         mut state: BeaconState<E>,
         slot: Slot,
         block_strategy: BlockStrategy,
-    ) -> (BeaconBlock, BeaconState<E>) {
+    ) -> (BeaconBlock<E>, BeaconState<E>) {
         if slot < state.slot {
             panic!("produce slot cannot be prior to the state slot");
         }
@@ -295,12 +302,9 @@ where
                 )
                 .expect("should produce attestation data");

-                let mut aggregation_bitfield = Bitfield::new();
-                aggregation_bitfield.set(i, true);
-                aggregation_bitfield.set(committee_size, false);
-
-                let mut custody_bitfield = Bitfield::new();
-                custody_bitfield.set(committee_size, false);
+                let mut aggregation_bits = BitList::with_capacity(committee_size).unwrap();
+                aggregation_bits.set(i, true).unwrap();
+                let custody_bits = BitList::with_capacity(committee_size).unwrap();

                 let signature = {
                     let message = AttestationDataAndCustodyBit {
@@ -310,7 +314,7 @@ where
                     .tree_hash_root();

                     let domain =
-                        spec.get_domain(data.target_epoch, Domain::Attestation, fork);
+                        spec.get_domain(data.target.epoch, Domain::Attestation, fork);

                     let mut agg_sig = AggregateSignature::new();
                     agg_sig.add(&Signature::new(
@@ -323,9 +327,9 @@ where
                 };

                 let attestation = Attestation {
-                    aggregation_bitfield,
+                    aggregation_bits,
                     data,
-                    custody_bitfield,
+                    custody_bits,
                     signature,
                 };

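
`Bitfield` becomes the SSZ-spec `BitList`, whose length is fixed at construction. That removes the old trick of setting bit `committee_size` to force the length, and makes `set` fallible: out-of-range writes return `Err` instead of silently growing the list. A small sketch of that API difference using the `ssz_types` crate, where the `typenum` bound is an assumed maximum committee size:

```rust
use ssz_types::{typenum::U128, BitList};

fn main() {
    let committee_size = 64;

    // The length is part of the value from the start; no sentinel bit needed.
    let mut aggregation_bits: BitList<U128> =
        BitList::with_capacity(committee_size).expect("capacity within maximum");

    // In-range writes succeed...
    aggregation_bits.set(3, true).expect("index within list length");

    // ...while out-of-range writes fail instead of resizing the list.
    assert!(aggregation_bits.set(committee_size, false).is_err());
}
```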
@@ -337,6 +341,50 @@ where
             });
         }
     }

+    /// Creates two forks:
+    ///
+    /// - The "honest" fork: created by the `honest_validators` who have built `honest_fork_blocks`
+    /// on the head
+    /// - The "faulty" fork: created by the `faulty_validators` who skipped a slot and
+    /// then built `faulty_fork_blocks`.
+    ///
+    /// Returns `(honest_head, faulty_head)`, the roots of the blocks at the top of each chain.
+    pub fn generate_two_forks_by_skipping_a_block(
+        &self,
+        honest_validators: &[usize],
+        faulty_validators: &[usize],
+        honest_fork_blocks: usize,
+        faulty_fork_blocks: usize,
+    ) -> (Hash256, Hash256) {
+        let initial_head_slot = self.chain.head().beacon_block.slot;
+
+        // Move to the next slot so we may produce some more blocks on the head.
+        self.advance_slot();
+
+        // Extend the chain with blocks where only honest validators agree.
+        let honest_head = self.extend_chain(
+            honest_fork_blocks,
+            BlockStrategy::OnCanonicalHead,
+            AttestationStrategy::SomeValidators(honest_validators.to_vec()),
+        );
+
+        // Go back to the last block where all agreed, and build blocks upon it where only faulty nodes
+        // agree.
+        let faulty_head = self.extend_chain(
+            faulty_fork_blocks,
+            BlockStrategy::ForkCanonicalChainAt {
+                previous_slot: initial_head_slot,
+                // `initial_head_slot + 2` means one slot is skipped.
+                first_slot: initial_head_slot + 2,
+            },
+            AttestationStrategy::SomeValidators(faulty_validators.to_vec()),
+        );
+
+        assert!(honest_head != faulty_head, "forks should be distinct");
+
+        (honest_head, faulty_head)
+    }
+
     /// Returns the secret key for the given validator index.
     fn get_sk(&self, validator_index: usize) -> &SecretKey {
         &self.keypairs[validator_index].sk

@ -24,7 +24,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness<TestForkChoice, Min
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn fork() {
|
fn chooses_fork() {
|
||||||
let harness = get_harness(VALIDATOR_COUNT);
|
let harness = get_harness(VALIDATOR_COUNT);
|
||||||
|
|
||||||
let two_thirds = (VALIDATOR_COUNT / 3) * 2;
|
let two_thirds = (VALIDATOR_COUNT / 3) * 2;
|
||||||
@@ -44,25 +44,11 @@ fn fork() {
         AttestationStrategy::AllValidators,
     );

-    // Move to the next slot so we may produce some more blocks on the head.
-    harness.advance_slot();
-
-    // Extend the chain with blocks where only honest validators agree.
-    let honest_head = harness.extend_chain(
+    let (honest_head, faulty_head) = harness.generate_two_forks_by_skipping_a_block(
+        &honest_validators,
+        &faulty_validators,
         honest_fork_blocks,
-        BlockStrategy::OnCanonicalHead,
-        AttestationStrategy::SomeValidators(honest_validators.clone()),
-    );
-
-    // Go back to the last block where all agreed, and build blocks upon it where only faulty nodes
-    // agree.
-    let faulty_head = harness.extend_chain(
         faulty_fork_blocks,
-        BlockStrategy::ForkCanonicalChainAt {
-            previous_slot: Slot::from(initial_blocks),
-            first_slot: Slot::from(initial_blocks + 2),
-        },
-        AttestationStrategy::SomeValidators(faulty_validators.clone()),
     );

     assert!(honest_head != faulty_head, "forks should be distinct");
@@ -106,12 +92,12 @@ fn finalizes_with_full_participation() {
         "head should be at the expected epoch"
     );
     assert_eq!(
-        state.current_justified_epoch,
+        state.current_justified_checkpoint.epoch,
         state.current_epoch() - 1,
         "the head should be justified one behind the current epoch"
     );
     assert_eq!(
-        state.finalized_epoch,
+        state.finalized_checkpoint.epoch,
         state.current_epoch() - 2,
         "the head should be finalized two behind the current epoch"
     );
@@ -149,12 +135,12 @@ fn finalizes_with_two_thirds_participation() {
     // included in blocks during that epoch.

     assert_eq!(
-        state.current_justified_epoch,
+        state.current_justified_checkpoint.epoch,
         state.current_epoch() - 2,
         "the head should be justified two behind the current epoch"
     );
     assert_eq!(
-        state.finalized_epoch,
+        state.finalized_checkpoint.epoch,
         state.current_epoch() - 4,
         "the head should be finalized three behind the current epoch"
     );
@@ -188,11 +174,11 @@ fn does_not_finalize_with_less_than_two_thirds_participation() {
         "head should be at the expected epoch"
     );
     assert_eq!(
-        state.current_justified_epoch, 0,
+        state.current_justified_checkpoint.epoch, 0,
         "no epoch should have been justified"
     );
     assert_eq!(
-        state.finalized_epoch, 0,
+        state.finalized_checkpoint.epoch, 0,
         "no epoch should have been finalized"
     );
 }
@@ -221,11 +207,11 @@ fn does_not_finalize_without_attestation() {
         "head should be at the expected epoch"
     );
     assert_eq!(
-        state.current_justified_epoch, 0,
+        state.current_justified_checkpoint.epoch, 0,
         "no epoch should have been justified"
     );
     assert_eq!(
-        state.finalized_epoch, 0,
+        state.finalized_checkpoint.epoch, 0,
         "no epoch should have been finalized"
     );
 }
@@ -246,10 +232,10 @@ fn roundtrip_operation_pool() {

     // Add some deposits
     let rng = &mut XorShiftRng::from_seed([66; 16]);
-    for _ in 0..rng.gen_range(1, VALIDATOR_COUNT) {
+    for i in 0..rng.gen_range(1, VALIDATOR_COUNT) {
         harness
             .chain
-            .process_deposit(Deposit::random_for_test(rng))
+            .process_deposit(i as u64, Deposit::random_for_test(rng))
             .unwrap();
     }
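The new leading argument to `process_deposit` looks like a deposit index: the loop feeds `0, 1, 2, ...` so each random deposit is tagged with its position. A standalone sketch of the index-tagging pattern, with a hypothetical `process_deposit` that only records the pairing:

    // Standalone sketch: tagging each generated item with a monotonically
    // increasing index, as the updated test loop does with `i as u64`.
    // `process_deposit` here is hypothetical and only records the pairing.
    fn process_deposit(index: u64, amount_gwei: u64) -> Result<(), String> {
        println!("deposit #{}: {} Gwei", index, amount_gwei);
        Ok(())
    }

    fn main() {
        for (i, amount) in [32, 16, 8].iter().enumerate() {
            process_deposit(i as u64, *amount).unwrap();
        }
    }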
@@ -7,10 +7,9 @@ edition = "2018"
 [dependencies]
 beacon_chain = { path = "../beacon_chain" }
 network = { path = "../network" }
-store = { path = "../store" }
 http_server = { path = "../http_server" }
-eth2-libp2p = { path = "../eth2-libp2p" }
 rpc = { path = "../rpc" }
+rest_api = { path = "../rest_api" }
 prometheus = "^0.6"
 types = { path = "../../eth2/types" }
 tree_hash = { path = "../../eth2/utils/tree_hash" }
@@ -19,11 +18,10 @@ slot_clock = { path = "../../eth2/utils/slot_clock" }
 serde = "1.0.93"
 serde_derive = "1.0"
 error-chain = "0.12.0"
-eth2_ssz = { path = "../../eth2/utils/ssz" }
+serde_yaml = "0.8"
 slog = { version = "^2.2.3" , features = ["max_level_trace"] }
 slog-async = "^2.3.0"
 slog-json = "^2.3"
-slog-term = "^2.4.0"
 tokio = "0.1.15"
 clap = "2.32.0"
 dirs = "1.0.3"
@@ -1,27 +1,31 @@
+use crate::error::Result;
+use crate::{config::GenesisState, ClientConfig};
 use beacon_chain::{
     lmd_ghost::{LmdGhost, ThreadSafeReducedTree},
     slot_clock::SystemTimeSlotClock,
     store::Store,
     BeaconChain, BeaconChainTypes,
 };
-use slog::{info, Logger};
+use slog::{crit, info, Logger};
 use slot_clock::SlotClock;
+use std::fs::File;
 use std::marker::PhantomData;
 use std::sync::Arc;
+use std::time::SystemTime;
 use tree_hash::TreeHash;
-use types::{test_utils::TestingBeaconStateBuilder, BeaconBlock, ChainSpec, EthSpec, Hash256};
-
-/// The number of initial validators when starting the `Minimal` testnet.
-const TESTNET_VALIDATOR_COUNT: usize = 16;
+use types::{
+    test_utils::TestingBeaconStateBuilder, BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256,
+};

 /// Provides a new, initialized `BeaconChain`
 pub trait InitialiseBeaconChain<T: BeaconChainTypes> {
     fn initialise_beacon_chain(
         store: Arc<T::Store>,
+        config: &ClientConfig,
         spec: ChainSpec,
         log: Logger,
-    ) -> BeaconChain<T> {
-        maybe_load_from_store_for_testnet::<_, T::Store, T::EthSpec>(store, spec, log)
+    ) -> Result<BeaconChain<T>> {
+        maybe_load_from_store_for_testnet::<_, T::Store, T::EthSpec>(store, config, spec, log)
     }
 }
@@ -42,43 +46,109 @@ impl<T: Store, E: EthSpec, X: BeaconChainTypes> InitialiseBeaconChain<X> for Cli
 /// Loads a `BeaconChain` from `store`, if it exists. Otherwise, create a new chain from genesis.
 fn maybe_load_from_store_for_testnet<T, U: Store, V: EthSpec>(
     store: Arc<U>,
+    config: &ClientConfig,
     spec: ChainSpec,
     log: Logger,
-) -> BeaconChain<T>
+) -> Result<BeaconChain<T>>
 where
     T: BeaconChainTypes<Store = U, EthSpec = V>,
     T::LmdGhost: LmdGhost<U, V>,
 {
-    if let Ok(Some(beacon_chain)) = BeaconChain::from_store(store.clone(), spec.clone()) {
-        info!(
-            log,
-            "Loaded BeaconChain from store";
-            "slot" => beacon_chain.head().beacon_state.slot,
-            "best_slot" => beacon_chain.best_slot(),
-        );
-
-        beacon_chain
+    let genesis_state = match &config.genesis_state {
+        GenesisState::Mainnet => {
+            crit!(log, "This release does not support mainnet genesis state.");
+            return Err("Mainnet is unsupported".into());
+        }
+        GenesisState::RecentGenesis { validator_count } => {
+            generate_testnet_genesis_state(*validator_count, recent_genesis_time(), &spec)
+        }
+        GenesisState::Generated {
+            validator_count,
+            genesis_time,
+        } => generate_testnet_genesis_state(*validator_count, *genesis_time, &spec),
+        GenesisState::Yaml { file } => {
+            let file = File::open(file).map_err(|e| {
+                format!("Unable to open YAML genesis state file {:?}: {:?}", file, e)
+            })?;
+
+            serde_yaml::from_reader(file)
+                .map_err(|e| format!("Unable to parse YAML genesis state file: {:?}", e))?
+        }
+    };
+
+    let mut genesis_block = BeaconBlock::empty(&spec);
+    genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root());
+    let genesis_block_root = genesis_block.canonical_root();
+
+    // Slot clock
+    let slot_clock = T::SlotClock::new(
+        spec.genesis_slot,
+        genesis_state.genesis_time,
+        spec.seconds_per_slot,
+    );
+
+    // Try load an existing `BeaconChain` from the store. If unable, create a new one.
+    if let Ok(Some(beacon_chain)) =
+        BeaconChain::from_store(store.clone(), spec.clone(), log.clone())
+    {
+        // Here we check to ensure that the `BeaconChain` loaded from store has the expected
+        // genesis block.
+        //
+        // Without this check, it's possible that there will be an existing DB with a `BeaconChain`
+        // that has different parameters than provided to this executable.
+        if beacon_chain.genesis_block_root == genesis_block_root {
+            info!(
+                log,
+                "Loaded BeaconChain from store";
+                "slot" => beacon_chain.head().beacon_state.slot,
+                "best_slot" => beacon_chain.best_slot(),
+            );
+
+            Ok(beacon_chain)
+        } else {
+            crit!(
+                log,
+                "The BeaconChain loaded from disk has an incorrect genesis root. \
+                 This may be caused by an old database located in the datadir."
+            );
+            Err("Incorrect genesis root".into())
+        }
     } else {
-        info!(log, "Initializing new BeaconChain from genesis");
-        let state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(
-            TESTNET_VALIDATOR_COUNT,
-            &spec,
-        );
-        let (genesis_state, _keypairs) = state_builder.build();
-
-        let mut genesis_block = BeaconBlock::empty(&spec);
-        genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root());
-
-        // Slot clock
-        let slot_clock = T::SlotClock::new(
-            spec.genesis_slot,
-            genesis_state.genesis_time,
-            spec.seconds_per_slot,
-        );
-
-        // Genesis chain
-        //TODO: Handle error correctly
-        BeaconChain::from_genesis(store, slot_clock, genesis_state, genesis_block, spec)
-            .expect("Terminate if beacon chain generation fails")
+        BeaconChain::from_genesis(
+            store,
+            slot_clock,
+            genesis_state,
+            genesis_block,
+            spec,
+            log.clone(),
+        )
+        .map_err(|e| format!("Failed to initialize new beacon chain: {:?}", e).into())
     }
 }
+
+fn generate_testnet_genesis_state<E: EthSpec>(
+    validator_count: usize,
+    genesis_time: u64,
+    spec: &ChainSpec,
+) -> BeaconState<E> {
+    let (mut genesis_state, _keypairs) =
+        TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, spec)
+            .build();
+
+    genesis_state.genesis_time = genesis_time;
+
+    genesis_state
+}
+
+/// Returns the system time, mod 30 minutes.
+///
+/// Used for easily creating testnets.
+fn recent_genesis_time() -> u64 {
+    let now = SystemTime::now()
+        .duration_since(SystemTime::UNIX_EPOCH)
+        .unwrap()
+        .as_secs();
+    let secs_after_last_period = now.checked_rem(30 * 60).unwrap_or(0);
+    // genesis is now the last 30 minute block.
+    now - secs_after_last_period
+}
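A quick check of the mod-30-minutes arithmetic in `recent_genesis_time`, using a fixed hypothetical timestamp instead of the real system clock:

    // Standalone sketch: snap a timestamp back to the start of its 30-minute window.
    fn floor_to_period(now: u64, period: u64) -> u64 {
        now - now.checked_rem(period).unwrap_or(0)
    }

    fn main() {
        let period = 30 * 60; // 1800 seconds
        let now = 1_000_000; // hypothetical UNIX time
        // 1_000_000 % 1800 == 1000, so genesis snaps back to 999_000.
        assert_eq!(floor_to_period(now, period), 999_000);
        println!("{}", floor_to_period(now, period));
    }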
@@ -7,6 +7,12 @@ use std::fs::{self, OpenOptions};
 use std::path::PathBuf;
 use std::sync::Mutex;

+/// The number of initial validators when starting the `Minimal` testnet.
+const TESTNET_VALIDATOR_COUNT: usize = 16;
+
+/// The spec constants to use when starting the `Minimal` testnet.
+const TESTNET_SPEC_CONSTANTS: &str = "minimal";
+
 /// The core configuration of a Lighthouse beacon node.
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct Config {
@@ -14,9 +20,33 @@ pub struct Config {
     pub db_type: String,
     db_name: String,
     pub log_file: PathBuf,
+    pub spec_constants: String,
+    pub genesis_state: GenesisState,
     pub network: network::NetworkConfig,
     pub rpc: rpc::RPCConfig,
     pub http: HttpServerConfig,
+    pub rest_api: rest_api::APIConfig,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(tag = "type")]
+pub enum GenesisState {
+    /// Use the mainnet genesis state.
+    ///
+    /// Mainnet genesis state is not presently known, so this is a place-holder.
+    Mainnet,
+    /// Generate a state with `validator_count` validators, all with well-known secret keys.
+    ///
+    /// Set the genesis time to be the start of the previous 30-minute window.
+    RecentGenesis { validator_count: usize },
+    /// Generate a state with `genesis_time` and `validator_count` validators, all with well-known
+    /// secret keys.
+    Generated {
+        validator_count: usize,
+        genesis_time: u64,
+    },
+    /// Load a YAML-encoded genesis state from a file.
+    Yaml { file: PathBuf },
 }

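Because `GenesisState` is marked `#[serde(tag = "type")]`, each variant serializes as a map whose `type` field names the variant. A standalone sketch (minimal redefined types, with `serde` and `serde_yaml` assumed as dependencies) of the representation a YAML config would use:

    // Standalone sketch of the internally-tagged representation; the types here
    // mirror the diff above but are redefined so the example compiles on its own.
    use serde::Serialize;

    #[derive(Serialize)]
    #[serde(tag = "type")]
    #[allow(dead_code)]
    enum GenesisState {
        Mainnet,
        RecentGenesis { validator_count: usize },
    }

    fn main() {
        let g = GenesisState::RecentGenesis { validator_count: 16 };
        // Expected YAML (the `type` field selects the variant):
        //   type: RecentGenesis
        //   validator_count: 16
        println!("{}", serde_yaml::to_string(&g).unwrap());
    }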
@@ -31,6 +61,11 @@ impl Default for Config {
             network: NetworkConfig::new(),
             rpc: rpc::RPCConfig::default(),
             http: HttpServerConfig::default(),
+            rest_api: rest_api::APIConfig::default(),
+            spec_constants: TESTNET_SPEC_CONSTANTS.into(),
+            genesis_state: GenesisState::RecentGenesis {
+                validator_count: TESTNET_VALIDATOR_COUNT,
+            },
         }
     }
 }
@@ -101,6 +136,7 @@ impl Config {
         self.network.apply_cli_args(args)?;
         self.rpc.apply_cli_args(args)?;
         self.http.apply_cli_args(args)?;
+        self.rest_api.apply_cli_args(args)?;

         if let Some(log_file) = args.value_of("logfile") {
             self.log_file = PathBuf::from(log_file);
@@ -2,6 +2,7 @@ extern crate slog;

 mod beacon_chain_types;
 mod config;

 pub mod error;
 pub mod notifier;

@@ -39,6 +40,8 @@ pub struct Client<T: BeaconChainTypes> {
     pub http_exit_signal: Option<Signal>,
     /// Signal to terminate the slot timer.
     pub slot_timer_exit_signal: Option<Signal>,
+    /// Signal to terminate the API
+    pub api_exit_signal: Option<Signal>,
     /// The client's logger.
     log: slog::Logger,
     /// Marker to pin the beacon chain generics.
@@ -64,9 +67,10 @@ where
         // Load a `BeaconChain` from the store, or create a new one if it does not exist.
         let beacon_chain = Arc::new(T::initialise_beacon_chain(
             store,
+            &client_config,
             eth2_config.spec.clone(),
             log.clone(),
-        ));
+        )?);
         // Register all beacon chain metrics with the global registry.
         beacon_chain
             .metrics
@@ -87,7 +91,7 @@ where
         let slots_since_genesis = beacon_chain.slots_since_genesis().unwrap();
         info!(
             log,
-            "Initializing state";
+            "BeaconState cache init";
             "state_slot" => state_slot,
             "wall_clock_slot" => wall_clock_slot,
             "slots_since_genesis" => slots_since_genesis,
@@ -95,12 +99,6 @@ where
         );
     }
     do_state_catchup(&beacon_chain, &log);
-    info!(
-        log,
-        "State initialized";
-        "state_slot" => beacon_chain.head().beacon_state.slot,
-        "wall_clock_slot" => beacon_chain.read_slot_clock().unwrap(),
-    );

     // Start the network service, libp2p and syncing threads
     // TODO: Add beacon_chain reference to network parameters
@@ -143,6 +141,24 @@ where
             None
         };

+        // Start the `rest_api` service
+        let api_exit_signal = if client_config.rest_api.enabled {
+            match rest_api::start_server(
+                &client_config.rest_api,
+                executor,
+                beacon_chain.clone(),
+                &log,
+            ) {
+                Ok(s) => Some(s),
+                Err(e) => {
+                    error!(log, "API service failed to start."; "error" => format!("{:?}", e));
+                    None
+                }
+            }
+        } else {
+            None
+        };
+
         let (slot_timer_exit_signal, exit) = exit_future::signal();
         if let Ok(Some(duration_to_next_slot)) = beacon_chain.slot_clock.duration_to_next_slot() {
             // set up the validator work interval - start at next slot and proceed every slot
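Each optional service follows the same start-or-degrade shape: try to start, keep the exit signal on success, log and continue with `None` on failure. A standalone sketch of that shape with hypothetical `Signal` and `start_server` stand-ins:

    // Hypothetical stand-ins for the exit-future `Signal` and a fallible server start.
    struct Signal;

    fn start_server(port: u16) -> Result<Signal, String> {
        if port == 0 {
            Err("no port configured".into())
        } else {
            Ok(Signal)
        }
    }

    // Mirrors the `api_exit_signal` logic: a failed optional service degrades the
    // node (log and return `None`) instead of aborting startup.
    fn start_optional(enabled: bool, port: u16) -> Option<Signal> {
        if !enabled {
            return None;
        }
        match start_server(port) {
            Ok(signal) => Some(signal),
            Err(e) => {
                eprintln!("API service failed to start: {}", e);
                None
            }
        }
    }

    fn main() {
        assert!(start_optional(false, 5052).is_none()); // disabled: never started
        assert!(start_optional(true, 0).is_none()); // enabled but failed: degraded
        assert!(start_optional(true, 5052).is_some()); // running: keep exit signal
    }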
@@ -175,6 +191,7 @@ where
             http_exit_signal,
             rpc_exit_signal,
             slot_timer_exit_signal: Some(slot_timer_exit_signal),
+            api_exit_signal,
             log,
             network,
             phantom: PhantomData,
@@ -190,29 +207,38 @@ impl<T: BeaconChainTypes> Drop for Client<T> {
 }

 fn do_state_catchup<T: BeaconChainTypes>(chain: &Arc<BeaconChain<T>>, log: &slog::Logger) {
-    if let Some(genesis_height) = chain.slots_since_genesis() {
-        let result = chain.catchup_state();
-
-        let common = o!(
-            "best_slot" => chain.head().beacon_block.slot,
-            "latest_block_root" => format!("{}", chain.head().beacon_block_root),
-            "wall_clock_slot" => chain.read_slot_clock().unwrap(),
-            "state_slot" => chain.head().beacon_state.slot,
-            "slots_since_genesis" => genesis_height,
-        );
-
-        match result {
-            Ok(_) => info!(
-                log,
-                "NewSlot";
-                common
-            ),
-            Err(e) => error!(
-                log,
-                "StateCatchupFailed";
-                "error" => format!("{:?}", e),
-                common
-            ),
-        };
-    }
+    // Only attempt to `catchup_state` if we can read the slot clock.
+    if let Some(current_slot) = chain.read_slot_clock() {
+        let state_catchup_result = chain.catchup_state();
+
+        let best_slot = chain.head().beacon_block.slot;
+        let latest_block_root = chain.head().beacon_block_root;
+
+        let common = o!(
+            "skip_slots" => current_slot.saturating_sub(best_slot),
+            "best_block_root" => format!("{}", latest_block_root),
+            "best_block_slot" => best_slot,
+            "slot" => current_slot,
+        );
+
+        if let Err(e) = state_catchup_result {
+            error!(
+                log,
+                "State catchup failed";
+                "error" => format!("{:?}", e),
+                common
+            )
+        } else {
+            info!(
+                log,
+                "Slot start";
+                common
+            )
+        }
+    } else {
+        error!(
+            log,
+            "Beacon chain running whilst slot clock is unavailable."
+        );
+    };
 }
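The `skip_slots` key uses `saturating_sub` because slots are unsigned: if the head block were ever ahead of the wall clock, plain subtraction would underflow. A standalone sketch with bare `u64` slots:

    // Why `saturating_sub` rather than `-`: an unsigned underflow would panic
    // in debug builds and wrap in release builds.
    fn skip_slots(current_slot: u64, best_slot: u64) -> u64 {
        current_slot.saturating_sub(best_slot)
    }

    fn main() {
        assert_eq!(skip_slots(10, 7), 3); // three empty slots since the best block
        assert_eq!(skip_slots(7, 10), 0); // clamps at zero instead of underflowing
    }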
@@ -2,7 +2,7 @@ use crate::Client;
 use beacon_chain::BeaconChainTypes;
 use exit_future::Exit;
 use futures::{Future, Stream};
-use slog::{debug, o};
+use slog::{debug, o, warn};
 use std::time::{Duration, Instant};
 use tokio::runtime::TaskExecutor;
 use tokio::timer::Interval;
@@ -10,6 +10,9 @@ use tokio::timer::Interval;
 /// The interval between heartbeat events.
 pub const HEARTBEAT_INTERVAL_SECONDS: u64 = 15;

+/// Create a warning log whenever the peer count is at or below this value.
+pub const WARN_PEER_COUNT: usize = 1;
+
 /// Spawns a thread that can be used to run code periodically, on `HEARTBEAT_INTERVAL_SECONDS`
 /// durations.
 ///
@@ -30,9 +33,16 @@ pub fn run<T: BeaconChainTypes + Send + Sync + 'static>(
     let libp2p = client.network.libp2p_service();

     let heartbeat = move |_| {
-        // Notify the number of connected nodes
-        // Panic if libp2p is poisoned
-        debug!(log, ""; "Connected Peers" => libp2p.lock().swarm.connected_peers());
+        // Number of libp2p (not discv5) peers connected.
+        //
+        // Panics if libp2p is poisoned.
+        let connected_peer_count = libp2p.lock().swarm.connected_peers();
+
+        debug!(log, "libp2p"; "peer_count" => connected_peer_count);
+
+        if connected_peer_count <= WARN_PEER_COUNT {
+            warn!(log, "Low libp2p peer count"; "peer_count" => connected_peer_count);
+        }

         Ok(())
     };
@@ -5,7 +5,6 @@ authors = ["Age Manning <Age@AgeManning.com>"]
 edition = "2018"

 [dependencies]
-beacon_chain = { path = "../beacon_chain" }
 clap = "2.32.0"
 #SigP repository
 libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "be5710bbde69d8c5be732c13ba64239e2f370a7b" }
@@ -18,31 +18,31 @@ use slog::{o, trace, warn};
 use ssz::{ssz_encode, Decode, DecodeError, Encode};
 use std::num::NonZeroU32;
 use std::time::Duration;
-use types::{Attestation, BeaconBlock};
+use types::{Attestation, BeaconBlock, EthSpec};

 /// Builds the network behaviour that manages the core protocols of eth2.
 /// This core behaviour is managed by `Behaviour` which adds peer management to all core
 /// behaviours.
 #[derive(NetworkBehaviour)]
-#[behaviour(out_event = "BehaviourEvent", poll_method = "poll")]
-pub struct Behaviour<TSubstream: AsyncRead + AsyncWrite> {
+#[behaviour(out_event = "BehaviourEvent<E>", poll_method = "poll")]
+pub struct Behaviour<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> {
     /// The routing pub-sub mechanism for eth2.
     gossipsub: Gossipsub<TSubstream>,
     /// The serenity RPC specified in the wire-0 protocol.
-    serenity_rpc: RPC<TSubstream>,
+    serenity_rpc: RPC<TSubstream, E>,
     /// Keep regular connection to peers and disconnect if absent.
     ping: Ping<TSubstream>,
     /// Kademlia for peer discovery.
     discovery: Discovery<TSubstream>,
     #[behaviour(ignore)]
     /// The events generated by this behaviour to be consumed in the swarm poll.
-    events: Vec<BehaviourEvent>,
+    events: Vec<BehaviourEvent<E>>,
     /// Logger for behaviour actions.
     #[behaviour(ignore)]
     log: slog::Logger,
 }

-impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
+impl<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> Behaviour<TSubstream, E> {
     pub fn new(
         local_key: &Keypair,
         net_conf: &NetworkConfig,
@@ -68,8 +68,8 @@ impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
 }

 // Implement the NetworkBehaviourEventProcess trait so that we can derive NetworkBehaviour for Behaviour
-impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<GossipsubEvent>
-    for Behaviour<TSubstream>
+impl<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> NetworkBehaviourEventProcess<GossipsubEvent>
+    for Behaviour<TSubstream, E>
 {
     fn inject_event(&mut self, event: GossipsubEvent) {
         match event {
@@ -101,8 +101,8 @@ impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<GossipsubE
     }
 }

-impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<RPCMessage>
-    for Behaviour<TSubstream>
+impl<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> NetworkBehaviourEventProcess<RPCMessage>
+    for Behaviour<TSubstream, E>
 {
     fn inject_event(&mut self, event: RPCMessage) {
         match event {
@@ -119,19 +119,19 @@ impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<RPCMessage
     }
 }

-impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<PingEvent>
-    for Behaviour<TSubstream>
+impl<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> NetworkBehaviourEventProcess<PingEvent>
+    for Behaviour<TSubstream, E>
 {
     fn inject_event(&mut self, _event: PingEvent) {
         // not interested in ping responses at the moment.
     }
 }

-impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
+impl<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> Behaviour<TSubstream, E> {
     /// Consumes the events list when polled.
     fn poll<TBehaviourIn>(
         &mut self,
-    ) -> Async<NetworkBehaviourAction<TBehaviourIn, BehaviourEvent>> {
+    ) -> Async<NetworkBehaviourAction<TBehaviourIn, BehaviourEvent<E>>> {
         if !self.events.is_empty() {
             return Async::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0)));
         }
@@ -140,8 +140,8 @@ impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
     }
 }

-impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<Discv5Event>
-    for Behaviour<TSubstream>
+impl<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> NetworkBehaviourEventProcess<Discv5Event>
+    for Behaviour<TSubstream, E>
 {
     fn inject_event(&mut self, _event: Discv5Event) {
         // discv5 has no events to inject
@@ -149,7 +149,7 @@ impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<Discv5Even
 }

 /// Implements the combined behaviour for the libp2p service.
-impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
+impl<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> Behaviour<TSubstream, E> {
     /* Pubsub behaviour functions */

     /// Subscribes to a gossipsub topic.
@@ -158,7 +158,7 @@ impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
     }

     /// Publishes a message on the pubsub (gossipsub) behaviour.
-    pub fn publish(&mut self, topics: Vec<Topic>, message: PubsubMessage) {
+    pub fn publish(&mut self, topics: Vec<Topic>, message: PubsubMessage<E>) {
         let message_bytes = ssz_encode(&message);
         for topic in topics {
             self.gossipsub.publish(topic, message_bytes.clone());
@@ -179,28 +179,28 @@ impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
 }

 /// The types of events that can be obtained from polling the behaviour.
-pub enum BehaviourEvent {
+pub enum BehaviourEvent<E: EthSpec> {
     RPC(PeerId, RPCEvent),
     PeerDialed(PeerId),
     PeerDisconnected(PeerId),
     GossipMessage {
         source: PeerId,
         topics: Vec<TopicHash>,
-        message: Box<PubsubMessage>,
+        message: Box<PubsubMessage<E>>,
     },
 }

 /// Messages that are passed to and from the pubsub (Gossipsub) behaviour.
 #[derive(Debug, Clone, PartialEq)]
-pub enum PubsubMessage {
+pub enum PubsubMessage<E: EthSpec> {
     /// Gossipsub message providing notification of a new block.
-    Block(BeaconBlock),
+    Block(BeaconBlock<E>),
     /// Gossipsub message providing notification of a new attestation.
-    Attestation(Attestation),
+    Attestation(Attestation<E>),
 }

 //TODO: Correctly encode/decode enums. Prefixing with integer for now.
-impl Encode for PubsubMessage {
+impl<E: EthSpec> Encode for PubsubMessage<E> {
     fn is_ssz_fixed_len() -> bool {
         false
     }
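The `//TODO` kept above refers to prefixing each SSZ payload with an integer that selects the enum variant. A standalone sketch of such a scheme, using hypothetical helpers and a single prefix byte for simplicity:

    // Standalone sketch of variant-prefix encoding: one leading byte selects the
    // variant, the remaining bytes are the variant's own SSZ payload.
    fn encode_with_prefix(variant: u8, payload: &[u8]) -> Vec<u8> {
        let mut bytes = Vec::with_capacity(1 + payload.len());
        bytes.push(variant);
        bytes.extend_from_slice(payload);
        bytes
    }

    fn decode_prefix(bytes: &[u8]) -> Option<(u8, &[u8])> {
        // Returns (variant, payload), or None for an empty buffer.
        bytes.split_first().map(|(v, rest)| (*v, rest))
    }

    fn main() {
        let encoded = encode_with_prefix(0, b"block-bytes"); // 0 => Block, say
        let (variant, payload) = decode_prefix(&encoded).unwrap();
        assert_eq!(variant, 0);
        assert_eq!(payload, &b"block-bytes"[..]);
    }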
@@ -229,7 +229,7 @@ impl Encode for PubsubMessage {
     }
 }

-impl Decode for PubsubMessage {
+impl<E: EthSpec> Decode for PubsubMessage<E> {
     fn is_ssz_fixed_len() -> bool {
         false
     }
@@ -264,7 +264,9 @@ mod test {

     #[test]
     fn ssz_encoding() {
-        let original = PubsubMessage::Block(BeaconBlock::empty(&MainnetEthSpec::default_spec()));
+        let original = PubsubMessage::Block(BeaconBlock::<MainnetEthSpec>::empty(
+            &MainnetEthSpec::default_spec(),
+        ));

         let encoded = ssz_encode(&original);

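Once `BeaconBlock` takes a type parameter, the test must name the spec with a turbofish, since nothing else in the expression pins `E`. A standalone sketch of the same inference problem:

    use std::marker::PhantomData;

    // A generic constructor gives the compiler nothing to infer `E` from...
    struct Block<E> {
        _spec: PhantomData<E>,
    }

    impl<E> Block<E> {
        fn empty() -> Self {
            Block { _spec: PhantomData }
        }
    }

    struct MainnetSpec;

    fn main() {
        // ...so the spec must be named explicitly with a turbofish, just as the
        // updated test names `BeaconBlock::<MainnetEthSpec>`.
        let _block = Block::<MainnetSpec>::empty();
        println!("constructed a Block<MainnetSpec>");
    }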
@@ -54,7 +54,7 @@ impl Default for Config {
         network_dir.push("network");
         Config {
             network_dir,
-            listen_address: "127.0.0.1".parse().expect("vaild ip address"),
+            listen_address: "127.0.0.1".parse().expect("valid ip address"),
             libp2p_port: 9000,
             discovery_address: "127.0.0.1".parse().expect("valid ip address"),
             discovery_port: 9000,
@@ -79,10 +79,16 @@ impl Config {
     }

     pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), String> {
+        // If a `datadir` has been specified, set the network dir to be inside it.
         if let Some(dir) = args.value_of("datadir") {
            self.network_dir = PathBuf::from(dir).join("network");
         };

+        // If a network dir has been specified, override the `datadir` definition.
+        if let Some(dir) = args.value_of("network-dir") {
+            self.network_dir = PathBuf::from(dir);
+        };
+
         if let Some(listen_address_str) = args.value_of("listen-address") {
             let listen_address = listen_address_str
                 .parse()
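Because the `network-dir` branch runs after the `datadir` branch, an explicit network dir wins whenever both flags are given. A standalone sketch of that precedence, with plain `Option`s standing in for the `clap` matches:

    use std::path::PathBuf;

    // Hypothetical flag values standing in for `args.value_of(...)`.
    fn resolve_network_dir(datadir: Option<&str>, network_dir: Option<&str>) -> Option<PathBuf> {
        let mut resolved = datadir.map(|d| PathBuf::from(d).join("network"));
        if let Some(dir) = network_dir {
            // Applied second, so it overrides the datadir-derived default.
            resolved = Some(PathBuf::from(dir));
        }
        resolved
    }

    fn main() {
        assert_eq!(
            resolve_network_dir(Some("/data"), None),
            Some(PathBuf::from("/data/network"))
        );
        assert_eq!(
            resolve_network_dir(Some("/data"), Some("/net")),
            Some(PathBuf::from("/net"))
        );
    }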
@@ -271,7 +271,7 @@ fn load_enr(
     // Note: Discovery should update the ENR record's IP to the external IP as seen by the
     // majority of our peers.
     let mut local_enr = EnrBuilder::new()
-        .ip(config.discovery_address.into())
+        .ip(config.discovery_address)
         .tcp(config.libp2p_port)
         .udp(config.discovery_port)
         .build(&local_key)
@@ -318,7 +318,7 @@ fn load_enr(
     Ok(local_enr)
 }

-fn save_enr_to_disc(dir: &Path, enr: &Enr, log: &slog::Logger) -> () {
+fn save_enr_to_disc(dir: &Path, enr: &Enr, log: &slog::Logger) {
     let _ = std::fs::create_dir_all(dir);
     match File::create(dir.join(Path::new(ENR_FILENAME)))
         .and_then(|mut f| f.write_all(&enr.to_base64().as_bytes()))
@@ -65,7 +65,7 @@ where
         dst.clear();
         dst.reserve(1);
         dst.put_u8(item.as_u8());
-        return self.inner.encode(item, dst);
+        self.inner.encode(item, dst)
     }
 }

@@ -120,16 +120,14 @@ where

         if RPCErrorResponse::is_response(response_code) {
             // decode an actual response
-            return self
-                .inner
+            self.inner
                 .decode(src)
-                .map(|r| r.map(|resp| RPCErrorResponse::Success(resp)));
+                .map(|r| r.map(RPCErrorResponse::Success))
         } else {
             // decode an error
-            return self
-                .inner
+            self.inner
                 .decode_error(src)
-                .map(|r| r.map(|resp| RPCErrorResponse::from_error(response_code, resp)));
+                .map(|r| r.map(|resp| RPCErrorResponse::from_error(response_code, resp)))
         }
     }
 }
@@ -2,6 +2,7 @@ use super::methods::{RPCErrorResponse, RPCResponse, RequestId};
 use super::protocol::{RPCError, RPCProtocol, RPCRequest};
 use super::RPCEvent;
 use crate::rpc::protocol::{InboundFramed, OutboundFramed};
+use core::marker::PhantomData;
 use fnv::FnvHashMap;
 use futures::prelude::*;
 use libp2p::core::protocols_handler::{
@@ -11,14 +12,16 @@ use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade};
 use smallvec::SmallVec;
 use std::time::{Duration, Instant};
 use tokio_io::{AsyncRead, AsyncWrite};
+use types::EthSpec;

 /// The time (in seconds) before a substream that is awaiting a response times out.
 pub const RESPONSE_TIMEOUT: u64 = 9;

 /// Implementation of `ProtocolsHandler` for the RPC protocol.
-pub struct RPCHandler<TSubstream>
+pub struct RPCHandler<TSubstream, E>
 where
     TSubstream: AsyncRead + AsyncWrite,
+    E: EthSpec,
 {
     /// The upgrade for inbound substreams.
     listen_protocol: SubstreamProtocol<RPCProtocol>,
@@ -52,6 +55,9 @@ where

     /// After the given duration has elapsed, an inactive connection will shutdown.
     inactive_timeout: Duration,
+
+    /// Phantom EthSpec.
+    _phantom: PhantomData<E>,
 }

 /// An outbound substream is waiting a response from the user.
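Rust rejects a struct whose type parameter appears in no field, so the otherwise-unused `E` has to be pinned with a zero-sized `PhantomData`, as the hunk above does. A standalone sketch of the pattern:

    use std::marker::PhantomData;

    // A marker trait standing in for `EthSpec` in this sketch.
    trait Spec {}
    struct Minimal;
    impl Spec for Minimal {}

    // `E` appears in no field, so without the `PhantomData` the compiler would
    // reject the declaration with error E0392 (unused type parameter).
    struct Handler<E: Spec> {
        inflight: usize,
        _phantom: PhantomData<E>,
    }

    fn main() {
        let h: Handler<Minimal> = Handler { inflight: 0, _phantom: PhantomData };
        println!("{}", h.inflight);
    }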
@@ -84,9 +90,10 @@ where
     },
 }

-impl<TSubstream> RPCHandler<TSubstream>
+impl<TSubstream, E> RPCHandler<TSubstream, E>
 where
     TSubstream: AsyncRead + AsyncWrite,
+    E: EthSpec,
 {
     pub fn new(
         listen_protocol: SubstreamProtocol<RPCProtocol>,
@@ -104,6 +111,7 @@ where
             max_dial_negotiated: 8,
             keep_alive: KeepAlive::Yes,
             inactive_timeout,
+            _phantom: PhantomData,
         }
     }

@@ -137,18 +145,20 @@ where
     }
 }

-impl<TSubstream> Default for RPCHandler<TSubstream>
+impl<TSubstream, E> Default for RPCHandler<TSubstream, E>
 where
     TSubstream: AsyncRead + AsyncWrite,
+    E: EthSpec,
 {
     fn default() -> Self {
         RPCHandler::new(SubstreamProtocol::new(RPCProtocol), Duration::from_secs(30))
     }
 }

-impl<TSubstream> ProtocolsHandler for RPCHandler<TSubstream>
+impl<TSubstream, E> ProtocolsHandler for RPCHandler<TSubstream, E>
 where
     TSubstream: AsyncRead + AsyncWrite,
+    E: EthSpec,
 {
     type InEvent = RPCEvent;
     type OutEvent = RPCEvent;
@@ -276,13 +286,8 @@ where
         }

         // remove any streams that have expired
-        self.waiting_substreams.retain(|_k, waiting_stream| {
-            if Instant::now() > waiting_stream.timeout {
-                false
-            } else {
-                true
-            }
-        });
+        self.waiting_substreams
+            .retain(|_k, waiting_stream| Instant::now() <= waiting_stream.timeout);

         // drive streams that need to be processed
         for n in (0..self.substreams.len()).rev() {
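`retain` keeps the entries for which the closure returns `true`, so the old `if`/`else` that returned `false`/`true` collapses into one comparison with the inequality flipped. A standalone sketch with a plain `HashMap`:

    use std::collections::HashMap;

    fn main() {
        let mut deadlines: HashMap<&str, u32> = HashMap::new();
        deadlines.insert("a", 5); // expires at tick 5
        deadlines.insert("b", 15); // expires at tick 15

        let now = 10;
        // Keep only entries whose deadline has not passed; `true` means keep.
        deadlines.retain(|_k, timeout| now <= *timeout);

        assert!(!deadlines.contains_key("a")); // expired, dropped
        assert!(deadlines.contains_key("b")); // still alive
    }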
@@ -334,7 +339,7 @@ where
                 }
                 Err(e) => {
                     return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(
-                        RPCEvent::Error(rpc_event.id(), e.into()),
+                        RPCEvent::Error(rpc_event.id(), e),
                     )))
                 }
             },
@@ -2,7 +2,7 @@

 use ssz::{impl_decode_via_from, impl_encode_via_from};
 use ssz_derive::{Decode, Encode};
-use types::{BeaconBlockBody, Epoch, Hash256, Slot};
+use types::{BeaconBlockBody, Epoch, EthSpec, Hash256, Slot};

 /* Request/Response data structures for RPC methods */

@@ -43,7 +43,7 @@ pub enum GoodbyeReason {
     ClientShutdown = 1,

     /// Incompatible networks.
-    IrreleventNetwork = 2,
+    IrrelevantNetwork = 2,

     /// Error/fault in the RPC.
     Fault = 3,
@@ -56,7 +56,7 @@ impl From<u64> for GoodbyeReason {
     fn from(id: u64) -> GoodbyeReason {
         match id {
             1 => GoodbyeReason::ClientShutdown,
-            2 => GoodbyeReason::IrreleventNetwork,
+            2 => GoodbyeReason::IrrelevantNetwork,
             3 => GoodbyeReason::Fault,
             _ => GoodbyeReason::Unknown,
         }
@@ -154,11 +154,11 @@ pub struct BeaconBlockBodiesResponse {
 }

 /// The decoded version of `BeaconBlockBodiesResponse` which is expected in `SimpleSync`.
-pub struct DecodedBeaconBlockBodiesResponse {
+pub struct DecodedBeaconBlockBodiesResponse<E: EthSpec> {
     /// The list of hashes sent in the request to get this response.
     pub block_roots: Vec<Hash256>,
     /// The valid decoded block bodies.
-    pub block_bodies: Vec<BeaconBlockBody>,
+    pub block_bodies: Vec<BeaconBlockBody<E>>,
 }

 /// Request values for tree hashes which yield a block's `state_root`.
@@ -16,6 +16,7 @@ pub use protocol::{RPCError, RPCProtocol, RPCRequest};
 use slog::o;
 use std::marker::PhantomData;
 use tokio::io::{AsyncRead, AsyncWrite};
+use types::EthSpec;

 pub(crate) mod codec;
 mod handler;
@@ -49,16 +50,16 @@ impl RPCEvent {

 /// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level
 /// logic.
-pub struct RPC<TSubstream> {
+pub struct RPC<TSubstream, E: EthSpec> {
     /// Queue of events to be processed.
     events: Vec<NetworkBehaviourAction<RPCEvent, RPCMessage>>,
     /// Pins the generic substream.
-    marker: PhantomData<TSubstream>,
+    marker: PhantomData<(TSubstream, E)>,
     /// Slog logger for RPC behaviour.
     _log: slog::Logger,
 }

-impl<TSubstream> RPC<TSubstream> {
+impl<TSubstream, E: EthSpec> RPC<TSubstream, E> {
     pub fn new(log: &slog::Logger) -> Self {
         let log = log.new(o!("Service" => "Libp2p-RPC"));
         RPC {
@@ -79,11 +80,12 @@ impl<TSubstream> RPC<TSubstream> {
     }
 }

-impl<TSubstream> NetworkBehaviour for RPC<TSubstream>
+impl<TSubstream, E> NetworkBehaviour for RPC<TSubstream, E>
 where
     TSubstream: AsyncRead + AsyncWrite,
+    E: EthSpec,
 {
-    type ProtocolsHandler = RPCHandler<TSubstream>;
+    type ProtocolsHandler = RPCHandler<TSubstream, E>;
     type OutEvent = RPCMessage;

     fn new_handler(&mut self) -> Self::ProtocolsHandler {
@@ -21,24 +21,25 @@ use std::fs::File;
 use std::io::prelude::*;
 use std::io::{Error, ErrorKind};
 use std::time::Duration;
+use types::EthSpec;

 type Libp2pStream = Boxed<(PeerId, StreamMuxerBox), Error>;
-type Libp2pBehaviour = Behaviour<Substream<StreamMuxerBox>>;
+type Libp2pBehaviour<E> = Behaviour<Substream<StreamMuxerBox>, E>;

 const NETWORK_KEY_FILENAME: &str = "key";

 /// The configuration and state of the libp2p components for the beacon node.
-pub struct Service {
+pub struct Service<E: EthSpec> {
     /// The libp2p Swarm handler.
     //TODO: Make this private
-    pub swarm: Swarm<Libp2pStream, Libp2pBehaviour>,
+    pub swarm: Swarm<Libp2pStream, Libp2pBehaviour<E>>,
     /// This node's PeerId.
     _local_peer_id: PeerId,
     /// The libp2p logger handle.
     pub log: slog::Logger,
 }

-impl Service {
+impl<E: EthSpec> Service<E> {
     pub fn new(config: NetworkConfig, log: slog::Logger) -> error::Result<Self> {
         debug!(log, "Network-libp2p Service starting");

@@ -103,8 +104,8 @@ impl Service {
     }
 }

-impl Stream for Service {
-    type Item = Libp2pEvent;
+impl<E: EthSpec> Stream for Service<E> {
+    type Item = Libp2pEvent<E>;
     type Error = crate::error::Error;

     fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
@@ -178,7 +179,7 @@ fn build_transport(local_private_key: Keypair) -> Boxed<(PeerId, StreamMuxerBox)
 }

 /// Events that can be obtained from polling the Libp2p Service.
-pub enum Libp2pEvent {
+pub enum Libp2pEvent<E: EthSpec> {
     /// An RPC response request has been received on the swarm.
     RPC(PeerId, RPCEvent),
     /// Initiated the connection to a new peer.
@@ -189,7 +190,7 @@ pub enum Libp2pEvent {
     PubsubMessage {
         source: PeerId,
         topics: Vec<TopicHash>,
-        message: Box<PubsubMessage>,
+        message: Box<PubsubMessage<E>>,
     },
 }

@@ -5,30 +5,19 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
 edition = "2018"

 [dependencies]
-bls = { path = "../../eth2/utils/bls" }
 beacon_chain = { path = "../beacon_chain" }
 iron = "^0.6"
 router = "^0.6"
 network = { path = "../network" }
-eth2-libp2p = { path = "../eth2-libp2p" }
-version = { path = "../version" }
 types = { path = "../../eth2/types" }
-eth2_ssz = { path = "../../eth2/utils/ssz" }
 slot_clock = { path = "../../eth2/utils/slot_clock" }
-protos = { path = "../../protos" }
-grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] }
 persistent = "^0.4"
-protobuf = "2.0.2"
 prometheus = { version = "^0.6", features = ["process"] }
 clap = "2.32.0"
-store = { path = "../store" }
-dirs = "1.0.3"
 futures = "0.1.23"
 serde = "1.0"
 serde_derive = "1.0"
 serde_json = "1.0"
 slog = { version = "^2.2.3" , features = ["max_level_trace"] }
-slog-term = "^2.4.0"
-slog-async = "^2.3.0"
 tokio = "0.1.17"
 exit-future = "0.1.4"
@@ -76,7 +76,7 @@ pub fn create_iron_http_server<T: BeaconChainTypes + 'static>(
 pub fn start_service<T: BeaconChainTypes + 'static>(
     config: &HttpServerConfig,
     executor: &TaskExecutor,
-    _network_chan: mpsc::UnboundedSender<NetworkMessage>,
+    _network_chan: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
     beacon_chain: Arc<BeaconChain<T>>,
     db_path: PathBuf,
     metrics_registry: Registry,
@@ -1,7 +1,7 @@
 use beacon_chain::{BeaconChain, BeaconChainTypes};
 use prometheus::{IntGauge, Opts, Registry};
 use slot_clock::SlotClock;
-use std::fs::File;
+use std::fs;
 use std::path::PathBuf;
 use types::{EthSpec, Slot};

@@ -13,6 +13,9 @@ pub struct LocalMetrics {
     present_slot: IntGauge,
     present_epoch: IntGauge,
     best_slot: IntGauge,
+    best_beacon_block_root: IntGauge,
+    justified_beacon_block_root: IntGauge,
+    finalized_beacon_block_root: IntGauge,
     validator_count: IntGauge,
     justified_epoch: IntGauge,
     finalized_epoch: IntGauge,
@@ -36,6 +39,24 @@ impl LocalMetrics {
                 let opts = Opts::new("best_slot", "slot_of_block_at_chain_head");
                 IntGauge::with_opts(opts)?
             },
+            best_beacon_block_root: {
+                let opts = Opts::new("best_beacon_block_root", "root_of_block_at_chain_head");
+                IntGauge::with_opts(opts)?
+            },
+            justified_beacon_block_root: {
+                let opts = Opts::new(
+                    "justified_beacon_block_root",
+                    "root_of_block_at_justified_head",
+                );
+                IntGauge::with_opts(opts)?
+            },
+            finalized_beacon_block_root: {
+                let opts = Opts::new(
+                    "finalized_beacon_block_root",
+                    "root_of_block_at_finalized_head",
+                );
+                IntGauge::with_opts(opts)?
+            },
             validator_count: {
                 let opts = Opts::new("validator_count", "number_of_validators");
                 IntGauge::with_opts(opts)?
@@ -64,6 +85,9 @@ impl LocalMetrics {
         registry.register(Box::new(self.present_slot.clone()))?;
         registry.register(Box::new(self.present_epoch.clone()))?;
         registry.register(Box::new(self.best_slot.clone()))?;
+        registry.register(Box::new(self.best_beacon_block_root.clone()))?;
+        registry.register(Box::new(self.justified_beacon_block_root.clone()))?;
+        registry.register(Box::new(self.finalized_beacon_block_root.clone()))?;
         registry.register(Box::new(self.validator_count.clone()))?;
         registry.register(Box::new(self.finalized_epoch.clone()))?;
         registry.register(Box::new(self.justified_epoch.clone()))?;
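
Note: the gauges above all follow the same `Opts` / `with_opts` / `register` pattern from the `prometheus` crate (already a dependency of this file). A minimal, runnable sketch of that pattern in isolation, reusing the `best_slot` gauge name from the patch:

use prometheus::{Encoder, IntGauge, Opts, Registry, TextEncoder};

fn main() -> Result<(), prometheus::Error> {
    let registry = Registry::new();

    // Same construction pattern as the patch: Opts -> IntGauge -> register.
    let best_slot = IntGauge::with_opts(Opts::new("best_slot", "slot_of_block_at_chain_head"))?;
    registry.register(Box::new(best_slot.clone()))?;

    best_slot.set(1234);

    // Render the scrape output, as a Prometheus server would see it.
    let mut buf = Vec::new();
    TextEncoder::new()
        .encode(&registry.gather(), &mut buf)
        .expect("encoding to a Vec cannot fail");
    println!("{}", String::from_utf8(buf).expect("text format is UTF-8"));
    Ok(())
}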
@@ -87,20 +111,44 @@ impl LocalMetrics {
             .set(present_slot.epoch(T::EthSpec::slots_per_epoch()).as_u64() as i64);

         self.best_slot.set(state.slot.as_u64() as i64);
-        self.validator_count
-            .set(state.validator_registry.len() as i64);
+        self.best_beacon_block_root
+            .set(beacon_chain.head().beacon_block_root.to_low_u64_le() as i64);
+        self.justified_beacon_block_root.set(
+            beacon_chain
+                .head()
+                .beacon_state
+                .current_justified_checkpoint
+                .root
+                .to_low_u64_le() as i64,
+        );
+        self.finalized_beacon_block_root.set(
+            beacon_chain
+                .head()
+                .beacon_state
+                .finalized_checkpoint
+                .root
+                .to_low_u64_le() as i64,
+        );
+        self.validator_count.set(state.validators.len() as i64);
         self.justified_epoch
-            .set(state.current_justified_epoch.as_u64() as i64);
+            .set(state.current_justified_checkpoint.epoch.as_u64() as i64);
         self.finalized_epoch
-            .set(state.finalized_epoch.as_u64() as i64);
+            .set(state.finalized_checkpoint.epoch.as_u64() as i64);
         if SHOULD_SUM_VALIDATOR_BALANCES {
             self.validator_balances_sum
                 .set(state.balances.iter().sum::<u64>() as i64);
         }
-        let db_size = File::open(db_path)
-            .and_then(|f| f.metadata())
-            .and_then(|m| Ok(m.len()))
-            .unwrap_or(0);
+        let db_size = if let Ok(iter) = fs::read_dir(db_path) {
+            iter.filter_map(Result::ok)
+                .map(size_of_dir_entry)
+                .fold(0_u64, |sum, val| sum + val)
+        } else {
+            0
+        };
         self.database_size.set(db_size as i64);
     }
 }

+fn size_of_dir_entry(dir: fs::DirEntry) -> u64 {
+    dir.metadata().map(|m| m.len()).unwrap_or(0)
+}
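
Note: the new database-size computation is equivalent to the standalone sketch below (the free function name `database_size` is illustrative, not part of the patch). Like the patched code, it sums only the entries directly inside the database directory and does not recurse into subdirectories:

use std::fs;
use std::path::Path;

// Sum the sizes of the entries directly inside `db_path`. An entry whose
// metadata cannot be read contributes nothing, and a missing or unreadable
// directory yields 0, mirroring the patched fallback behaviour.
fn database_size(db_path: &Path) -> u64 {
    fs::read_dir(db_path)
        .map(|entries| {
            entries
                .filter_map(Result::ok)
                .filter_map(|entry| entry.metadata().ok())
                .map(|meta| meta.len())
                .sum::<u64>()
        })
        .unwrap_or(0)
}

fn main() {
    println!("db size: {} bytes", database_size(Path::new(".")));
}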
@@ -11,7 +11,6 @@ sloggers = "0.3.2"
 beacon_chain = { path = "../beacon_chain" }
 store = { path = "../store" }
 eth2-libp2p = { path = "../eth2-libp2p" }
-version = { path = "../version" }
 types = { path = "../../eth2/types" }
 slog = { version = "^2.2.3" , features = ["max_level_trace"] }
 eth2_ssz = { path = "../../eth2/utils/ssz" }
@@ -14,7 +14,7 @@ use slog::{debug, warn};
 use ssz::{Decode, DecodeError};
 use std::sync::Arc;
 use tokio::sync::mpsc;
-use types::BeaconBlockHeader;
+use types::{BeaconBlockHeader, EthSpec};

 /// Handles messages received from the network and client and organises syncing.
 pub struct MessageHandler<T: BeaconChainTypes> {
@@ -23,14 +23,14 @@ pub struct MessageHandler<T: BeaconChainTypes> {
     /// The syncing framework.
     sync: SimpleSync<T>,
     /// The context required to send messages to, and process messages from peers.
-    network_context: NetworkContext,
+    network_context: NetworkContext<T::EthSpec>,
     /// The `MessageHandler` logger.
     log: slog::Logger,
 }

 /// Types of messages the handler can receive.
 #[derive(Debug)]
-pub enum HandlerMessage {
+pub enum HandlerMessage<E: EthSpec> {
     /// We have initiated a connection to a new peer.
     PeerDialed(PeerId),
     /// Peer has disconnected,
@@ -38,17 +38,17 @@ pub enum HandlerMessage {
     /// An RPC response/request has been received.
     RPC(PeerId, RPCEvent),
     /// A gossip message has been received.
-    PubsubMessage(PeerId, Box<PubsubMessage>),
+    PubsubMessage(PeerId, Box<PubsubMessage<E>>),
 }

 impl<T: BeaconChainTypes + 'static> MessageHandler<T> {
     /// Initializes and runs the MessageHandler.
     pub fn spawn(
         beacon_chain: Arc<BeaconChain<T>>,
-        network_send: mpsc::UnboundedSender<NetworkMessage>,
+        network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
         executor: &tokio::runtime::TaskExecutor,
         log: slog::Logger,
-    ) -> error::Result<mpsc::UnboundedSender<HandlerMessage>> {
+    ) -> error::Result<mpsc::UnboundedSender<HandlerMessage<T::EthSpec>>> {
         debug!(log, "Service starting");

         let (handler_send, handler_recv) = mpsc::unbounded_channel();
@@ -78,7 +78,7 @@ impl<T: BeaconChainTypes + 'static> MessageHandler<T> {
     }

     /// Handle all messages incoming from the network service.
-    fn handle_message(&mut self, message: HandlerMessage) {
+    fn handle_message(&mut self, message: HandlerMessage<T::EthSpec>) {
         match message {
             // we have initiated a connection to a peer
             HandlerMessage::PeerDialed(peer_id) => {
@@ -222,7 +222,7 @@ impl<T: BeaconChainTypes + 'static> MessageHandler<T> {
     fn decode_block_bodies(
         &self,
         bodies_response: BeaconBlockBodiesResponse,
-    ) -> Result<DecodedBeaconBlockBodiesResponse, DecodeError> {
+    ) -> Result<DecodedBeaconBlockBodiesResponse<T::EthSpec>, DecodeError> {
         //TODO: Implement faster block verification before decoding entirely
         let block_bodies = Vec::from_ssz_bytes(&bodies_response.block_bodies)?;
         Ok(DecodedBeaconBlockBodiesResponse {
@@ -249,10 +249,10 @@ impl<T: BeaconChainTypes + 'static> MessageHandler<T> {
     }

     /// Handle RPC messages
-    fn handle_gossip(&mut self, peer_id: PeerId, gossip_message: PubsubMessage) {
+    fn handle_gossip(&mut self, peer_id: PeerId, gossip_message: PubsubMessage<T::EthSpec>) {
         match gossip_message {
             PubsubMessage::Block(message) => {
-                let _should_foward_on =
+                let _should_forward_on =
                     self.sync
                         .on_block_gossip(peer_id, message, &mut self.network_context);
             }
@@ -265,15 +265,15 @@ impl<T: BeaconChainTypes + 'static> MessageHandler<T> {
 }

 // TODO: RPC Rewrite makes this struct fairly pointless
-pub struct NetworkContext {
+pub struct NetworkContext<E: EthSpec> {
     /// The network channel to relay messages to the Network service.
-    network_send: mpsc::UnboundedSender<NetworkMessage>,
+    network_send: mpsc::UnboundedSender<NetworkMessage<E>>,
     /// The `MessageHandler` logger.
     log: slog::Logger,
 }

-impl NetworkContext {
-    pub fn new(network_send: mpsc::UnboundedSender<NetworkMessage>, log: slog::Logger) -> Self {
+impl<E: EthSpec> NetworkContext<E> {
+    pub fn new(network_send: mpsc::UnboundedSender<NetworkMessage<E>>, log: slog::Logger) -> Self {
         Self { network_send, log }
     }

@@ -2,6 +2,7 @@ use crate::error;
 use crate::message_handler::{HandlerMessage, MessageHandler};
 use crate::NetworkConfig;
 use beacon_chain::{BeaconChain, BeaconChainTypes};
+use core::marker::PhantomData;
 use eth2_libp2p::Service as LibP2PService;
 use eth2_libp2p::Topic;
 use eth2_libp2p::{Libp2pEvent, PeerId};
@@ -10,16 +11,16 @@ use futures::prelude::*;
 use futures::Stream;
 use parking_lot::Mutex;
 use slog::{debug, info, o, trace};
-use std::marker::PhantomData;
 use std::sync::Arc;
 use tokio::runtime::TaskExecutor;
 use tokio::sync::{mpsc, oneshot};
+use types::EthSpec;

 /// Service that handles communication between internal services and the eth2_libp2p network service.
 pub struct Service<T: BeaconChainTypes> {
-    libp2p_service: Arc<Mutex<LibP2PService>>,
+    libp2p_service: Arc<Mutex<LibP2PService<T::EthSpec>>>,
     _libp2p_exit: oneshot::Sender<()>,
-    _network_send: mpsc::UnboundedSender<NetworkMessage>,
+    _network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
     _phantom: PhantomData<T>, //message_handler: MessageHandler,
     //message_handler_send: Sender<HandlerMessage>
 }
@@ -30,9 +31,9 @@ impl<T: BeaconChainTypes + 'static> Service<T> {
         config: &NetworkConfig,
         executor: &TaskExecutor,
         log: slog::Logger,
-    ) -> error::Result<(Arc<Self>, mpsc::UnboundedSender<NetworkMessage>)> {
+    ) -> error::Result<(Arc<Self>, mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>)> {
         // build the network channel
-        let (network_send, network_recv) = mpsc::unbounded_channel::<NetworkMessage>();
+        let (network_send, network_recv) = mpsc::unbounded_channel::<NetworkMessage<_>>();
         // launch message handler thread
         let message_handler_log = log.new(o!("Service" => "MessageHandler"));
         let message_handler_send = MessageHandler::spawn(
@@ -64,15 +65,15 @@ impl<T: BeaconChainTypes + 'static> Service<T> {
         Ok((Arc::new(network_service), network_send))
     }

-    pub fn libp2p_service(&self) -> Arc<Mutex<LibP2PService>> {
+    pub fn libp2p_service(&self) -> Arc<Mutex<LibP2PService<T::EthSpec>>> {
         self.libp2p_service.clone()
     }
 }

-fn spawn_service(
-    libp2p_service: Arc<Mutex<LibP2PService>>,
-    network_recv: mpsc::UnboundedReceiver<NetworkMessage>,
-    message_handler_send: mpsc::UnboundedSender<HandlerMessage>,
+fn spawn_service<E: EthSpec>(
+    libp2p_service: Arc<Mutex<LibP2PService<E>>>,
+    network_recv: mpsc::UnboundedReceiver<NetworkMessage<E>>,
+    message_handler_send: mpsc::UnboundedSender<HandlerMessage<E>>,
     executor: &TaskExecutor,
     log: slog::Logger,
 ) -> error::Result<tokio::sync::oneshot::Sender<()>> {
@@ -98,17 +99,15 @@ fn spawn_service(
 }

 //TODO: Potentially handle channel errors
-fn network_service(
-    libp2p_service: Arc<Mutex<LibP2PService>>,
-    mut network_recv: mpsc::UnboundedReceiver<NetworkMessage>,
-    mut message_handler_send: mpsc::UnboundedSender<HandlerMessage>,
+fn network_service<E: EthSpec>(
+    libp2p_service: Arc<Mutex<LibP2PService<E>>>,
+    mut network_recv: mpsc::UnboundedReceiver<NetworkMessage<E>>,
+    mut message_handler_send: mpsc::UnboundedSender<HandlerMessage<E>>,
     log: slog::Logger,
 ) -> impl futures::Future<Item = (), Error = eth2_libp2p::error::Error> {
     futures::future::poll_fn(move || -> Result<_, eth2_libp2p::error::Error> {
-        // only end the loop once both major polls are not ready.
-        let mut not_ready_count = 0;
-        while not_ready_count < 2 {
-            not_ready_count = 0;
+        // if the network channel is not ready, try the swarm
+        loop {
             // poll the network channel
             match network_recv.poll() {
                 Ok(Async::Ready(Some(message))) => match message {
@@ -123,7 +122,7 @@ fn network_service(
                     libp2p_service.lock().swarm.publish(topics, *message);
                 }
             },
-            Ok(Async::NotReady) => not_ready_count += 1,
+            Ok(Async::NotReady) => break,
             Ok(Async::Ready(None)) => {
                 return Err(eth2_libp2p::error::Error::from("Network channel closed"));
             }
@@ -131,7 +130,9 @@ fn network_service(
                 return Err(eth2_libp2p::error::Error::from("Network channel error"));
             }
         }
+    }

+    loop {
         // poll the swarm
         match libp2p_service.lock().poll() {
             Ok(Async::Ready(Some(event))) => match event {
@@ -164,8 +165,8 @@ fn network_service(
                 }
             },
             Ok(Async::Ready(None)) => unreachable!("Stream never ends"),
-            Ok(Async::NotReady) => not_ready_count += 1,
-            Err(_) => not_ready_count += 1,
+            Ok(Async::NotReady) => break,
+            Err(_) => break,
         }
     }
@@ -175,14 +176,14 @@ fn network_service(

 /// Types of messages that the network service can receive.
 #[derive(Debug)]
-pub enum NetworkMessage {
+pub enum NetworkMessage<E: EthSpec> {
     /// Send a message to libp2p service.
     //TODO: Define typing for messages across the wire
     Send(PeerId, OutgoingMessage),
     /// Publish a message to pubsub mechanism.
     Publish {
         topics: Vec<Topic>,
-        message: Box<PubsubMessage>,
+        message: Box<PubsubMessage<E>>,
     },
 }

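
Note: the net effect of threading `E: EthSpec` through `NetworkMessage`, `HandlerMessage` and the channels is that the spec becomes part of the channel's type. A minimal self-contained sketch of the pattern, with toy stand-ins for `EthSpec` and `NetworkMessage` (the real types live in the `types` and `network` crates):

use std::marker::PhantomData;
use std::sync::mpsc;

// Toy stand-ins; only the shape of the generics matters here.
trait EthSpec {}
struct MainnetEthSpec;
impl EthSpec for MainnetEthSpec {}

enum NetworkMessage<E: EthSpec> {
    Publish { _marker: PhantomData<E> },
}

fn main() {
    // The item type now names the spec, so a sender for one spec cannot be
    // handed to code that expects messages for another.
    let (tx, rx) = mpsc::channel::<NetworkMessage<MainnetEthSpec>>();
    tx.send(NetworkMessage::Publish { _marker: PhantomData })
        .expect("receiver is alive");
    match rx.recv().expect("sender is alive") {
        NetworkMessage::Publish { .. } => println!("publish received"),
    }
}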
@@ -6,7 +6,7 @@ use std::collections::HashMap;
 use std::sync::Arc;
 use std::time::{Duration, Instant};
 use tree_hash::TreeHash;
-use types::{BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Hash256, Slot};
+use types::{BeaconBlock, BeaconBlockBody, BeaconBlockHeader, EthSpec, Hash256, Slot};

 /// Provides a queue for fully and partially built `BeaconBlock`s.
 ///
@@ -23,7 +23,7 @@ use types::{BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Hash256, Slot};
 pub struct ImportQueue<T: BeaconChainTypes> {
     pub chain: Arc<BeaconChain<T>>,
     /// Partially imported blocks, keyed by the root of `BeaconBlockBody`.
-    partials: HashMap<Hash256, PartialBeaconBlock>,
+    partials: HashMap<Hash256, PartialBeaconBlock<T::EthSpec>>,
     /// Time before a queue entry is considered state.
     pub stale_time: Duration,
     /// Logging
@@ -50,7 +50,10 @@ impl<T: BeaconChainTypes> ImportQueue<T> {
     ///
     /// Returns an Enum with a `PartialBeaconBlockCompletion`.
     /// Does not remove the `block_root` from the `import_queue`.
-    pub fn attempt_complete_block(&self, block_root: Hash256) -> PartialBeaconBlockCompletion {
+    pub fn attempt_complete_block(
+        &self,
+        block_root: Hash256,
+    ) -> PartialBeaconBlockCompletion<T::EthSpec> {
         if let Some(partial) = self.partials.get(&block_root) {
             partial.attempt_complete()
         } else {
@@ -60,7 +63,7 @@ impl<T: BeaconChainTypes> ImportQueue<T> {

     /// Removes the first `PartialBeaconBlock` with a matching `block_root`, returning the partial
     /// if it exists.
-    pub fn remove(&mut self, block_root: Hash256) -> Option<PartialBeaconBlock> {
+    pub fn remove(&mut self, block_root: Hash256) -> Option<PartialBeaconBlock<T::EthSpec>> {
         self.partials.remove(&block_root)
     }

@@ -141,11 +144,11 @@ impl<T: BeaconChainTypes> ImportQueue<T> {
         for header in headers {
             let block_root = Hash256::from_slice(&header.canonical_root()[..]);

-            if self.chain_has_not_seen_block(&block_root) {
-                if !self.insert_header(block_root, header, sender.clone()) {
-                    // If a body is empty
-                    required_bodies.push(block_root);
-                }
+            if self.chain_has_not_seen_block(&block_root)
+                && !self.insert_header(block_root, header, sender.clone())
+            {
+                // If a body is empty
+                required_bodies.push(block_root);
             }
         }

@@ -157,7 +160,7 @@ impl<T: BeaconChainTypes> ImportQueue<T> {
     /// If there is no `header` for the `body`, the body is simply discarded.
     pub fn enqueue_bodies(
         &mut self,
-        bodies: Vec<BeaconBlockBody>,
+        bodies: Vec<BeaconBlockBody<T::EthSpec>>,
         sender: PeerId,
     ) -> Option<Hash256> {
         let mut last_block_hash = None;
@@ -168,7 +171,7 @@ impl<T: BeaconChainTypes> ImportQueue<T> {
         last_block_hash
     }

-    pub fn enqueue_full_blocks(&mut self, blocks: Vec<BeaconBlock>, sender: PeerId) {
+    pub fn enqueue_full_blocks(&mut self, blocks: Vec<BeaconBlock<T::EthSpec>>, sender: PeerId) {
         for block in blocks {
             self.insert_full_block(block, sender.clone());
         }
@@ -211,13 +214,17 @@ impl<T: BeaconChainTypes> ImportQueue<T> {
     /// If the body already existed, the `inserted` time is set to `now`.
     ///
     /// Returns the block hash of the inserted body
-    fn insert_body(&mut self, body: BeaconBlockBody, sender: PeerId) -> Option<Hash256> {
+    fn insert_body(
+        &mut self,
+        body: BeaconBlockBody<T::EthSpec>,
+        sender: PeerId,
+    ) -> Option<Hash256> {
         let body_root = Hash256::from_slice(&body.tree_hash_root()[..]);
         let mut last_root = None;

         self.partials.iter_mut().for_each(|(root, mut p)| {
             if let Some(header) = &mut p.header {
-                if body_root == header.block_body_root {
+                if body_root == header.body_root {
                     p.inserted = Instant::now();
                     p.body = Some(body.clone());
                     p.sender = sender.clone();
@@ -232,7 +239,7 @@ impl<T: BeaconChainTypes> ImportQueue<T> {
     /// Updates an existing `partial` with the completed block, or adds a new (complete) partial.
     ///
     /// If the partial already existed, the `inserted` time is set to `now`.
-    fn insert_full_block(&mut self, block: BeaconBlock, sender: PeerId) {
+    fn insert_full_block(&mut self, block: BeaconBlock<T::EthSpec>, sender: PeerId) {
         let block_root = Hash256::from_slice(&block.canonical_root()[..]);

         let partial = PartialBeaconBlock {
@@ -254,12 +261,12 @@ impl<T: BeaconChainTypes> ImportQueue<T> {
 /// Individual components of a `BeaconBlock`, potentially all that are required to form a full
 /// `BeaconBlock`.
 #[derive(Clone, Debug)]
-pub struct PartialBeaconBlock {
+pub struct PartialBeaconBlock<E: EthSpec> {
     pub slot: Slot,
     /// `BeaconBlock` root.
     pub block_root: Hash256,
     pub header: Option<BeaconBlockHeader>,
-    pub body: Option<BeaconBlockBody>,
+    pub body: Option<BeaconBlockBody<E>>,
     /// The instant at which this record was created or last meaningfully modified. Used to
     /// determine if an entry is stale and should be removed.
     pub inserted: Instant,
@@ -267,11 +274,11 @@ pub struct PartialBeaconBlock {
     pub sender: PeerId,
 }

-impl PartialBeaconBlock {
+impl<E: EthSpec> PartialBeaconBlock<E> {
     /// Attempts to build a block.
     ///
     /// Does not comsume the `PartialBeaconBlock`.
-    pub fn attempt_complete(&self) -> PartialBeaconBlockCompletion {
+    pub fn attempt_complete(&self) -> PartialBeaconBlockCompletion<E> {
         if self.header.is_none() {
             PartialBeaconBlockCompletion::MissingHeader(self.slot)
         } else if self.body.is_none() {
@@ -288,9 +295,9 @@ impl PartialBeaconBlock {
 }

 /// The result of trying to convert a `BeaconBlock` into a `PartialBeaconBlock`.
-pub enum PartialBeaconBlockCompletion {
+pub enum PartialBeaconBlockCompletion<E: EthSpec> {
     /// The partial contains a valid BeaconBlock.
-    Complete(BeaconBlock),
+    Complete(BeaconBlock<E>),
     /// The partial does not exist.
     MissingRoot,
     /// The partial contains a `BeaconBlockRoot` but no `BeaconBlockHeader`.
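
Note: consumers of the queue drive syncing off the completion state returned by `attempt_complete_block`. A self-contained sketch of that dispatch, with toy types in place of the real `BeaconBlock` and `Slot` (the `MissingBody` variant is cut off by the diff above and is assumed here):

#[derive(Debug)]
struct Slot(u64);
struct Block {
    slot: Slot,
}

enum Completion {
    Complete(Block),     // partial holds a full block
    MissingRoot,         // no partial for this root
    MissingHeader(Slot), // root known, header still needed
    MissingBody,         // header known, body still needed (assumed variant)
}

fn next_action(completion: Completion) -> String {
    match completion {
        Completion::Complete(block) => format!("process block at {:?}", block.slot),
        Completion::MissingRoot => "request block roots".to_string(),
        Completion::MissingHeader(slot) => format!("request header for {:?}", slot),
        Completion::MissingBody => "request block body".to_string(),
    }
}

fn main() {
    println!("{}", next_action(Completion::MissingHeader(Slot(42))));
}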
@@ -123,7 +123,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
     /// Handle the connection of a new peer.
     ///
     /// Sends a `Hello` message to the peer.
-    pub fn on_connect(&self, peer_id: PeerId, network: &mut NetworkContext) {
+    pub fn on_connect(&self, peer_id: PeerId, network: &mut NetworkContext<T::EthSpec>) {
         info!(self.log, "PeerConnected"; "peer" => format!("{:?}", peer_id));

         network.send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain)));
@@ -137,7 +137,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
         peer_id: PeerId,
         request_id: RequestId,
         hello: HelloMessage,
-        network: &mut NetworkContext,
+        network: &mut NetworkContext<T::EthSpec>,
     ) {
         debug!(self.log, "HelloRequest"; "peer" => format!("{:?}", peer_id));

@@ -156,7 +156,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
         &mut self,
         peer_id: PeerId,
         hello: HelloMessage,
-        network: &mut NetworkContext,
+        network: &mut NetworkContext<T::EthSpec>,
     ) {
         debug!(self.log, "HelloResponse"; "peer" => format!("{:?}", peer_id));

@@ -171,7 +171,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
         &mut self,
         peer_id: PeerId,
         hello: HelloMessage,
-        network: &mut NetworkContext,
+        network: &mut NetworkContext<T::EthSpec>,
     ) {
         let remote = PeerSyncInfo::from(hello);
         let local = PeerSyncInfo::from(&self.chain);
@@ -186,10 +186,10 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
                 "reason" => "network_id"
             );

-            network.disconnect(peer_id.clone(), GoodbyeReason::IrreleventNetwork);
+            network.disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork);
         } else if remote.latest_finalized_epoch <= local.latest_finalized_epoch
-            && remote.latest_finalized_root != self.chain.spec.zero_hash
-            && local.latest_finalized_root != self.chain.spec.zero_hash
+            && remote.latest_finalized_root != Hash256::zero()
+            && local.latest_finalized_root != Hash256::zero()
             && (self.root_at_slot(start_slot(remote.latest_finalized_epoch))
                 != Some(remote.latest_finalized_root))
         {
@@ -202,7 +202,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
                 "peer" => format!("{:?}", peer_id),
                 "reason" => "different finalized chain"
             );
-            network.disconnect(peer_id.clone(), GoodbyeReason::IrreleventNetwork);
+            network.disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork);
         } else if remote.latest_finalized_epoch < local.latest_finalized_epoch {
             // The node has a lower finalized epoch, their chain is not useful to us. There are two
             // cases where a node can have a lower finalized epoch:
@@ -226,7 +226,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
         } else if self
             .chain
             .store
-            .exists::<BeaconBlock>(&remote.best_root)
+            .exists::<BeaconBlock<T::EthSpec>>(&remote.best_root)
             .unwrap_or_else(|_| false)
         {
             // If the node's best-block is already known to us, we have nothing to request.
@@ -278,7 +278,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
         peer_id: PeerId,
         request_id: RequestId,
         req: BeaconBlockRootsRequest,
-        network: &mut NetworkContext,
+        network: &mut NetworkContext<T::EthSpec>,
     ) {
         debug!(
             self.log,
@@ -296,7 +296,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
             .collect();

         if roots.len() as u64 != req.count {
-            warn!(
+            debug!(
                 self.log,
                 "BlockRootsRequest";
                 "peer" => format!("{:?}", peer_id),
@@ -323,7 +323,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
         &mut self,
         peer_id: PeerId,
         res: BeaconBlockRootsResponse,
-        network: &mut NetworkContext,
+        network: &mut NetworkContext<T::EthSpec>,
     ) {
         debug!(
             self.log,
@@ -387,7 +387,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
         peer_id: PeerId,
         request_id: RequestId,
         req: BeaconBlockHeadersRequest,
-        network: &mut NetworkContext,
+        network: &mut NetworkContext<T::EthSpec>,
     ) {
         debug!(
             self.log,
@@ -416,7 +416,11 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
             .into_iter()
             .step_by(req.skip_slots as usize + 1)
             .filter_map(|root| {
-                let block = self.chain.store.get::<BeaconBlock>(&root).ok()?;
+                let block = self
+                    .chain
+                    .store
+                    .get::<BeaconBlock<T::EthSpec>>(&root)
+                    .ok()?;
                 Some(block?.block_header())
             })
             .collect();
@@ -436,7 +440,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
         &mut self,
         peer_id: PeerId,
         headers: Vec<BeaconBlockHeader>,
-        network: &mut NetworkContext,
+        network: &mut NetworkContext<T::EthSpec>,
     ) {
         debug!(
             self.log,
@@ -468,13 +472,13 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
         peer_id: PeerId,
         request_id: RequestId,
         req: BeaconBlockBodiesRequest,
-        network: &mut NetworkContext,
+        network: &mut NetworkContext<T::EthSpec>,
     ) {
-        let block_bodies: Vec<BeaconBlockBody> = req
+        let block_bodies: Vec<BeaconBlockBody<_>> = req
             .block_roots
             .iter()
             .filter_map(|root| {
-                if let Ok(Some(block)) = self.chain.store.get::<BeaconBlock>(root) {
+                if let Ok(Some(block)) = self.chain.store.get::<BeaconBlock<T::EthSpec>>(root) {
                     Some(block.body)
                 } else {
                     debug!(
@@ -513,8 +517,8 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
     pub fn on_beacon_block_bodies_response(
         &mut self,
         peer_id: PeerId,
-        res: DecodedBeaconBlockBodiesResponse,
-        network: &mut NetworkContext,
+        res: DecodedBeaconBlockBodiesResponse<T::EthSpec>,
+        network: &mut NetworkContext<T::EthSpec>,
     ) {
         debug!(
             self.log,
|
|||||||
.import_queue
|
.import_queue
|
||||||
.enqueue_bodies(res.block_bodies, peer_id.clone());
|
.enqueue_bodies(res.block_bodies, peer_id.clone());
|
||||||
|
|
||||||
// Attempt to process all recieved bodies by recursively processing the latest block
|
// Attempt to process all received bodies by recursively processing the latest block
|
||||||
if let Some(root) = last_root {
|
if let Some(root) = last_root {
|
||||||
match self.attempt_process_partial_block(peer_id, root, network, &"rpc") {
|
if let Some(BlockProcessingOutcome::Processed { .. }) =
|
||||||
Some(BlockProcessingOutcome::Processed { block_root: _ }) => {
|
self.attempt_process_partial_block(peer_id, root, network, &"rpc")
|
||||||
// If processing is successful remove from `import_queue`
|
{
|
||||||
self.import_queue.remove(root);
|
// If processing is successful remove from `import_queue`
|
||||||
}
|
self.import_queue.remove(root);
|
||||||
_ => {}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -553,8 +556,8 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
|
|||||||
pub fn on_block_gossip(
|
pub fn on_block_gossip(
|
||||||
&mut self,
|
&mut self,
|
||||||
peer_id: PeerId,
|
peer_id: PeerId,
|
||||||
block: BeaconBlock,
|
block: BeaconBlock<T::EthSpec>,
|
||||||
network: &mut NetworkContext,
|
network: &mut NetworkContext<T::EthSpec>,
|
||||||
) -> bool {
|
) -> bool {
|
||||||
if let Some(outcome) =
|
if let Some(outcome) =
|
||||||
self.process_block(peer_id.clone(), block.clone(), network, &"gossip")
|
self.process_block(peer_id.clone(), block.clone(), network, &"gossip")
|
||||||
@ -577,7 +580,8 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
|
|||||||
.chain
|
.chain
|
||||||
.head()
|
.head()
|
||||||
.beacon_state
|
.beacon_state
|
||||||
.finalized_epoch
|
.finalized_checkpoint
|
||||||
|
.epoch
|
||||||
.start_slot(T::EthSpec::slots_per_epoch());
|
.start_slot(T::EthSpec::slots_per_epoch());
|
||||||
self.request_block_roots(
|
self.request_block_roots(
|
||||||
peer_id,
|
peer_id,
|
||||||
@ -606,7 +610,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
|
|||||||
}
|
}
|
||||||
// Note: known blocks are forwarded on the gossip network.
|
// Note: known blocks are forwarded on the gossip network.
|
||||||
//
|
//
|
||||||
// We rely upon the lower layers (libp2p) to stop loops occuring from re-gossiped
|
// We rely upon the lower layers (libp2p) to stop loops occurring from re-gossiped
|
||||||
// blocks.
|
// blocks.
|
||||||
BlockProcessingOutcome::BlockIsAlreadyKnown => SHOULD_FORWARD_GOSSIP_BLOCK,
|
BlockProcessingOutcome::BlockIsAlreadyKnown => SHOULD_FORWARD_GOSSIP_BLOCK,
|
||||||
_ => SHOULD_NOT_FORWARD_GOSSIP_BLOCK,
|
_ => SHOULD_NOT_FORWARD_GOSSIP_BLOCK,
|
||||||
@ -622,8 +626,8 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
|
|||||||
pub fn on_attestation_gossip(
|
pub fn on_attestation_gossip(
|
||||||
&mut self,
|
&mut self,
|
||||||
_peer_id: PeerId,
|
_peer_id: PeerId,
|
||||||
msg: Attestation,
|
msg: Attestation<T::EthSpec>,
|
||||||
_network: &mut NetworkContext,
|
_network: &mut NetworkContext<T::EthSpec>,
|
||||||
) {
|
) {
|
||||||
match self.chain.process_attestation(msg) {
|
match self.chain.process_attestation(msg) {
|
||||||
Ok(()) => info!(self.log, "ImportedAttestation"; "source" => "gossip"),
|
Ok(()) => info!(self.log, "ImportedAttestation"; "source" => "gossip"),
|
||||||
@ -638,7 +642,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
|
|||||||
&mut self,
|
&mut self,
|
||||||
peer_id: PeerId,
|
peer_id: PeerId,
|
||||||
req: BeaconBlockRootsRequest,
|
req: BeaconBlockRootsRequest,
|
||||||
network: &mut NetworkContext,
|
network: &mut NetworkContext<T::EthSpec>,
|
||||||
) {
|
) {
|
||||||
// Potentially set state to sync.
|
// Potentially set state to sync.
|
||||||
if self.state == SyncState::Idle && req.count > SLOT_IMPORT_TOLERANCE {
|
if self.state == SyncState::Idle && req.count > SLOT_IMPORT_TOLERANCE {
|
||||||
@ -662,7 +666,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
|
|||||||
&mut self,
|
&mut self,
|
||||||
peer_id: PeerId,
|
peer_id: PeerId,
|
||||||
req: BeaconBlockHeadersRequest,
|
req: BeaconBlockHeadersRequest,
|
||||||
network: &mut NetworkContext,
|
network: &mut NetworkContext<T::EthSpec>,
|
||||||
) {
|
) {
|
||||||
debug!(
|
debug!(
|
||||||
self.log,
|
self.log,
|
||||||
@ -679,7 +683,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
|
|||||||
&mut self,
|
&mut self,
|
||||||
peer_id: PeerId,
|
peer_id: PeerId,
|
||||||
req: BeaconBlockBodiesRequest,
|
req: BeaconBlockBodiesRequest,
|
||||||
network: &mut NetworkContext,
|
network: &mut NetworkContext<T::EthSpec>,
|
||||||
) {
|
) {
|
||||||
debug!(
|
debug!(
|
||||||
self.log,
|
self.log,
|
||||||
@ -715,7 +719,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
|
|||||||
&mut self,
|
&mut self,
|
||||||
peer_id: PeerId,
|
peer_id: PeerId,
|
||||||
block_root: Hash256,
|
block_root: Hash256,
|
||||||
network: &mut NetworkContext,
|
network: &mut NetworkContext<T::EthSpec>,
|
||||||
source: &str,
|
source: &str,
|
||||||
) -> Option<BlockProcessingOutcome> {
|
) -> Option<BlockProcessingOutcome> {
|
||||||
match self.import_queue.attempt_complete_block(block_root) {
|
match self.import_queue.attempt_complete_block(block_root) {
|
||||||
@ -807,8 +811,8 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
|
|||||||
fn process_block(
|
fn process_block(
|
||||||
&mut self,
|
&mut self,
|
||||||
peer_id: PeerId,
|
peer_id: PeerId,
|
||||||
block: BeaconBlock,
|
block: BeaconBlock<T::EthSpec>,
|
||||||
network: &mut NetworkContext,
|
network: &mut NetworkContext<T::EthSpec>,
|
||||||
source: &str,
|
source: &str,
|
||||||
) -> Option<BlockProcessingOutcome> {
|
) -> Option<BlockProcessingOutcome> {
|
||||||
let processing_result = self.chain.process_block(block.clone());
|
let processing_result = self.chain.process_block(block.clone());
|
||||||
@ -836,19 +840,18 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
|
|||||||
);
|
);
|
||||||
|
|
||||||
// If the parent is in the `import_queue` attempt to complete it then process it.
|
// If the parent is in the `import_queue` attempt to complete it then process it.
|
||||||
match self.attempt_process_partial_block(peer_id, parent, network, source) {
|
// All other cases leave `parent` in `import_queue` and return original outcome.
|
||||||
// If processing parent is sucessful, re-process block and remove parent from queue
|
if let Some(BlockProcessingOutcome::Processed { .. }) =
|
||||||
Some(BlockProcessingOutcome::Processed { block_root: _ }) => {
|
self.attempt_process_partial_block(peer_id, parent, network, source)
|
||||||
self.import_queue.remove(parent);
|
{
|
||||||
|
// If processing parent is successful, re-process block and remove parent from queue
|
||||||
|
self.import_queue.remove(parent);
|
||||||
|
|
||||||
// Attempt to process `block` again
|
// Attempt to process `block` again
|
||||||
match self.chain.process_block(block) {
|
match self.chain.process_block(block) {
|
||||||
Ok(outcome) => return Some(outcome),
|
Ok(outcome) => return Some(outcome),
|
||||||
Err(_) => return None,
|
Err(_) => return None,
|
||||||
}
|
|
||||||
}
|
}
|
||||||
// All other cases leave `parent` in `import_queue` and return original outcome.
|
|
||||||
_ => {}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
BlockProcessingOutcome::FutureSlot {
|
BlockProcessingOutcome::FutureSlot {
|
||||||
@ -913,9 +916,9 @@ fn hello_message<T: BeaconChainTypes>(beacon_chain: &BeaconChain<T>) -> HelloMes
|
|||||||
HelloMessage {
|
HelloMessage {
|
||||||
//TODO: Correctly define the chain/network id
|
//TODO: Correctly define the chain/network id
|
||||||
network_id: spec.chain_id,
|
network_id: spec.chain_id,
|
||||||
chain_id: spec.chain_id as u64,
|
chain_id: u64::from(spec.chain_id),
|
||||||
latest_finalized_root: state.finalized_root,
|
latest_finalized_root: state.finalized_checkpoint.root,
|
||||||
latest_finalized_epoch: state.finalized_epoch,
|
latest_finalized_epoch: state.finalized_checkpoint.epoch,
|
||||||
best_root: beacon_chain.head().beacon_block_root,
|
best_root: beacon_chain.head().beacon_block_root,
|
||||||
best_slot: state.slot,
|
best_slot: state.slot,
|
||||||
}
|
}
|
||||||
|
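
Note: two of the hunks above perform the same mechanical cleanup: a `match` with one interesting arm and a `_ => {}` catch-all becomes an `if let` with a `..` rest-pattern. In isolation (toy outcome type in place of `BlockProcessingOutcome`) the transformation looks like this:

enum Outcome {
    Processed { block_root: u64 },
    ParentUnknown,
}

fn attempt(_root: u64) -> Option<Outcome> {
    Some(Outcome::Processed { block_root: 7 })
}

fn main() {
    let root = 7;

    // Before: a match with a single interesting arm.
    match attempt(root) {
        Some(Outcome::Processed { block_root: _ }) => println!("remove {} from queue", root),
        _ => {}
    }

    // After: an `if let` with a `..` rest-pattern, as the patch does.
    if let Some(Outcome::Processed { .. }) = attempt(root) {
        println!("remove {} from queue", root);
    }
}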
beacon_node/rest_api/Cargo.toml (new file, 22 lines)
@@ -0,0 +1,22 @@
+[package]
+name = "rest_api"
+version = "0.1.0"
+authors = ["Luke Anderson <luke@lukeanderson.com.au>"]
+edition = "2018"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+[dependencies]
+beacon_chain = { path = "../beacon_chain" }
+version = { path = "../version" }
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "^1.0"
+slog = "^2.2.3"
+slog-term = "^2.4.0"
+slog-async = "^2.3.0"
+clap = "2.32.0"
+http = "^0.1.17"
+hyper = "0.12.32"
+hyper-router = "^0.5"
+futures = "0.1"
+exit-future = "0.1.3"
+tokio = "0.1.17"
beacon_node/rest_api/src/beacon_node.rs (new file, 65 lines)
@@ -0,0 +1,65 @@
+use beacon_chain::{BeaconChain, BeaconChainTypes};
+use serde::Serialize;
+use slog::info;
+use std::sync::Arc;
+use version;
+
+use super::{path_from_request, success_response, APIResult, APIService};
+
+use hyper::{Body, Request, Response};
+use hyper_router::{Route, RouterBuilder};
+
+#[derive(Clone)]
+pub struct BeaconNodeServiceInstance<T: BeaconChainTypes + 'static> {
+    pub marker: std::marker::PhantomData<T>,
+}
+
+/// A string which uniquely identifies the client implementation and its version; similar to [HTTP User-Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3).
+#[derive(Serialize)]
+pub struct Version(String);
+impl From<String> for Version {
+    fn from(x: String) -> Self {
+        Version(x)
+    }
+}
+
+/// The genesis_time configured for the beacon node, which is the unix time at which the Eth2.0 chain began.
+#[derive(Serialize)]
+pub struct GenesisTime(u64);
+impl From<u64> for GenesisTime {
+    fn from(x: u64) -> Self {
+        GenesisTime(x)
+    }
+}
+
+impl<T: BeaconChainTypes + 'static> APIService for BeaconNodeServiceInstance<T> {
+    fn add_routes(&mut self, router_builder: RouterBuilder) -> Result<RouterBuilder, hyper::Error> {
+        let router_builder = router_builder
+            .add(Route::get("/version").using(result_to_response!(get_version)))
+            .add(Route::get("/genesis_time").using(result_to_response!(get_genesis_time::<T>)));
+        Ok(router_builder)
+    }
+}
+
+/// Read the version string from the current Lighthouse build.
+fn get_version(_req: Request<Body>) -> APIResult {
+    let ver = Version::from(version::version());
+    let body = Body::from(
+        serde_json::to_string(&ver).expect("Version should always be serialializable as JSON."),
+    );
+    Ok(success_response(body))
+}
+
+/// Read the genesis time from the current beacon chain state.
+fn get_genesis_time<T: BeaconChainTypes + 'static>(req: Request<Body>) -> APIResult {
+    let beacon_chain = req.extensions().get::<Arc<BeaconChain<T>>>().unwrap();
+    let gen_time = {
+        let state = beacon_chain.current_state();
+        state.genesis_time
+    };
+    let body = Body::from(
+        serde_json::to_string(&gen_time)
+            .expect("Genesis should time always have a valid JSON serialization."),
+    );
+    Ok(success_response(body))
+}
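
Note: the `Version(String)` and `GenesisTime(u64)` newtypes above serialize transparently as their inner values under serde's default newtype handling, which is why the handlers can return bare JSON scalars. A runnable check, assuming the `serde` and `serde_json` crates declared in the new Cargo.toml:

use serde::Serialize;

#[derive(Serialize)]
struct Version(String);

#[derive(Serialize)]
struct GenesisTime(u64);

fn main() {
    // Newtype structs serialize as their single field, not as objects.
    let v = Version("Lighthouse/v0.x".to_string()); // toy version string
    let g = GenesisTime(1_567_000_000);
    assert_eq!(serde_json::to_string(&v).unwrap(), "\"Lighthouse/v0.x\"");
    assert_eq!(serde_json::to_string(&g).unwrap(), "1567000000");
    println!("ok");
}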
beacon_node/rest_api/src/config.rs (new file, 46 lines)
@@ -0,0 +1,46 @@
+use clap::ArgMatches;
+use serde::{Deserialize, Serialize};
+use std::net::Ipv4Addr;
+
+/// HTTP REST API Configuration
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Config {
+    /// Enable the REST API server.
+    pub enabled: bool,
+    /// The IPv4 address the REST API HTTP server will listen on.
+    pub listen_address: Ipv4Addr,
+    /// The port the REST API HTTP server will listen on.
+    pub port: u16,
+}
+
+impl Default for Config {
+    fn default() -> Self {
+        Config {
+            enabled: true, // rest_api enabled by default
+            listen_address: Ipv4Addr::new(127, 0, 0, 1),
+            port: 1248,
+        }
+    }
+}
+
+impl Config {
+    pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> {
+        if args.is_present("api") {
+            self.enabled = true;
+        }
+
+        if let Some(rpc_address) = args.value_of("api-address") {
+            self.listen_address = rpc_address
+                .parse::<Ipv4Addr>()
+                .map_err(|_| "api-address is not a valid IPv4 address.")?;
+        }
+
+        if let Some(rpc_port) = args.value_of("api-port") {
+            self.port = rpc_port
+                .parse::<u16>()
+                .map_err(|_| "api-port is not a valid u16.")?;
+        }
+
+        Ok(())
+    }
+}
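
Note: the flag names consumed here ("api", "api-address", "api-port") must be declared on the CLI by the binary. A self-contained sketch of that wiring with clap 2.x, with the parsing logic inlined from `apply_cli_args`; the `Arg` declarations are assumptions, since the actual ones live in the beacon node's main.rs outside this diff:

use clap::{App, Arg};
use std::net::Ipv4Addr;

fn main() -> Result<(), &'static str> {
    let matches = App::new("beacon_node")
        .arg(Arg::with_name("api").long("api"))
        .arg(Arg::with_name("api-address").long("api-address").takes_value(true))
        .arg(Arg::with_name("api-port").long("api-port").takes_value(true))
        .get_matches();

    // Defaults mirror `Config::default()` above: enabled, 127.0.0.1:1248.
    let mut enabled = true;
    let mut listen_address = Ipv4Addr::new(127, 0, 0, 1);
    let mut port: u16 = 1248;

    if matches.is_present("api") {
        enabled = true;
    }
    if let Some(addr) = matches.value_of("api-address") {
        listen_address = addr
            .parse::<Ipv4Addr>()
            .map_err(|_| "api-address is not a valid IPv4 address.")?;
    }
    if let Some(p) = matches.value_of("api-port") {
        port = p.parse::<u16>().map_err(|_| "api-port is not a valid u16.")?;
    }

    println!("REST API enabled: {}, listening on {}:{}", enabled, listen_address, port);
    Ok(())
}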
beacon_node/rest_api/src/lib.rs (new file, 132 lines)
@@ -0,0 +1,132 @@
+extern crate futures;
+extern crate hyper;
+#[macro_use]
+mod macros;
+mod beacon_node;
+pub mod config;
+
+use beacon_chain::{BeaconChain, BeaconChainTypes};
+pub use config::Config as APIConfig;
+
+use slog::{info, o, warn};
+use std::sync::Arc;
+use tokio::runtime::TaskExecutor;
+
+use crate::beacon_node::BeaconNodeServiceInstance;
+use hyper::rt::Future;
+use hyper::service::{service_fn, Service};
+use hyper::{Body, Request, Response, Server, StatusCode};
+use hyper_router::{RouterBuilder, RouterService};
+
+pub enum APIError {
+    MethodNotAllowed { desc: String },
+    ServerError { desc: String },
+    NotImplemented { desc: String },
+}
+
+pub type APIResult = Result<Response<Body>, APIError>;
+
+impl Into<Response<Body>> for APIError {
+    fn into(self) -> Response<Body> {
+        let status_code: (StatusCode, String) = match self {
+            APIError::MethodNotAllowed { desc } => (StatusCode::METHOD_NOT_ALLOWED, desc),
+            APIError::ServerError { desc } => (StatusCode::INTERNAL_SERVER_ERROR, desc),
+            APIError::NotImplemented { desc } => (StatusCode::NOT_IMPLEMENTED, desc),
+        };
+        Response::builder()
+            .status(status_code.0)
+            .body(Body::from(status_code.1))
+            .expect("Response should always be created.")
+    }
+}
+
+pub trait APIService {
+    fn add_routes(&mut self, router_builder: RouterBuilder) -> Result<RouterBuilder, hyper::Error>;
+}
+
+pub fn start_server<T: BeaconChainTypes + Clone + 'static>(
+    config: &APIConfig,
+    executor: &TaskExecutor,
+    beacon_chain: Arc<BeaconChain<T>>,
+    log: &slog::Logger,
+) -> Result<exit_future::Signal, hyper::Error> {
+    let log = log.new(o!("Service" => "API"));
+
+    // build a channel to kill the HTTP server
+    let (exit_signal, exit) = exit_future::signal();
+
+    let exit_log = log.clone();
+    let server_exit = exit.and_then(move |_| {
+        info!(exit_log, "API service shutdown");
+        Ok(())
+    });
+
+    // Get the address to bind to
+    let bind_addr = (config.listen_address, config.port).into();
+
+    // Clone our stateful objects, for use in service closure.
+    let server_log = log.clone();
+    let server_bc = beacon_chain.clone();
+
+    // Create the service closure
+    let service = move || {
+        //TODO: This router must be moved out of this closure, so it isn't rebuilt for every connection.
+        let mut router = build_router_service::<T>();
+
+        // Clone our stateful objects, for use in handler closure
+        let service_log = server_log.clone();
+        let service_bc = server_bc.clone();
+
+        // Create a simple handler for the router, inject our stateful objects into the request.
+        service_fn(move |mut req| {
+            req.extensions_mut()
+                .insert::<slog::Logger>(service_log.clone());
+            req.extensions_mut()
+                .insert::<Arc<BeaconChain<T>>>(service_bc.clone());
+            router.call(req)
+        })
+    };
+
+    let server = Server::bind(&bind_addr)
+        .serve(service)
+        .with_graceful_shutdown(server_exit)
+        .map_err(move |e| {
+            warn!(
+                log,
+                "API failed to start, Unable to bind"; "address" => format!("{:?}", e)
+            )
+        });
+
+    executor.spawn(server);
+
+    Ok(exit_signal)
+}
+
+fn build_router_service<T: BeaconChainTypes + 'static>() -> RouterService {
+    let mut router_builder = RouterBuilder::new();
+
+    let mut bn_service: BeaconNodeServiceInstance<T> = BeaconNodeServiceInstance {
+        marker: std::marker::PhantomData,
+    };
+
+    router_builder = bn_service
+        .add_routes(router_builder)
+        .expect("The routes should always be made.");
+
+    RouterService::new(router_builder.build())
+}
+
+fn path_from_request(req: &Request<Body>) -> String {
+    req.uri()
+        .path_and_query()
+        .as_ref()
+        .map(|pq| String::from(pq.as_str()))
+        .unwrap_or(String::new())
+}
+
+fn success_response(body: Body) -> Response<Body> {
+    Response::builder()
+        .status(StatusCode::OK)
+        .body(body)
+        .expect("We should always be able to make response from the success body.")
+}
beacon_node/rest_api/src/macros.rs (new file, 23 lines)
@@ -0,0 +1,23 @@
+macro_rules! result_to_response {
+    ($handler: path) => {
+        |req: Request<Body>| -> Response<Body> {
+            let log = req
+                .extensions()
+                .get::<slog::Logger>()
+                .expect("Our logger should be on req.")
+                .clone();
+            let path = path_from_request(&req);
+            let result = $handler(req);
+            match result {
+                Ok(response) => {
+                    info!(log, "Request successful: {:?}", path);
+                    response
+                }
+                Err(e) => {
+                    info!(log, "Request failure: {:?}", path);
+                    e.into()
+                }
+            }
+        }
+    };
+}
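
Note: `result_to_response!` adapts a fallible `fn(Request) -> APIResult` into the infallible closure `hyper_router` expects. The same pattern in a dependency-free sketch (toy `Req`, `Resp` and `ApiError` types; the real macro additionally pulls the logger out of the request extensions, as shown above):

struct Req;
struct Resp(String);

enum ApiError {
    NotImplemented,
}

impl Into<Resp> for ApiError {
    fn into(self) -> Resp {
        Resp("501 Not Implemented".to_string())
    }
}

// Adapt `fn(Req) -> Result<Resp, ApiError>` into a `Fn(Req) -> Resp` closure,
// routing the error through the `Into` conversion.
macro_rules! result_to_response {
    ($handler: path) => {
        |req: Req| -> Resp {
            match $handler(req) {
                Ok(response) => response,
                Err(e) => e.into(),
            }
        }
    };
}

fn get_version(_req: Req) -> Result<Resp, ApiError> {
    Ok(Resp("toy version string".to_string()))
}

fn main() {
    let handler = result_to_response!(get_version);
    println!("{}", handler(Req).0);
}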
@@ -12,18 +12,12 @@ eth2-libp2p = { path = "../eth2-libp2p" }
 version = { path = "../version" }
 types = { path = "../../eth2/types" }
 eth2_ssz = { path = "../../eth2/utils/ssz" }
-slot_clock = { path = "../../eth2/utils/slot_clock" }
 protos = { path = "../../protos" }
 grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] }
-protobuf = "2.0.2"
 clap = "2.32.0"
-store = { path = "../store" }
-dirs = "1.0.3"
 futures = "0.1.23"
 serde = "1.0"
 serde_derive = "1.0"
 slog = { version = "^2.2.3" , features = ["max_level_trace"] }
-slog-term = "^2.4.0"
-slog-async = "^2.3.0"
 tokio = "0.1.17"
 exit-future = "0.1.4"
@ -19,7 +19,7 @@ use types::Attestation;
 #[derive(Clone)]
 pub struct AttestationServiceInstance<T: BeaconChainTypes> {
     pub chain: Arc<BeaconChain<T>>,
-    pub network_chan: mpsc::UnboundedSender<NetworkMessage>,
+    pub network_chan: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
     pub log: slog::Logger,
 }

@ -43,7 +43,7 @@ impl<T: BeaconChainTypes> AttestationService for AttestationServiceInstance<T> {
     let state = &self.chain.current_state();

     // Start by performing some checks
-    // Check that the AttestionData is for the current slot (otherwise it will not be valid)
+    // Check that the AttestationData is for the current slot (otherwise it will not be valid)
     if slot_requested > state.slot.as_u64() {
         let log_clone = self.log.clone();
         let f = sink
@ -19,7 +19,7 @@ use types::{BeaconBlock, Signature, Slot};
 #[derive(Clone)]
 pub struct BeaconBlockServiceInstance<T: BeaconChainTypes> {
     pub chain: Arc<BeaconChain<T>>,
-    pub network_chan: mpsc::UnboundedSender<NetworkMessage>,
+    pub network_chan: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
     pub log: Logger,
 }

@ -25,7 +25,7 @@ use tokio::sync::mpsc;
 pub fn start_server<T: BeaconChainTypes + Clone + 'static>(
     config: &RPCConfig,
     executor: &TaskExecutor,
-    network_chan: mpsc::UnboundedSender<NetworkMessage>,
+    network_chan: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
     beacon_chain: Arc<BeaconChain<T>>,
     log: &slog::Logger,
 ) -> exit_future::Signal {
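The same one-line change repeats across these three gRPC hunks: `NetworkMessage` gained a spec type parameter, so every sender handle must spell out `T::EthSpec`. A self-contained sketch (with hypothetical stand-ins for the real `EthSpec` trait and `NetworkMessage` enum) of why the signatures must change together:

```
use std::marker::PhantomData;
use std::sync::mpsc;

// Hypothetical stand-ins; the real trait lives in `types` and the real
// message enum in the `network` crate.
trait EthSpec {}
struct MinimalEthSpec;
impl EthSpec for MinimalEthSpec {}

enum NetworkMessage<E: EthSpec> {
    Publish(PhantomData<E>),
}

// Once the message is generic, the channel (and every struct or function
// holding a sender) must name the same parameter -- hence the
// `mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>` fields above.
fn spawn_service<E: EthSpec>(chan: mpsc::Sender<NetworkMessage<E>>) {
    chan.send(NetworkMessage::Publish(PhantomData)).ok();
}

fn main() {
    let (tx, rx) = mpsc::channel::<NetworkMessage<MinimalEthSpec>>();
    spawn_service(tx);
    assert!(rx.recv().is_ok());
}
```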
@ -12,6 +12,7 @@ pub const DEFAULT_DATA_DIR: &str = ".lighthouse";

 pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml";
 pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml";
+pub const TESTNET_CONFIG_FILENAME: &str = "testnet.toml";

 fn main() {
     // debugging output for libp2p and external crates
@ -21,7 +22,9 @@ fn main() {
         .version(version::version().as_str())
         .author("Sigma Prime <contact@sigmaprime.io>")
         .about("Eth 2.0 Client")
-        // file system related arguments
+        /*
+         * Configuration directory locations.
+         */
         .arg(
             Arg::with_name("datadir")
                 .long("datadir")
@ -36,7 +39,16 @@ fn main() {
                 .help("File path where output will be written.")
                 .takes_value(true),
         )
-        // network related arguments
+        .arg(
+            Arg::with_name("network-dir")
+                .long("network-dir")
+                .value_name("NETWORK-DIR")
+                .help("Data directory for network keys.")
+                .takes_value(true)
+        )
+        /*
+         * Network parameters.
+         */
         .arg(
             Arg::with_name("listen-address")
                 .long("listen-address")
@ -79,7 +91,9 @@ fn main() {
                 .help("The IP address to broadcast to other peers on how to reach this node.")
                 .takes_value(true),
         )
-        // rpc related arguments
+        /*
+         * gRPC parameters.
+         */
         .arg(
             Arg::with_name("rpc")
                 .long("rpc")
@ -100,7 +114,9 @@ fn main() {
                 .help("Listen port for RPC endpoint.")
                 .takes_value(true),
         )
-        // HTTP related arguments
+        /*
+         * HTTP server parameters.
+         */
         .arg(
             Arg::with_name("http")
                 .long("http")
@ -120,6 +136,31 @@ fn main() {
                 .help("Listen port for the HTTP server.")
                 .takes_value(true),
         )
+        .arg(
+            Arg::with_name("api")
+                .long("api")
+                .value_name("API")
+                .help("Enable the RESTful HTTP API server.")
+                .takes_value(false),
+        )
+        .arg(
+            Arg::with_name("api-address")
+                .long("api-address")
+                .value_name("APIADDRESS")
+                .help("Set the listen address for the RESTful HTTP API server.")
+                .takes_value(true),
+        )
+        .arg(
+            Arg::with_name("api-port")
+                .long("api-port")
+                .value_name("APIPORT")
+                .help("Set the listen TCP port for the RESTful HTTP API server.")
+                .takes_value(true),
+        )
+
+        /*
+         * Database parameters.
+         */
         .arg(
             Arg::with_name("db")
                 .long("db")
@ -129,12 +170,17 @@ fn main() {
                 .possible_values(&["disk", "memory"])
                 .default_value("memory"),
         )
+        /*
+         * Specification/testnet params.
+         */
         .arg(
-            Arg::with_name("spec-constants")
-                .long("spec-constants")
+            Arg::with_name("default-spec")
+                .long("default-spec")
                 .value_name("TITLE")
-                .short("s")
-                .help("The title of the spec constants for chain config.")
+                .short("default-spec")
+                .help("Specifies the default eth2 spec to be used. Overridden by any spec loaded
+                    from disk. A spec will be written to disk after this flag is used, so it is
+                    primarily used for creating eth2 spec files.")
                 .takes_value(true)
                 .possible_values(&["mainnet", "minimal"])
                 .default_value("minimal"),
@ -145,6 +191,19 @@ fn main() {
                 .short("r")
                 .help("When present, genesis will be within 30 minutes prior. Only for testing"),
         )
+        /*
+         * Logging.
+         */
+        .arg(
+            Arg::with_name("debug-level")
+                .long("debug-level")
+                .value_name("LEVEL")
+                .short("s")
+                .help("The title of the spec constants for chain config.")
+                .takes_value(true)
+                .possible_values(&["info", "debug", "trace", "warn", "error", "crit"])
+                .default_value("info"),
+        )
         .arg(
             Arg::with_name("verbosity")
                 .short("v")
@ -156,9 +215,20 @@ fn main() {

     // build the initial logger
     let decorator = slog_term::TermDecorator::new().build();
-    let drain = slog_term::CompactFormat::new(decorator).build().fuse();
+    let decorator = logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH);
+    let drain = slog_term::FullFormat::new(decorator).build().fuse();
     let drain = slog_async::Async::new(drain).build();

+    let drain = match matches.value_of("debug-level") {
+        Some("info") => drain.filter_level(Level::Info),
+        Some("debug") => drain.filter_level(Level::Debug),
+        Some("trace") => drain.filter_level(Level::Trace),
+        Some("warn") => drain.filter_level(Level::Warning),
+        Some("error") => drain.filter_level(Level::Error),
+        Some("crit") => drain.filter_level(Level::Critical),
+        _ => unreachable!("guarded by clap"),
+    };
+
     let drain = match matches.occurrences_of("verbosity") {
         0 => drain.filter_level(Level::Info),
         1 => drain.filter_level(Level::Debug),
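For reference, the logger pipeline the new `--debug-level` flag feeds into can be reproduced standalone. A minimal sketch, assuming `slog = "2"`, `slog-term = "2"` and `slog-async = "2"` as dependencies (the `AlignedTermDecorator` wrapper from the in-tree `logging` crate is omitted):

```
use slog::{info, o, Drain, Level};

fn main() {
    let decorator = slog_term::TermDecorator::new().build();
    let drain = slog_term::FullFormat::new(decorator).build().fuse();
    let drain = slog_async::Async::new(drain).build();
    // Equivalent of passing `--debug-level info`: events below Info are
    // discarded before they reach the terminal.
    let drain = drain.filter_level(Level::Info);
    let log = slog::Logger::root(drain.fuse(), o!());
    info!(log, "BeaconNode init"; "spec_constants" => "minimal");
}
```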
@ -183,7 +253,7 @@ fn main() {
             }
         };
         default_dir.push(DEFAULT_DATA_DIR);
-        PathBuf::from(default_dir)
+        default_dir
     }
 };

@ -237,7 +307,7 @@ fn main() {
 let mut eth2_config = match read_from_file::<Eth2Config>(eth2_config_path.clone()) {
     Ok(Some(c)) => c,
     Ok(None) => {
-        let default = match matches.value_of("spec-constants") {
+        let default = match matches.value_of("default-spec") {
             Some("mainnet") => Eth2Config::mainnet(),
             Some("minimal") => Eth2Config::minimal(),
             _ => unreachable!(), // Guarded by slog.
@ -263,6 +333,7 @@ fn main() {
     }
 };

+// Start the node using a `tokio` executor.
 match run::run_beacon_node(client_config, eth2_config, &log) {
     Ok(_) => {}
     Err(e) => crit!(log, "Beacon node failed to start"; "reason" => format!("{:}", e)),
@ -15,6 +15,12 @@ use tokio::runtime::TaskExecutor;
 use tokio_timer::clock::Clock;
 use types::{MainnetEthSpec, MinimalEthSpec};

+/// Reads the configuration and initializes a `BeaconChain` with the required types and parameters.
+///
+/// Spawns an executor which performs syncing, networking, block production, etc.
+///
+/// Blocks the current thread, returning after the `BeaconChain` has exited or a `Ctrl+C`
+/// signal.
 pub fn run_beacon_node(
     client_config: ClientConfig,
     eth2_config: Eth2Config,
@ -38,19 +44,20 @@ pub fn run_beacon_node(

     warn!(
         log,
-        "This software is EXPERIMENTAL and provides no guarantees or warranties."
+        "Ethereum 2.0 is pre-release. This software is experimental."
     );

     info!(
         log,
-        "Starting beacon node";
+        "BeaconNode init";
         "p2p_listen_address" => format!("{:?}", &other_client_config.network.listen_address),
         "data_dir" => format!("{:?}", other_client_config.data_dir()),
+        "network_dir" => format!("{:?}", other_client_config.network.network_dir),
         "spec_constants" => &spec_constants,
         "db_type" => &other_client_config.db_type,
     );

-    let result = match (db_type.as_str(), spec_constants.as_str()) {
+    match (db_type.as_str(), spec_constants.as_str()) {
         ("disk", "minimal") => run::<ClientType<DiskStore, MinimalEthSpec>>(
             &db_path,
             client_config,
@ -87,12 +94,11 @@ pub fn run_beacon_node(
             error!(log, "Unknown runtime configuration"; "spec_constants" => spec, "db_type" => db_type);
             Err("Unknown specification and/or db_type.".into())
         }
-    };
-
-    result
+    }
 }

-pub fn run<T>(
+/// Performs the type-generic parts of launching a `BeaconChain`.
+fn run<T>(
     db_path: &Path,
     client_config: ClientConfig,
     eth2_config: Eth2Config,
@ -116,7 +122,7 @@ where
             ctrlc_send.send(()).expect("Error sending ctrl-c message");
         }
     })
-    .map_err(|e| format!("Could not set ctrlc hander: {:?}", e))?;
+    .map_err(|e| format!("Could not set ctrlc handler: {:?}", e))?;

     let (exit_signal, exit) = exit_future::signal();

@ -8,9 +8,6 @@ edition = "2018"
 tempfile = "3"

 [dependencies]
-blake2-rfc = "0.2.18"
-bls = { path = "../../eth2/utils/bls" }
-bytes = "0.4.10"
 db-key = "0.0.5"
 leveldb = "0.8.4"
 parking_lot = "0.7"
@ -1,8 +1,11 @@
 use super::*;
 use ssz::{Decode, DecodeError};

-fn get_block_bytes<T: Store>(store: &T, root: Hash256) -> Result<Option<Vec<u8>>, Error> {
-    store.get_bytes(BeaconBlock::db_column().into(), &root[..])
+fn get_block_bytes<T: Store, E: EthSpec>(
+    store: &T,
+    root: Hash256,
+) -> Result<Option<Vec<u8>>, Error> {
+    store.get_bytes(BeaconBlock::<E>::db_column().into(), &root[..])
 }

 fn read_slot_from_block_bytes(bytes: &[u8]) -> Result<Slot, DecodeError> {
@ -11,7 +14,7 @@ fn read_slot_from_block_bytes(bytes: &[u8]) -> Result<Slot, DecodeError> {
     Slot::from_ssz_bytes(&bytes[0..end])
 }

-fn read_previous_block_root_from_block_bytes(bytes: &[u8]) -> Result<Hash256, DecodeError> {
+fn read_parent_root_from_block_bytes(bytes: &[u8]) -> Result<Hash256, DecodeError> {
     let previous_bytes = Slot::ssz_fixed_len();
     let slice = bytes
         .get(previous_bytes..previous_bytes + Hash256::ssz_fixed_len())
@ -20,24 +23,26 @@ fn read_previous_block_root_from_block_bytes(bytes: &[u8]) -> Result<Hash256, De
     Hash256::from_ssz_bytes(slice)
 }

-pub fn get_block_at_preceeding_slot<T: Store>(
+pub fn get_block_at_preceeding_slot<T: Store, E: EthSpec>(
     store: &T,
     slot: Slot,
     start_root: Hash256,
-) -> Result<Option<(Hash256, BeaconBlock)>, Error> {
-    Ok(match get_at_preceeding_slot(store, slot, start_root)? {
-        Some((hash, bytes)) => Some((hash, BeaconBlock::from_ssz_bytes(&bytes)?)),
-        None => None,
-    })
+) -> Result<Option<(Hash256, BeaconBlock<E>)>, Error> {
+    Ok(
+        match get_at_preceeding_slot::<_, E>(store, slot, start_root)? {
+            Some((hash, bytes)) => Some((hash, BeaconBlock::<E>::from_ssz_bytes(&bytes)?)),
+            None => None,
+        },
+    )
 }

-fn get_at_preceeding_slot<T: Store>(
+fn get_at_preceeding_slot<T: Store, E: EthSpec>(
     store: &T,
     slot: Slot,
     mut root: Hash256,
 ) -> Result<Option<(Hash256, Vec<u8>)>, Error> {
     loop {
-        if let Some(bytes) = get_block_bytes(store, root)? {
+        if let Some(bytes) = get_block_bytes::<_, E>(store, root)? {
             let this_slot = read_slot_from_block_bytes(&bytes)?;

             if this_slot == slot {
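The two readers above exploit the SSZ layout: `slot` (a fixed-length `u64`) and `parent_root` (a fixed-length 32-byte hash) sit at known offsets at the front of an encoded block, so they can be peeked without decoding the whole `BeaconBlock`. A dependency-free sketch of the same trick (offsets as in this file; `8` matches `Slot::ssz_fixed_len()`):

```
use std::convert::TryInto;

// Peek the slot: the first 8 bytes are the little-endian u64 slot.
fn read_slot(bytes: &[u8]) -> Option<u64> {
    bytes.get(0..8).map(|b| u64::from_le_bytes(b.try_into().unwrap()))
}

// Peek the parent root: the 32 bytes immediately after the slot.
fn read_parent_root(bytes: &[u8]) -> Option<[u8; 32]> {
    bytes.get(8..40).map(|b| b.try_into().unwrap())
}

fn main() {
    let mut bytes = vec![0u8; 48]; // a truncated, hand-rolled "block"
    bytes[0..8].copy_from_slice(&42u64.to_le_bytes());
    bytes[8..40].copy_from_slice(&[7u8; 32]);

    assert_eq!(read_slot(&bytes), Some(42));
    assert_eq!(read_parent_root(&bytes), Some([7u8; 32]));
}
```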
@ -45,7 +50,7 @@ fn get_at_preceeding_slot<T: Store>(
         } else if this_slot < slot {
             break Ok(None);
         } else {
-            root = read_previous_block_root_from_block_bytes(&bytes)?;
+            root = read_parent_root_from_block_bytes(&bytes)?;
         }
     } else {
         break Ok(None);
@ -59,6 +64,8 @@ mod tests {
     use ssz::Encode;
     use tree_hash::TreeHash;

+    type BeaconBlock = types::BeaconBlock<MinimalEthSpec>;
+
     #[test]
     fn read_slot() {
         let spec = MinimalEthSpec::default_spec();
@ -84,17 +91,14 @@ mod tests {
     }

     #[test]
-    fn read_previous_block_root() {
+    fn read_parent_root() {
         let spec = MinimalEthSpec::default_spec();

         let test_root = |root: Hash256| {
             let mut block = BeaconBlock::empty(&spec);
-            block.previous_block_root = root;
+            block.parent_root = root;
             let bytes = block.as_ssz_bytes();
-            assert_eq!(
-                read_previous_block_root_from_block_bytes(&bytes).unwrap(),
-                root
-            );
+            assert_eq!(read_parent_root_from_block_bytes(&bytes).unwrap(), root);
         };

         test_root(Hash256::random());
@ -114,7 +118,7 @@ mod tests {
         block.slot = Slot::from(*slot);

         if i > 0 {
-            block.previous_block_root = blocks_and_roots[i - 1].0;
+            block.parent_root = blocks_and_roots[i - 1].0;
         }

         let root = Hash256::from_slice(&block.tree_hash_root());
@ -177,14 +181,14 @@ mod tests {
         // Slot that doesn't exist
         let (source_root, _source_block) = &blocks_and_roots[3];
         assert!(store
-            .get_block_at_preceeding_slot(*source_root, Slot::new(3))
+            .get_block_at_preceeding_slot::<MinimalEthSpec>(*source_root, Slot::new(3))
             .unwrap()
             .is_none());

         // Slot too high
         let (source_root, _source_block) = &blocks_and_roots[3];
         assert!(store
-            .get_block_at_preceeding_slot(*source_root, Slot::new(3))
+            .get_block_at_preceeding_slot::<MinimalEthSpec>(*source_root, Slot::new(3))
             .unwrap()
             .is_none());
     }
@ -1,198 +0,0 @@
extern crate rocksdb;

use super::{ClientDB, DBError, DBValue};
use rocksdb::Error as RocksError;
use rocksdb::{Options, DB};
use std::fs;
use std::path::Path;

/// A on-disk database which implements the ClientDB trait.
///
/// This implementation uses RocksDB with default options.
pub struct DiskStore {
    db: DB,
}

impl DiskStore {
    /// Open the RocksDB database, optionally supplying columns if required.
    ///
    /// The RocksDB database will be contained in a directory titled
    /// "database" in the supplied path.
    ///
    /// # Panics
    ///
    /// Panics if the database is unable to be created.
    pub fn open(path: &Path, columns: Option<&[&str]>) -> Self {
        // Rocks options.
        let mut options = Options::default();
        options.create_if_missing(true);

        // Ensure the path exists.
        fs::create_dir_all(&path).unwrap_or_else(|_| panic!("Unable to create {:?}", &path));
        let db_path = path.join("database");

        let columns = columns.unwrap_or(&COLUMNS);

        if db_path.exists() {
            Self {
                db: DB::open_cf(&options, db_path, &COLUMNS)
                    .expect("Unable to open local database"),
            }
        } else {
            let mut db = Self {
                db: DB::open(&options, db_path).expect("Unable to open local database"),
            };

            for cf in columns {
                db.create_col(cf).unwrap();
            }

            db
        }
    }

    /// Create a RocksDB column family. Corresponds to the
    /// `create_cf()` function on the RocksDB API.
    #[allow(dead_code)]
    fn create_col(&mut self, col: &str) -> Result<(), DBError> {
        match self.db.create_cf(col, &Options::default()) {
            Err(e) => Err(e.into()),
            Ok(_) => Ok(()),
        }
    }
}

impl From<RocksError> for DBError {
    fn from(e: RocksError) -> Self {
        Self {
            message: e.to_string(),
        }
    }
}

impl ClientDB for DiskStore {
    /// Get the value for some key on some column.
    ///
    /// Corresponds to the `get_cf()` method on the RocksDB API.
    /// Will attempt to get the `ColumnFamily` and return an Err
    /// if it fails.
    fn get(&self, col: &str, key: &[u8]) -> Result<Option<DBValue>, DBError> {
        match self.db.cf_handle(col) {
            None => Err(DBError {
                message: "Unknown column".to_string(),
            }),
            Some(handle) => match self.db.get_cf(handle, key)? {
                None => Ok(None),
                Some(db_vec) => Ok(Some(DBValue::from(&*db_vec))),
            },
        }
    }

    /// Set some value for some key on some column.
    ///
    /// Corresponds to the `cf_handle()` method on the RocksDB API.
    /// Will attempt to get the `ColumnFamily` and return an Err
    /// if it fails.
    fn put(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), DBError> {
        match self.db.cf_handle(col) {
            None => Err(DBError {
                message: "Unknown column".to_string(),
            }),
            Some(handle) => self.db.put_cf(handle, key, val).map_err(|e| e.into()),
        }
    }

    /// Return true if some key exists in some column.
    fn exists(&self, col: &str, key: &[u8]) -> Result<bool, DBError> {
        /*
         * I'm not sure if this is the correct way to read if some
         * block exists. Naively I would expect this to unncessarily
         * copy some data, but I could be wrong.
         */
        match self.db.cf_handle(col) {
            None => Err(DBError {
                message: "Unknown column".to_string(),
            }),
            Some(handle) => Ok(self.db.get_cf(handle, key)?.is_some()),
        }
    }

    /// Delete the value for some key on some column.
    ///
    /// Corresponds to the `delete_cf()` method on the RocksDB API.
    /// Will attempt to get the `ColumnFamily` and return an Err
    /// if it fails.
    fn delete(&self, col: &str, key: &[u8]) -> Result<(), DBError> {
        match self.db.cf_handle(col) {
            None => Err(DBError {
                message: "Unknown column".to_string(),
            }),
            Some(handle) => {
                self.db.delete_cf(handle, key)?;
                Ok(())
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::super::ClientDB;
    use super::*;
    use std::sync::Arc;
    use std::{env, fs, thread};

    #[test]
    #[ignore]
    fn test_rocksdb_can_use_db() {
        let pwd = env::current_dir().unwrap();
        let path = pwd.join("testdb_please_remove");
        let _ = fs::remove_dir_all(&path);
        fs::create_dir_all(&path).unwrap();

        let col_name: &str = "TestColumn";
        let column_families = vec![col_name];

        let mut db = DiskStore::open(&path, None);

        for cf in column_families {
            db.create_col(&cf).unwrap();
        }

        let db = Arc::new(db);

        let thread_count = 10;
        let write_count = 10;

        // We're execting the product of these numbers to fit in one byte.
        assert!(thread_count * write_count <= 255);

        let mut handles = vec![];
        for t in 0..thread_count {
            let wc = write_count;
            let db = db.clone();
            let col = col_name.clone();
            let handle = thread::spawn(move || {
                for w in 0..wc {
                    let key = (t * w) as u8;
                    let val = 42;
                    db.put(&col, &vec![key], &vec![val]).unwrap();
                }
            });
            handles.push(handle);
        }

        for handle in handles {
            handle.join().unwrap();
        }

        for t in 0..thread_count {
            for w in 0..write_count {
                let key = (t * w) as u8;
                let val = db.get(&col_name, &vec![key]).unwrap().unwrap();
                assert_eq!(vec![42], val);
            }
        }
        fs::remove_dir_all(&path).unwrap();
    }
}
@ -3,7 +3,7 @@ use ssz::{Decode, Encode};

 mod beacon_state;

-impl StoreItem for BeaconBlock {
+impl<T: EthSpec> StoreItem for BeaconBlock<T> {
     fn db_column() -> DBColumn {
         DBColumn::BeaconBlock
     }
@ -3,6 +3,24 @@ use std::borrow::Cow;
 use std::sync::Arc;
 use types::{BeaconBlock, BeaconState, BeaconStateError, EthSpec, Hash256, Slot};

+/// Implemented for types that have ancestors (e.g., blocks, states) that may be iterated over.
+pub trait AncestorIter<U: Store, I: Iterator> {
+    /// Returns an iterator over the roots of the ancestors of `self`.
+    fn try_iter_ancestor_roots(&self, store: Arc<U>) -> Option<I>;
+}
+
+impl<'a, U: Store, E: EthSpec> AncestorIter<U, BestBlockRootsIterator<'a, E, U>>
+    for BeaconBlock<E>
+{
+    /// Iterates across all the prior block roots of `self`, starting at the most recent and ending
+    /// at genesis.
+    fn try_iter_ancestor_roots(&self, store: Arc<U>) -> Option<BestBlockRootsIterator<'a, E, U>> {
+        let state = store.get::<BeaconState<E>>(&self.state_root).ok()??;
+
+        Some(BestBlockRootsIterator::owned(store, state, self.slot))
+    }
+}
+
 #[derive(Clone)]
 pub struct StateRootsIterator<'a, T: EthSpec, U> {
     store: Arc<U>,
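A quick sketch of the shape the new `AncestorIter` trait gives callers. The stand-in store below is a plain `HashMap` (hypothetical; the real `Store` and `BestBlockRootsIterator` live in this crate), but the walk-backwards-by-parent contract is the same:

```
use std::collections::HashMap;
use std::sync::Arc;

// Hypothetical stand-ins: roots are u64s and the "store" maps each root to
// a block holding its parent root, if any.
struct Block {
    parent: Option<u64>,
}

struct AncestorRoots {
    store: Arc<HashMap<u64, Block>>,
    next: Option<u64>,
}

impl Iterator for AncestorRoots {
    type Item = u64;

    fn next(&mut self) -> Option<u64> {
        let root = self.next?;
        // Ends cleanly at "genesis" (no parent) or on a missing store entry.
        self.next = self.store.get(&root).and_then(|b| b.parent);
        Some(root)
    }
}

trait AncestorIter<I: Iterator> {
    fn try_iter_ancestor_roots(&self, store: Arc<HashMap<u64, Block>>) -> Option<I>;
}

impl AncestorIter<AncestorRoots> for Block {
    fn try_iter_ancestor_roots(&self, store: Arc<HashMap<u64, Block>>) -> Option<AncestorRoots> {
        Some(AncestorRoots { store, next: self.parent })
    }
}

fn main() {
    let mut map = HashMap::new();
    map.insert(1, Block { parent: None });
    map.insert(2, Block { parent: Some(1) });
    let tip = Block { parent: Some(2) };

    let roots: Vec<u64> = tip.try_iter_ancestor_roots(Arc::new(map)).unwrap().collect();
    assert_eq!(roots, vec![2, 1]);
}
```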
@ -82,7 +100,7 @@ impl<'a, T: EthSpec, U: Store> BlockIterator<'a, T, U> {
 }

 impl<'a, T: EthSpec, U: Store> Iterator for BlockIterator<'a, T, U> {
-    type Item = BeaconBlock;
+    type Item = BeaconBlock<T>;

     fn next(&mut self) -> Option<Self::Item> {
         let (root, _slot) = self.roots.next()?;
@ -93,8 +111,8 @@ impl<'a, T: EthSpec, U: Store> Iterator for BlockIterator<'a, T, U> {
 /// Iterates backwards through block roots. If any specified slot is unable to be retrieved, the
 /// iterator returns `None` indefinitely.
 ///
-/// Uses the `latest_block_roots` field of `BeaconState` to as the source of block roots and will
-/// perform a lookup on the `Store` for a prior `BeaconState` if `latest_block_roots` has been
+/// Uses the `block_roots` field of `BeaconState` to as the source of block roots and will
+/// perform a lookup on the `Store` for a prior `BeaconState` if `block_roots` has been
 /// exhausted.
 ///
 /// Returns `None` for roots prior to genesis or when there is an error reading from `Store`.
@ -175,8 +193,8 @@ impl<'a, T: EthSpec, U: Store> Iterator for BlockRootsIterator<'a, T, U> {
 ///
 /// This is distinct from `BestBlockRootsIterator`.
 ///
-/// Uses the `latest_block_roots` field of `BeaconState` to as the source of block roots and will
-/// perform a lookup on the `Store` for a prior `BeaconState` if `latest_block_roots` has been
+/// Uses the `block_roots` field of `BeaconState` to as the source of block roots and will
+/// perform a lookup on the `Store` for a prior `BeaconState` if `block_roots` has been
 /// exhausted.
 ///
 /// Returns `None` for roots prior to genesis or when there is an error reading from `Store`.
@ -287,17 +305,17 @@ mod test {
         state_a.slot = Slot::from(slots_per_historical_root);
         state_b.slot = Slot::from(slots_per_historical_root * 2);

-        let mut hashes = (0..).into_iter().map(|i| Hash256::from(i));
+        let mut hashes = (0..).into_iter().map(|i| Hash256::from_low_u64_be(i));

-        for root in &mut state_a.latest_block_roots[..] {
+        for root in &mut state_a.block_roots[..] {
             *root = hashes.next().unwrap()
         }
-        for root in &mut state_b.latest_block_roots[..] {
+        for root in &mut state_b.block_roots[..] {
             *root = hashes.next().unwrap()
         }

         let state_a_root = hashes.next().unwrap();
-        state_b.latest_state_roots[0] = state_a_root;
+        state_b.state_roots[0] = state_a_root;
         store.put(&state_a_root, &state_a).unwrap();

         let iter = BlockRootsIterator::new(store.clone(), &state_b, state_b.slot - 1);
@ -315,7 +333,7 @@ mod test {
         assert_eq!(collected.len(), expected_len);

         for i in 0..expected_len {
-            assert_eq!(collected[i].0, Hash256::from(i as u64));
+            assert_eq!(collected[i].0, Hash256::from_low_u64_be(i as u64));
         }
     }

@ -330,17 +348,17 @@ mod test {
         state_a.slot = Slot::from(slots_per_historical_root);
         state_b.slot = Slot::from(slots_per_historical_root * 2);

-        let mut hashes = (0..).into_iter().map(|i| Hash256::from(i));
+        let mut hashes = (0..).into_iter().map(|i| Hash256::from_low_u64_be(i));

-        for root in &mut state_a.latest_block_roots[..] {
+        for root in &mut state_a.block_roots[..] {
             *root = hashes.next().unwrap()
         }
-        for root in &mut state_b.latest_block_roots[..] {
+        for root in &mut state_b.block_roots[..] {
             *root = hashes.next().unwrap()
         }

         let state_a_root = hashes.next().unwrap();
-        state_b.latest_state_roots[0] = state_a_root;
+        state_b.state_roots[0] = state_a_root;
         store.put(&state_a_root, &state_a).unwrap();

         let iter = BestBlockRootsIterator::new(store.clone(), &state_b, state_b.slot);
@ -358,7 +376,7 @@ mod test {
         assert_eq!(collected.len(), expected_len);

         for i in 0..expected_len {
-            assert_eq!(collected[i].0, Hash256::from(i as u64));
+            assert_eq!(collected[i].0, Hash256::from_low_u64_be(i as u64));
         }
     }

@ -373,7 +391,7 @@ mod test {
         state_a.slot = Slot::from(slots_per_historical_root);
         state_b.slot = Slot::from(slots_per_historical_root * 2);

-        let mut hashes = (0..).into_iter().map(|i| Hash256::from(i));
+        let mut hashes = (0..).into_iter().map(|i| Hash256::from_low_u64_be(i));

         for slot in 0..slots_per_historical_root {
             state_a
@ -386,8 +404,8 @@ mod test {
                 .expect(&format!("should set state_b slot {}", slot));
         }

-        let state_a_root = Hash256::from(slots_per_historical_root as u64);
-        let state_b_root = Hash256::from(slots_per_historical_root as u64 * 2);
+        let state_a_root = Hash256::from_low_u64_be(slots_per_historical_root as u64);
+        let state_b_root = Hash256::from_low_u64_be(slots_per_historical_root as u64 * 2);

         store.put(&state_a_root, &state_a).unwrap();
         store.put(&state_b_root, &state_b).unwrap();
@ -411,7 +429,12 @@ mod test {

             assert_eq!(slot, i as u64, "slot mismatch at {}: {} vs {}", i, slot, i);

-            assert_eq!(hash, Hash256::from(i as u64), "hash mismatch at {}", i);
+            assert_eq!(
+                hash,
+                Hash256::from_low_u64_be(i as u64),
+                "hash mismatch at {}",
+                i
+            );
         }
     }
 }
@ -52,12 +52,12 @@ pub trait Store: Sync + Send + Sized {
     ///
     /// Returns `None` if no parent block exists at that slot, or if `slot` is greater than the
     /// slot of `start_block_root`.
-    fn get_block_at_preceeding_slot(
+    fn get_block_at_preceeding_slot<E: EthSpec>(
         &self,
         start_block_root: Hash256,
         slot: Slot,
-    ) -> Result<Option<(Hash256, BeaconBlock)>, Error> {
-        block_at_slot::get_block_at_preceeding_slot(self, slot, start_block_root)
+    ) -> Result<Option<(Hash256, BeaconBlock<E>)>, Error> {
+        block_at_slot::get_block_at_preceeding_slot::<_, E>(self, slot, start_block_root)
     }

     /// Retrieve some bytes in `column` with `key`.
@ -1,37 +0,0 @@
use super::*;

pub type Vec<u8> = Vec<u8>;

pub trait Store: Sync + Send + Sized {
    fn put(&self, key: &Hash256, item: &impl StoreItem) -> Result<(), Error> {
        item.db_put(self, key)
    }

    fn get<I: StoreItem>(&self, key: &Hash256) -> Result<Option<I>, Error> {
        I::db_get(self, key)
    }

    fn exists<I: StoreItem>(&self, key: &Hash256) -> Result<bool, Error> {
        I::db_exists(self, key)
    }

    fn delete<I: StoreItem>(&self, key: &Hash256) -> Result<(), Error> {
        I::db_delete(self, key)
    }

    fn get_block_at_preceeding_slot(
        &self,
        start_block_root: Hash256,
        slot: Slot,
    ) -> Result<Option<(Hash256, BeaconBlock)>, Error> {
        block_at_slot::get_block_at_preceeding_slot(self, slot, start_block_root)
    }

    fn get_bytes(&self, col: &str, key: &[u8]) -> Result<Option<Vec<u8>>, Error>;

    fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error>;

    fn key_exists(&self, col: &str, key: &[u8]) -> Result<bool, Error>;

    fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error>;
}
69 docs/README.md (new file)
@ -0,0 +1,69 @@
# Lighthouse Documentation

_Lighthouse is a work-in-progress. Instructions are provided for running the
client, however these instructions are designed for developers and researchers
working on the project. We do not (yet) provide user-facing functionality._

## Introduction

- [Overview of Ethereum 2.0](serenity.md)
- [Development Environment Setup](env.md)

For client implementers looking to inter-op, see the [Inter-Op
Docs](interop.md).

## Command-line Interface

With the [development environment](env.md) configured, run `cargo build --all
--release` (this can take several minutes on the first build). Then,
navigate to the `target/release/` directory and read the CLI documentation
using:

```
$ ./beacon_node -h
```

The main [`README.md`](../README.md#simple-local-testnet) provides instructions
for running a small, local testnet.

## REST API

The beacon node provides a RESTful HTTP API which serves information about the
Beacon Chain, the P2P network and more.

This API is documented in the [`rest_oapi.yaml`](rest_oapi.yaml) Swagger YAML
file. There's an interactive version hosted on
[SwaggerHub](https://app.swaggerhub.com/apis/spble/lighthouse_rest_api/0.1.0).

The implementation of the Swagger API in Lighthouse is incomplete; we do not
(yet) guarantee that all routes are implemented.

## Configuration Files

Lighthouse uses [TOML](https://github.com/toml-lang/toml) files for
configuration. The following binaries read the following config files (they are
generated from defaults if they don't already exist):

- [Beacon Node](/beacon_node)
  - [`~/.lighthouse/beacon_node.toml`](#beacon-nodetoml): the primary
    configuration file for a beacon node.
  - `~/.lighthouse/eth2-spec.toml`: defines chain-specific "constants" that
    define an Ethereum 2.0 network.
- [Validator Client](/validator_client)
  - `~/.lighthouse/validator_client.toml`: the primary configuration file for
    a validator client.
  - `~/.lighthouse/eth2-spec.toml`: defines chain-specific "constants" that
    define an Ethereum 2.0 network.

_Note: default directories are shown; CLI flags can be used to override these
defaults._

#### `beacon-node.toml`

A TOML configuration file that defines the behaviour of the beacon node
runtime.

- Located in the `datadir` (default `~/.lighthouse`) as `beacon-node.toml`.
- Created from defaults if not present.

See the [example](config_examples/beacon-node.toml) for more information.
98 docs/config_examples/beacon-node.toml (new file)
@ -0,0 +1,98 @@
#
# Beacon Node TOML configuration file.
#
# Defines the runtime configuration of a Lighthouse Beacon Node.
#

# The directory where beacon-node specific files will be placed. Includes the
# database and configuration files.
data_dir = ".lighthouse"
# The type of database used. Can be either:
#
# - "disk": LevelDB (almost always desired).
# - "memory": an in-memory hashmap (only used for testing).
db_type = "disk"
# The name of the LevelDB database directory, if any.
db_name = "chain_db"
# If specified, all logs will be written to this file.
log_file = ""
# Defines the Ethereum 2.0 specification set to be used:
#
# - "mainnet": parameters expected to be used for Eth2 mainnet.
# - "minimal": smaller, more efficient parameters used for testing.
spec_constants = "minimal"

#
# The "genesis_state" object defines how the genesis state should be created.
#

# The "RecentGenesis" type assumes that genesis started at the beginning of the
# most-recent 30 minute window (e.g., 08:00, 08:30, 09:00, ...).
[genesis_state]
type = "RecentGenesis"
validator_count = 16

# "Generated" is the same as "RecentGenesis", however allows for manual
# specification of the genesis_time.
#
# [genesis_state]
# type = "Generated"
# validator_count = 16
# genesis_time = 1564620118

# "Yaml" loads a full genesis state from YAML file.
#
# [genesis_state]
# type = "Yaml"
# file = "~/genesis_state.yaml"

#
# P2P networking configuration.
#
[network]
# The directory for storing p2p network related files. E.g., p2p keys, peer
# lists, etc.
network_dir = "/home/paul/.lighthouse/network"
# The address that libp2p should use for incoming connections.
listen_address = "127.0.0.1"
# The port that libp2p should use for incoming connections.
libp2p_port = 9000
# The address that should listen for UDP peer-discovery.
discovery_address = "127.0.0.1"
# The port that should listen for UDP peer-discovery.
discovery_port = 9000
# Maximum number of libp2p peers.
max_peers = 10
# Boot nodes for initial peer discovery.
boot_nodes = []
# The client version, may be customized.
client_version = "Lighthouse/v0.1.0-unstable/x86_64-linux"
# A list of libp2p topics. Purpose unknown.
topics = []

#
# gRPC configuration. To be removed.
#
[rpc]
enabled = false
listen_address = "127.0.0.1"
port = 5051

#
# Legacy HTTP server configuration. To be removed.
#
[http]
enabled = false
listen_address = "127.0.0.1"
listen_port = "5052"

#
# RESTful HTTP API server configuration.
#
[rest_api]
# Set to `true` to enable the REST API server.
enabled = true
# The listen address for the REST API server.
listen_address = "127.0.0.1"
# The listen port for the REST API server.
port = 1248
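The `[genesis_state]` table above is naturally modelled as an internally-tagged enum. A hypothetical sketch of how such a config could be deserialized (field names are taken from the file above; the client's real config types are not shown here), assuming `serde` with its derive feature and the `toml` crate as dependencies:

```
use serde::Deserialize;

// Hypothetical mirror of the `[genesis_state]` table; `type` selects the
// variant, exactly as in the commented examples above.
#[derive(Deserialize, Debug)]
#[serde(tag = "type")]
enum GenesisState {
    RecentGenesis { validator_count: usize },
    Generated { validator_count: usize, genesis_time: u64 },
    Yaml { file: String },
}

#[derive(Deserialize, Debug)]
struct Config {
    genesis_state: GenesisState,
}

fn main() {
    let raw = r#"
        [genesis_state]
        type = "RecentGenesis"
        validator_count = 16
    "#;
    let config: Config = toml::from_str(raw).unwrap();
    println!("{:?}", config.genesis_state);
}
```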
52 docs/env.md (new file)
@ -0,0 +1,52 @@
# Development Environment Setup

_This document describes how to set up a development environment. It is intended
for software developers and researchers who wish to contribute to development._

Lighthouse is a Rust project and [`cargo`](https://doc.rust-lang.org/cargo/) is
used extensively. As such, you'll need to install Rust in order to build the
project. Generally, Rust is installed using the
[rustup](https://www.rust-lang.org/tools/install) tool-chain manager.

## Steps

A fully-featured development environment can be achieved with the following
steps:

1. Install [rustup](https://rustup.rs/).
1. Use the command `rustup show` to get information about the Rust
   installation. You should see that the active tool-chain is the stable
   version.
   - Updates can be performed using `rustup update`; Lighthouse generally
     requires a recent version of Rust.
1. Install build dependencies (Arch packages are listed here, your
   distribution will likely be similar):
   - `clang`: required by RocksDB.
   - `protobuf`: required for protobuf serialization (gRPC).
   - `cmake`: required for building protobuf.
   - `git-lfs`: the Git extension for [Large File
     Support](https://git-lfs.github.com/) (required for Ethereum Foundation
     test vectors).
1. Clone the repository with submodules: `git clone --recursive
   https://github.com/sigp/lighthouse`. If you've already cloned the repo,
   ensure testing submodules are present: `$ git submodule init; git
   submodule update`
1. Change directory to the root of the repository.
1. Run the test suite with `cargo test --all --release`. The build and test
   process can take several minutes. If you experience any failures on
   `master`, please raise an
   [issue](https://github.com/sigp/lighthouse/issues).

## Notes:

Lighthouse targets Rust `stable` but generally runs on `nightly` too.

### Note for Windows users:

Perl may also be required to build lighthouse. You can install [Strawberry
Perl](http://strawberryperl.com/), or alternatively use a choco install command
`choco install strawberryperl`.

Additionally, the dependency `protoc-grpcio v0.3.1` is reported to have issues
compiling in Windows. You can specify a known working version by editing the
version in `protos/Cargo.toml` to `protoc-grpcio = "<=0.3.0"`.
@ -1,40 +0,0 @@
# Development Environment Setup

A few basic steps are needed to get set up (skip to #5 if you already have Rust
installed):

1. Install [rustup](https://rustup.rs/). It's a toolchain manager for Rust (Linux | macOS | Windows). For installation, download the script with `$ curl -f https://sh.rustup.rs > rustup.sh`, review its content (e.g. `$ less ./rustup.sh`) and run the script `$ ./rustup.sh` (you may need to change the permissions to allow execution, i.e. `$ chmod +x rustup.sh`)
2. (Linux & MacOS) To configure your current shell run: `$ source $HOME/.cargo/env`
3. Use the command `rustup show` to get information about the Rust installation. You should see that the
   active toolchain is the stable version.
4. Run `rustc --version` to check the installation and version of rust.
   - Updates can be performed using `rustup update`.
5. Install build dependencies (Arch packages are listed here, your distribution will likely be similar):
   - `clang`: required by RocksDB.
   - `protobuf`: required for protobuf serialization (gRPC).
   - `cmake`: required for building protobuf
   - `git-lfs`: The Git extension for [Large File Support](https://git-lfs.github.com/) (required for EF tests submodule).
6. If you haven't already, clone the repository with submodules: `git clone --recursive https://github.com/sigp/lighthouse`.
   Alternatively, run `git submodule init` in a repository which was cloned without submodules.
7. Change directory to the root of the repository.
8. Run the test by using command `cargo test --all --release`. By running, it will pass all the required test cases.
   If you are doing it for the first time, then you can grab a coffee in the meantime. Usually, it takes time
   to build, compile and pass all test cases. If there is no error then it means everything is working properly
   and it's time to get your hands dirty.
   In case, if there is an error, then please raise the [issue](https://github.com/sigp/lighthouse/issues).
   We will help you.
9. As an alternative to, or instead of the above step, you may also run benchmarks by using
   the command `cargo bench --all`

## Notes:

Lighthouse targets Rust `stable` but _should_ run on `nightly`.

### Note for Windows users:

Perl may also be required to build lighthouse. You can install [Strawberry Perl](http://strawberryperl.com/),
or alternatively use a choco install command `choco install strawberryperl`.

Additionally, the dependency `protoc-grpcio v0.3.1` is reported to have issues compiling in Windows. You can specify
a known working version by editing version in protos/Cargo.toml's "build-dependencies" section to
`protoc-grpcio = "<=0.3.0"`.
109 docs/interop.md (new file)
@ -0,0 +1,109 @@
|
|||||||
|
# Lighthouse Inter-Op Docs
|
||||||
|
|
||||||
|
_These documents are intended for a highly technical audience, specifically
|
||||||
|
Ethereum 2.0 implementers._
|
||||||
|
|
||||||
|
This document provides details on how to use Lighthouse for inter-op testing.
|
||||||
|
|
||||||
|
## Steps
|
||||||
|
|
||||||
|
_Note: binaries are compiled into the `target/release` directory of the
|
||||||
|
repository. In this example, we run binaries assuming the user is in this
|
||||||
|
directory. E.g., running the beacon node binary can be achieved with
|
||||||
|
`$ ./target/release/beacon_node`. Those familiar with `cargo` may use the
|
||||||
|
equivalent (and more-convenient) `cargo run --release --` commands._
|
||||||
|
|
||||||
|
1. Setup a Lighthouse [development environment](env.md).
|
||||||
|
1. Build all the binaries using `cargo build --all --release`
|
||||||
|
1. Create default configuration files by running `$ ./beacon_node` and pressing
|
||||||
|
Ctrl+C after the node has started.
|
||||||
|
1. Follow the steps in [Genesis](#genesis) to configure the genesis state.
|
||||||
|
1. Follow the steps in [Networking](#networking) to launch a node with
|
||||||
|
appropriate networking parameters.
|
||||||
|
|
||||||
|
## Genesis
|
||||||
|
|
||||||
|
Lighthouse supports the following methods for generating a genesis state:
|
||||||
|
|
||||||
|
- [`Yaml`](#yaml): loads the genesis state from some YAML file (recommended
|
||||||
|
method).
|
||||||
|
- [`Generated`](#generated): generates a state given a `(validator_count,
|
||||||
|
genesis_time)`
|
||||||
|
tuple. _Note: this method is not yet fully specified and the state
|
||||||
|
generated is almost certainly not identical to other implementations._
|
||||||
|
- [`RecentGenesis`](#recentgenesis): identical to `Generated`, however the
|
||||||
|
`genesis_time` is set
|
||||||
|
to the previous 30-minute window. For example, if a state is generated at
|
||||||
|
`0845`, the genesis time will be `0830`.
|
||||||
|
|
||||||
|
You may configure a `beacon_node` to use one of these methods using the
|
||||||
|
[`beacon_node.toml`](README.md#beacon-nodetoml). There is a [documented
|
||||||
|
example](config_examples/) configuration file which includes an example for
|
||||||
|
each of these methods (see the `genesis_state` object).
|
||||||
|
|
||||||
|
### Yaml
|
||||||
|
|
||||||
|
This method involves loading a `BeaconState` from a YAML file. We provide
|
||||||
|
instructions for generating that YAML file and starting from it. If starting
|
||||||
|
from a pre-existing YAML file, simply skip the generation steps.
|
||||||
|
|
||||||
|
#### Generating a YAML file
|
||||||
|
|
||||||
|
The [cli_util](/tests/cli_util) generate YAML genesis state files. You can run
|
||||||
|
`$ ./cli_util genesis_yaml -h` to see documentation. We provide an example to
|
||||||
|
generate a YAML file with the following properties:
|
||||||
|
|
||||||
|
- 10 initial validators, each with [deterministic
|
||||||
|
keypairs](https://github.com/ethereum/eth2.0-pm/issues/60#issuecomment-512157915).
|
||||||
|
- The genesis file is stored in `~/.lighthouse/`, the default data directory
|
||||||
|
(an absolute path must be supplied).
|
||||||
|
- Genesis time is set to the time when the command is run (it can be customized
|
||||||
|
with the `-g` flag).
|
||||||
|
|
||||||
|
```
|
||||||
|
$ ./cli_util genesis_yaml -n 10 -f /home/user/.lighthouse/genesis_state.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Configuring the Beacon Node
|
||||||
|
|
||||||
|
Modify the [`beacon-node.toml`](README.md#beacon-nodetoml) file to have the
|
||||||
|
following `genesiss_state` object (choosing the `file`):
|
||||||
|
|
||||||
|
```
|
||||||
|
[genesis_state]
|
||||||
|
type = "Yaml"
|
||||||
|
file = "/home/user/.lighthouse/genesis_state.yaml"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Generated
|
||||||
|
|
||||||
|
Modify the [`beacon-node.toml`](README.md#beacon-nodetoml) file to have the
|
||||||
|
following `genesis_state` object (choosing the `validator_count` and
|
||||||
|
`genesis_time`):
|
||||||
|
|
||||||
|
```
|
||||||
|
[genesis_state]
|
||||||
|
type = "Generated"
|
||||||
|
validator_count = 16
|
||||||
|
genesis_time = 1564620118
|
||||||
|
```
|
||||||
|
|
||||||
|
### RecentGenesis

Modify the [`beacon-node.toml`](README.md#beacon-nodetoml) file to have the
following `genesis_state` object (choosing the `validator_count`):

```
[genesis_state]
type = "RecentGenesis"
validator_count = 16
```
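
For clarity, the "previous 30-minute window" is just the current UNIX time
rounded down to a multiple of 30 minutes. A minimal sketch of that calculation
(illustrative only; `recent_genesis_time` is a hypothetical helper, not
necessarily the name used in the codebase):

```rust
use std::time::{SystemTime, UNIX_EPOCH};

/// Round the current UNIX time down to the previous 30-minute window,
/// e.g., a wall-clock time of 08:45 produces a genesis time of 08:30.
fn recent_genesis_time() -> u64 {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock should be after 1970")
        .as_secs();
    // 30 minutes = 1,800 seconds; drop the remainder to snap to the window.
    now - (now % 1_800)
}
```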

## Networking

_TODO: provide details on config required to connect to some IP address._

## References

The BLS key generation method used should be identical to [this
implementation](https://github.com/ethereum/eth2.0-pm/issues/60#issuecomment-512157915).

@ -1,83 +0,0 @@
# About Lighthouse

## Goals

The purpose of this project is to work alongside the Ethereum community to
implement a secure, trustworthy, open-source Ethereum Serenity client in Rust.

* **Security**: Lighthouse's main goal is to implement everything with a
  security-first mindset. The goal is to ensure that all components of
  Lighthouse are thoroughly tested, checked and secure.

* **Trust**: Ethereum Serenity is a Proof-of-Stake system, which involves the
  interaction of the Ethereum protocol and user funds. Thus, a goal of
  Lighthouse is to provide a client that is trustworthy. All code can be
  tested and verified; the goal of Lighthouse is to provide code that is
  trusted.

* **Transparency**: Lighthouse aims to be as transparent as possible. The
  goal is for Lighthouse to embrace the open-source community and allow all
  to understand the decisions, direction and changes in all aspects.

* **Error Resilience**: As Lighthouse embraces the "never `panic`" mindset,
  the goal is to be resilient to errors that may occur. A client that is
  tolerant of errors provides further properties for the secure, trustworthy
  client that Lighthouse aims to be.

In addition to implementing a new client, the project seeks to maintain and
improve the Ethereum protocol wherever possible.

## Ideology
### Never Panic

Lighthouse will be the gateway for interacting with the Proof-of-Stake system
employed by Ethereum. This requires the validation and proposal of blocks and
extremely timely responses. As part of this, Lighthouse aims for as much
uptime as possible, meaning minimising the number of exceptions thrown and
gracefully handling any issues that do occur.

Rust's `panic` throws an exception and exits, terminating the running
process. Thus, Lighthouse aims to use `panic` as little as possible, to
minimise the cases in which it can terminate.
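
As a minimal sketch of this mindset (illustrative only, not Lighthouse code),
prefer returning a `Result` over calls that can `panic`:

```rust
use std::num::ParseIntError;

/// Avoid `input.parse::<u64>().unwrap()`: it panics (terminating the
/// process) on bad input. Returning a `Result` lets the caller decide.
fn parse_slot(input: &str) -> Result<u64, ParseIntError> {
    input.trim().parse::<u64>()
}

fn main() {
    // The error case is handled gracefully rather than terminating.
    match parse_slot("not-a-number") {
        Ok(slot) => println!("slot: {}", slot),
        Err(e) => eprintln!("invalid slot, ignoring: {}", e),
    }
}
```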

### Security First Mindset

Lighthouse aims to provide a safe, secure Serenity client for the Ethereum
ecosystem. At each step of development, the aim is to maintain a
security-first mindset. When contributing to any part of the Lighthouse
client, always ensure you understand each aspect thoroughly and cover all
potential security considerations of your code.

### Functions aren't completed until they are tested

As part of the security-first mindset, we aim to cover as many distinct cases
as possible. A function being developed is not considered "complete" until
tests exist for that function. The tests not only help show the correctness
of the function, but also provide a way for new developers to understand how
the function is to be called and how it works.
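
As an illustrative sketch of this convention (not from the codebase), a
function lands together with its tests, and the tests double as usage
documentation:

```rust
/// Returns the integer square root of `n`: the largest `x` with `x * x <= n`.
pub fn integer_sqrt(n: u64) -> u64 {
    // Newton's method on integers; converges quickly and never panics.
    let mut x = n;
    let mut y = (x + 1) / 2;
    while y < x {
        x = y;
        y = (x + n / x) / 2;
    }
    x
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn integer_sqrt_handles_typical_and_edge_cases() {
        assert_eq!(integer_sqrt(0), 0);
        assert_eq!(integer_sqrt(1), 1);
        assert_eq!(integer_sqrt(16), 4);
        assert_eq!(integer_sqrt(17), 4);
    }
}
```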

## Engineering Ethos

Lighthouse aims to produce many small, easily-tested components, each
separated into individual crates wherever possible.

Generally, tests can be kept in the same file, as is typical in Rust.
Integration tests should be placed in the `tests` directory in the crate's
root. Particularly large (by line count) tests should be placed into a
separate file.

A function is not considered complete until a test exists for it. We produce
tests to protect against regression (accidentally breaking things) and to
provide examples that help readers of the code base understand how functions
should (or should not) be used.

Each pull request is to be reviewed by at least one "core developer" (i.e.,
someone with write-access to the repository). This helps to ensure bugs are
detected, consistency is maintained, and responsibility for errors is
dispersed.

Discussion must be respectful and intellectual. Have fun and make jokes, but
always respect the limits of other people.

@ -1,233 +0,0 @@
# Contributing to Lighthouse

[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/sigp/lighthouse?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)

Lighthouse is an open-source Ethereum Serenity client built in
[Rust](https://www.rust-lang.org/).

Lighthouse welcomes all contributions with open arms. If you are interested in
contributing to the Ethereum ecosystem, and you want to learn Rust, Lighthouse
is a great project to work on.

This documentation aims to provide a smooth on-boarding for all who wish to
help contribute to Lighthouse. Whether it is helping with the mountain of
documentation, writing extra tests or developing components, all help is
appreciated and your contributions will help not only the community but all
the contributors.

We've bundled up our Goals, Ethos and Ideology into one document for you to
read through; please read our [About Lighthouse](lighthouse.md) docs. :smile:

Layer-1 infrastructure is a critical component for the ecosystem and relies
heavily on contributions from the community. Building Ethereum Serenity is a
huge task and we refuse to conduct an inappropriate ICO or charge licensing
fees. Instead, we fund development through grants and support from Sigma
Prime.

If you have any additional questions, please feel free to jump on the
[gitter](https://gitter.im/sigp/lighthouse) and have a chat with all of us.

**Pre-reading Materials:**

* [About Lighthouse](lighthouse.md)
* [Ethereum Serenity](serenity.md)

**Repository**

If you'd like to contribute, try having a look through the [open
issues](https://github.com/sigp/lighthouse/issues) (tip: look for the [good
first
issue](https://github.com/sigp/lighthouse/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)
tag) and ping us on the [gitter](https://gitter.im/sigp/lighthouse) channel. We need
your support!

## Understanding Serenity

Ethereum's Serenity is based on a Proof-of-Stake, sharded beacon chain.

(*If you don't know what that is, don't `panic`, that's what this documentation
is for!* :smile:)

Read through our [Understanding
Serenity](https://github.com/sigp/lighthouse/blob/master/docs/serenity.md) docs
to learn more! :smile: (*unless you've already read it.*)

The document explains the necessary fundamentals for understanding Ethereum,
Proof-of-Stake and the Serenity we are working towards.

## Development Onboarding

If you would like to contribute and develop Lighthouse, there are only a few
things to go through (and then you're on your way!).

### Understanding Rust

Rust is an extremely powerful, low-level programming language that provides
freedom and performance to create powerful projects. The [Rust
Book](https://doc.rust-lang.org/stable/book/) provides insight into the Rust
language and some of the coding style to follow (as well as acting as a great
introduction and tutorial for the language).

Rust has a steep learning curve, but there are many resources to help you!

* [Rust Book](https://doc.rust-lang.org/stable/book/)
* [Rust by example](https://doc.rust-lang.org/stable/rust-by-example/)
* [Learning Rust With Entirely Too Many Linked Lists](http://cglab.ca/~abeinges/blah/too-many-lists/book/)
* [Rustlings](https://github.com/rustlings/rustlings)
* [Rust Exercism](https://exercism.io/tracks/rust)
* [Learn X in Y minutes - Rust](https://learnxinyminutes.com/docs/rust/)

#### Getting Started and installing Rust

We recommend installing Rust using [**rustup**](https://rustup.rs/). Rustup
allows you to easily install versions of Rust.

**Linux/Unix/Mac:**

```
$ curl https://sh.rustup.rs -sSf | sh
```

**Windows (you need a bit more):**

* Install Visual Studio 2015 with C++ support
* Install Rustup using: https://static.rust-lang.org/rustup/dist/x86_64-pc-windows-msvc/rustup-init.exe
* You can then use the ``VS2015 x64 Native Tools Command Prompt`` and run:

```
rustup default stable-x86_64-pc-windows-msvc
```

#### Getting ready with Cargo

[Cargo](https://doc.rust-lang.org/cargo/) is the package manager for Rust. It
allows you to pull in packages and external libraries, and it is extremely
handy for handling dependencies and helping to modularise your project.

*Note: If you've installed Rust through rustup, you should have ``cargo``
installed.*
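
As a tiny illustration, once a crate such as `rand` is added under
`[dependencies]` in `Cargo.toml`, Cargo fetches and builds it so it can be
used directly (a sketch, assuming the `rand` crate):

```rust
use rand::Rng;

fn main() {
    // `rand` is an external crate pulled in by Cargo.
    let byte: u8 = rand::thread_rng().gen();
    println!("random byte: {}", byte);
}
```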

#### Rust Terminology

When developing in Rust, you'll come across some terminology that differs
from other programming languages you may have used; a small sketch follows
the list below.

* **Trait**: A trait is a collection of methods defined for a type; they can
  be implemented for any data type.
* **Struct**: A custom data type that lets us name and package together
  multiple related values that make a meaningful group.
* **Crate**: A crate is synonymous with a *library* or *package* in other
  languages. A crate can produce an executable or a library depending on the
  project.
* **Module**: A collection of items: functions, structs, traits, and even
  other modules. Modules allow you to hierarchically split code into logical
  units and manage visibility.
* **Attribute**: Metadata applied to some module, crate or item.
* **Macros**: Macros are powerful meta-programming statements that get
  expanded into source code that gets compiled with the rest of the code
  (unlike `C` macros that are pre-processed, Rust macros form an Abstract
  Syntax Tree).
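
As a small, self-contained sketch (illustrative only) showing several of
these terms together (a trait, a struct, an attribute and a macro):

```rust
// A trait: a collection of methods that can be implemented for any type.
trait NameOf {
    fn name(&self) -> String;
}

// A struct: names and packages together related values.
// `#[derive(Debug)]` is an attribute: metadata applied to the item.
#[derive(Debug)]
struct Validator {
    index: u64,
}

impl NameOf for Validator {
    fn name(&self) -> String {
        // `format!` is a macro, expanded into source code at compile time.
        format!("validator_{}", self.index)
    }
}

fn main() {
    let v = Validator { index: 7 };
    println!("{} ({:?})", v.name(), v);
}
```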

Other good appendix resources:

* [Keywords](https://doc.rust-lang.org/book/appendix-01-keywords.html)
* [Operators/Symbols](https://doc.rust-lang.org/book/appendix-02-operators.html)
* [Traits](https://doc.rust-lang.org/book/appendix-03-derivable-traits.html)

### Understanding the Git Workflow

Lighthouse utilises git as the primary open-source development tool. To help
with your contributions, it is great to understand the processes used to
ensure everything remains in sync and there's as little conflict as possible
when working on similar files.

Lighthouse uses the **feature branch** workflow, where each issue, or each
feature, is developed on its own branch and then merged in via a
pull-request.

* [Feature Branch Tutorial](https://www.atlassian.com/git/tutorials/comparing-workflows/feature-branch-workflow)

## Code Conventions/Styleguide and Ethos

### Ethos

**Pull Requests**

Pull requests should be reviewed by **at least** one "*core developer*"
(someone with write-access to the repo). This should ensure bugs are caught
and the code is kept in a consistent state that follows all conventions and
style.

All discussion (whether in PRs or Issues or in the Gitter) should be
respectful and intellectual. Have fun, but always respect the limits of other
people.

**Testing**

*"A function is not considered complete until tests exist for it."*

Generally, tests can be self-contained in the same file. Integration tests
should be added into the ``tests/`` directory in the crate's **root**.

Large line-count tests should be in a separate file.

### Rust Styleguide

Lighthouse adheres to Rust code conventions as outlined in the [**Rust
Styleguide**](https://github.com/rust-dev-tools/fmt-rfcs/blob/master/guide/guide.md).

Ensure you use [Clippy](https://github.com/rust-lang/rust-clippy) to lint and
check your code.

| Code Aspect         | Guideline Format               |
|:--------------------|:-------------------------------|
| Types               | ``UpperCamelCase``             |
| Enums/Enum Variants | ``UpperCamelCase``             |
| Struct Fields       | ``snake_case``                 |
| Function / Method   | ``snake_case``                 |
| Macro Names         | ``snake_case``                 |
| Constants           | ``SCREAMING_SNAKE_CASE``       |
| Forbidden name      | Trailing Underscore: ``name_`` |

Other general Rust docs:

* [Rust Other Style Advice](https://github.com/rust-dev-tools/fmt-rfcs/blob/master/guide/advice.md)
* [Cargo.toml Conventions](https://github.com/rust-dev-tools/fmt-rfcs/blob/master/guide/cargo.md)

### TODOs

All `TODO` statements should be accompanied by a GitHub issue.

```rust
pub fn my_function(&mut self, _something: &[u8]) -> Result<String, Error> {
    // TODO: something_here
    // https://github.com/sigp/lighthouse/issues/XX
    unimplemented!()
}
```

### Comments

**General Comments**

* Prefer line (``//``) comments to block comments (``/* ... */``).
* Comments can appear on the line prior to the item or after a trailing
  space.

```rust
// Comment for this struct
struct Lighthouse {}

fn make_blockchain() {} // A comment on the same line after a space
```

**Doc Comments**

* The ``///`` is used to generate comments for docs.
* The comments should come before attributes.

```rust
/// Stores the core configuration for this Lighthouse instance.
/// This struct is general, other components may implement more
/// specialized config structs.
#[derive(Clone)]
pub struct LighthouseConfig {
    pub data_dir: PathBuf,
    pub p2p_listen_port: u16,
}
```

docs/rest_oapi.yaml: 1379 lines (new file; diff suppressed because it is too large)

@ -14,8 +14,6 @@ Rust crates containing logic common across the Lighthouse project.
   `BeaconState`, etc).
 - [`utils/`](utils/):
 - [`bls`](utils/bls/): A wrapper for an external BLS encryption library.
-- [`boolean-bitfield`](utils/boolean-bitfield/): Provides an expandable vector
-  of bools, specifically for use in Eth2.
 - [`fisher-yates-shuffle`](utils/fisher-yates-shuffle/): shuffles a list
   pseudo-randomly.
 - [`hashing`](utils/hashing/): A wrapper for external hashing libraries.

@ -7,11 +7,7 @@ edition = "2018"
 [dependencies]
 parking_lot = "0.7"
 store = { path = "../../beacon_node/store" }
-eth2_ssz = { path = "../utils/ssz" }
-state_processing = { path = "../state_processing" }
 types = { path = "../types" }
-log = "0.4.6"
-bit-vec = "0.5.0"
 
 [dev-dependencies]
 criterion = "0.2"
@ -21,3 +17,5 @@ bls = { path = "../utils/bls" }
 slot_clock = { path = "../utils/slot_clock" }
 beacon_chain = { path = "../../beacon_node/beacon_chain" }
 env_logger = "0.6.0"
+lazy_static = "1.3.0"
+rand = "0.7"

@ -10,7 +10,7 @@ pub type Result<T> = std::result::Result<T, String>;
 
 pub trait LmdGhost<S: Store, E: EthSpec>: Send + Sync {
     /// Create a new instance, with the given `store` and `finalized_root`.
-    fn new(store: Arc<S>, finalized_block: &BeaconBlock, finalized_root: Hash256) -> Self;
+    fn new(store: Arc<S>, finalized_block: &BeaconBlock<E>, finalized_root: Hash256) -> Self;
 
     /// Process an attestation message from some validator that attests to some `block_hash`
     /// representing a block at some `block_slot`.
@ -22,7 +22,7 @@ pub trait LmdGhost<S: Store, E: EthSpec>: Send + Sync {
     ) -> Result<()>;
 
     /// Process a block that was seen on the network.
-    fn process_block(&self, block: &BeaconBlock, block_hash: Hash256) -> Result<()>;
+    fn process_block(&self, block: &BeaconBlock<E>, block_hash: Hash256) -> Result<()>;
 
     /// Returns the head of the chain, starting the search at `start_block_root` and moving upwards
     /// (in block height).
@ -40,7 +40,7 @@ pub trait LmdGhost<S: Store, E: EthSpec>: Send + Sync {
     /// `finalized_block_root` must be the root of `finalized_block`.
     fn update_finalized_root(
         &self,
-        finalized_block: &BeaconBlock,
+        finalized_block: &BeaconBlock<E>,
         finalized_block_root: Hash256,
     ) -> Result<()>;
 
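
The substance of these hunks is that `BeaconBlock` becomes generic over the
spec `E`, so every trait method now takes `BeaconBlock<E>`. A toy sketch of
the pattern (illustrative only, not Lighthouse code):

```rust
use std::marker::PhantomData;

trait EthSpec {
    const SLOTS_PER_EPOCH: u64;
}

struct MinimalSpec;
impl EthSpec for MinimalSpec {
    const SLOTS_PER_EPOCH: u64 = 8;
}

// The block type carries the spec as a type parameter.
struct BeaconBlock<E: EthSpec> {
    slot: u64,
    _phantom: PhantomData<E>,
}

// Trait methods take `BeaconBlock<E>` rather than a monomorphic block.
trait ForkChoice<E: EthSpec> {
    fn process_block(&mut self, block: &BeaconBlock<E>);
}

struct CountingForkChoice {
    seen: usize,
}

impl<E: EthSpec> ForkChoice<E> for CountingForkChoice {
    fn process_block(&mut self, block: &BeaconBlock<E>) {
        // The spec's constants are available at compile time via `E`.
        println!(
            "block at slot {} (epoch {})",
            block.slot,
            block.slot / E::SLOTS_PER_EPOCH
        );
        self.seen += 1;
    }
}

fn main() {
    let mut fc = CountingForkChoice { seen: 0 };
    let block: BeaconBlock<MinimalSpec> = BeaconBlock {
        slot: 17,
        _phantom: PhantomData,
    };
    fc.process_block(&block);
    println!("processed {} block(s)", fc.seen);
}
```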
@ -1,14 +1,15 @@
 //! An implementation of "reduced tree" LMD GHOST fork choice.
 //!
-//! This algorithm was concieved at IC3 Cornell, 2019.
+//! This algorithm was conceived at IC3 Cornell, 2019.
 //!
 //! This implementation is incomplete and has known bugs. Do not use in production.
 use super::{LmdGhost, Result as SuperResult};
 use parking_lot::RwLock;
 use std::collections::HashMap;
+use std::fmt;
 use std::marker::PhantomData;
 use std::sync::Arc;
-use store::{iter::BestBlockRootsIterator, Error as StoreError, Store};
+use store::{iter::BlockRootsIterator, Error as StoreError, Store};
 use types::{BeaconBlock, BeaconState, EthSpec, Hash256, Slot};
 
 type Result<T> = std::result::Result<T, Error>;
@ -35,12 +36,29 @@ pub struct ThreadSafeReducedTree<T, E> {
     core: RwLock<ReducedTree<T, E>>,
 }
 
+impl<T, E> fmt::Debug for ThreadSafeReducedTree<T, E> {
+    /// `Debug` just defers to the implementation of `self.core`.
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.core.fmt(f)
+    }
+}
+
+impl<T, E> ThreadSafeReducedTree<T, E>
+where
+    T: Store,
+    E: EthSpec,
+{
+    pub fn verify_integrity(&self) -> std::result::Result<(), String> {
+        self.core.read().verify_integrity()
+    }
+}
+
 impl<T, E> LmdGhost<T, E> for ThreadSafeReducedTree<T, E>
 where
     T: Store,
     E: EthSpec,
 {
-    fn new(store: Arc<T>, genesis_block: &BeaconBlock, genesis_root: Hash256) -> Self {
+    fn new(store: Arc<T>, genesis_block: &BeaconBlock<E>, genesis_root: Hash256) -> Self {
         ThreadSafeReducedTree {
             core: RwLock::new(ReducedTree::new(store, genesis_block, genesis_root)),
         }
@ -59,7 +77,7 @@ where
     }
 
     /// Process a block that was seen on the network.
-    fn process_block(&self, block: &BeaconBlock, block_hash: Hash256) -> SuperResult<()> {
+    fn process_block(&self, block: &BeaconBlock<E>, block_hash: Hash256) -> SuperResult<()> {
         self.core
             .write()
             .add_weightless_node(block.slot, block_hash)
@ -81,7 +99,11 @@ where
             .map_err(|e| format!("find_head failed: {:?}", e))
     }
 
-    fn update_finalized_root(&self, new_block: &BeaconBlock, new_root: Hash256) -> SuperResult<()> {
+    fn update_finalized_root(
+        &self,
+        new_block: &BeaconBlock<E>,
+        new_root: Hash256,
+    ) -> SuperResult<()> {
         self.core
             .write()
             .update_root(new_block.slot, new_root)
@ -106,12 +128,18 @@ struct ReducedTree<T, E> {
     _phantom: PhantomData<E>,
 }
 
+impl<T, E> fmt::Debug for ReducedTree<T, E> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.nodes.fmt(f)
+    }
+}
+
 impl<T, E> ReducedTree<T, E>
 where
     T: Store,
     E: EthSpec,
 {
-    pub fn new(store: Arc<T>, genesis_block: &BeaconBlock, genesis_root: Hash256) -> Self {
+    pub fn new(store: Arc<T>, genesis_block: &BeaconBlock<E>, genesis_root: Hash256) -> Self {
         let mut nodes = HashMap::new();
 
         // Insert the genesis node.
@ -132,6 +160,10 @@ where
         }
     }
 
+    /// Set the root node (the node without any parents) to the given `new_slot` and `new_root`.
+    ///
+    /// The given `new_root` must be in the block tree (but not necessarily in the reduced tree).
+    /// Any nodes which are not a descendant of `new_root` will be removed from the store.
     pub fn update_root(&mut self, new_slot: Slot, new_root: Hash256) -> Result<()> {
         if !self.nodes.contains_key(&new_root) {
             let node = Node {
@ -153,7 +185,7 @@ where
         Ok(())
     }
 
-    /// Removes `current_hash` and all decendants, except `subtree_hash` and all nodes
+    /// Removes `current_hash` and all descendants, except `subtree_hash` and all nodes
     /// which have `subtree_hash` as an ancestor.
     ///
     /// In effect, prunes the tree so that only descendants of `subtree_hash` exist.
@ -289,55 +321,54 @@ where
         Ok(weight)
     }
 
+    /// Removes the vote from `validator_index` from the reduced tree.
+    ///
+    /// If the validator had a vote in the tree, the removal of that vote may cause a node to
+    /// become redundant and removed from the reduced tree.
     fn remove_latest_message(&mut self, validator_index: usize) -> Result<()> {
-        if self.latest_votes.get(validator_index).is_some() {
-            // Unwrap is safe as prior `if` statements ensures the result is `Some`.
-            let vote = self.latest_votes.get(validator_index).unwrap();
+        if let Some(vote) = *self.latest_votes.get(validator_index) {
+            self.get_mut_node(vote.hash)?.remove_voter(validator_index);
+            let node = self.get_node(vote.hash)?.clone();
 
-            let should_delete = {
-                self.get_mut_node(vote.hash)?.remove_voter(validator_index);
-                let node = self.get_node(vote.hash)?.clone();
-
-                if let Some(parent_hash) = node.parent_hash {
-                    if node.has_votes() || node.children.len() > 1 {
-                        // A node with votes or more than one child is never removed.
-                        false
-                    } else if node.children.len() == 1 {
-                        // A node which has only one child may be removed.
-                        //
-                        // Load the child of the node and set its parent to be the parent of this
-                        // node (viz., graft the node's child to the node's parent)
-                        let child = self.get_mut_node(node.children[0])?;
-                        child.parent_hash = node.parent_hash;
-
-                        // Graft the parent of this node to its child.
-                        if let Some(parent_hash) = node.parent_hash {
-                            let parent = self.get_mut_node(parent_hash)?;
-                            parent.replace_child(node.block_hash, node.children[0])?;
-                        }
-
-                        true
-                    } else if node.children.is_empty() {
-                        // A node which has no children may be deleted and potentially its parent
-                        // too.
-                        self.maybe_delete_node(parent_hash)?;
-
-                        true
-                    } else {
-                        // It is impossible for a node to have a number of children that is not 0, 1 or
-                        // greater than one.
-                        //
-                        // This code is strictly unnecessary, however we keep it for readability.
-                        unreachable!();
-                    }
-                } else {
-                    // A node without a parent is the genesis/finalized node and should never be removed.
-                    false
-                }
-            };
-
-            if should_delete {
-                self.nodes.remove(&vote.hash);
+            if let Some(parent_hash) = node.parent_hash {
+                if node.has_votes() || node.children.len() > 1 {
+                    // A node with votes or more than one child is never removed.
+                } else if node.children.len() == 1 {
+                    // A node which has only one child may be removed.
+                    //
+                    // Load the child of the node and set its parent to be the parent of this
+                    // node (viz., graft the node's child to the node's parent)
+                    let child = self.get_mut_node(node.children[0])?;
+                    child.parent_hash = node.parent_hash;
+
+                    // Graft the parent of this node to its child.
+                    if let Some(parent_hash) = node.parent_hash {
+                        let parent = self.get_mut_node(parent_hash)?;
+                        parent.replace_child(node.block_hash, node.children[0])?;
+                    }
+
+                    self.nodes.remove(&vote.hash);
+                } else if node.children.is_empty() {
+                    // Remove the to-be-deleted node from its parent.
+                    if let Some(parent_hash) = node.parent_hash {
+                        self.get_mut_node(parent_hash)?
+                            .remove_child(node.block_hash)?;
+                    }
+
+                    self.nodes.remove(&vote.hash);
+
+                    // A node which has no children may be deleted and potentially its parent
+                    // too.
+                    self.maybe_delete_node(parent_hash)?;
+                } else {
+                    // It is impossible for a node to have a number of children that is not 0, 1 or
+                    // greater than one.
+                    //
+                    // This code is strictly unnecessary, however we keep it for readability.
+                    unreachable!();
+                }
+            } else {
+                // A node without a parent is the genesis/finalized node and should never be removed.
             }
 
             self.latest_votes.insert(validator_index, Some(vote));
@ -346,23 +377,27 @@ where
         Ok(())
     }
 
+    /// Deletes a node if it is unnecessary.
+    ///
+    /// Any node is unnecessary if all of the following are true:
+    ///
+    /// - it is not the root node.
+    /// - it only has one child.
+    /// - it does not have any votes.
     fn maybe_delete_node(&mut self, hash: Hash256) -> Result<()> {
         let should_delete = {
             let node = self.get_node(hash)?.clone();
 
             if let Some(parent_hash) = node.parent_hash {
                 if (node.children.len() == 1) && !node.has_votes() {
-                    // Graft the child to its grandparent.
-                    let child_hash = {
-                        let child_node = self.get_mut_node(node.children[0])?;
-                        child_node.parent_hash = node.parent_hash;
-
-                        child_node.block_hash
-                    };
+                    let child_hash = node.children[0];
+                    // Graft the single descendant `node` to the `parent` of node.
+                    self.get_mut_node(child_hash)?.parent_hash = Some(parent_hash);
 
-                    // Graft the grandparent to its grandchild.
-                    let parent_node = self.get_mut_node(parent_hash)?;
-                    parent_node.replace_child(node.block_hash, child_hash)?;
+                    // Detach `node` from `parent`, replacing it with `child`.
+                    self.get_mut_node(parent_hash)?
+                        .replace_child(hash, child_hash)?;
 
                     true
                 } else {
@ -398,7 +433,7 @@ where
     }
 
     fn add_weightless_node(&mut self, slot: Slot, hash: Hash256) -> Result<()> {
-        if slot >= self.root_slot() && !self.nodes.contains_key(&hash) {
+        if slot > self.root_slot() && !self.nodes.contains_key(&hash) {
             let node = Node {
                 block_hash: hash,
                 ..Node::default()
@ -406,6 +441,8 @@ where
 
             self.add_node(node)?;
 
+            // Read the `parent_hash` from the newly created node. If it has a parent (i.e., it's
+            // not the root), see if it is superfluous.
             if let Some(parent_hash) = self.get_node(hash)?.parent_hash {
                 self.maybe_delete_node(parent_hash)?;
             }
@ -414,75 +451,108 @@ where
         Ok(())
     }
 
+    /// Add `node` to the reduced tree, returning an error if `node` is not rooted in the tree.
     fn add_node(&mut self, mut node: Node) -> Result<()> {
-        // Find the highest (by slot) ancestor of the given hash/block that is in the reduced tree.
-        let mut prev_in_tree = {
-            let hash = self
-                .find_prev_in_tree(node.block_hash)
-                .ok_or_else(|| Error::NotInTree(node.block_hash))?;
-            self.get_mut_node(hash)?.clone()
-        };
+        // Find the highest (by slot) ancestor of the given node in the reduced tree.
+        //
+        // If this node has no ancestor in the tree, exit early.
+        let mut prev_in_tree = self
+            .find_prev_in_tree(node.block_hash)
+            .ok_or_else(|| Error::NotInTree(node.block_hash))
+            .and_then(|hash| self.get_node(hash))?
+            .clone();
 
-        let mut added = false;
-
+        // If the ancestor of `node` has children, there are three possible operations:
+        //
+        // 1. Graft the `node` between two existing nodes.
+        // 2. Create another node that will be grafted between two existing nodes, then graft
+        //    `node` to it.
+        // 3. Graft `node` to an existing node.
         if !prev_in_tree.children.is_empty() {
             for &child_hash in &prev_in_tree.children {
+                // 1. Graft the new node between two existing nodes.
+                //
+                // If `node` is a descendant of `prev_in_tree` but an ancestor of a child connected to
+                // `prev_in_tree`.
+                //
+                // This means that `node` can be grafted between `prev_in_tree` and the child that is a
+                // descendant of both `node` and `prev_in_tree`.
                 if self
                     .iter_ancestors(child_hash)?
                     .any(|(ancestor, _slot)| ancestor == node.block_hash)
                 {
                     let child = self.get_mut_node(child_hash)?;
+
+                    // Graft `child` to `node`.
                     child.parent_hash = Some(node.block_hash);
+                    // Graft `node` to `child`.
                     node.children.push(child_hash);
+                    // Detach `child` from `prev_in_tree`, replacing it with `node`.
                     prev_in_tree.replace_child(child_hash, node.block_hash)?;
+                    // Graft `node` to `prev_in_tree`.
                     node.parent_hash = Some(prev_in_tree.block_hash);
 
-                    added = true;
-
                     break;
                 }
             }
 
-            if !added {
+            // 2. Create another node that will be grafted between two existing nodes, then graft
+            //    `node` to it.
+            //
+            // Note: given that `prev_in_tree` has children and that `node` is not an ancestor of
+            // any of the children of `prev_in_tree`, we know that `node` is on a different fork to
+            // all of the children of `prev_in_tree`.
+            if node.parent_hash.is_none() {
                 for &child_hash in &prev_in_tree.children {
+                    // Find the highest (by slot) common ancestor between `node` and `child`.
+                    //
+                    // The common ancestor is the last block before `node` and `child` forked.
                     let ancestor_hash =
-                        self.find_least_common_ancestor(node.block_hash, child_hash)?;
+                        self.find_highest_common_ancestor(node.block_hash, child_hash)?;
+
+                    // If the block before `node` and `child` forked is _not_ `prev_in_tree` we
+                    // must add this new block into the tree (because it is a decision node
+                    // between two forks).
                     if ancestor_hash != prev_in_tree.block_hash {
                         let child = self.get_mut_node(child_hash)?;
+
+                        // Create a new `common_ancestor` node which represents the `ancestor_hash`
+                        // block, has `prev_in_tree` as the parent and has both `node` and `child`
+                        // as children.
                         let common_ancestor = Node {
                             block_hash: ancestor_hash,
                             parent_hash: Some(prev_in_tree.block_hash),
                             children: vec![node.block_hash, child_hash],
                             ..Node::default()
                         };
+
+                        // Graft `child` and `node` to `common_ancestor`.
                         child.parent_hash = Some(common_ancestor.block_hash);
                         node.parent_hash = Some(common_ancestor.block_hash);
 
-                        prev_in_tree.replace_child(child_hash, ancestor_hash)?;
+                        // Detach `child` from `prev_in_tree`, replacing it with `common_ancestor`.
+                        prev_in_tree.replace_child(child_hash, common_ancestor.block_hash)?;
 
+                        // Store the new `common_ancestor` node.
                         self.nodes
                             .insert(common_ancestor.block_hash, common_ancestor);
 
-                        added = true;
-
                         break;
                     }
                 }
             }
         }
 
-        if !added {
+        if node.parent_hash.is_none() {
+            // 3. Graft `node` to an existing node.
+            //
+            // Graft `node` to `prev_in_tree` and `prev_in_tree` to `node`
             node.parent_hash = Some(prev_in_tree.block_hash);
             prev_in_tree.children.push(node.block_hash);
         }
 
         // Update `prev_in_tree`. A mutable reference was not maintained to satisfy the borrow
-        // checker.
-        //
-        // This is not an ideal solution and results in unnecessary memory copies -- a better
-        // solution is certainly possible.
+        // checker. Perhaps there's a better way?
         self.nodes.insert(prev_in_tree.block_hash, prev_in_tree);
         self.nodes.insert(node.block_hash, node);
@ -498,62 +568,112 @@ where
             .and_then(|(root, _slot)| Some(root))
     }
 
-    /// For the given `child` block hash, return the block's ancestor at the given `target` slot.
-    fn find_ancestor_at_slot(&self, child: Hash256, target: Slot) -> Result<Hash256> {
-        let (root, slot) = self
-            .iter_ancestors(child)?
-            .find(|(_block, slot)| *slot <= target)
-            .ok_or_else(|| Error::NotInTree(child))?;
-
-        // Explicitly check that the slot is the target in the case that the given child has a slot
-        // above target.
-        if slot == target {
-            Ok(root)
-        } else {
-            Err(Error::NotInTree(child))
-        }
-    }
-
     /// For the two given block roots (`a_root` and `b_root`), find the first block they share in
     /// the tree. Viz, find the block that these two distinct blocks forked from.
-    fn find_least_common_ancestor(&self, a_root: Hash256, b_root: Hash256) -> Result<Hash256> {
-        // If the blocks behind `a_root` and `b_root` are not at the same slot, take the highest
-        // block (by slot) down to be equal with the lower slot.
-        //
-        // The result is two roots which identify two blocks at the same height.
-        let (a_root, b_root) = {
-            let a = self.get_block(a_root)?;
-            let b = self.get_block(b_root)?;
-
-            if a.slot > b.slot {
-                (self.find_ancestor_at_slot(a_root, b.slot)?, b_root)
-            } else if b.slot > a.slot {
-                (a_root, self.find_ancestor_at_slot(b_root, a.slot)?)
-            } else {
-                (a_root, b_root)
-            }
-        };
-
-        let ((a_root, _a_slot), (_b_root, _b_slot)) = self
-            .iter_ancestors(a_root)?
-            .zip(self.iter_ancestors(b_root)?)
-            .find(|((a_root, _), (b_root, _))| a_root == b_root)
-            .ok_or_else(|| Error::NoCommonAncestor((a_root, b_root)))?;
-
-        Ok(a_root)
+    fn find_highest_common_ancestor(&self, a_root: Hash256, b_root: Hash256) -> Result<Hash256> {
+        let mut a_iter = self.iter_ancestors(a_root)?;
+        let mut b_iter = self.iter_ancestors(b_root)?;
+
+        // Combines the `next()` fns on the `a_iter` and `b_iter` and returns the roots of two
+        // blocks at the same slot, or `None` if we have gone past genesis or the root of this tree.
+        let mut iter_blocks_at_same_height = || -> Option<(Hash256, Hash256)> {
+            match (a_iter.next(), b_iter.next()) {
+                (Some((mut a_root, a_slot)), Some((mut b_root, b_slot))) => {
+                    // If either of the slots are lower than the root of this tree, exit early.
+                    if a_slot < self.root.1 || b_slot < self.root.1 {
+                        None
+                    } else {
+                        if a_slot < b_slot {
+                            for _ in a_slot.as_u64()..b_slot.as_u64() {
+                                b_root = b_iter.next()?.0;
+                            }
+                        } else if a_slot > b_slot {
+                            for _ in b_slot.as_u64()..a_slot.as_u64() {
+                                a_root = a_iter.next()?.0;
+                            }
+                        }
+
+                        Some((a_root, b_root))
+                    }
+                }
+                _ => None,
+            }
+        };
+
+        loop {
+            match iter_blocks_at_same_height() {
+                Some((a_root, b_root)) if a_root == b_root => break Ok(a_root),
+                Some(_) => (),
+                None => break Err(Error::NoCommonAncestor((a_root, b_root))),
+            }
+        }
     }
 
-    fn iter_ancestors(&self, child: Hash256) -> Result<BestBlockRootsIterator<E, T>> {
+    fn iter_ancestors(&self, child: Hash256) -> Result<BlockRootsIterator<E, T>> {
         let block = self.get_block(child)?;
         let state = self.get_state(block.state_root)?;
 
-        Ok(BestBlockRootsIterator::owned(
+        Ok(BlockRootsIterator::owned(
             self.store.clone(),
             state,
             block.slot - 1,
         ))
     }
 
+    /// Verify the integrity of `self`. Returns `Ok(())` if the tree has integrity, otherwise returns `Err(description)`.
+    ///
+    /// Tries to detect the following erroneous conditions:
+    ///
+    /// - Dangling references inside the tree.
+    /// - Any scenario where there's not exactly one root node.
+    ///
+    /// ## Notes
+    ///
+    /// Computationally intensive, likely only useful during testing.
+    pub fn verify_integrity(&self) -> std::result::Result<(), String> {
+        let num_root_nodes = self
+            .nodes
+            .iter()
+            .filter(|(_key, node)| node.parent_hash.is_none())
+            .count();
+
+        if num_root_nodes != 1 {
+            return Err(format!(
+                "Tree has {} roots, should have exactly one.",
+                num_root_nodes
+            ));
+        }
+
+        let verify_node_exists = |key: Hash256, msg: String| -> std::result::Result<(), String> {
+            if self.nodes.contains_key(&key) {
+                Ok(())
+            } else {
+                Err(msg)
+            }
+        };
+
+        // Iterate through all the nodes and ensure all references they store are valid.
+        self.nodes
+            .iter()
+            .map(|(_key, node)| {
+                if let Some(parent_hash) = node.parent_hash {
+                    verify_node_exists(parent_hash, "parent must exist".to_string())?;
+                }
+
+                node.children
+                    .iter()
+                    .map(|child| verify_node_exists(*child, "child_must_exist".to_string()))
+                    .collect::<std::result::Result<(), String>>()?;
+
+                verify_node_exists(node.block_hash, "block hash must exist".to_string())?;
+
+                Ok(())
+            })
+            .collect::<std::result::Result<(), String>>()?;
+
+        Ok(())
+    }
+
     fn get_node(&self, hash: Hash256) -> Result<&Node> {
         self.nodes
             .get(&hash)
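
The new `find_highest_common_ancestor` walks the longer branch back until
both iterators are at the same slot, then steps both back in lock-step until
they yield the same root (the fork point). A toy sketch of the same approach
(illustrative only, not Lighthouse code), using indices into a parent array
instead of block roots:

```rust
/// Walk both nodes to equal depth, then step both up together until they
/// converge on the highest common ancestor (the fork point).
fn highest_common_ancestor(parent: &[usize], depth: &[usize], mut a: usize, mut b: usize) -> usize {
    while depth[a] > depth[b] {
        a = parent[a];
    }
    while depth[b] > depth[a] {
        b = parent[b];
    }
    while a != b {
        a = parent[a];
        b = parent[b];
    }
    a
}

fn main() {
    // A tiny tree; the root (0) is its own parent:
    //
    //   0 - 1 - 2 - 4
    //         \
    //          3
    let parent = vec![0, 0, 1, 1, 2];
    let depth = vec![0, 1, 2, 2, 3];
    assert_eq!(highest_common_ancestor(&parent, &depth, 4, 3), 1);
    println!("fork point of 4 and 3 is node 1");
}
```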
@ -566,9 +686,9 @@ where
             .ok_or_else(|| Error::MissingNode(hash))
     }
 
-    fn get_block(&self, block_root: Hash256) -> Result<BeaconBlock> {
+    fn get_block(&self, block_root: Hash256) -> Result<BeaconBlock<E>> {
         self.store
-            .get::<BeaconBlock>(&block_root)?
+            .get::<BeaconBlock<E>>(&block_root)?
             .ok_or_else(|| Error::MissingBlock(block_root))
     }
 
@ -608,6 +728,18 @@ impl Node {
         Ok(())
     }
 
+    pub fn remove_child(&mut self, child: Hash256) -> Result<()> {
+        let i = self
+            .children
+            .iter()
+            .position(|&c| c == child)
+            .ok_or_else(|| Error::MissingChild(child))?;
+
+        self.children.remove(i);
+
+        Ok(())
+    }
+
     pub fn remove_voter(&mut self, voter: usize) -> Option<usize> {
         let i = self.voters.iter().position(|&v| v == voter)?;
         Some(self.voters.remove(i))

eth2/lmd_ghost/tests/test.rs: 359 lines (new file)
@ -0,0 +1,359 @@
|
|||||||
|
#![cfg(not(debug_assertions))]
|
||||||
|
|
||||||
|
#[macro_use]
|
||||||
|
extern crate lazy_static;
|
||||||
|
|
||||||
|
use beacon_chain::test_utils::{
|
||||||
|
AttestationStrategy, BeaconChainHarness as BaseBeaconChainHarness, BlockStrategy,
|
||||||
|
};
|
||||||
|
use lmd_ghost::{LmdGhost, ThreadSafeReducedTree as BaseThreadSafeReducedTree};
|
||||||
|
use rand::{prelude::*, rngs::StdRng};
|
||||||
|
use std::sync::Arc;
|
||||||
|
use store::{
|
||||||
|
iter::{AncestorIter, BestBlockRootsIterator},
|
||||||
|
MemoryStore, Store,
|
||||||
|
};
|
||||||
|
use types::{BeaconBlock, EthSpec, Hash256, MinimalEthSpec, Slot};
|
||||||
|
|
||||||
|
// Should ideally be divisible by 3.
|
||||||
|
pub const VALIDATOR_COUNT: usize = 3 * 8;
|
||||||
|
|
||||||
|
type TestEthSpec = MinimalEthSpec;
|
||||||
|
type ThreadSafeReducedTree = BaseThreadSafeReducedTree<MemoryStore, TestEthSpec>;
|
||||||
|
type BeaconChainHarness = BaseBeaconChainHarness<ThreadSafeReducedTree, TestEthSpec>;
|
||||||
|
type RootAndSlot = (Hash256, Slot);
|
||||||
|
|
||||||
|
lazy_static! {
|
||||||
|
/// A lazy-static instance of a `BeaconChainHarness` that contains two forks.
|
||||||
|
///
|
||||||
|
/// Reduces test setup time by providing a common harness.
|
||||||
|
static ref FORKED_HARNESS: ForkedHarness = ForkedHarness::new();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Contains a `BeaconChainHarness` that has two forks, caused by a validator skipping a slot and
|
||||||
|
/// then some validators building on one head and some on the other.
|
||||||
|
///
|
||||||
|
/// Care should be taken to ensure that the `ForkedHarness` does not expose any interior mutability
|
||||||
|
/// from it's fields. This would cause cross-contamination between tests when used with
|
||||||
|
/// `lazy_static`.
|
||||||
|
struct ForkedHarness {
|
||||||
|
/// Private (not `pub`) because the `BeaconChainHarness` has interior mutability. We
|
||||||
|
/// don't expose it to avoid contamination between tests.
|
||||||
|
harness: BeaconChainHarness,
|
||||||
|
pub genesis_block_root: Hash256,
|
||||||
|
pub genesis_block: BeaconBlock<TestEthSpec>,
|
||||||
|
pub honest_head: RootAndSlot,
|
||||||
|
pub faulty_head: RootAndSlot,
|
||||||
|
pub honest_roots: Vec<RootAndSlot>,
|
||||||
|
pub faulty_roots: Vec<RootAndSlot>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ForkedHarness {
|
||||||
|
/// A new standard instance of with constant parameters.
|
||||||
|
pub fn new() -> Self {
|
||||||
|
// let (harness, honest_roots, faulty_roots) = get_harness_containing_two_forks();
|
||||||
|
let harness = BeaconChainHarness::new(VALIDATOR_COUNT);
|
||||||
|
|
||||||
|
// Move past the zero slot.
|
||||||
|
harness.advance_slot();
|
||||||
|
|
||||||
|
let delay = TestEthSpec::default_spec().min_attestation_inclusion_delay as usize;
|
||||||
|
|
||||||
|
let initial_blocks = delay + 5;
|
||||||
|
|
||||||
|
// Build an initial chain where all validators agree.
|
||||||
|
harness.extend_chain(
|
||||||
|
initial_blocks,
|
||||||
|
BlockStrategy::OnCanonicalHead,
|
||||||
|
AttestationStrategy::AllValidators,
|
||||||
|
);
|
||||||
|
|
||||||
|
let two_thirds = (VALIDATOR_COUNT / 3) * 2;
|
||||||
|
let honest_validators: Vec<usize> = (0..two_thirds).collect();
|
||||||
|
let faulty_validators: Vec<usize> = (two_thirds..VALIDATOR_COUNT).collect();
|
||||||
|
let honest_fork_blocks = delay + 5;
|
||||||
|
let faulty_fork_blocks = delay + 5;
|
||||||
|
|
||||||
|
let (honest_head, faulty_head) = harness.generate_two_forks_by_skipping_a_block(
|
||||||
|
&honest_validators,
|
||||||
|
&faulty_validators,
|
||||||
|
honest_fork_blocks,
|
||||||
|
faulty_fork_blocks,
|
||||||
|
);
|
||||||
|
|
||||||
|
let mut honest_roots =
|
||||||
|
get_ancestor_roots::<TestEthSpec, _>(harness.chain.store.clone(), honest_head);
|
||||||
|
|
||||||
|
honest_roots.insert(
|
||||||
|
0,
|
||||||
|
(honest_head, get_slot_for_block_root(&harness, honest_head)),
|
||||||
|
);
|
||||||
|
|
||||||
|
let mut faulty_roots =
|
||||||
|
get_ancestor_roots::<TestEthSpec, _>(harness.chain.store.clone(), faulty_head);
|
||||||
|
|
||||||
|
faulty_roots.insert(
|
||||||
|
0,
|
||||||
|
(faulty_head, get_slot_for_block_root(&harness, faulty_head)),
|
||||||
|
);
|
||||||
|
|
||||||
|
let genesis_block_root = harness.chain.genesis_block_root;
|
||||||
|
let genesis_block = harness
|
||||||
|
.chain
|
||||||
|
.store
|
||||||
|
.get::<BeaconBlock<TestEthSpec>>(&genesis_block_root)
|
||||||
|
.expect("Genesis block should exist")
|
||||||
|
.expect("DB should not error");
|
||||||
|
|
||||||
|
Self {
|
||||||
|
harness,
|
||||||
|
genesis_block_root,
|
||||||
|
genesis_block,
|
||||||
|
honest_head: *honest_roots.last().expect("Chain cannot be empty"),
|
||||||
|
faulty_head: *faulty_roots.last().expect("Chain cannot be empty"),
|
||||||
|
honest_roots,
|
||||||
|
faulty_roots,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn store_clone(&self) -> MemoryStore {
|
||||||
|
(*self.harness.chain.store).clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return a brand-new, empty fork choice with a reference to `harness.store`.
|
||||||
|
pub fn new_fork_choice(&self) -> ThreadSafeReducedTree {
|
||||||
|
// Take a full clone of the store built by the harness.
|
||||||
|
//
|
||||||
|
// Taking a clone here ensures that each fork choice gets it's own store so there is no
|
||||||
|
// cross-contamination between tests.
|
||||||
|
let store: MemoryStore = self.store_clone();
|
||||||
|
|
||||||
|
ThreadSafeReducedTree::new(
|
||||||
|
Arc::new(store),
|
||||||
|
&self.genesis_block,
|
||||||
|
self.genesis_block_root,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn all_block_roots(&self) -> Vec<RootAndSlot> {
|
||||||
|
let mut all_roots = self.honest_roots.clone();
|
||||||
|
all_roots.append(&mut self.faulty_roots.clone());
|
||||||
|
|
||||||
|
all_roots.dedup();
|
||||||
|
|
||||||
|
all_roots
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn weight_function(_validator_index: usize) -> Option<u64> {
|
||||||
|
Some(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Helper: returns all the ancestor roots and slots for a given block_root.
|
||||||
|
fn get_ancestor_roots<E: EthSpec, U: Store>(
|
||||||
|
store: Arc<U>,
|
||||||
|
block_root: Hash256,
|
||||||
|
) -> Vec<(Hash256, Slot)> {
|
||||||
|
let block = store
|
||||||
|
.get::<BeaconBlock<TestEthSpec>>(&block_root)
|
||||||
|
.expect("block should exist")
|
||||||
|
.expect("store should not error");
|
||||||
|
|
||||||
|
<BeaconBlock<TestEthSpec> as AncestorIter<_, BestBlockRootsIterator<TestEthSpec, _>>>::try_iter_ancestor_roots(
|
||||||
|
&block, store,
|
||||||
|
)
|
||||||
|
.expect("should be able to create ancestor iter")
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Helper: returns the slot for some block_root.
|
||||||
|
fn get_slot_for_block_root(harness: &BeaconChainHarness, block_root: Hash256) -> Slot {
|
||||||
|
harness
|
||||||
|
.chain
|
||||||
|
.store
|
||||||
|
.get::<BeaconBlock<TestEthSpec>>(&block_root)
|
||||||
|
.expect("head block should exist")
|
||||||
|
.expect("DB should not error")
|
||||||
|
.slot
|
||||||
|
}
|
||||||
|
|
||||||
|
const RANDOM_ITERATIONS: usize = 50;
|
||||||
|
const RANDOM_ACTIONS_PER_ITERATION: usize = 100;
|
||||||
|
|
||||||
|
/// Create a single LMD instance and have one validator vote in reverse (highest to lowest slot)
|
||||||
|
/// down the chain.
|
||||||
|
#[test]
|
||||||
|
fn random_scenario() {
|
||||||
|
let harness = &FORKED_HARNESS;
|
||||||
|
let block_roots = harness.all_block_roots();
|
||||||
|
let validators: Vec<usize> = (0..VALIDATOR_COUNT).collect();
|
||||||
|
let mut rng = StdRng::seed_from_u64(9375205782030385); // Keyboard mash.
|
||||||
|
|
||||||
|
for _ in 0..RANDOM_ITERATIONS {
|
||||||
|
let lmd = harness.new_fork_choice();
|
||||||
|
|
||||||
|
for _ in 0..RANDOM_ACTIONS_PER_ITERATION {
|
||||||
|
let (root, slot) = block_roots[rng.next_u64() as usize % block_roots.len()];
|
||||||
|
let validator_index = validators[rng.next_u64() as usize % validators.len()];
|
||||||
|
|
||||||
|
lmd.process_attestation(validator_index, root, slot)
|
||||||
|
.expect("fork choice should accept randomly-placed attestations");
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
lmd.verify_integrity(),
|
||||||
|
Ok(()),
|
||||||
|
"New tree should have integrity"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a single LMD instance and have one validator vote in reverse (highest to lowest slot)
/// down the chain.
#[test]
fn single_voter_persistent_instance_reverse_order() {
    let harness = &FORKED_HARNESS;

    let lmd = harness.new_fork_choice();

    assert_eq!(
        lmd.verify_integrity(),
        Ok(()),
        "New tree should have integrity"
    );

    for (root, slot) in harness.honest_roots.iter().rev() {
        lmd.process_attestation(0, *root, *slot)
            .expect("fork choice should accept attestations to honest roots in reverse");

        assert_eq!(
            lmd.verify_integrity(),
            Ok(()),
            "Tree integrity should be maintained whilst processing attestations"
        );
    }

    // The honest head should be selected.
    let (head_root, head_slot) = harness.honest_roots.first().unwrap();
    let (finalized_root, _) = harness.honest_roots.last().unwrap();

    assert_eq!(
        lmd.find_head(*head_slot, *finalized_root, ForkedHarness::weight_function),
        Ok(*head_root),
        "Honest head should be selected"
    );
}

/// A single validator applies a single vote to each block in the honest fork, using a new tree
/// each time.
#[test]
fn single_voter_many_instance_honest_blocks_voting_forwards() {
    let harness = &FORKED_HARNESS;

    for (root, slot) in &harness.honest_roots {
        let lmd = harness.new_fork_choice();
        lmd.process_attestation(0, *root, *slot)
            .expect("fork choice should accept attestations to honest roots");

        assert_eq!(
            lmd.verify_integrity(),
            Ok(()),
            "Tree integrity should be maintained whilst processing attestations"
        );
    }
}

/// Same as above, but in reverse order (votes on the highest honest block first).
#[test]
fn single_voter_many_instance_honest_blocks_voting_in_reverse() {
    let harness = &FORKED_HARNESS;

    // Same as above, but in reverse order (votes on the highest honest block first).
    for (root, slot) in harness.honest_roots.iter().rev() {
        let lmd = harness.new_fork_choice();
        lmd.process_attestation(0, *root, *slot)
            .expect("fork choice should accept attestations to honest roots in reverse");

        assert_eq!(
            lmd.verify_integrity(),
            Ok(()),
            "Tree integrity should be maintained whilst processing attestations"
        );
    }
}

/// A single validator applies a single vote to each block in the faulty fork, using a new tree
/// each time.
#[test]
fn single_voter_many_instance_faulty_blocks_voting_forwards() {
    let harness = &FORKED_HARNESS;

    for (root, slot) in &harness.faulty_roots {
        let lmd = harness.new_fork_choice();
        lmd.process_attestation(0, *root, *slot)
            .expect("fork choice should accept attestations to faulty roots");

        assert_eq!(
            lmd.verify_integrity(),
            Ok(()),
            "Tree integrity should be maintained whilst processing attestations"
        );
    }
}

/// Same as above, but in reverse order (votes on the highest faulty block first).
#[test]
fn single_voter_many_instance_faulty_blocks_voting_in_reverse() {
    let harness = &FORKED_HARNESS;

    for (root, slot) in harness.faulty_roots.iter().rev() {
        let lmd = harness.new_fork_choice();
        lmd.process_attestation(0, *root, *slot)
            .expect("fork choice should accept attestations to faulty roots in reverse");

        assert_eq!(
            lmd.verify_integrity(),
            Ok(()),
            "Tree integrity should be maintained whilst processing attestations"
        );
    }
}

/// Ensures that the finalized root can be set to all values in `roots`.
fn test_update_finalized_root(roots: &[(Hash256, Slot)]) {
    let harness = &FORKED_HARNESS;

    let lmd = harness.new_fork_choice();

    for (root, _slot) in roots.iter().rev() {
        let block = harness
            .store_clone()
            .get::<BeaconBlock<TestEthSpec>>(root)
            .expect("db should not error")
            .expect("block should exist");
        lmd.update_finalized_root(&block, *root)
            .expect("finalized root should update");

        assert_eq!(
            lmd.verify_integrity(),
            Ok(()),
            "Tree integrity should be maintained after updating the finalized root"
        );
    }
}

/// Iterates from low-to-high slot through the faulty roots, updating the finalized root.
#[test]
fn update_finalized_root_faulty() {
    let harness = &FORKED_HARNESS;

    test_update_finalized_root(&harness.faulty_roots)
}

/// Iterates from low-to-high slot through the honest roots, updating the finalized root.
#[test]
fn update_finalized_root_honest() {
    let harness = &FORKED_HARNESS;

    test_update_finalized_root(&harness.honest_roots)
}
@@ -5,7 +5,6 @@ authors = ["Michael Sproul <michael@sigmaprime.io>"]
 edition = "2018"
 
 [dependencies]
-boolean-bitfield = { path = "../utils/boolean-bitfield" }
 int_to_bytes = { path = "../utils/int_to_bytes" }
 itertools = "0.8"
 parking_lot = "0.7"
@@ -13,3 +12,6 @@ types = { path = "../types" }
 state_processing = { path = "../state_processing" }
 eth2_ssz = { path = "../utils/ssz" }
 eth2_ssz_derive = { path = "../utils/ssz_derive" }
+
+[dev-dependencies]
+rand = "0.5.5"
@@ -1,16 +1,18 @@
 use crate::max_cover::MaxCover;
-use boolean_bitfield::BooleanBitfield;
-use types::{Attestation, BeaconState, EthSpec};
+use types::{Attestation, BeaconState, BitList, EthSpec};
 
-pub struct AttMaxCover<'a> {
+pub struct AttMaxCover<'a, T: EthSpec> {
     /// Underlying attestation.
-    att: &'a Attestation,
+    att: &'a Attestation<T>,
     /// Bitfield of validators that are covered by this attestation.
-    fresh_validators: BooleanBitfield,
+    fresh_validators: BitList<T::MaxValidatorsPerCommittee>,
 }
 
-impl<'a> AttMaxCover<'a> {
-    pub fn new(att: &'a Attestation, fresh_validators: BooleanBitfield) -> Self {
+impl<'a, T: EthSpec> AttMaxCover<'a, T> {
+    pub fn new(
+        att: &'a Attestation<T>,
+        fresh_validators: BitList<T::MaxValidatorsPerCommittee>,
+    ) -> Self {
         Self {
             att,
             fresh_validators,
@@ -18,15 +20,15 @@ impl<'a> AttMaxCover<'a> {
         }
     }
 }
 
-impl<'a> MaxCover for AttMaxCover<'a> {
-    type Object = Attestation;
-    type Set = BooleanBitfield;
+impl<'a, T: EthSpec> MaxCover for AttMaxCover<'a, T> {
+    type Object = Attestation<T>;
+    type Set = BitList<T::MaxValidatorsPerCommittee>;
 
-    fn object(&self) -> Attestation {
+    fn object(&self) -> Attestation<T> {
         self.att.clone()
     }
 
-    fn covering_set(&self) -> &BooleanBitfield {
+    fn covering_set(&self) -> &BitList<T::MaxValidatorsPerCommittee> {
         &self.fresh_validators
     }
 
@@ -37,11 +39,11 @@ impl<'a> MaxCover for AttMaxCover<'a> {
     /// that a shard and epoch uniquely identify a committee.
     fn update_covering_set(
         &mut self,
-        best_att: &Attestation,
-        covered_validators: &BooleanBitfield,
+        best_att: &Attestation<T>,
+        covered_validators: &BitList<T::MaxValidatorsPerCommittee>,
     ) {
-        if self.att.data.shard == best_att.data.shard
-            && self.att.data.target_epoch == best_att.data.target_epoch
+        if self.att.data.crosslink.shard == best_att.data.crosslink.shard
+            && self.att.data.target.epoch == best_att.data.target.epoch
         {
             self.fresh_validators.difference_inplace(covered_validators);
         }
|
|||||||
/// of validators for which the included attestation is their first in the epoch. The attestation
|
/// of validators for which the included attestation is their first in the epoch. The attestation
|
||||||
/// is judged against the state's `current_epoch_attestations` or `previous_epoch_attestations`
|
/// is judged against the state's `current_epoch_attestations` or `previous_epoch_attestations`
|
||||||
/// depending on when it was created, and all those validators who have already attested are
|
/// depending on when it was created, and all those validators who have already attested are
|
||||||
/// removed from the `aggregation_bitfield` before returning it.
|
/// removed from the `aggregation_bits` before returning it.
|
||||||
// TODO: This could be optimised with a map from validator index to whether that validator has
|
// TODO: This could be optimised with a map from validator index to whether that validator has
|
||||||
// attested in each of the current and previous epochs. Currently quadratic in number of validators.
|
// attested in each of the current and previous epochs. Currently quadratic in number of validators.
|
||||||
pub fn earliest_attestation_validators<T: EthSpec>(
|
pub fn earliest_attestation_validators<T: EthSpec>(
|
||||||
attestation: &Attestation,
|
attestation: &Attestation<T>,
|
||||||
state: &BeaconState<T>,
|
state: &BeaconState<T>,
|
||||||
) -> BooleanBitfield {
|
) -> BitList<T::MaxValidatorsPerCommittee> {
|
||||||
// Bitfield of validators whose attestations are new/fresh.
|
// Bitfield of validators whose attestations are new/fresh.
|
||||||
let mut new_validators = attestation.aggregation_bitfield.clone();
|
let mut new_validators = attestation.aggregation_bits.clone();
|
||||||
|
|
||||||
let state_attestations = if attestation.data.target_epoch == state.current_epoch() {
|
let state_attestations = if attestation.data.target.epoch == state.current_epoch() {
|
||||||
&state.current_epoch_attestations
|
&state.current_epoch_attestations
|
||||||
} else if attestation.data.target_epoch == state.previous_epoch() {
|
} else if attestation.data.target.epoch == state.previous_epoch() {
|
||||||
&state.previous_epoch_attestations
|
&state.previous_epoch_attestations
|
||||||
} else {
|
} else {
|
||||||
return BooleanBitfield::from_elem(attestation.aggregation_bitfield.len(), false);
|
return BitList::with_capacity(0).unwrap();
|
||||||
};
|
};
|
||||||
|
|
||||||
state_attestations
|
state_attestations
|
||||||
@ -81,10 +83,12 @@ pub fn earliest_attestation_validators<T: EthSpec>(
|
|||||||
// In a single epoch, an attester should only be attesting for one shard.
|
// In a single epoch, an attester should only be attesting for one shard.
|
||||||
// TODO: we avoid including slashable attestations in the state here,
|
// TODO: we avoid including slashable attestations in the state here,
|
||||||
// but maybe we should do something else with them (like construct slashings).
|
// but maybe we should do something else with them (like construct slashings).
|
||||||
.filter(|existing_attestation| existing_attestation.data.shard == attestation.data.shard)
|
.filter(|existing_attestation| {
|
||||||
|
existing_attestation.data.crosslink.shard == attestation.data.crosslink.shard
|
||||||
|
})
|
||||||
.for_each(|existing_attestation| {
|
.for_each(|existing_attestation| {
|
||||||
// Remove the validators who have signed the existing attestation (they are not new)
|
// Remove the validators who have signed the existing attestation (they are not new)
|
||||||
new_validators.difference_inplace(&existing_attestation.aggregation_bitfield);
|
new_validators.difference_inplace(&existing_attestation.aggregation_bits);
|
||||||
});
|
});
|
||||||
|
|
||||||
new_validators
|
new_validators
|
||||||
|
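The freshness computation above is just a bitwise set difference: start from the attestation's `aggregation_bits` and clear every bit already set in a matching pending attestation. A minimal sketch of the same operation on plain `Vec<bool>` (illustrative only; the real code uses `BitList` from `eth2_ssz_types`):

/// Clear every bit in `fresh` that is already set in `existing`
/// (the semantics of `difference_inplace` above).
fn difference_inplace(fresh: &mut [bool], existing: &[bool]) {
    for (f, e) in fresh.iter_mut().zip(existing) {
        *f = *f && !*e;
    }
}

fn main() {
    let mut fresh = vec![true, true, false, true];
    let existing = vec![false, true, false, false];
    difference_inplace(&mut fresh, &existing);
    assert_eq!(fresh, vec![true, false, false, true]);
}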
@@ -19,7 +19,7 @@ impl AttestationId {
         spec: &ChainSpec,
     ) -> Self {
         let mut bytes = ssz_encode(attestation);
-        let epoch = attestation.target_epoch;
+        let epoch = attestation.target.epoch;
         bytes.extend_from_slice(&AttestationId::compute_domain_bytes(epoch, state, spec));
         AttestationId { v: bytes }
     }
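The ID above is simply the SSZ encoding of the attestation data with fork-and-epoch domain bytes appended, so attestations from different forks or epochs can never collide in the pool's map. A schematic of the concatenation, with byte vectors standing in for the real `ssz_encode` and domain computation (hypothetical helpers, not the crate's API):

/// Schematic of `AttestationId::from_data`: payload bytes plus domain bytes.
fn attestation_id(encoded_data: &[u8], domain_bytes: &[u8]) -> Vec<u8> {
    let mut v = encoded_data.to_vec();
    v.extend_from_slice(domain_bytes);
    v
}

fn main() {
    let id_a = attestation_id(&[1, 2, 3], &[9, 9]);
    let id_b = attestation_id(&[1, 2, 3], &[8, 8]); // same data, different fork/epoch
    assert_ne!(id_a, id_b);
}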
@@ -15,30 +15,29 @@ use state_processing::per_block_processing::errors::{
     ExitValidationError, ProposerSlashingValidationError, TransferValidationError,
 };
 use state_processing::per_block_processing::{
-    get_slashable_indices_modular, validate_attestation,
-    validate_attestation_time_independent_only, verify_attester_slashing, verify_exit,
-    verify_exit_time_independent_only, verify_proposer_slashing, verify_transfer,
-    verify_transfer_time_independent_only,
+    get_slashable_indices_modular, verify_attestation, verify_attestation_time_independent_only,
+    verify_attester_slashing, verify_exit, verify_exit_time_independent_only,
+    verify_proposer_slashing, verify_transfer, verify_transfer_time_independent_only,
 };
 use std::collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet};
 use std::marker::PhantomData;
 use types::{
-    Attestation, AttesterSlashing, BeaconState, ChainSpec, Deposit, EthSpec, ProposerSlashing,
-    Transfer, Validator, VoluntaryExit,
+    typenum::Unsigned, Attestation, AttesterSlashing, BeaconState, ChainSpec, Deposit, EthSpec,
+    ProposerSlashing, Transfer, Validator, VoluntaryExit,
 };
 
 #[derive(Default, Debug)]
 pub struct OperationPool<T: EthSpec + Default> {
     /// Map from attestation ID (see below) to vectors of attestations.
-    attestations: RwLock<HashMap<AttestationId, Vec<Attestation>>>,
+    attestations: RwLock<HashMap<AttestationId, Vec<Attestation<T>>>>,
     /// Map from deposit index to deposit data.
     // NOTE: We assume that there is only one deposit per index
     // because the Eth1 data is updated (at most) once per epoch,
-    // and the spec doesn't seem to accomodate for re-orgs on a time-frame
+    // and the spec doesn't seem to accommodate for re-orgs on a time-frame
     // longer than an epoch
     deposits: RwLock<BTreeMap<u64, Deposit>>,
     /// Map from two attestation IDs to a slashing for those IDs.
-    attester_slashings: RwLock<HashMap<(AttestationId, AttestationId), AttesterSlashing>>,
+    attester_slashings: RwLock<HashMap<(AttestationId, AttestationId), AttesterSlashing<T>>>,
     /// Map from proposer index to slashing.
     proposer_slashings: RwLock<HashMap<u64, ProposerSlashing>>,
     /// Map from exiting validator to their exit data.
@@ -67,12 +66,12 @@ impl<T: EthSpec> OperationPool<T> {
     /// Insert an attestation into the pool, aggregating it with existing attestations if possible.
     pub fn insert_attestation(
         &self,
-        attestation: Attestation,
+        attestation: Attestation<T>,
         state: &BeaconState<T>,
         spec: &ChainSpec,
     ) -> Result<(), AttestationValidationError> {
         // Check that attestation signatures are valid.
-        validate_attestation_time_independent_only(state, &attestation, spec)?;
+        verify_attestation_time_independent_only(state, &attestation, spec)?;
 
         let id = AttestationId::from_data(&attestation.data, state, spec);
 
@@ -110,7 +109,11 @@ impl<T: EthSpec> OperationPool<T> {
     }
 
     /// Get a list of attestations for inclusion in a block.
-    pub fn get_attestations(&self, state: &BeaconState<T>, spec: &ChainSpec) -> Vec<Attestation> {
+    pub fn get_attestations(
+        &self,
+        state: &BeaconState<T>,
+        spec: &ChainSpec,
+    ) -> Vec<Attestation<T>> {
         // Attestations for the current fork, which may be from the current or previous epoch.
         let prev_epoch = state.previous_epoch();
         let current_epoch = state.current_epoch();
@@ -125,10 +128,10 @@ impl<T: EthSpec> OperationPool<T> {
             })
             .flat_map(|(_, attestations)| attestations)
             // That are valid...
-            .filter(|attestation| validate_attestation(state, attestation, spec).is_ok())
+            .filter(|attestation| verify_attestation(state, attestation, spec).is_ok())
             .map(|att| AttMaxCover::new(att, earliest_attestation_validators(att, state)));
 
-        maximum_cover(valid_attestations, spec.max_attestations as usize)
+        maximum_cover(valid_attestations, T::MaxAttestations::to_usize())
     }
 
     /// Remove attestations which are too old to be included in a block.
@@ -141,7 +144,7 @@ impl<T: EthSpec> OperationPool<T> {
             // All the attestations in this bucket have the same data, so we only need to
             // check the first one.
             attestations.first().map_or(false, |att| {
-                finalized_state.current_epoch() <= att.data.target_epoch + 1
+                finalized_state.current_epoch() <= att.data.target.epoch + 1
            })
         });
     }
@@ -149,13 +152,15 @@ impl<T: EthSpec> OperationPool<T> {
     /// Add a deposit to the pool.
     ///
     /// No two distinct deposits should be added with the same index.
+    // TODO: we need to rethink this entirely
     pub fn insert_deposit(
         &self,
+        index: u64,
         deposit: Deposit,
     ) -> Result<DepositInsertStatus, DepositValidationError> {
         use DepositInsertStatus::*;
 
-        match self.deposits.write().entry(deposit.index) {
+        match self.deposits.write().entry(index) {
             Entry::Vacant(entry) => {
                 entry.insert(deposit);
                 Ok(Fresh)
@@ -173,12 +178,12 @@ impl<T: EthSpec> OperationPool<T> {
     /// Get an ordered list of deposits for inclusion in a block.
     ///
     /// Take at most the maximum number of deposits, beginning from the current deposit index.
-    pub fn get_deposits(&self, state: &BeaconState<T>, spec: &ChainSpec) -> Vec<Deposit> {
+    pub fn get_deposits(&self, state: &BeaconState<T>) -> Vec<Deposit> {
         // TODO: We need to update the Merkle proofs for existing deposits as more deposits
         // are added. It probably makes sense to construct the proofs from scratch when forming
         // a block, using fresh info from the ETH1 chain for the current deposit root.
-        let start_idx = state.deposit_index;
-        (start_idx..start_idx + spec.max_deposits)
+        let start_idx = state.eth1_deposit_index;
+        (start_idx..start_idx + T::MaxDeposits::to_u64())
             .map(|idx| self.deposits.read().get(&idx).cloned())
             .take_while(Option::is_some)
             .flatten()
@@ -187,7 +192,7 @@ impl<T: EthSpec> OperationPool<T> {
 
     /// Remove all deposits with index less than the deposit index of the latest finalised block.
     pub fn prune_deposits(&self, state: &BeaconState<T>) -> BTreeMap<u64, Deposit> {
-        let deposits_keep = self.deposits.write().split_off(&state.deposit_index);
+        let deposits_keep = self.deposits.write().split_off(&state.eth1_deposit_index);
         std::mem::replace(&mut self.deposits.write(), deposits_keep)
     }
 
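`get_deposits` above relies on the `BTreeMap` keys being contiguous from `eth1_deposit_index` upward: the `take_while(Option::is_some)` stops at the first gap. A small sketch of that contiguous-run behaviour (standalone, with plain strings in place of `Deposit`):

use std::collections::BTreeMap;

/// Collect values at consecutive keys starting from `start`, stopping at the
/// first gap (the same shape as `get_deposits` above).
fn contiguous_from(map: &BTreeMap<u64, &str>, start: u64, max: u64) -> Vec<&'static str> {
    (start..start + max)
        .map(|idx| map.get(&idx).copied())
        .take_while(Option::is_some)
        .flatten()
        .collect()
}

fn main() {
    let mut map = BTreeMap::new();
    map.insert(10, "a");
    map.insert(11, "b");
    map.insert(13, "d"); // gap at 12
    assert_eq!(contiguous_from(&map, 10, 16), vec!["a", "b"]);
}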
@@ -216,7 +221,7 @@ impl<T: EthSpec> OperationPool<T> {
     ///
     /// Depends on the fork field of the state, but not on the state's epoch.
     fn attester_slashing_id(
-        slashing: &AttesterSlashing,
+        slashing: &AttesterSlashing<T>,
         state: &BeaconState<T>,
         spec: &ChainSpec,
     ) -> (AttestationId, AttestationId) {
@@ -229,7 +234,7 @@ impl<T: EthSpec> OperationPool<T> {
     /// Insert an attester slashing into the pool.
     pub fn insert_attester_slashing(
         &self,
-        slashing: AttesterSlashing,
+        slashing: AttesterSlashing<T>,
         state: &BeaconState<T>,
         spec: &ChainSpec,
     ) -> Result<(), AttesterSlashingValidationError> {
@@ -248,16 +253,16 @@ impl<T: EthSpec> OperationPool<T> {
         &self,
         state: &BeaconState<T>,
         spec: &ChainSpec,
-    ) -> (Vec<ProposerSlashing>, Vec<AttesterSlashing>) {
+    ) -> (Vec<ProposerSlashing>, Vec<AttesterSlashing<T>>) {
         let proposer_slashings = filter_limit_operations(
             self.proposer_slashings.read().values(),
             |slashing| {
                 state
-                    .validator_registry
+                    .validators
                     .get(slashing.proposer_index as usize)
                     .map_or(false, |validator| !validator.slashed)
             },
-            spec.max_proposer_slashings,
+            T::MaxProposerSlashings::to_usize(),
         );
 
         // Set of validators to be slashed, so we don't attempt to construct invalid attester
@@ -291,7 +296,7 @@ impl<T: EthSpec> OperationPool<T> {
                     false
                 }
             })
-            .take(spec.max_attester_slashings as usize)
+            .take(T::MaxAttesterSlashings::to_usize())
             .map(|(_, slashing)| slashing.clone())
             .collect();
 
@@ -347,7 +352,7 @@ impl<T: EthSpec> OperationPool<T> {
         filter_limit_operations(
             self.voluntary_exits.read().values(),
             |exit| verify_exit(state, exit, spec).is_ok(),
-            spec.max_voluntary_exits,
+            T::MaxVoluntaryExits::to_usize(),
         )
     }
 
@@ -384,7 +389,7 @@ impl<T: EthSpec> OperationPool<T> {
             .iter()
             .filter(|transfer| verify_transfer(state, transfer, spec).is_ok())
             .sorted_by_key(|transfer| std::cmp::Reverse(transfer.fee))
-            .take(spec.max_transfers as usize)
+            .take(T::MaxTransfers::to_usize())
             .cloned()
             .collect()
     }
@@ -408,7 +413,7 @@ impl<T: EthSpec> OperationPool<T> {
 }
 
 /// Filter up to a maximum number of operations out of an iterator.
-fn filter_limit_operations<'a, T: 'a, I, F>(operations: I, filter: F, limit: u64) -> Vec<T>
+fn filter_limit_operations<'a, T: 'a, I, F>(operations: I, filter: F, limit: usize) -> Vec<T>
 where
     I: IntoIterator<Item = &'a T>,
     F: Fn(&T) -> bool,
@@ -417,7 +422,7 @@ where
     operations
         .into_iter()
         .filter(|x| filter(*x))
-        .take(limit as usize)
+        .take(limit)
         .cloned()
         .collect()
 }
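The helper above is a plain filter-then-truncate over borrowed operations; with the new `usize` limit, the `as usize` casts disappear from the call sites. A standalone usage sketch over integers (same shape, hypothetical name):

/// Keep items passing `filter`, up to `limit` of them, cloning into a Vec.
fn filter_limit<'a, T: 'a + Clone, I, F>(ops: I, filter: F, limit: usize) -> Vec<T>
where
    I: IntoIterator<Item = &'a T>,
    F: Fn(&T) -> bool,
{
    ops.into_iter().filter(|x| filter(*x)).take(limit).cloned().collect()
}

fn main() {
    let xs = vec![1, 2, 3, 4, 5, 6];
    // Keep even numbers, at most two of them.
    assert_eq!(filter_limit(&xs, |x| x % 2 == 0, 2), vec![2, 4]);
}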
@@ -436,7 +441,7 @@ fn prune_validator_hash_map<T, F, E: EthSpec>(
 {
     map.retain(|&validator_index, _| {
         finalized_state
-            .validator_registry
+            .validators
             .get(validator_index as usize)
             .map_or(true, |validator| !prune_if(validator))
     });
@@ -458,6 +463,7 @@ impl<T: EthSpec + Default> PartialEq for OperationPool<T> {
 mod tests {
     use super::DepositInsertStatus::*;
     use super::*;
+    use rand::Rng;
     use types::test_utils::*;
     use types::*;
 
@@ -466,13 +472,16 @@ mod tests {
         let rng = &mut XorShiftRng::from_seed([42; 16]);
         let op_pool = OperationPool::<MinimalEthSpec>::new();
         let deposit1 = make_deposit(rng);
-        let mut deposit2 = make_deposit(rng);
-        deposit2.index = deposit1.index;
+        let deposit2 = make_deposit(rng);
+        let index = rng.gen();
 
-        assert_eq!(op_pool.insert_deposit(deposit1.clone()), Ok(Fresh));
-        assert_eq!(op_pool.insert_deposit(deposit1.clone()), Ok(Duplicate));
+        assert_eq!(op_pool.insert_deposit(index, deposit1.clone()), Ok(Fresh));
         assert_eq!(
-            op_pool.insert_deposit(deposit2),
+            op_pool.insert_deposit(index, deposit1.clone()),
+            Ok(Duplicate)
+        );
+        assert_eq!(
+            op_pool.insert_deposit(index, deposit2),
             Ok(Replaced(Box::new(deposit1)))
         );
     }
@@ -480,28 +489,29 @@ mod tests {
     #[test]
     fn get_deposits_max() {
         let rng = &mut XorShiftRng::from_seed([42; 16]);
-        let (spec, mut state) = test_state(rng);
+        let (_, mut state) = test_state(rng);
         let op_pool = OperationPool::new();
         let start = 10000;
-        let max_deposits = spec.max_deposits;
+        let max_deposits = <MainnetEthSpec as EthSpec>::MaxDeposits::to_u64();
         let extra = 5;
         let offset = 1;
         assert!(offset <= extra);
 
         let deposits = dummy_deposits(rng, start, max_deposits + extra);
 
-        for deposit in &deposits {
-            assert_eq!(op_pool.insert_deposit(deposit.clone()), Ok(Fresh));
+        for (i, deposit) in &deposits {
+            assert_eq!(op_pool.insert_deposit(*i, deposit.clone()), Ok(Fresh));
         }
 
-        state.deposit_index = start + offset;
-        let deposits_for_block = op_pool.get_deposits(&state, &spec);
+        state.eth1_deposit_index = start + offset;
+        let deposits_for_block = op_pool.get_deposits(&state);
 
         assert_eq!(deposits_for_block.len() as u64, max_deposits);
-        assert_eq!(
-            deposits_for_block[..],
-            deposits[offset as usize..(offset + max_deposits) as usize]
-        );
+        let expected = deposits[offset as usize..(offset + max_deposits) as usize]
+            .iter()
+            .map(|(_, d)| d.clone())
+            .collect::<Vec<_>>();
+        assert_eq!(deposits_for_block[..], expected[..]);
     }
 
     #[test]
@@ -518,20 +528,20 @@ mod tests {
         let deposits1 = dummy_deposits(rng, start1, count);
         let deposits2 = dummy_deposits(rng, start2, count);
 
-        for d in deposits1.into_iter().chain(deposits2) {
-            assert!(op_pool.insert_deposit(d).is_ok());
+        for (i, d) in deposits1.into_iter().chain(deposits2) {
+            assert!(op_pool.insert_deposit(i, d).is_ok());
         }
 
         assert_eq!(op_pool.num_deposits(), 2 * count as usize);
 
         let mut state = BeaconState::random_for_test(rng);
-        state.deposit_index = start1;
+        state.eth1_deposit_index = start1;
 
         // Pruning the first bunch of deposits in batches of 5 should work.
         let step = 5;
         let mut pool_size = step + 2 * count as usize;
         for i in (start1..=(start1 + count)).step_by(step) {
-            state.deposit_index = i;
+            state.eth1_deposit_index = i;
             op_pool.prune_deposits(&state);
             pool_size -= step;
             assert_eq!(op_pool.num_deposits(), pool_size);
@@ -539,14 +549,14 @@ mod tests {
         assert_eq!(pool_size, count as usize);
         // Pruning in the gap should do nothing.
         for i in (start1 + count..start2).step_by(step) {
-            state.deposit_index = i;
+            state.eth1_deposit_index = i;
             op_pool.prune_deposits(&state);
             assert_eq!(op_pool.num_deposits(), count as usize);
         }
         // Same again for the later deposits.
         pool_size += step;
         for i in (start2..=(start2 + count)).step_by(step) {
-            state.deposit_index = i;
+            state.eth1_deposit_index = i;
             op_pool.prune_deposits(&state);
             pool_size -= step;
             assert_eq!(op_pool.num_deposits(), pool_size);
@@ -560,13 +570,13 @@ mod tests {
     }
 
     // Create `count` dummy deposits with sequential deposit IDs beginning from `start`.
-    fn dummy_deposits(rng: &mut XorShiftRng, start: u64, count: u64) -> Vec<Deposit> {
+    fn dummy_deposits(rng: &mut XorShiftRng, start: u64, count: u64) -> Vec<(u64, Deposit)> {
         let proto_deposit = make_deposit(rng);
         (start..start + count)
             .map(|index| {
                 let mut deposit = proto_deposit.clone();
-                deposit.index = index;
-                deposit
+                deposit.data.amount = index * 1000;
+                (index, deposit)
             })
             .collect()
     }
@@ -596,11 +606,11 @@ mod tests {
         state: &BeaconState<E>,
         spec: &ChainSpec,
         extra_signer: Option<usize>,
-    ) -> Attestation {
+    ) -> Attestation<E> {
         let mut builder = TestingAttestationBuilder::new(state, committee, slot, shard, spec);
         let signers = &committee[signing_range];
         let committee_keys = signers.iter().map(|&i| &keypairs[i].sk).collect::<Vec<_>>();
-        builder.sign(signers, &committee_keys, &state.fork, spec);
+        builder.sign(signers, &committee_keys, &state.fork, spec, false);
         extra_signer.map(|c_idx| {
             let validator_index = committee[c_idx];
             builder.sign(
@@ -608,6 +618,7 @@ mod tests {
                 &[&keypairs[validator_index].sk],
                 &state.fork,
                 spec,
+                false,
             )
         });
         builder.build()
@@ -668,15 +679,18 @@ mod tests {
         );
 
         assert_eq!(
-            att1.aggregation_bitfield.num_set_bits(),
+            att1.aggregation_bits.num_set_bits(),
             earliest_attestation_validators(&att1, state).num_set_bits()
         );
-        state.current_epoch_attestations.push(PendingAttestation {
-            aggregation_bitfield: att1.aggregation_bitfield.clone(),
-            data: att1.data.clone(),
-            inclusion_delay: 0,
-            proposer_index: 0,
-        });
+        state
+            .current_epoch_attestations
+            .push(PendingAttestation {
+                aggregation_bits: att1.aggregation_bits.clone(),
+                data: att1.data.clone(),
+                inclusion_delay: 0,
+                proposer_index: 0,
+            })
+            .unwrap();
 
         assert_eq!(
             cc.committee.len() - 2,
@@ -728,6 +742,7 @@ mod tests {
         assert_eq!(op_pool.num_attestations(), committees.len());
 
         // Before the min attestation inclusion delay, get_attestations shouldn't return anything.
+        state.slot -= 1;
         assert_eq!(op_pool.get_attestations(state, spec).len(), 0);
 
         // Then once the delay has elapsed, we should get a single aggregated attestation.
@@ -738,7 +753,7 @@ mod tests {
 
         let agg_att = &block_attestations[0];
         assert_eq!(
-            agg_att.aggregation_bitfield.num_set_bits(),
+            agg_att.aggregation_bits.num_set_bits(),
             spec.target_committee_size as usize
         );
 
@@ -854,7 +869,7 @@ mod tests {
             .map(CrosslinkCommittee::into_owned)
             .collect::<Vec<_>>();
 
-        let max_attestations = spec.max_attestations as usize;
+        let max_attestations = <MainnetEthSpec as EthSpec>::MaxAttestations::to_usize();
         let target_committee_size = spec.target_committee_size as usize;
 
        let insert_attestations = |cc: &OwnedCrosslinkCommittee, step_size| {
@@ -897,7 +912,7 @@ mod tests {
 
         // All the best attestations should be signed by at least `big_step_size` (4) validators.
         for att in &best_attestations {
-            assert!(att.aggregation_bitfield.num_set_bits() >= big_step_size);
+            assert!(att.aggregation_bits.num_set_bits() >= big_step_size);
         }
     }
 }
@@ -42,7 +42,7 @@ impl<T> MaxCoverItem<T> {
 ///
 /// * Time complexity: `O(limit * items_iter.len())`
 /// * Space complexity: `O(item_iter.len())`
-pub fn maximum_cover<'a, I, T>(items_iter: I, limit: usize) -> Vec<T::Object>
+pub fn maximum_cover<I, T>(items_iter: I, limit: usize) -> Vec<T::Object>
 where
     I: IntoIterator<Item = T>,
     T: MaxCover,
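`maximum_cover` above is the classic greedy approximation for maximum coverage: repeatedly pick the item covering the most still-uncovered elements, up to `limit` picks, shrinking the remaining items as you go. A self-contained sketch of that greedy loop over `HashSet`s (a toy under stated assumptions, not the crate's trait-based implementation):

use std::collections::HashSet;

/// Greedy maximum coverage: pick up to `limit` sets, each time taking the one
/// that adds the most new elements, then shrinking the rest accordingly.
fn greedy_max_cover(mut items: Vec<HashSet<u32>>, limit: usize) -> Vec<HashSet<u32>> {
    let mut result = Vec::new();
    for _ in 0..limit {
        // Index of the item covering the most uncovered elements.
        let best = match items.iter().enumerate().max_by_key(|(_, s)| s.len()) {
            Some((i, s)) if !s.is_empty() => i,
            _ => break,
        };
        let chosen = items.swap_remove(best);
        for s in &mut items {
            *s = s.difference(&chosen).copied().collect();
        }
        result.push(chosen);
    }
    result
}

fn main() {
    let items = vec![
        [1, 2, 3].iter().copied().collect::<HashSet<u32>>(),
        [3, 4].iter().copied().collect(),
        [4, 5].iter().copied().collect(),
    ];
    let cover = greedy_max_cover(items, 2);
    assert_eq!(cover[0].len(), 3); // {1, 2, 3} is picked first
}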
@@ -9,14 +9,14 @@ use types::*;
 /// Operations are stored in arbitrary order, so it's not a good idea to compare instances
 /// of this type (or its encoded form) for equality. Convert back to an `OperationPool` first.
 #[derive(Encode, Decode)]
-pub struct PersistedOperationPool {
+pub struct PersistedOperationPool<T: EthSpec> {
     /// Mapping from attestation ID to attestation mappings.
     // We could save space by not storing the attestation ID, but it might
     // be difficult to make that roundtrip due to eager aggregation.
-    attestations: Vec<(AttestationId, Vec<Attestation>)>,
-    deposits: Vec<Deposit>,
+    attestations: Vec<(AttestationId, Vec<Attestation<T>>)>,
+    deposits: Vec<(u64, Deposit)>,
     /// Attester slashings.
-    attester_slashings: Vec<AttesterSlashing>,
+    attester_slashings: Vec<AttesterSlashing<T>>,
     /// Proposer slashings.
     proposer_slashings: Vec<ProposerSlashing>,
     /// Voluntary exits.
@@ -25,9 +25,9 @@ pub struct PersistedOperationPool {
     transfers: Vec<Transfer>,
 }
 
-impl PersistedOperationPool {
+impl<T: EthSpec> PersistedOperationPool<T> {
     /// Convert an `OperationPool` into serializable form.
-    pub fn from_operation_pool<T: EthSpec>(operation_pool: &OperationPool<T>) -> Self {
+    pub fn from_operation_pool(operation_pool: &OperationPool<T>) -> Self {
         let attestations = operation_pool
             .attestations
             .read()
@@ -39,7 +39,7 @@ impl PersistedOperationPool {
             .deposits
             .read()
             .iter()
-            .map(|(_, d)| d.clone())
+            .map(|(index, d)| (*index, d.clone()))
             .collect();
 
         let attester_slashings = operation_pool
@@ -76,13 +76,9 @@ impl PersistedOperationPool {
     }
 
     /// Reconstruct an `OperationPool`.
-    pub fn into_operation_pool<T: EthSpec>(
-        self,
-        state: &BeaconState<T>,
-        spec: &ChainSpec,
-    ) -> OperationPool<T> {
+    pub fn into_operation_pool(self, state: &BeaconState<T>, spec: &ChainSpec) -> OperationPool<T> {
         let attestations = RwLock::new(self.attestations.into_iter().collect());
-        let deposits = RwLock::new(self.deposits.into_iter().map(|d| (d.index, d)).collect());
+        let deposits = RwLock::new(self.deposits.into_iter().collect());
         let attester_slashings = RwLock::new(
             self.attester_slashings
                 .into_iter()
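The intent of the pair above is a lossless round trip: `from_operation_pool` snapshots the pool's locked maps into plain `Vec`s for SSZ encoding, and `into_operation_pool` rebuilds the maps. Now that deposits are stored as `(index, deposit)` pairs, the index survives the trip even though `Deposit` itself no longer carries one. A schematic of that pattern with trivial, hypothetical types (not the crate's):

use std::collections::BTreeMap;

/// In-memory form with interior structure.
struct Pool {
    deposits: BTreeMap<u64, String>,
}

/// Flat, serialisable form: just (index, deposit) pairs.
struct PersistedPool {
    deposits: Vec<(u64, String)>,
}

impl PersistedPool {
    fn from_pool(pool: &Pool) -> Self {
        Self {
            deposits: pool.deposits.iter().map(|(i, d)| (*i, d.clone())).collect(),
        }
    }

    fn into_pool(self) -> Pool {
        Pool {
            deposits: self.deposits.into_iter().collect(),
        }
    }
}

fn main() {
    let mut deposits = BTreeMap::new();
    deposits.insert(7, "deposit-7".to_string());
    let pool = Pool { deposits };
    let roundtrip = PersistedPool::from_pool(&pool).into_pool();
    assert_eq!(roundtrip.deposits.get(&7).map(String::as_str), Some("deposit-7"));
}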
@@ -4,12 +4,7 @@ version = "0.1.0"
 authors = ["Paul Hauner <paul@paulhauner.com>"]
 edition = "2018"
 
-[[bench]]
-name = "benches"
-harness = false
-
 [dev-dependencies]
-criterion = "0.2"
 env_logger = "0.6.0"
 serde = "1.0"
 serde_derive = "1.0"
@@ -17,15 +12,10 @@ serde_yaml = "0.8"
 
 [dependencies]
 bls = { path = "../utils/bls" }
-fnv = "1.0"
-hashing = { path = "../utils/hashing" }
-int_to_bytes = { path = "../utils/int_to_bytes" }
 integer-sqrt = "0.1"
 itertools = "0.8"
-log = "0.4"
+eth2_ssz_types = { path = "../utils/ssz_types" }
 merkle_proof = { path = "../utils/merkle_proof" }
-eth2_ssz = { path = "../utils/ssz" }
-eth2_ssz_derive = { path = "../utils/ssz_derive" }
 tree_hash = { path = "../utils/tree_hash" }
 tree_hash_derive = { path = "../utils/tree_hash_derive" }
 types = { path = "../types" }
@@ -1,270 +0,0 @@
-use criterion::Criterion;
-use criterion::{black_box, Benchmark};
-use state_processing::{
-    per_block_processing,
-    per_block_processing::{
-        process_attestations, process_attester_slashings, process_deposits, process_eth1_data,
-        process_exits, process_proposer_slashings, process_randao, process_transfers,
-        verify_block_signature,
-    },
-};
-use tree_hash::TreeHash;
-use types::*;
-
-/// Run the detailed benchmarking suite on the given `BeaconState`.
-///
-/// `desc` will be added to the title of each bench.
-pub fn bench_block_processing(
-    c: &mut Criterion,
-    initial_block: &BeaconBlock,
-    initial_state: &BeaconState,
-    initial_spec: &ChainSpec,
-    desc: &str,
-) {
-    let state = initial_state.clone();
-    let block = initial_block.clone();
-    let spec = initial_spec.clone();
-    c.bench(
-        &format!("{}/block_processing", desc),
-        Benchmark::new("verify_block_signature", move |b| {
-            b.iter_batched(
-                || state.clone(),
-                |mut state| {
-                    verify_block_signature(&mut state, &block, &spec).unwrap();
-                    state
-                },
-                criterion::BatchSize::SmallInput,
-            )
-        })
-        .sample_size(10),
-    );
-
-    let state = initial_state.clone();
-    let block = initial_block.clone();
-    let spec = initial_spec.clone();
-    c.bench(
-        &format!("{}/block_processing", desc),
-        Benchmark::new("process_randao", move |b| {
-            b.iter_batched(
-                || state.clone(),
-                |mut state| {
-                    process_randao(&mut state, &block, &spec).unwrap();
-                    state
-                },
-                criterion::BatchSize::SmallInput,
-            )
-        })
-        .sample_size(10),
-    );
-
-    let state = initial_state.clone();
-    let block = initial_block.clone();
-    c.bench(
-        &format!("{}/block_processing", desc),
-        Benchmark::new("process_eth1_data", move |b| {
-            b.iter_batched(
-                || state.clone(),
-                |mut state| {
-                    process_eth1_data(&mut state, &block.eth1_data).unwrap();
-                    state
-                },
-                criterion::BatchSize::SmallInput,
-            )
-        })
-        .sample_size(10),
-    );
-
-    let state = initial_state.clone();
-    let block = initial_block.clone();
-    let spec = initial_spec.clone();
-    c.bench(
-        &format!("{}/block_processing", desc),
-        Benchmark::new("process_proposer_slashings", move |b| {
-            b.iter_batched(
-                || state.clone(),
-                |mut state| {
-                    process_proposer_slashings(&mut state, &block.body.proposer_slashings, &spec)
-                        .unwrap();
-                    state
-                },
-                criterion::BatchSize::SmallInput,
-            )
-        })
-        .sample_size(10),
-    );
-
-    let state = initial_state.clone();
-    let block = initial_block.clone();
-    let spec = initial_spec.clone();
-    c.bench(
-        &format!("{}/block_processing", desc),
-        Benchmark::new("process_attester_slashings", move |b| {
-            b.iter_batched(
-                || state.clone(),
-                |mut state| {
-                    process_attester_slashings(&mut state, &block.body.attester_slashings, &spec)
-                        .unwrap();
-                    state
-                },
-                criterion::BatchSize::SmallInput,
-            )
-        })
-        .sample_size(10),
-    );
-
-    let state = initial_state.clone();
-    let block = initial_block.clone();
-    let spec = initial_spec.clone();
-    c.bench(
-        &format!("{}/block_processing", desc),
-        Benchmark::new("process_attestations", move |b| {
-            b.iter_batched(
-                || state.clone(),
-                |mut state| {
-                    process_attestations(&mut state, &block.body.attestations, &spec).unwrap();
-                    state
-                },
-                criterion::BatchSize::SmallInput,
-            )
-        })
-        .sample_size(10),
-    );
-
-    let state = initial_state.clone();
-    let block = initial_block.clone();
-    let spec = initial_spec.clone();
-    c.bench(
-        &format!("{}/block_processing", desc),
-        Benchmark::new("process_deposits", move |b| {
-            b.iter_batched(
-                || state.clone(),
-                |mut state| {
-                    process_deposits(&mut state, &block.body.deposits, &spec).unwrap();
-                    state
-                },
-                criterion::BatchSize::SmallInput,
-            )
-        })
-        .sample_size(10),
-    );
-
-    let state = initial_state.clone();
-    let block = initial_block.clone();
-    let spec = initial_spec.clone();
-    c.bench(
-        &format!("{}/block_processing", desc),
-        Benchmark::new("process_exits", move |b| {
-            b.iter_batched(
-                || state.clone(),
-                |mut state| {
-                    process_exits(&mut state, &block.body.voluntary_exits, &spec).unwrap();
-                    state
-                },
-                criterion::BatchSize::SmallInput,
-            )
-        })
-        .sample_size(10),
-    );
-
-    let state = initial_state.clone();
-    let block = initial_block.clone();
-    let spec = initial_spec.clone();
-    c.bench(
-        &format!("{}/block_processing", desc),
-        Benchmark::new("process_transfers", move |b| {
-            b.iter_batched(
-                || state.clone(),
-                |mut state| {
-                    process_transfers(&mut state, &block.body.transfers, &spec).unwrap();
-                    state
-                },
-                criterion::BatchSize::SmallInput,
-            )
-        })
-        .sample_size(10),
-    );
-
-    let state = initial_state.clone();
-    let block = initial_block.clone();
-    let spec = initial_spec.clone();
-    c.bench(
-        &format!("{}/block_processing", desc),
-        Benchmark::new("per_block_processing", move |b| {
-            b.iter_batched(
-                || state.clone(),
-                |mut state| {
-                    per_block_processing(&mut state, &block, &spec).unwrap();
-                    state
-                },
-                criterion::BatchSize::SmallInput,
-            )
-        })
-        .sample_size(10),
-    );
-
-    let mut state = initial_state.clone();
-    state.drop_cache(RelativeEpoch::Previous);
-    let spec = initial_spec.clone();
-    c.bench(
-        &format!("{}/block_processing", desc),
-        Benchmark::new("build_previous_state_committee_cache", move |b| {
-            b.iter_batched(
-                || state.clone(),
-                |mut state| {
-                    state
-                        .build_committee_cache(RelativeEpoch::Previous, &spec)
-                        .unwrap();
-                    state
-                },
-                criterion::BatchSize::SmallInput,
-            )
-        })
-        .sample_size(10),
-    );
-
-    let mut state = initial_state.clone();
-    state.drop_cache(RelativeEpoch::Current);
-    let spec = initial_spec.clone();
-    c.bench(
-        &format!("{}/block_processing", desc),
-        Benchmark::new("build_current_state_committee_cache", move |b| {
-            b.iter_batched(
-                || state.clone(),
-                |mut state| {
-                    state
-                        .build_committee_cache(RelativeEpoch::Current, &spec)
-                        .unwrap();
-                    state
-                },
-                criterion::BatchSize::SmallInput,
-            )
-        })
-        .sample_size(10),
-    );
-
-    let mut state = initial_state.clone();
-    state.drop_pubkey_cache();
-    c.bench(
-        &format!("{}/block_processing", desc),
-        Benchmark::new("build_pubkey_cache", move |b| {
-            b.iter_batched(
-                || state.clone(),
-                |mut state| {
-                    state.update_pubkey_cache().unwrap();
-                    state
-                },
-                criterion::BatchSize::SmallInput,
-            )
-        })
-        .sample_size(10),
-    );
-
-    let block = initial_block.clone();
-    c.bench(
-        &format!("{}/block_processing", desc),
-        Benchmark::new("tree_hash_block", move |b| {
-            b.iter(|| black_box(block.tree_hash_root()))
-        })
-        .sample_size(10),
-    );
-}
@@ -1,263 +0,0 @@
-use criterion::Criterion;
-use criterion::{black_box, Benchmark};
-use state_processing::{
-    per_epoch_processing,
-    per_epoch_processing::{
-        clean_attestations, initialize_validator_statuses, process_crosslinks, process_eth1_data,
-        process_justification, process_rewards_and_penalities, process_validator_registry,
-        update_active_tree_index_roots, update_latest_slashed_balances,
-    },
-};
-use tree_hash::TreeHash;
-use types::test_utils::TestingBeaconStateBuilder;
-use types::*;
-
-pub const BENCHING_SAMPLE_SIZE: usize = 10;
-pub const SMALL_BENCHING_SAMPLE_SIZE: usize = 10;
-
-/// Run the benchmarking suite on a foundation spec with 16,384 validators.
-pub fn bench_epoch_processing_n_validators(c: &mut Criterion, validator_count: usize) {
-    let spec = ChainSpec::mainnet();
-
-    let mut builder =
-        TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec);
-
-    // Set the state to be just before an epoch transition.
-    let target_slot = (T::genesis_epoch() + 4).end_slot(T::slots_per_epoch());
-    builder.teleport_to_slot(target_slot, &spec);
-
-    // Builds all caches; benches will not contain shuffling/committee building times.
-    builder.build_caches(&spec).unwrap();
-
-    // Inserts one attestation with full participation for each committee able to include an
-    // attestation in this state.
-    builder.insert_attestations(&spec);
-
-    let (state, _keypairs) = builder.build();
-
-    // Assert that the state has an attestation for each committee that is able to include an
-    // attestation in the state.
-    let committees_per_epoch = spec.get_epoch_committee_count(validator_count);
-    let committees_per_slot = committees_per_epoch / T::slots_per_epoch();
-    let previous_epoch_attestations = committees_per_epoch;
-    let current_epoch_attestations =
-        committees_per_slot * (T::slots_per_epoch() - spec.min_attestation_inclusion_delay);
-    assert_eq!(
-        state.latest_attestations.len() as u64,
-        previous_epoch_attestations + current_epoch_attestations,
-        "The state should have an attestation for each committee."
-    );
-
-    // Assert that we will run the first arm of process_rewards_and_penalities
-    let epochs_since_finality = state.next_epoch(&spec) - state.finalized_epoch;
-    assert_eq!(
-        epochs_since_finality, 4,
-        "Epochs since finality should be 4"
-    );
-
-    bench_epoch_processing(c, &state, &spec, &format!("{}_validators", validator_count));
-}
-
-/// Run the detailed benchmarking suite on the given `BeaconState`.
-///
-/// `desc` will be added to the title of each bench.
-fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSpec, desc: &str) {
-    let state_clone = state.clone();
-    let spec_clone = spec.clone();
-    c.bench(
-        &format!("{}/epoch_processing", desc),
-        Benchmark::new("process_eth1_data", move |b| {
-            b.iter_batched(
-                || state_clone.clone(),
-                |mut state| {
-                    process_eth1_data(&mut state, &spec_clone);
-                    state
-                },
-                criterion::BatchSize::SmallInput,
-            )
-        })
-        .sample_size(BENCHING_SAMPLE_SIZE),
-    );
-
-    let state_clone = state.clone();
-    let spec_clone = spec.clone();
-    c.bench(
-        &format!("{}/epoch_processing", desc),
-        Benchmark::new("initialize_validator_statuses", move |b| {
-            b.iter_batched(
-                || state_clone.clone(),
-                |mut state| {
-                    initialize_validator_statuses(&mut state, &spec_clone).unwrap();
-                    state
-                },
-                criterion::BatchSize::SmallInput,
-            )
-        })
-        .sample_size(BENCHING_SAMPLE_SIZE),
-    );
-
-    let state_clone = state.clone();
-    let spec_clone = spec.clone();
-    let attesters = initialize_validator_statuses(&state, &spec).unwrap();
-    c.bench(
-        &format!("{}/epoch_processing", desc),
-        Benchmark::new("process_justification", move |b| {
-            b.iter_batched(
-                || state_clone.clone(),
-                |mut state| {
-                    process_justification(&mut state, &attesters.total_balances, &spec_clone);
-                    state
-                },
-                criterion::BatchSize::SmallInput,
-            )
-        })
-        .sample_size(10),
-    );
-
-    let state_clone = state.clone();
-    let spec_clone = spec.clone();
-    c.bench(
-        &format!("{}/epoch_processing", desc),
-        Benchmark::new("process_crosslinks", move |b| {
-            b.iter_batched(
-                || state_clone.clone(),
-                |mut state| black_box(process_crosslinks(&mut state, &spec_clone).unwrap()),
-                criterion::BatchSize::SmallInput,
-            )
-        })
-        .sample_size(BENCHING_SAMPLE_SIZE),
-    );
-
-    let mut state_clone = state.clone();
-    let spec_clone = spec.clone();
-    let attesters = initialize_validator_statuses(&state, &spec).unwrap();
-    let winning_root_for_shards = process_crosslinks(&mut state_clone, &spec).unwrap();
-    c.bench(
-        &format!("{}/epoch_processing", desc),
-        Benchmark::new("process_rewards_and_penalties", move |b| {
-            b.iter_batched(
-                || (state_clone.clone(), attesters.clone()),
-                |(mut state, mut attesters)| {
-                    process_rewards_and_penalities(
-                        &mut state,
-                        &mut attesters,
-                        &winning_root_for_shards,
-                        &spec_clone,
-                    )
-                    .unwrap();
-                    state
-                },
-                criterion::BatchSize::SmallInput,
-            )
-        })
-        .sample_size(SMALL_BENCHING_SAMPLE_SIZE),
-    );
-
-    let state_clone = state.clone();
-    let spec_clone = spec.clone();
-    c.bench(
-        &format!("{}/epoch_processing", desc),
-        Benchmark::new("process_ejections", move |b| {
-            b.iter_batched(
-                || state_clone.clone(),
-                |mut state| {
-                    state.process_ejections(&spec_clone);
-                    state
-                },
-                criterion::BatchSize::SmallInput,
-            )
-        })
-        .sample_size(BENCHING_SAMPLE_SIZE),
-    );
-
-    let state_clone = state.clone();
-    let spec_clone = spec.clone();
-    c.bench(
-        &format!("{}/epoch_processing", desc),
-        Benchmark::new("process_validator_registry", move |b| {
-            b.iter_batched(
-                || state_clone.clone(),
-                |mut state| {
-                    process_validator_registry(&mut state, &spec_clone).unwrap();
-                    state
-                },
-                criterion::BatchSize::SmallInput,
-            )
-        })
-        .sample_size(BENCHING_SAMPLE_SIZE),
-    );
-
-    let state_clone = state.clone();
-    let spec_clone = spec.clone();
-    c.bench(
-        &format!("{}/epoch_processing", desc),
-        Benchmark::new("update_active_tree_index_roots", move |b| {
-            b.iter_batched(
-                || state_clone.clone(),
-                |mut state| {
-                    update_active_tree_index_roots(&mut state, &spec_clone).unwrap();
-                    state
-                },
-                criterion::BatchSize::SmallInput,
-            )
-        })
-        .sample_size(BENCHING_SAMPLE_SIZE),
-    );
-
-    let state_clone = state.clone();
-    let spec_clone = spec.clone();
-    c.bench(
-        &format!("{}/epoch_processing", desc),
-        Benchmark::new("update_latest_slashed_balances", move |b| {
-            b.iter_batched(
-                || state_clone.clone(),
-                |mut state| {
-                    update_latest_slashed_balances(&mut state, &spec_clone);
-                    state
-                },
-                criterion::BatchSize::SmallInput,
-            )
-        })
-        .sample_size(BENCHING_SAMPLE_SIZE),
-    );
-
-    let state_clone = state.clone();
-    let spec_clone = spec.clone();
-    c.bench(
-        &format!("{}/epoch_processing", desc),
-        Benchmark::new("clean_attestations", move |b| {
-            b.iter_batched(
-                || state_clone.clone(),
-                |mut state| {
-                    clean_attestations(&mut state, &spec_clone);
-                    state
-                },
-                criterion::BatchSize::SmallInput,
-            )
-        })
-        .sample_size(BENCHING_SAMPLE_SIZE),
-    );
-
-    let state_clone = state.clone();
-    let spec_clone = spec.clone();
-    c.bench(
-        &format!("{}/epoch_processing", desc),
-        Benchmark::new("per_epoch_processing", move |b| {
-            b.iter_batched(
-                || state_clone.clone(),
-                |mut state| black_box(per_epoch_processing(&mut state, &spec_clone).unwrap()),
-                criterion::BatchSize::SmallInput,
-            )
-        })
-        .sample_size(SMALL_BENCHING_SAMPLE_SIZE),
-    );
-
-    let state_clone = state.clone();
-    c.bench(
-        &format!("{}/epoch_processing", desc),
-        Benchmark::new("tree_hash_state", move |b| {
-            b.iter(|| black_box(state_clone.tree_hash_root()))
-        })
-        .sample_size(SMALL_BENCHING_SAMPLE_SIZE),
-    );
-}
@@ -1,103 +0,0 @@
-use block_benching_builder::BlockBenchingBuilder;
-use criterion::Criterion;
-use criterion::{criterion_group, criterion_main};
-use env_logger::{Builder, Env};
-use log::info;
-use types::*;
-
-mod bench_block_processing;
-mod bench_epoch_processing;
-mod block_benching_builder;
-
-pub const VALIDATOR_COUNT: usize = 16_384;
-
-// `LOG_LEVEL == "info"` gives handy messages.
-pub const LOG_LEVEL: &str = "info";
-
-/// Build a worst-case block and benchmark processing it.
-pub fn block_processing_worst_case(c: &mut Criterion) {
-    if LOG_LEVEL != "" {
-        Builder::from_env(Env::default().default_filter_or(LOG_LEVEL)).init();
-    }
-    info!(
-        "Building worst case block bench with {} validators",
-        VALIDATOR_COUNT
-    );
-
-    // Use the specifications from the Eth2.0 spec.
-    let spec = ChainSpec::mainnet();
-
-    // Create a builder for configuring the block and state for benching.
-    let mut bench_builder = BlockBenchingBuilder::new(VALIDATOR_COUNT, &spec);
-
-    // Set the number of included operations to be maximum (e.g., `MAX_ATTESTATIONS`, etc.)
-    bench_builder.maximize_block_operations(&spec);
-
-    // Set the state and block to be in the last slot of the 4th epoch.
-    let last_slot_of_epoch = (T::genesis_epoch() + 4).end_slot(T::slots_per_epoch());
-    bench_builder.set_slot(last_slot_of_epoch, &spec);
-
-    // Build all the state caches so the build times aren't included in the benches.
-    bench_builder.build_caches(&spec);
-
-    // Generate the block and state then run benches.
-    let (block, state) = bench_builder.build(&spec);
-    bench_block_processing::bench_block_processing(
-        c,
-        &block,
-        &state,
-        &spec,
-        &format!("{}_validators/worst_case", VALIDATOR_COUNT),
-    );
-}
-
-/// Build a reasonable-case block and benchmark processing it.
-pub fn block_processing_reasonable_case(c: &mut Criterion) {
-    info!(
-        "Building reasonable case block bench with {} validators",
-        VALIDATOR_COUNT
-    );
-
-    // Use the specifications from the Eth2.0 spec.
-    let spec = ChainSpec::mainnet();
-
-    // Create a builder for configuring the block and state for benching.
-    let mut bench_builder = BlockBenchingBuilder::new(VALIDATOR_COUNT, &spec);
-
-    // Set the number of included operations to what we might expect normally.
-    bench_builder.num_proposer_slashings = 0;
-    bench_builder.num_attester_slashings = 0;
-    bench_builder.num_attestations = (spec.shard_count / T::slots_per_epoch()) as usize;
-    bench_builder.num_deposits = 2;
-    bench_builder.num_exits = 2;
-    bench_builder.num_transfers = 2;
-
-    // Set the state and block to be in the last slot of the 4th epoch.
-    let last_slot_of_epoch = (T::genesis_epoch() + 4).end_slot(T::slots_per_epoch());
-    bench_builder.set_slot(last_slot_of_epoch, &spec);
-
-    // Build all the state caches so the build times aren't included in the benches.
-    bench_builder.build_caches(&spec);
-
-    // Generate the block and state then run benches.
-    let (block, state) = bench_builder.build(&spec);
-    bench_block_processing::bench_block_processing(
-        c,
-        &block,
-        &state,
-        &spec,
-        &format!("{}_validators/reasonable_case", VALIDATOR_COUNT),
-    );
-}
-
-pub fn state_processing(c: &mut Criterion) {
-    bench_epoch_processing::bench_epoch_processing_n_validators(c, VALIDATOR_COUNT);
-}
-
-criterion_group!(
-    benches,
-    block_processing_reasonable_case,
-    block_processing_worst_case,
-    state_processing
-);
-criterion_main!(benches);
@@ -1,175 +0,0 @@
-use log::info;
-use types::test_utils::{TestingBeaconBlockBuilder, TestingBeaconStateBuilder};
-use types::*;
-
-pub struct BlockBenchingBuilder {
-    pub state_builder: TestingBeaconStateBuilder,
-    pub block_builder: TestingBeaconBlockBuilder,
-
-    pub num_validators: usize,
-    pub num_proposer_slashings: usize,
-    pub num_attester_slashings: usize,
-    pub num_indices_per_slashable_vote: usize,
-    pub num_attestations: usize,
-    pub num_deposits: usize,
-    pub num_exits: usize,
-    pub num_transfers: usize,
-}
-
-impl BlockBenchingBuilder {
-    pub fn new(num_validators: usize, spec: &ChainSpec) -> Self {
-        let state_builder =
-            TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(num_validators, &spec);
-        let block_builder = TestingBeaconBlockBuilder::new(spec);
-
-        Self {
-            state_builder,
-            block_builder,
-            num_validators: 0,
-            num_proposer_slashings: 0,
-            num_attester_slashings: 0,
-            num_indices_per_slashable_vote: spec.max_indices_per_slashable_vote as usize,
-            num_attestations: 0,
-            num_deposits: 0,
-            num_exits: 0,
-            num_transfers: 0,
-        }
-    }
-
-    pub fn maximize_block_operations(&mut self, spec: &ChainSpec) {
-        self.num_proposer_slashings = spec.max_proposer_slashings as usize;
-        self.num_attester_slashings = spec.max_attester_slashings as usize;
-        self.num_indices_per_slashable_vote = spec.max_indices_per_slashable_vote as usize;
-        self.num_attestations = spec.max_attestations as usize;
-        self.num_deposits = spec.max_deposits as usize;
-        self.num_exits = spec.max_voluntary_exits as usize;
-        self.num_transfers = spec.max_transfers as usize;
-    }
-
-    pub fn set_slot(&mut self, slot: Slot, spec: &ChainSpec) {
-        self.state_builder.teleport_to_slot(slot, &spec);
-    }
-
-    pub fn build_caches(&mut self, spec: &ChainSpec) {
-        // Builds all caches; benches will not contain shuffling/committee building times.
-        self.state_builder.build_caches(&spec).unwrap();
-    }
-
-    pub fn build(mut self, spec: &ChainSpec) -> (BeaconBlock, BeaconState) {
-        let (mut state, keypairs) = self.state_builder.build();
-        let builder = &mut self.block_builder;
-
-        builder.set_slot(state.slot);
-
-        let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap();
-        let keypair = &keypairs[proposer_index];
-
-        builder.set_randao_reveal(&keypair.sk, &state.fork, spec);
-
-        // Used as a stream of validator indices for use in slashings, exits, etc.
-        let mut validators_iter = (0..keypairs.len() as u64).into_iter();
-
-        // Insert `ProposerSlashing` objects.
-        for _ in 0..self.num_proposer_slashings {
-            let validator_index = validators_iter.next().expect("Insufficient validators.");
-
-            builder.insert_proposer_slashing(
-                validator_index,
-                &keypairs[validator_index as usize].sk,
-                &state.fork,
-                spec,
-            );
-        }
-        info!(
-            "Inserted {} proposer slashings.",
-            builder.block.body.proposer_slashings.len()
-        );
-
-        // Insert `AttesterSlashing` objects
-        for _ in 0..self.num_attester_slashings {
-            let mut attesters: Vec<u64> = vec![];
-            let mut secret_keys: Vec<&SecretKey> = vec![];
-
-            for _ in 0..self.num_indices_per_slashable_vote {
-                let validator_index = validators_iter.next().expect("Insufficient validators.");
-
-                attesters.push(validator_index);
-                secret_keys.push(&keypairs[validator_index as usize].sk);
-            }
-
-            builder.insert_attester_slashing(&attesters, &secret_keys, &state.fork, spec);
-        }
-        info!(
-            "Inserted {} attester slashings.",
-            builder.block.body.attester_slashings.len()
-        );
-
-        // Insert `Attestation` objects.
-        let all_secret_keys: Vec<&SecretKey> = keypairs.iter().map(|keypair| &keypair.sk).collect();
-        builder
-            .insert_attestations(
-                &state,
-                &all_secret_keys,
-                self.num_attestations as usize,
-                spec,
-            )
-            .unwrap();
-        info!(
-            "Inserted {} attestations.",
-            builder.block.body.attestations.len()
-        );
-
-        // Insert `Deposit` objects.
-        for i in 0..self.num_deposits {
-            builder.insert_deposit(
-                32_000_000_000,
-                state.deposit_index + (i as u64),
-                &state,
-                spec,
-            );
-        }
-        info!("Inserted {} deposits.", builder.block.body.deposits.len());
-
-        // Insert the maximum possible number of `Exit` objects.
-        for _ in 0..self.num_exits {
-            let validator_index = validators_iter.next().expect("Insufficient validators.");
-
-            builder.insert_exit(
-                &state,
-                validator_index,
-                &keypairs[validator_index as usize].sk,
-                spec,
-            );
-        }
-        info!(
-            "Inserted {} exits.",
-            builder.block.body.voluntary_exits.len()
-        );
-
-        // Insert the maximum possible number of `Transfer` objects.
-        for _ in 0..self.num_transfers {
-            let validator_index = validators_iter.next().expect("Insufficient validators.");
-
-            // Manually set the validator to be withdrawn.
-            state.validator_registry[validator_index as usize].withdrawable_epoch =
-                state.previous_epoch(spec);
-
-            builder.insert_transfer(
-                &state,
-                validator_index,
-                validator_index,
-                1,
-                keypairs[validator_index as usize].clone(),
-                spec,
-            );
-        }
-        info!("Inserted {} transfers.", builder.block.body.transfers.len());
-
-        let mut block = self.block_builder.build(&keypair.sk, &state.fork, spec);
-
-        // Set the eth1 data to be different from the state.
-        block.eth1_data.block_hash = Hash256::from_slice(&vec![42; 32]);
-
-        (block, state)
-    }
-}
@@ -1,33 +0,0 @@
-use super::{get_attesting_indices, get_attesting_indices_unsorted};
-use itertools::{Either, Itertools};
-use types::*;
-
-/// Convert `attestation` to (almost) indexed-verifiable form.
-///
-/// Spec v0.6.3
-pub fn convert_to_indexed<T: EthSpec>(
-    state: &BeaconState<T>,
-    attestation: &Attestation,
-) -> Result<IndexedAttestation, BeaconStateError> {
-    let attesting_indices =
-        get_attesting_indices(state, &attestation.data, &attestation.aggregation_bitfield)?;
-
-    // We verify the custody bitfield by calling `get_attesting_indices_unsorted` and throwing
-    // away the result. This avoids double-sorting - the partition below takes care of the ordering.
-    get_attesting_indices_unsorted(state, &attestation.data, &attestation.custody_bitfield)?;
-
-    let (custody_bit_0_indices, custody_bit_1_indices) =
-        attesting_indices.into_iter().enumerate().partition_map(
-            |(committee_idx, validator_idx)| match attestation.custody_bitfield.get(committee_idx) {
-                Ok(true) => Either::Right(validator_idx as u64),
-                _ => Either::Left(validator_idx as u64),
-            },
-        );
-
-    Ok(IndexedAttestation {
-        custody_bit_0_indices,
-        custody_bit_1_indices,
-        data: attestation.data.clone(),
-        signature: attestation.signature.clone(),
-    })
-}
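Aside: the removed `convert_to_indexed` leaned on itertools' `partition_map` to split attesters by custody bit in a single pass. A minimal sketch of that combinator with illustrative data (assumes only the `itertools` crate):

use itertools::{Either, Itertools};

fn main() {
    // Split 0..6 into evens and odds in one pass, as partition_map does above.
    let (evens, odds): (Vec<u64>, Vec<u64>) = (0u64..6).partition_map(|n| {
        if n % 2 == 0 {
            Either::Left(n)
        } else {
            Either::Right(n)
        }
    });
    assert_eq!(evens, vec![0, 2, 4]);
    assert_eq!(odds, vec![1, 3, 5]);
}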
@@ -1,44 +1,33 @@
-use crate::common::verify_bitfield_length;
+use std::collections::BTreeSet;
 use types::*;

 /// Returns validator indices which participated in the attestation, sorted by increasing index.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.1
 pub fn get_attesting_indices<T: EthSpec>(
     state: &BeaconState<T>,
     attestation_data: &AttestationData,
-    bitfield: &Bitfield,
-) -> Result<Vec<usize>, BeaconStateError> {
-    get_attesting_indices_unsorted(state, attestation_data, bitfield).map(|mut indices| {
-        // Fast unstable sort is safe because validator indices are unique
-        indices.sort_unstable();
-        indices
-    })
-}
-
-/// Returns validator indices which participated in the attestation, unsorted.
-///
-/// Spec v0.6.3
-pub fn get_attesting_indices_unsorted<T: EthSpec>(
-    state: &BeaconState<T>,
-    attestation_data: &AttestationData,
-    bitfield: &Bitfield,
-) -> Result<Vec<usize>, BeaconStateError> {
+    bitlist: &BitList<T::MaxValidatorsPerCommittee>,
+) -> Result<BTreeSet<usize>, BeaconStateError> {
     let target_relative_epoch =
-        RelativeEpoch::from_epoch(state.current_epoch(), attestation_data.target_epoch)?;
+        RelativeEpoch::from_epoch(state.current_epoch(), attestation_data.target.epoch)?;

-    let committee =
-        state.get_crosslink_committee_for_shard(attestation_data.shard, target_relative_epoch)?;
+    let committee = state.get_crosslink_committee_for_shard(
+        attestation_data.crosslink.shard,
+        target_relative_epoch,
+    )?;

-    if !verify_bitfield_length(&bitfield, committee.committee.len()) {
+    /* TODO(freeze): re-enable this?
+    if bitlist.len() > committee.committee.len() {
         return Err(BeaconStateError::InvalidBitfield);
     }
+    */

     Ok(committee
         .committee
         .iter()
         .enumerate()
-        .filter_map(|(i, validator_index)| match bitfield.get(i) {
+        .filter_map(|(i, validator_index)| match bitlist.get(i) {
             Ok(true) => Some(*validator_index),
             _ => None,
         })
@@ -0,0 +1,56 @@
+use tree_hash::TreeHash;
+use types::*;
+
+/// Return the compact committee root at `relative_epoch`.
+///
+/// Spec v0.8.0
+pub fn get_compact_committees_root<T: EthSpec>(
+    state: &BeaconState<T>,
+    relative_epoch: RelativeEpoch,
+    spec: &ChainSpec,
+) -> Result<Hash256, BeaconStateError> {
+    let mut committees =
+        FixedVector::<_, T::ShardCount>::from_elem(CompactCommittee::<T>::default());
+    // FIXME: this is a spec bug, whereby the start shard for the epoch after the next epoch
+    // is mistakenly used. The start shard from the cache SHOULD work.
+    // Waiting on a release to fix https://github.com/ethereum/eth2.0-specs/issues/1315
+    let start_shard = if relative_epoch == RelativeEpoch::Next {
+        state.next_epoch_start_shard(spec)?
+    } else {
+        state.get_epoch_start_shard(relative_epoch)?
+    };
+
+    for committee_number in 0..state.get_committee_count(relative_epoch)? {
+        let shard = (start_shard + committee_number) % T::ShardCount::to_u64();
+        // FIXME: this is a partial workaround for the above, but it only works in the case
+        // where there's a committee for every shard in every epoch. It works for the minimal
+        // tests but not the mainnet ones.
+        let fake_shard = if relative_epoch == RelativeEpoch::Next {
+            (shard + 1) % T::ShardCount::to_u64()
+        } else {
+            shard
+        };
+
+        for &index in state
+            .get_crosslink_committee_for_shard(fake_shard, relative_epoch)?
+            .committee
+        {
+            let validator = state
+                .validators
+                .get(index)
+                .ok_or(BeaconStateError::UnknownValidator)?;
+            committees[shard as usize]
+                .pubkeys
+                .push(validator.pubkey.clone())?;
+            let compact_balance = validator.effective_balance / spec.effective_balance_increment;
+            // `index` (top 6 bytes) + `slashed` (16th bit) + `compact_balance` (bottom 15 bits)
+            let compact_validator: u64 =
+                ((index as u64) << 16) + (u64::from(validator.slashed) << 15) + compact_balance;
+            committees[shard as usize]
+                .compact_validators
+                .push(compact_validator)?;
+        }
+    }
+
+    Ok(Hash256::from_slice(&committees.tree_hash_root()))
+}
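Aside: the `compact_validator` encoding above packs three fields into one u64 — the validator index in the top 48 bits, the slashed flag at bit 15 (the 16th bit), and the compact balance in the low 15 bits. A sketch of the packing, with a matching (hypothetical, illustration-only) unpack to make the layout concrete:

fn pack(index: u64, slashed: bool, compact_balance: u64) -> u64 {
    // Same expression as in the diff: index << 16 | slashed << 15 | balance.
    debug_assert!(compact_balance < (1 << 15));
    (index << 16) + (u64::from(slashed) << 15) + compact_balance
}

fn unpack(compact: u64) -> (u64, bool, u64) {
    (
        compact >> 16,                 // index
        ((compact >> 15) & 1) == 1,    // slashed flag
        compact & ((1 << 15) - 1),     // compact balance
    )
}

fn main() {
    assert_eq!(unpack(pack(7, true, 100)), (7, true, 100));
}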
eth2/state_processing/src/common/get_indexed_attestation.rs (new file, 122 lines)
@@ -0,0 +1,122 @@
+use super::get_attesting_indices;
+use crate::per_block_processing::errors::{
+    AttestationInvalid as Invalid, AttestationValidationError as Error,
+};
+use types::*;
+
+/// Convert `attestation` to (almost) indexed-verifiable form.
+///
+/// Spec v0.8.0
+pub fn get_indexed_attestation<T: EthSpec>(
+    state: &BeaconState<T>,
+    attestation: &Attestation<T>,
+) -> Result<IndexedAttestation<T>, Error> {
+    let attesting_indices =
+        get_attesting_indices(state, &attestation.data, &attestation.aggregation_bits)?;
+
+    let custody_bit_1_indices =
+        get_attesting_indices(state, &attestation.data, &attestation.custody_bits)?;
+
+    verify!(
+        custody_bit_1_indices.is_subset(&attesting_indices),
+        Invalid::CustodyBitfieldNotSubset
+    );
+
+    let custody_bit_0_indices = &attesting_indices - &custody_bit_1_indices;
+
+    Ok(IndexedAttestation {
+        custody_bit_0_indices: VariableList::new(
+            custody_bit_0_indices
+                .into_iter()
+                .map(|x| x as u64)
+                .collect(),
+        )?,
+        custody_bit_1_indices: VariableList::new(
+            custody_bit_1_indices
+                .into_iter()
+                .map(|x| x as u64)
+                .collect(),
+        )?,
+        data: attestation.data.clone(),
+        signature: attestation.signature.clone(),
+    })
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use itertools::{Either, Itertools};
+    use types::test_utils::*;
+
+    #[test]
+    fn custody_bitfield_indexing() {
+        let validator_count = 128;
+        let spec = MinimalEthSpec::default_spec();
+        let state_builder =
+            TestingBeaconStateBuilder::<MinimalEthSpec>::from_default_keypairs_file_if_exists(
+                validator_count,
+                &spec,
+            );
+        let (mut state, keypairs) = state_builder.build();
+        state.build_all_caches(&spec).unwrap();
+        state.slot += 1;
+
+        let shard = 0;
+        let cc = state
+            .get_crosslink_committee_for_shard(shard, RelativeEpoch::Current)
+            .unwrap();
+
+        // Make a third of the validators sign with custody bit 0, a third with custody bit 1
+        // and a third not sign at all.
+        assert!(
+            cc.committee.len() >= 4,
+            "need at least 4 validators per committee for this test to work"
+        );
+        let (mut bit_0_indices, mut bit_1_indices): (Vec<_>, Vec<_>) = cc
+            .committee
+            .iter()
+            .enumerate()
+            .filter(|(i, _)| i % 3 != 0)
+            .partition_map(|(i, index)| {
+                if i % 3 == 1 {
+                    Either::Left(*index)
+                } else {
+                    Either::Right(*index)
+                }
+            });
+        assert!(!bit_0_indices.is_empty());
+        assert!(!bit_1_indices.is_empty());
+
+        let bit_0_keys = bit_0_indices
+            .iter()
+            .map(|validator_index| &keypairs[*validator_index].sk)
+            .collect::<Vec<_>>();
+        let bit_1_keys = bit_1_indices
+            .iter()
+            .map(|validator_index| &keypairs[*validator_index].sk)
+            .collect::<Vec<_>>();
+
+        let mut attestation_builder =
+            TestingAttestationBuilder::new(&state, &cc.committee, cc.slot, shard, &spec);
+        attestation_builder
+            .sign(&bit_0_indices, &bit_0_keys, &state.fork, &spec, false)
+            .sign(&bit_1_indices, &bit_1_keys, &state.fork, &spec, true);
+        let attestation = attestation_builder.build();
+
+        let indexed_attestation = get_indexed_attestation(&state, &attestation).unwrap();
+
+        bit_0_indices.sort();
+        bit_1_indices.sort();
+
+        assert!(indexed_attestation
+            .custody_bit_0_indices
+            .iter()
+            .copied()
+            .eq(bit_0_indices.iter().map(|idx| *idx as u64)));
+        assert!(indexed_attestation
+            .custody_bit_1_indices
+            .iter()
+            .copied()
+            .eq(bit_1_indices.iter().map(|idx| *idx as u64)));
+    }
+}
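Aside: `get_indexed_attestation` relies on `BTreeSet` set algebra — `is_subset` for the custody check and `&a - &b` (set difference) to derive the bit-0 indices. A minimal sketch with illustrative indices (standard library only):

use std::collections::BTreeSet;

fn main() {
    let attesting: BTreeSet<usize> = [1, 4, 7, 9].iter().copied().collect();
    let bit_1: BTreeSet<usize> = [4, 9].iter().copied().collect();
    assert!(bit_1.is_subset(&attesting)); // custody-bit-1 voters must have attested
    let bit_0 = &attesting - &bit_1; // everyone else attested with custody bit 0
    let expected: BTreeSet<usize> = [1, 7].iter().copied().collect();
    assert_eq!(bit_0, expected);
}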
@@ -3,23 +3,23 @@ use types::{BeaconStateError as Error, *};

 /// Initiate the exit of the validator of the given `index`.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.1
 pub fn initiate_validator_exit<T: EthSpec>(
     state: &mut BeaconState<T>,
     index: usize,
     spec: &ChainSpec,
 ) -> Result<(), Error> {
-    if index >= state.validator_registry.len() {
+    if index >= state.validators.len() {
         return Err(Error::UnknownValidator);
     }

     // Return if the validator already initiated exit
-    if state.validator_registry[index].exit_epoch != spec.far_future_epoch {
+    if state.validators[index].exit_epoch != spec.far_future_epoch {
         return Ok(());
     }

     // Compute exit queue epoch
-    let delayed_epoch = state.get_delayed_activation_exit_epoch(state.current_epoch(), spec);
+    let delayed_epoch = state.compute_activation_exit_epoch(state.current_epoch(), spec);
     let mut exit_queue_epoch = state
         .exit_cache
         .max_epoch()
@@ -31,8 +31,8 @@ pub fn initiate_validator_exit<T: EthSpec>(
     }

     state.exit_cache.record_validator_exit(exit_queue_epoch);
-    state.validator_registry[index].exit_epoch = exit_queue_epoch;
-    state.validator_registry[index].withdrawable_epoch =
+    state.validators[index].exit_epoch = exit_queue_epoch;
+    state.validators[index].withdrawable_epoch =
         exit_queue_epoch + spec.min_validator_withdrawability_delay;

     Ok(())
@@ -1,11 +1,11 @@
-mod convert_to_indexed;
 mod get_attesting_indices;
+mod get_compact_committees_root;
+mod get_indexed_attestation;
 mod initiate_validator_exit;
 mod slash_validator;
-mod verify_bitfield;

-pub use convert_to_indexed::convert_to_indexed;
-pub use get_attesting_indices::{get_attesting_indices, get_attesting_indices_unsorted};
+pub use get_attesting_indices::get_attesting_indices;
+pub use get_compact_committees_root::get_compact_committees_root;
+pub use get_indexed_attestation::get_indexed_attestation;
 pub use initiate_validator_exit::initiate_validator_exit;
 pub use slash_validator::slash_validator;
-pub use verify_bitfield::verify_bitfield_length;
@@ -1,45 +1,51 @@
 use crate::common::initiate_validator_exit;
+use std::cmp;
 use types::{BeaconStateError as Error, *};

 /// Slash the validator with index ``index``.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.0
 pub fn slash_validator<T: EthSpec>(
     state: &mut BeaconState<T>,
     slashed_index: usize,
     opt_whistleblower_index: Option<usize>,
     spec: &ChainSpec,
 ) -> Result<(), Error> {
-    if slashed_index >= state.validator_registry.len() || slashed_index >= state.balances.len() {
+    if slashed_index >= state.validators.len() || slashed_index >= state.balances.len() {
         return Err(BeaconStateError::UnknownValidator);
     }

-    let current_epoch = state.current_epoch();
+    let epoch = state.current_epoch();

     initiate_validator_exit(state, slashed_index, spec)?;

-    state.validator_registry[slashed_index].slashed = true;
-    state.validator_registry[slashed_index].withdrawable_epoch =
-        current_epoch + Epoch::from(T::latest_slashed_exit_length());
-    let slashed_balance = state.get_effective_balance(slashed_index, spec)?;
-    state.set_slashed_balance(
-        current_epoch,
-        state.get_slashed_balance(current_epoch)? + slashed_balance,
+    state.validators[slashed_index].slashed = true;
+    state.validators[slashed_index].withdrawable_epoch = cmp::max(
+        state.validators[slashed_index].withdrawable_epoch,
+        epoch + Epoch::from(T::EpochsPerSlashingsVector::to_u64()),
+    );
+    let validator_effective_balance = state.get_effective_balance(slashed_index, spec)?;
+    state.set_slashings(
+        epoch,
+        state.get_slashings(epoch)? + validator_effective_balance,
     )?;
+    safe_sub_assign!(
+        state.balances[slashed_index],
+        validator_effective_balance / spec.min_slashing_penalty_quotient
+    );

+    // Apply proposer and whistleblower rewards
     let proposer_index =
         state.get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec)?;
     let whistleblower_index = opt_whistleblower_index.unwrap_or(proposer_index);
-    let whistleblowing_reward = slashed_balance / spec.whistleblowing_reward_quotient;
-    let proposer_reward = whistleblowing_reward / spec.proposer_reward_quotient;
+    let whistleblower_reward = validator_effective_balance / spec.whistleblower_reward_quotient;
+    let proposer_reward = whistleblower_reward / spec.proposer_reward_quotient;

     safe_add_assign!(state.balances[proposer_index], proposer_reward);
     safe_add_assign!(
         state.balances[whistleblower_index],
-        whistleblowing_reward.saturating_sub(proposer_reward)
+        whistleblower_reward.saturating_sub(proposer_reward)
     );
-    safe_sub_assign!(state.balances[slashed_index], whistleblowing_reward);

     Ok(())
 }
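Aside: a worked example of the reward split above, using 512 for `whistleblower_reward_quotient` and 8 for `proposer_reward_quotient` (the v0.8 mainnet-style values — assumed here, check the spec constants) and a 32 ETH effective balance in Gwei:

fn main() {
    let validator_effective_balance: u64 = 32_000_000_000; // 32 ETH in Gwei
    let whistleblower_reward = validator_effective_balance / 512;
    let proposer_reward = whistleblower_reward / 8;
    assert_eq!(whistleblower_reward, 62_500_000);
    assert_eq!(proposer_reward, 7_812_500);
    // The whistleblower keeps whatever remains after the proposer's cut.
    assert_eq!(whistleblower_reward.saturating_sub(proposer_reward), 54_687_500);
}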
@@ -1,79 +0,0 @@
-use types::*;
-
-/// Verify ``bitfield`` against the ``committee_size``.
-///
-/// Is title `verify_bitfield` in spec.
-///
-/// Spec v0.6.3
-pub fn verify_bitfield_length(bitfield: &Bitfield, committee_size: usize) -> bool {
-    if bitfield.num_bytes() != ((committee_size + 7) / 8) {
-        return false;
-    }
-
-    for i in committee_size..(bitfield.num_bytes() * 8) {
-        if bitfield.get(i).unwrap_or(false) {
-            return false;
-        }
-    }
-
-    true
-}
-
-#[cfg(test)]
-mod test {
-    use super::*;
-
-    #[test]
-    fn bitfield_length() {
-        assert_eq!(
-            verify_bitfield_length(&Bitfield::from_bytes(&[0b0000_0001]), 4),
-            true
-        );
-
-        assert_eq!(
-            verify_bitfield_length(&Bitfield::from_bytes(&[0b0001_0001]), 4),
-            false
-        );
-
-        assert_eq!(
-            verify_bitfield_length(&Bitfield::from_bytes(&[0b0000_0000]), 4),
-            true
-        );
-
-        assert_eq!(
-            verify_bitfield_length(&Bitfield::from_bytes(&[0b1000_0000]), 8),
-            true
-        );
-
-        assert_eq!(
-            verify_bitfield_length(&Bitfield::from_bytes(&[0b1000_0000, 0b0000_0000]), 16),
-            true
-        );
-
-        assert_eq!(
-            verify_bitfield_length(&Bitfield::from_bytes(&[0b1000_0000, 0b0000_0000]), 15),
-            false
-        );
-
-        assert_eq!(
-            verify_bitfield_length(&Bitfield::from_bytes(&[0b0000_0000, 0b0000_0000]), 8),
-            false
-        );
-
-        assert_eq!(
-            verify_bitfield_length(
-                &Bitfield::from_bytes(&[0b0000_0000, 0b0000_0000, 0b0000_0000]),
-                8
-            ),
-            false
-        );
-
-        assert_eq!(
-            verify_bitfield_length(
-                &Bitfield::from_bytes(&[0b0000_0000, 0b0000_0000, 0b0000_0000]),
-                24
-            ),
-            true
-        );
-    }
-}
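Aside: the `(committee_size + 7) / 8` in the removed length check is the usual integer ceiling-division idiom — the number of bytes needed to hold `committee_size` bits. A tiny sketch:

fn bytes_for_bits(n: usize) -> usize {
    (n + 7) / 8 // ceil(n / 8) in integer arithmetic
}

fn main() {
    assert_eq!(bytes_for_bits(4), 1);
    assert_eq!(bytes_for_bits(8), 1);
    assert_eq!(bytes_for_bits(9), 2);
}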
eth2/state_processing/src/genesis.rs (new file, 73 lines)
@@ -0,0 +1,73 @@
+use super::per_block_processing::{errors::BlockProcessingError, process_deposit};
+use crate::common::get_compact_committees_root;
+use tree_hash::TreeHash;
+use types::typenum::U4294967296;
+use types::*;
+
+/// Initialize a `BeaconState` from genesis data.
+///
+/// Spec v0.8.0
+// TODO: this is quite inefficient and we probably want to rethink how we do this
+pub fn initialize_beacon_state_from_eth1<T: EthSpec>(
+    eth1_block_hash: Hash256,
+    eth1_timestamp: u64,
+    deposits: Vec<Deposit>,
+    spec: &ChainSpec,
+) -> Result<BeaconState<T>, BlockProcessingError> {
+    let genesis_time =
+        eth1_timestamp - eth1_timestamp % spec.seconds_per_day + 2 * spec.seconds_per_day;
+    let eth1_data = Eth1Data {
+        // Temporary deposit root
+        deposit_root: Hash256::zero(),
+        deposit_count: deposits.len() as u64,
+        block_hash: eth1_block_hash,
+    };
+    let mut state = BeaconState::new(genesis_time, eth1_data, spec);
+
+    // Process deposits
+    let leaves: Vec<_> = deposits
+        .iter()
+        .map(|deposit| deposit.data.clone())
+        .collect();
+    for (index, deposit) in deposits.into_iter().enumerate() {
+        let deposit_data_list = VariableList::<_, U4294967296>::from(leaves[..=index].to_vec());
+        state.eth1_data.deposit_root = Hash256::from_slice(&deposit_data_list.tree_hash_root());
+        process_deposit(&mut state, &deposit, spec, true)?;
+    }
+
+    // Process activations
+    for (index, validator) in state.validators.iter_mut().enumerate() {
+        let balance = state.balances[index];
+        validator.effective_balance = std::cmp::min(
+            balance - balance % spec.effective_balance_increment,
+            spec.max_effective_balance,
+        );
+        if validator.effective_balance == spec.max_effective_balance {
+            validator.activation_eligibility_epoch = T::genesis_epoch();
+            validator.activation_epoch = T::genesis_epoch();
+        }
+    }
+
+    // Now that we have our validators, initialize the caches (including the committees)
+    state.build_all_caches(spec)?;
+
+    // Populate active_index_roots and compact_committees_roots
+    let indices_list = VariableList::<usize, T::ValidatorRegistryLimit>::from(
+        state.get_active_validator_indices(T::genesis_epoch()),
+    );
+    let active_index_root = Hash256::from_slice(&indices_list.tree_hash_root());
+    let committee_root = get_compact_committees_root(&state, RelativeEpoch::Current, spec)?;
+    state.fill_active_index_roots_with(active_index_root);
+    state.fill_compact_committees_roots_with(committee_root);
+
+    Ok(state)
+}
+
+/// Determine whether a candidate genesis state is suitable for starting the chain.
+///
+/// Spec v0.8.1
+pub fn is_valid_genesis_state<T: EthSpec>(state: &BeaconState<T>, spec: &ChainSpec) -> bool {
+    state.genesis_time >= spec.min_genesis_time
+        && state.get_active_validator_indices(T::genesis_epoch()).len() as u64
+            >= spec.min_genesis_active_validator_count
+}
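Aside: the `genesis_time` expression in `initialize_beacon_state_from_eth1` rounds the eth1 timestamp down to the previous midnight, then adds two days. A worked example (assuming `seconds_per_day` = 86_400; the timestamp is illustrative):

fn main() {
    let seconds_per_day: u64 = 86_400;
    let eth1_timestamp: u64 = 1_563_200_000;
    let genesis_time = eth1_timestamp - eth1_timestamp % seconds_per_day + 2 * seconds_per_day;
    assert_eq!(genesis_time, 1_563_321_600);
    assert_eq!(genesis_time % seconds_per_day, 0); // always lands on a day boundary
}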
@@ -1,56 +0,0 @@
-use super::per_block_processing::{errors::BlockProcessingError, process_deposits};
-use tree_hash::TreeHash;
-use types::*;
-
-pub enum GenesisError {
-    BlockProcessingError(BlockProcessingError),
-    BeaconStateError(BeaconStateError),
-}
-
-/// Returns the genesis `BeaconState`
-///
-/// Spec v0.6.3
-pub fn get_genesis_beacon_state<T: EthSpec>(
-    genesis_validator_deposits: &[Deposit],
-    genesis_time: u64,
-    genesis_eth1_data: Eth1Data,
-    spec: &ChainSpec,
-) -> Result<BeaconState<T>, BlockProcessingError> {
-    // Get the genesis `BeaconState`
-    let mut state = BeaconState::genesis(genesis_time, genesis_eth1_data, spec);
-
-    // Process genesis deposits.
-    process_deposits(&mut state, genesis_validator_deposits, spec)?;
-
-    // Process genesis activations.
-    for validator in &mut state.validator_registry {
-        if validator.effective_balance >= spec.max_effective_balance {
-            validator.activation_eligibility_epoch = T::genesis_epoch();
-            validator.activation_epoch = T::genesis_epoch();
-        }
-    }
-
-    // Ensure the current epoch cache is built.
-    state.build_committee_cache(RelativeEpoch::Current, spec)?;
-
-    // Set all the active index roots to be the genesis active index root.
-    let active_validator_indices = state
-        .get_cached_active_validator_indices(RelativeEpoch::Current)?
-        .to_vec();
-    let genesis_active_index_root = Hash256::from_slice(&active_validator_indices.tree_hash_root());
-    state.fill_active_index_roots_with(genesis_active_index_root);
-
-    Ok(state)
-}
-
-impl From<BlockProcessingError> for GenesisError {
-    fn from(e: BlockProcessingError) -> GenesisError {
-        GenesisError::BlockProcessingError(e)
-    }
-}
-
-impl From<BeaconStateError> for GenesisError {
-    fn from(e: BeaconStateError) -> GenesisError {
-        GenesisError::BeaconStateError(e)
-    }
-}
@@ -2,12 +2,12 @@
 mod macros;

 pub mod common;
-pub mod get_genesis_state;
+pub mod genesis;
 pub mod per_block_processing;
 pub mod per_epoch_processing;
 pub mod per_slot_processing;

-pub use get_genesis_state::get_genesis_beacon_state;
+pub use genesis::{initialize_beacon_state_from_eth1, is_valid_genesis_state};
 pub use per_block_processing::{
     errors::{BlockInvalid, BlockProcessingError},
     per_block_processing, per_block_processing_without_verifying_block_signature,
@@ -1,6 +1,9 @@
 use crate::common::{initiate_validator_exit, slash_validator};
 use errors::{BlockInvalid as Invalid, BlockProcessingError as Error, IntoWithIndex};
 use rayon::prelude::*;
+use std::collections::HashSet;
+use std::convert::TryInto;
+use std::iter::FromIterator;
 use tree_hash::{SignedRoot, TreeHash};
 use types::*;

@@ -8,30 +11,29 @@ pub use self::verify_attester_slashing::{
     get_slashable_indices, get_slashable_indices_modular, verify_attester_slashing,
 };
 pub use self::verify_proposer_slashing::verify_proposer_slashing;
-pub use validate_attestation::{
-    validate_attestation, validate_attestation_time_independent_only,
-    validate_attestation_without_signature,
+pub use is_valid_indexed_attestation::{
+    is_valid_indexed_attestation, is_valid_indexed_attestation_without_signature,
+};
+pub use verify_attestation::{
+    verify_attestation, verify_attestation_time_independent_only,
+    verify_attestation_without_signature,
 };
 pub use verify_deposit::{
-    get_existing_validator_index, verify_deposit_index, verify_deposit_merkle_proof,
-    verify_deposit_signature,
+    get_existing_validator_index, verify_deposit_merkle_proof, verify_deposit_signature,
 };
 pub use verify_exit::{verify_exit, verify_exit_time_independent_only};
-pub use verify_indexed_attestation::{
-    verify_indexed_attestation, verify_indexed_attestation_without_signature,
-};
 pub use verify_transfer::{
     execute_transfer, verify_transfer, verify_transfer_time_independent_only,
 };

 pub mod block_processing_builder;
 pub mod errors;
+mod is_valid_indexed_attestation;
 pub mod tests;
-mod validate_attestation;
+mod verify_attestation;
 mod verify_attester_slashing;
 mod verify_deposit;
 mod verify_exit;
-mod verify_indexed_attestation;
 mod verify_proposer_slashing;
 mod verify_transfer;

@@ -40,10 +42,10 @@ mod verify_transfer;
 /// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise
 /// returns an error describing why the block was invalid or how the function failed to execute.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.0
 pub fn per_block_processing<T: EthSpec>(
     state: &mut BeaconState<T>,
-    block: &BeaconBlock,
+    block: &BeaconBlock<T>,
     spec: &ChainSpec,
 ) -> Result<(), Error> {
     per_block_processing_signature_optional(state, block, true, spec)
@@ -55,10 +57,10 @@ pub fn per_block_processing<T: EthSpec>(
 /// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise
 /// returns an error describing why the block was invalid or how the function failed to execute.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.0
 pub fn per_block_processing_without_verifying_block_signature<T: EthSpec>(
     state: &mut BeaconState<T>,
-    block: &BeaconBlock,
+    block: &BeaconBlock<T>,
     spec: &ChainSpec,
 ) -> Result<(), Error> {
     per_block_processing_signature_optional(state, block, false, spec)
@@ -70,10 +72,10 @@ pub fn per_block_processing_without_verifying_block_signature<T: EthSpec>(
 /// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise
 /// returns an error describing why the block was invalid or how the function failed to execute.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.0
 fn per_block_processing_signature_optional<T: EthSpec>(
     mut state: &mut BeaconState<T>,
-    block: &BeaconBlock,
+    block: &BeaconBlock<T>,
     should_verify_block_signature: bool,
     spec: &ChainSpec,
 ) -> Result<(), Error> {
@@ -84,7 +86,7 @@ fn per_block_processing_signature_optional<T: EthSpec>(
     state.build_committee_cache(RelativeEpoch::Current, spec)?;

     process_randao(&mut state, &block, &spec)?;
-    process_eth1_data(&mut state, &block.body.eth1_data, spec)?;
+    process_eth1_data(&mut state, &block.body.eth1_data)?;
     process_proposer_slashings(&mut state, &block.body.proposer_slashings, spec)?;
     process_attester_slashings(&mut state, &block.body.attester_slashings, spec)?;
     process_attestations(&mut state, &block.body.attestations, spec)?;
@@ -97,10 +99,10 @@ fn per_block_processing_signature_optional<T: EthSpec>(

 /// Processes the block header.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.0
 pub fn process_block_header<T: EthSpec>(
     state: &mut BeaconState<T>,
-    block: &BeaconBlock,
+    block: &BeaconBlock<T>,
     spec: &ChainSpec,
     should_verify_block_signature: bool,
 ) -> Result<(), Error> {
@@ -109,18 +111,18 @@ pub fn process_block_header<T: EthSpec>(
     let expected_previous_block_root =
         Hash256::from_slice(&state.latest_block_header.signed_root());
     verify!(
-        block.previous_block_root == expected_previous_block_root,
+        block.parent_root == expected_previous_block_root,
         Invalid::ParentBlockRootMismatch {
             state: expected_previous_block_root,
-            block: block.previous_block_root,
+            block: block.parent_root,
         }
     );

-    state.latest_block_header = block.temporary_block_header(spec);
+    state.latest_block_header = block.temporary_block_header();

     // Verify proposer is not slashed
     let proposer_idx = state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?;
-    let proposer = &state.validator_registry[proposer_idx];
+    let proposer = &state.validators[proposer_idx];
     verify!(!proposer.slashed, Invalid::ProposerSlashed(proposer_idx));

     if should_verify_block_signature {
@@ -132,13 +134,13 @@ pub fn process_block_header<T: EthSpec>(

 /// Verifies the signature of a block.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.0
 pub fn verify_block_signature<T: EthSpec>(
     state: &BeaconState<T>,
-    block: &BeaconBlock,
+    block: &BeaconBlock<T>,
     spec: &ChainSpec,
 ) -> Result<(), Error> {
-    let block_proposer = &state.validator_registry
+    let block_proposer = &state.validators
         [state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?];

     let domain = spec.get_domain(
@@ -160,16 +162,16 @@ pub fn verify_block_signature<T: EthSpec>(
 /// Verifies the `randao_reveal` against the block's proposer pubkey and updates
 /// `state.latest_randao_mixes`.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.0
 pub fn process_randao<T: EthSpec>(
     state: &mut BeaconState<T>,
-    block: &BeaconBlock,
+    block: &BeaconBlock<T>,
     spec: &ChainSpec,
 ) -> Result<(), Error> {
-    let block_proposer = &state.validator_registry
+    let block_proposer = &state.validators
         [state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?];

-    // Verify the RANDAO is a valid signature of the proposer.
+    // Verify RANDAO reveal.
     verify!(
         block.body.randao_reveal.verify(
             &state.current_epoch().tree_hash_root()[..],
@ -191,22 +193,21 @@ pub fn process_randao<T: EthSpec>(
|
|||||||
|
|
||||||
/// Update the `state.eth1_data_votes` based upon the `eth1_data` provided.
|
/// Update the `state.eth1_data_votes` based upon the `eth1_data` provided.
|
||||||
///
|
///
|
||||||
/// Spec v0.6.3
|
/// Spec v0.8.0
|
||||||
pub fn process_eth1_data<T: EthSpec>(
|
pub fn process_eth1_data<T: EthSpec>(
|
||||||
state: &mut BeaconState<T>,
|
state: &mut BeaconState<T>,
|
||||||
eth1_data: &Eth1Data,
|
eth1_data: &Eth1Data,
|
||||||
spec: &ChainSpec,
|
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
state.eth1_data_votes.push(eth1_data.clone());
|
state.eth1_data_votes.push(eth1_data.clone())?;
|
||||||
|
|
||||||
let num_votes = state
|
let num_votes = state
|
||||||
.eth1_data_votes
|
.eth1_data_votes
|
||||||
.iter()
|
.iter()
|
||||||
.filter(|vote| *vote == eth1_data)
|
.filter(|vote| *vote == eth1_data)
|
||||||
.count() as u64;
|
.count();
|
||||||
|
|
||||||
if num_votes * 2 > spec.slots_per_eth1_voting_period {
|
if num_votes * 2 > T::SlotsPerEth1VotingPeriod::to_usize() {
|
||||||
state.latest_eth1_data = eth1_data.clone();
|
state.eth1_data = eth1_data.clone();
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
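The eth1 vote only takes effect on a strict majority of the voting period's slots. A minimal sketch of the rule, assuming a hypothetical 1024-slot period in place of `T::SlotsPerEth1VotingPeriod`:

```rust
// Hedged sketch of the strict-majority rule above.
fn eth1_vote_wins(num_votes: usize) -> bool {
    const SLOTS_PER_ETH1_VOTING_PERIOD: usize = 1024; // assumption for illustration
    // `* 2 >` is a strict majority: 512 identical votes out of 1024 is not
    // enough, 513 is.
    num_votes * 2 > SLOTS_PER_ETH1_VOTING_PERIOD
}
```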
@@ -217,17 +218,12 @@ pub fn process_eth1_data<T: EthSpec>(
 /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
 /// an `Err` describing the invalid object or cause of failure.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.0
 pub fn process_proposer_slashings<T: EthSpec>(
     state: &mut BeaconState<T>,
     proposer_slashings: &[ProposerSlashing],
     spec: &ChainSpec,
 ) -> Result<(), Error> {
-    verify!(
-        proposer_slashings.len() as u64 <= spec.max_proposer_slashings,
-        Invalid::MaxProposerSlashingsExceeded
-    );
-
     // Verify proposer slashings in parallel.
     proposer_slashings
         .par_iter()
@@ -250,21 +246,15 @@ pub fn process_proposer_slashings<T: EthSpec>(
 /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
 /// an `Err` describing the invalid object or cause of failure.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.0
 pub fn process_attester_slashings<T: EthSpec>(
     state: &mut BeaconState<T>,
-    attester_slashings: &[AttesterSlashing],
+    attester_slashings: &[AttesterSlashing<T>],
     spec: &ChainSpec,
 ) -> Result<(), Error> {
-    verify!(
-        attester_slashings.len() as u64 <= spec.max_attester_slashings,
-        Invalid::MaxAttesterSlashingsExceed
-    );
-
     // Verify the `IndexedAttestation`s in parallel (these are the resource-consuming objects, not
     // the `AttesterSlashing`s themselves).
-    let mut indexed_attestations: Vec<&IndexedAttestation> =
-        Vec::with_capacity(attester_slashings.len() * 2);
+    let mut indexed_attestations: Vec<&_> = Vec::with_capacity(attester_slashings.len() * 2);
     for attester_slashing in attester_slashings {
         indexed_attestations.push(&attester_slashing.attestation_1);
         indexed_attestations.push(&attester_slashing.attestation_2);
@@ -275,7 +265,7 @@ pub fn process_attester_slashings<T: EthSpec>(
         .par_iter()
         .enumerate()
         .try_for_each(|(i, indexed_attestation)| {
-            verify_indexed_attestation(&state, indexed_attestation, spec)
+            is_valid_indexed_attestation(&state, indexed_attestation, spec)
                 .map_err(|e| e.into_with_index(i))
         })?;
     let all_indexed_attestations_have_been_checked = true;
@@ -308,17 +298,12 @@ pub fn process_attester_slashings<T: EthSpec>(
 /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
 /// an `Err` describing the invalid object or cause of failure.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.0
 pub fn process_attestations<T: EthSpec>(
     state: &mut BeaconState<T>,
-    attestations: &[Attestation],
+    attestations: &[Attestation<T>],
     spec: &ChainSpec,
 ) -> Result<(), Error> {
-    verify!(
-        attestations.len() as u64 <= spec.max_attestations,
-        Invalid::MaxAttestationsExceeded
-    );
-
     // Ensure the previous epoch cache exists.
     state.build_committee_cache(RelativeEpoch::Previous, spec)?;

@@ -327,25 +312,27 @@ pub fn process_attestations<T: EthSpec>(
         .par_iter()
         .enumerate()
         .try_for_each(|(i, attestation)| {
-            validate_attestation(state, attestation, spec).map_err(|e| e.into_with_index(i))
+            verify_attestation(state, attestation, spec).map_err(|e| e.into_with_index(i))
         })?;

     // Update the state in series.
     let proposer_index =
         state.get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec)? as u64;
     for attestation in attestations {
-        let attestation_slot = state.get_attestation_slot(&attestation.data)?;
+        let attestation_slot = state.get_attestation_data_slot(&attestation.data)?;
         let pending_attestation = PendingAttestation {
-            aggregation_bitfield: attestation.aggregation_bitfield.clone(),
+            aggregation_bits: attestation.aggregation_bits.clone(),
             data: attestation.data.clone(),
             inclusion_delay: (state.slot - attestation_slot).as_u64(),
             proposer_index,
         };

-        if attestation.data.target_epoch == state.current_epoch() {
-            state.current_epoch_attestations.push(pending_attestation)
+        if attestation.data.target.epoch == state.current_epoch() {
+            state.current_epoch_attestations.push(pending_attestation)?;
         } else {
-            state.previous_epoch_attestations.push(pending_attestation)
+            state
+                .previous_epoch_attestations
+                .push(pending_attestation)?;
         }
     }

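The trailing `?` on the `push` calls is new: the attestation lists are now fixed-capacity SSZ lists whose `push` is fallible. A hedged sketch of that shape, with a hypothetical bounded list rather than the real `ssz_types` API:

```rust
// Hedged sketch: why `.push(...)` now returns a Result. A hypothetical
// bounded list refuses pushes beyond its capacity, mirroring (but not
// reproducing) the ssz_types behaviour relied on above.
struct Bounded<T> {
    items: Vec<T>,
    max: usize,
}

impl<T> Bounded<T> {
    fn push(&mut self, item: T) -> Result<(), &'static str> {
        if self.items.len() >= self.max {
            return Err("list is at its SSZ capacity"); // caller propagates with `?`
        }
        self.items.push(item);
        Ok(())
    }
}
```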
@@ -357,7 +344,7 @@ pub fn process_attestations<T: EthSpec>(
 /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
 /// an `Err` describing the invalid object or cause of failure.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.0
 pub fn process_deposits<T: EthSpec>(
     state: &mut BeaconState<T>,
     deposits: &[Deposit],
@@ -366,64 +353,87 @@ pub fn process_deposits<T: EthSpec>(
     verify!(
         deposits.len() as u64
             == std::cmp::min(
-                spec.max_deposits,
-                state.latest_eth1_data.deposit_count - state.deposit_index
+                T::MaxDeposits::to_u64(),
+                state.eth1_data.deposit_count - state.eth1_deposit_index
             ),
         Invalid::DepositCountInvalid
     );

-    // Verify deposits in parallel.
+    // Verify merkle proofs in parallel.
     deposits
         .par_iter()
         .enumerate()
         .try_for_each(|(i, deposit)| {
-            verify_deposit_merkle_proof(state, deposit, spec).map_err(|e| e.into_with_index(i))
+            verify_deposit_merkle_proof(state, deposit, state.eth1_deposit_index + i as u64, spec)
+                .map_err(|e| e.into_with_index(i))
         })?;

-    // Check `state.deposit_index` and update the state in series.
-    for (i, deposit) in deposits.iter().enumerate() {
-        verify_deposit_index(state, deposit).map_err(|e| e.into_with_index(i))?;
-
-        state.deposit_index += 1;
-
-        // Ensure the state's pubkey cache is fully up-to-date, it will be used to check to see if the
-        // depositing validator already exists in the registry.
-        state.update_pubkey_cache()?;
-
-        // Get an `Option<u64>` where `u64` is the validator index if this deposit public key
-        // already exists in the beacon_state.
-        let validator_index =
-            get_existing_validator_index(state, deposit).map_err(|e| e.into_with_index(i))?;
-
-        let amount = deposit.data.amount;
-
-        if let Some(index) = validator_index {
-            // Update the existing validator balance.
-            safe_add_assign!(state.balances[index as usize], amount);
-        } else {
-            // The signature should be checked for new validators. Return early for a bad
-            // signature.
-            if verify_deposit_signature(state, deposit, spec).is_err() {
-                return Ok(());
-            }
-
-            // Create a new validator.
-            let validator = Validator {
-                pubkey: deposit.data.pubkey.clone(),
-                withdrawal_credentials: deposit.data.withdrawal_credentials,
-                activation_eligibility_epoch: spec.far_future_epoch,
-                activation_epoch: spec.far_future_epoch,
-                exit_epoch: spec.far_future_epoch,
-                withdrawable_epoch: spec.far_future_epoch,
-                effective_balance: std::cmp::min(
-                    amount - amount % spec.effective_balance_increment,
-                    spec.max_effective_balance,
-                ),
-                slashed: false,
-            };
-            state.validator_registry.push(validator);
-            state.balances.push(deposit.data.amount);
-        }
+    // Update the state in series.
+    for deposit in deposits {
+        process_deposit(state, deposit, spec, false)?;
+    }
+
+    Ok(())
+}
+
+/// Process a single deposit, optionally verifying its merkle proof.
+///
+/// Spec v0.8.1
+pub fn process_deposit<T: EthSpec>(
+    state: &mut BeaconState<T>,
+    deposit: &Deposit,
+    spec: &ChainSpec,
+    verify_merkle_proof: bool,
+) -> Result<(), Error> {
+    let deposit_index = state.eth1_deposit_index as usize;
+    if verify_merkle_proof {
+        verify_deposit_merkle_proof(state, deposit, state.eth1_deposit_index, spec)
+            .map_err(|e| e.into_with_index(deposit_index))?;
+    }
+
+    state.eth1_deposit_index += 1;
+
+    // Ensure the state's pubkey cache is fully up-to-date, it will be used to check to see if the
+    // depositing validator already exists in the registry.
+    state.update_pubkey_cache()?;
+
+    let pubkey: PublicKey = match (&deposit.data.pubkey).try_into() {
+        Err(_) => return Ok(()), //bad public key => return early
+        Ok(k) => k,
+    };
+
+    // Get an `Option<u64>` where `u64` is the validator index if this deposit public key
+    // already exists in the beacon_state.
+    let validator_index = get_existing_validator_index(state, &pubkey)
+        .map_err(|e| e.into_with_index(deposit_index))?;
+
+    let amount = deposit.data.amount;
+
+    if let Some(index) = validator_index {
+        // Update the existing validator balance.
+        safe_add_assign!(state.balances[index as usize], amount);
+    } else {
+        // The signature should be checked for new validators. Return early for a bad
+        // signature.
+        if verify_deposit_signature(state, deposit, spec, &pubkey).is_err() {
+            return Ok(());
+        }
+
+        // Create a new validator.
+        let validator = Validator {
+            pubkey,
+            withdrawal_credentials: deposit.data.withdrawal_credentials,
+            activation_eligibility_epoch: spec.far_future_epoch,
+            activation_epoch: spec.far_future_epoch,
+            exit_epoch: spec.far_future_epoch,
+            withdrawable_epoch: spec.far_future_epoch,
+            effective_balance: std::cmp::min(
+                amount - amount % spec.effective_balance_increment,
+                spec.max_effective_balance,
+            ),
+            slashed: false,
+        };
+        state.validators.push(validator)?;
+        state.balances.push(deposit.data.amount)?;
     }

     Ok(())
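Extracting `process_deposit` gives callers a choice: the batch path above pre-verifies all proofs in parallel and passes `false`, while a standalone caller can pass `true` to check the proof inline. A sketch of such a hypothetical standalone caller, assuming the diff's types (`BeaconState`, `Deposit`, `ChainSpec`, `Error`) are in scope:

```rust
// Hedged sketch of a hypothetical caller; not a function from this diff.
fn apply_deposits<T: EthSpec>(
    state: &mut BeaconState<T>,
    deposits: &[Deposit],
    spec: &ChainSpec,
) -> Result<(), Error> {
    for deposit in deposits {
        // `true`: verify each merkle proof inline, since nothing was
        // pre-verified in parallel for this caller.
        process_deposit(state, deposit, spec, true)?;
    }
    Ok(())
}
```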
@@ -434,17 +444,12 @@ pub fn process_deposits<T: EthSpec>(
 /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
 /// an `Err` describing the invalid object or cause of failure.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.0
 pub fn process_exits<T: EthSpec>(
     state: &mut BeaconState<T>,
     voluntary_exits: &[VoluntaryExit],
     spec: &ChainSpec,
 ) -> Result<(), Error> {
-    verify!(
-        voluntary_exits.len() as u64 <= spec.max_voluntary_exits,
-        Invalid::MaxExitsExceeded
-    );
-
     // Verify exits in parallel.
     voluntary_exits
         .par_iter()
@@ -466,15 +471,16 @@ pub fn process_exits<T: EthSpec>(
 /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
 /// an `Err` describing the invalid object or cause of failure.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.0
 pub fn process_transfers<T: EthSpec>(
     state: &mut BeaconState<T>,
     transfers: &[Transfer],
     spec: &ChainSpec,
 ) -> Result<(), Error> {
+    // Verify that there are no duplicate transfers
     verify!(
-        transfers.len() as u64 <= spec.max_transfers,
-        Invalid::MaxTransfersExceed
+        transfers.len() == HashSet::<_>::from_iter(transfers).len(),
+        Invalid::DuplicateTransfers
     );

     transfers
@@ -4,8 +4,7 @@ use types::*;

 pub struct BlockProcessingBuilder<T: EthSpec> {
     pub state_builder: TestingBeaconStateBuilder<T>,
-    pub block_builder: TestingBeaconBlockBuilder,
-
+    pub block_builder: TestingBeaconBlockBuilder<T>,
     pub num_validators: usize,
 }

@@ -36,15 +35,15 @@ impl<T: EthSpec> BlockProcessingBuilder<T> {
         randao_sk: Option<SecretKey>,
         previous_block_root: Option<Hash256>,
         spec: &ChainSpec,
-    ) -> (BeaconBlock, BeaconState<T>) {
+    ) -> (BeaconBlock<T>, BeaconState<T>) {
         let (state, keypairs) = self.state_builder.build();
         let builder = &mut self.block_builder;

         builder.set_slot(state.slot);

         match previous_block_root {
-            Some(root) => builder.set_previous_block_root(root),
-            None => builder.set_previous_block_root(Hash256::from_slice(
+            Some(root) => builder.set_parent_root(root),
+            None => builder.set_parent_root(Hash256::from_slice(
                 &state.latest_block_header.signed_root(),
             )),
         }
@@ -55,13 +54,11 @@ impl<T: EthSpec> BlockProcessingBuilder<T> {
         let keypair = &keypairs[proposer_index];

         match randao_sk {
-            Some(sk) => builder.set_randao_reveal::<T>(&sk, &state.fork, spec),
-            None => builder.set_randao_reveal::<T>(&keypair.sk, &state.fork, spec),
+            Some(sk) => builder.set_randao_reveal(&sk, &state.fork, spec),
+            None => builder.set_randao_reveal(&keypair.sk, &state.fork, spec),
         }

-        let block = self
-            .block_builder
-            .build::<T>(&keypair.sk, &state.fork, spec);
+        let block = self.block_builder.build(&keypair.sk, &state.fork, spec);

         (block, state)
     }
@@ -59,6 +59,8 @@ pub enum BlockProcessingError {
     Invalid(BlockInvalid),
     /// Encountered a `BeaconStateError` whilst attempting to determine validity.
     BeaconStateError(BeaconStateError),
+    /// Encountered an `ssz_types::Error` whilst attempting to determine validity.
+    SszTypesError(ssz_types::Error),
 }

 impl_from_beacon_state_error!(BlockProcessingError);
@@ -78,6 +80,7 @@ pub enum BlockInvalid {
     MaxAttesterSlashingsExceed,
     MaxProposerSlashingsExceeded,
     DepositCountInvalid,
+    DuplicateTransfers,
     MaxExitsExceeded,
     MaxTransfersExceed,
     AttestationInvalid(usize, AttestationInvalid),
@@ -92,6 +95,15 @@ pub enum BlockInvalid {
     DepositProcessingFailed(usize),
     ExitInvalid(usize, ExitInvalid),
     TransferInvalid(usize, TransferInvalid),
+    // NOTE: this is only used in tests, normally a state root mismatch is handled
+    // in the beacon_chain rather than in state_processing
+    StateRootMismatch,
+}
+
+impl From<ssz_types::Error> for BlockProcessingError {
+    fn from(error: ssz_types::Error) -> Self {
+        BlockProcessingError::SszTypesError(error)
+    }
 }

 impl Into<BlockProcessingError> for BlockInvalid {
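Wiring `From<ssz_types::Error>` into the error enum is what lets all the new fallible `push` calls propagate with `?`. A self-contained sketch of the pattern, using stand-in types:

```rust
// Hedged sketch with stand-in types: a From impl is what makes
// `fallible_call()?` compile inside a function returning the outer error,
// without an explicit map_err at every call site.
#[derive(Debug)]
struct SszError;

#[derive(Debug)]
enum ProcessingError {
    SszTypes(SszError),
}

impl From<SszError> for ProcessingError {
    fn from(e: SszError) -> Self {
        ProcessingError::SszTypes(e)
    }
}

fn push_checked(ok: bool) -> Result<(), ProcessingError> {
    let fallible: Result<(), SszError> = if ok { Ok(()) } else { Err(SszError) };
    fallible?; // `?` applies the From impl on the Err path
    Ok(())
}
```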
@@ -116,8 +128,8 @@ pub enum AttestationValidationError {
 /// Describes why an object is invalid.
 #[derive(Debug, PartialEq)]
 pub enum AttestationInvalid {
-    /// Attestation references a pre-genesis slot.
-    PreGenesis { genesis: Slot, attestation: Slot },
+    /// Shard exceeds SHARD_COUNT.
+    BadShard,
     /// Attestation included before the inclusion delay.
     IncludedTooEarly {
         state: Slot,
@@ -128,27 +140,23 @@ pub enum AttestationInvalid {
     IncludedTooLate { state: Slot, attestation: Slot },
     /// Attestation target epoch does not match the current or previous epoch.
     BadTargetEpoch,
-    /// Attestation justified epoch does not match the states current or previous justified epoch.
+    /// Attestation justified checkpoint doesn't match the state's current or previous justified
+    /// checkpoint.
     ///
     /// `is_current` is `true` if the attestation was compared to the
-    /// `state.current_justified_epoch`, `false` if compared to `state.previous_justified_epoch`.
-    WrongJustifiedEpoch {
-        state: Epoch,
-        attestation: Epoch,
-        is_current: bool,
-    },
-    /// Attestation justified epoch root does not match root known to the state.
-    ///
-    /// `is_current` is `true` if the attestation was compared to the
-    /// `state.current_justified_epoch`, `false` if compared to `state.previous_justified_epoch`.
-    WrongJustifiedRoot {
-        state: Hash256,
-        attestation: Hash256,
+    /// `state.current_justified_checkpoint`, `false` if compared to `state.previous_justified_checkpoint`.
+    WrongJustifiedCheckpoint {
+        state: Checkpoint,
+        attestation: Checkpoint,
         is_current: bool,
     },
     /// Attestation crosslink root does not match the state crosslink root for the attestations
     /// slot.
-    BadPreviousCrosslink,
+    BadParentCrosslinkHash,
+    /// Attestation crosslink start epoch does not match the end epoch of the state crosslink.
+    BadParentCrosslinkStartEpoch,
+    /// Attestation crosslink end epoch does not match the expected value.
+    BadParentCrosslinkEndEpoch,
     /// The custody bitfield has some bits set `true`. This is not allowed in phase 0.
     CustodyBitfieldHasSetBits,
     /// There are no set bits on the attestation -- an attestation must be signed by at least one
@@ -164,6 +172,8 @@ pub enum AttestationInvalid {
         committee_len: usize,
         bitfield_len: usize,
     },
+    /// The bits set in the custody bitfield are not a subset of those set in the aggregation bits.
+    CustodyBitfieldNotSubset,
     /// There was no known committee in this `epoch` for the given shard and slot.
     NoCommitteeForShard { shard: u64, slot: Slot },
     /// The validator index was unknown.
@@ -186,6 +196,12 @@ impl From<IndexedAttestationValidationError> for AttestationValidationError {
     }
 }

+impl From<ssz_types::Error> for AttestationValidationError {
+    fn from(error: ssz_types::Error) -> Self {
+        Self::from(IndexedAttestationValidationError::from(error))
+    }
+}
+
 /*
  * `AttesterSlashing` Validation
  */
@@ -239,15 +255,17 @@ pub enum IndexedAttestationInvalid {
     CustodyBitValidatorsIntersect,
     /// The custody bitfield has some bits set `true`. This is not allowed in phase 0.
     CustodyBitfieldHasSetBits,
+    /// The custody bitfield violated a type-level bound.
+    CustodyBitfieldBoundsError(ssz_types::Error),
     /// No validator indices were specified.
     NoValidatorIndices,
     /// The number of indices exceeds the global maximum.
     ///
     /// (max_indices, indices_given)
-    MaxIndicesExceed(u64, usize),
+    MaxIndicesExceed(usize, usize),
     /// The validator indices were not in increasing order.
     ///
-    /// The error occured between the given `index` and `index + 1`
+    /// The error occurred between the given `index` and `index + 1`
     BadValidatorIndicesOrdering(usize),
     /// The validator index is unknown. One cannot slash one who does not exist.
     UnknownValidator(u64),
@@ -263,6 +281,14 @@ impl Into<IndexedAttestationInvalid> for IndexedAttestationValidationError {
     }
 }

+impl From<ssz_types::Error> for IndexedAttestationValidationError {
+    fn from(error: ssz_types::Error) -> Self {
+        IndexedAttestationValidationError::Invalid(
+            IndexedAttestationInvalid::CustodyBitfieldBoundsError(error),
+        )
+    }
+}
+
 impl_into_with_index_without_beacon_error!(
     IndexedAttestationValidationError,
     IndexedAttestationInvalid
@@ -323,6 +349,8 @@ pub enum DepositInvalid {
     BadIndex { state: u64, deposit: u64 },
     /// The signature (proof-of-possession) does not match the given pubkey.
     BadSignature,
+    /// The signature does not represent a valid BLS signature.
+    BadSignatureBytes,
     /// The specified `branch` and `index` did not form a valid proof that the deposit is included
     /// in the eth1 deposit root.
     BadMerkleProof,
@@ -356,7 +384,10 @@ pub enum ExitInvalid {
     /// The exit is for a future epoch.
     FutureEpoch { state: Epoch, exit: Epoch },
     /// The validator has not been active for long enough.
-    TooYoungToLeave { lifespan: Epoch, expected: u64 },
+    TooYoungToExit {
+        current_epoch: Epoch,
+        earliest_exit_epoch: Epoch,
+    },
     /// The exit signature was not signed by the validator.
     BadSignature,
 }
@@ -413,7 +444,7 @@ pub enum TransferInvalid {
     /// The `transfer.from` validator has been activated and is not withdrawable.
     ///
     /// (from_validator)
-    FromValidatorIneligableForTransfer(u64),
+    FromValidatorIneligibleForTransfer(u64),
     /// The validators withdrawal credentials do not match `transfer.pubkey`.
     ///
     /// (state_credentials, transfer_pubkey_credentials)
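The v0.8 spec folds each (epoch, root) pair into a single `Checkpoint`, so one equality test replaces the two old `WrongJustifiedEpoch`/`WrongJustifiedRoot` variants. A minimal sketch with a stand-in `Checkpoint` type:

```rust
// Hedged sketch: collapsing separate epoch/root comparisons into one
// Checkpoint equality, mirroring the WrongJustifiedCheckpoint change above.
#[derive(Clone, Debug, PartialEq)]
struct Checkpoint {
    epoch: u64,
    root: [u8; 32],
}

fn justified_matches(state_cp: &Checkpoint, att_source: &Checkpoint) -> bool {
    // One comparison now covers what the epoch and root variants used to
    // report separately.
    state_cp == att_source
}
```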
@@ -8,60 +8,58 @@ use types::*;

 /// Verify an `IndexedAttestation`.
 ///
-/// Spec v0.6.3
-pub fn verify_indexed_attestation<T: EthSpec>(
+/// Spec v0.8.0
+pub fn is_valid_indexed_attestation<T: EthSpec>(
     state: &BeaconState<T>,
-    indexed_attestation: &IndexedAttestation,
+    indexed_attestation: &IndexedAttestation<T>,
     spec: &ChainSpec,
 ) -> Result<(), Error> {
-    verify_indexed_attestation_parametric(state, indexed_attestation, spec, true)
+    is_valid_indexed_attestation_parametric(state, indexed_attestation, spec, true)
 }

 /// Verify but don't check the signature.
 ///
-/// Spec v0.6.3
-pub fn verify_indexed_attestation_without_signature<T: EthSpec>(
+/// Spec v0.8.0
+pub fn is_valid_indexed_attestation_without_signature<T: EthSpec>(
     state: &BeaconState<T>,
-    indexed_attestation: &IndexedAttestation,
+    indexed_attestation: &IndexedAttestation<T>,
     spec: &ChainSpec,
 ) -> Result<(), Error> {
-    verify_indexed_attestation_parametric(state, indexed_attestation, spec, false)
+    is_valid_indexed_attestation_parametric(state, indexed_attestation, spec, false)
 }

 /// Optionally check the signature.
 ///
-/// Spec v0.6.3
-fn verify_indexed_attestation_parametric<T: EthSpec>(
+/// Spec v0.8.0
+fn is_valid_indexed_attestation_parametric<T: EthSpec>(
     state: &BeaconState<T>,
-    indexed_attestation: &IndexedAttestation,
+    indexed_attestation: &IndexedAttestation<T>,
     spec: &ChainSpec,
     verify_signature: bool,
 ) -> Result<(), Error> {
-    let custody_bit_0_indices = &indexed_attestation.custody_bit_0_indices;
-    let custody_bit_1_indices = &indexed_attestation.custody_bit_1_indices;
+    let bit_0_indices = &indexed_attestation.custody_bit_0_indices;
+    let bit_1_indices = &indexed_attestation.custody_bit_1_indices;

-    // Ensure no duplicate indices across custody bits
+    // Verify no index has custody bit equal to 1 [to be removed in phase 1]
+    verify!(bit_1_indices.is_empty(), Invalid::CustodyBitfieldHasSetBits);
+
+    // Verify max number of indices
+    let total_indices = bit_0_indices.len() + bit_1_indices.len();
+    verify!(
+        total_indices <= T::MaxValidatorsPerCommittee::to_usize(),
+        Invalid::MaxIndicesExceed(T::MaxValidatorsPerCommittee::to_usize(), total_indices)
+    );
+
+    // Verify index sets are disjoint
     let custody_bit_intersection: HashSet<&u64> =
-        &HashSet::from_iter(custody_bit_0_indices) & &HashSet::from_iter(custody_bit_1_indices);
+        &HashSet::from_iter(bit_0_indices.iter()) & &HashSet::from_iter(bit_1_indices.iter());
     verify!(
         custody_bit_intersection.is_empty(),
         Invalid::CustodyBitValidatorsIntersect
     );

-    // Check that nobody signed with custody bit 1 (to be removed in phase 1)
-    if !custody_bit_1_indices.is_empty() {
-        invalid!(Invalid::CustodyBitfieldHasSetBits);
-    }
-
-    let total_indices = custody_bit_0_indices.len() + custody_bit_1_indices.len();
-    verify!(1 <= total_indices, Invalid::NoValidatorIndices);
-    verify!(
-        total_indices as u64 <= spec.max_indices_per_attestation,
-        Invalid::MaxIndicesExceed(spec.max_indices_per_attestation, total_indices)
-    );
-
     // Check that both vectors of indices are sorted
-    let check_sorted = |list: &Vec<u64>| {
+    let check_sorted = |list: &[u64]| -> Result<(), Error> {
         list.windows(2).enumerate().try_for_each(|(i, pair)| {
             if pair[0] >= pair[1] {
                 invalid!(Invalid::BadValidatorIndicesOrdering(i));
@@ -71,11 +69,11 @@ fn is_valid_indexed_attestation_parametric<T: EthSpec>(
         })?;
         Ok(())
     };
-    check_sorted(custody_bit_0_indices)?;
-    check_sorted(custody_bit_1_indices)?;
+    check_sorted(&bit_0_indices)?;
+    check_sorted(&bit_1_indices)?;

     if verify_signature {
-        verify_indexed_attestation_signature(state, indexed_attestation, spec)?;
+        is_valid_indexed_attestation_signature(state, indexed_attestation, spec)?;
     }

     Ok(())
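The reordered function now checks three structural invariants before touching signatures: custody bit 1 unused, index count bounded, and the two index sets sorted and disjoint. A hedged sketch of the sorted-and-disjoint part on plain slices:

```rust
// Hedged sketch of the structural checks above, on plain u64 slices.
use std::collections::HashSet;

fn indices_well_formed(bit_0: &[u64], bit_1: &[u64]) -> bool {
    // Strictly increasing implies both sorted and duplicate-free.
    let strictly_increasing = |l: &[u64]| l.windows(2).all(|pair| pair[0] < pair[1]);
    // No validator may appear in both custody sets.
    let disjoint = bit_0
        .iter()
        .collect::<HashSet<_>>()
        .is_disjoint(&bit_1.iter().collect());
    strictly_increasing(bit_0) && strictly_increasing(bit_1) && disjoint
}
```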
@@ -94,7 +92,7 @@ where
         AggregatePublicKey::new(),
         |mut aggregate_pubkey, &validator_idx| {
             state
-                .validator_registry
+                .validators
                 .get(validator_idx as usize)
                 .ok_or_else(|| Error::Invalid(Invalid::UnknownValidator(validator_idx)))
                 .map(|validator| {
@@ -107,10 +105,10 @@ where

 /// Verify the signature of an IndexedAttestation.
 ///
-/// Spec v0.6.3
-fn verify_indexed_attestation_signature<T: EthSpec>(
+/// Spec v0.8.0
+fn is_valid_indexed_attestation_signature<T: EthSpec>(
     state: &BeaconState<T>,
-    indexed_attestation: &IndexedAttestation,
+    indexed_attestation: &IndexedAttestation<T>,
     spec: &ChainSpec,
 ) -> Result<(), Error> {
     let bit_0_pubkey = create_aggregate_pubkey(state, &indexed_attestation.custody_bit_0_indices)?;
@@ -127,20 +125,11 @@ fn is_valid_indexed_attestation_signature<T: EthSpec>(
     }
     .tree_hash_root();

-    let mut messages = vec![];
-    let mut keys = vec![];
-
-    if !indexed_attestation.custody_bit_0_indices.is_empty() {
-        messages.push(&message_0[..]);
-        keys.push(&bit_0_pubkey);
-    }
-    if !indexed_attestation.custody_bit_1_indices.is_empty() {
-        messages.push(&message_1[..]);
-        keys.push(&bit_1_pubkey);
-    }
+    let messages = vec![&message_0[..], &message_1[..]];
+    let keys = vec![&bit_0_pubkey, &bit_1_pubkey];

     let domain = spec.get_domain(
-        indexed_attestation.data.target_epoch,
+        indexed_attestation.data.target.epoch,
         Domain::Attestation,
         &state.fork,
     );
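With both aggregate pubkeys always well-defined (an empty custody set yields an identity aggregate), the conditional pushes collapse into fixed two-element vectors. A minimal sketch of that pairing, with `&str` standing in for `AggregatePublicKey`:

```rust
// Hedged sketch: the fixed two-message shape used above; &str stands in for
// the real AggregatePublicKey type.
fn message_key_pairs<'a>(
    message_0: &'a [u8],
    message_1: &'a [u8],
    bit_0_pubkey: &'a str,
    bit_1_pubkey: &'a str,
) -> (Vec<&'a [u8]>, Vec<&'a str>) {
    // No special-casing of empty custody sets: message[i] is always
    // verified against keys[i].
    (vec![message_0, message_1], vec![bit_0_pubkey, bit_1_pubkey])
}
```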
@@ -51,7 +51,7 @@ fn invalid_parent_block_root()
         Err(BlockProcessingError::Invalid(
             BlockInvalid::ParentBlockRootMismatch {
                 state: Hash256::from_slice(&state.latest_block_header.signed_root()),
-                block: block.previous_block_root
+                block: block.parent_root
             }
         ))
     );
@@ -1,156 +0,0 @@
-use super::errors::{AttestationInvalid as Invalid, AttestationValidationError as Error};
-use crate::common::convert_to_indexed;
-use crate::per_block_processing::{
-    verify_indexed_attestation, verify_indexed_attestation_without_signature,
-};
-use tree_hash::TreeHash;
-use types::*;
-
-/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the
-/// given state.
-///
-/// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity.
-///
-/// Spec v0.6.3
-pub fn validate_attestation<T: EthSpec>(
-    state: &BeaconState<T>,
-    attestation: &Attestation,
-    spec: &ChainSpec,
-) -> Result<(), Error> {
-    validate_attestation_parametric(state, attestation, spec, true, false)
-}
-
-/// Like `validate_attestation` but doesn't run checks which may become true in future states.
-pub fn validate_attestation_time_independent_only<T: EthSpec>(
-    state: &BeaconState<T>,
-    attestation: &Attestation,
-    spec: &ChainSpec,
-) -> Result<(), Error> {
-    validate_attestation_parametric(state, attestation, spec, true, true)
-}
-
-/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the
-/// given state, without validating the aggregate signature.
-///
-/// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity.
-///
-/// Spec v0.6.3
-pub fn validate_attestation_without_signature<T: EthSpec>(
-    state: &BeaconState<T>,
-    attestation: &Attestation,
-    spec: &ChainSpec,
-) -> Result<(), Error> {
-    validate_attestation_parametric(state, attestation, spec, false, false)
-}
-
-/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the
-/// given state, optionally validating the aggregate signature.
-///
-///
-/// Spec v0.6.3
-fn validate_attestation_parametric<T: EthSpec>(
-    state: &BeaconState<T>,
-    attestation: &Attestation,
-    spec: &ChainSpec,
-    verify_signature: bool,
-    time_independent_only: bool,
-) -> Result<(), Error> {
-    let attestation_slot = state.get_attestation_slot(&attestation.data)?;
-
-    // Check attestation slot.
-    verify!(
-        time_independent_only
-            || attestation_slot + spec.min_attestation_inclusion_delay <= state.slot,
-        Invalid::IncludedTooEarly {
-            state: state.slot,
-            delay: spec.min_attestation_inclusion_delay,
-            attestation: attestation_slot
-        }
-    );
-    verify!(
-        state.slot <= attestation_slot + T::slots_per_epoch(),
-        Invalid::IncludedTooLate {
-            state: state.slot,
-            attestation: attestation_slot
-        }
-    );
-
-    // Verify the Casper FFG vote.
-    if !time_independent_only {
-        verify_casper_ffg_vote(attestation, state)?;
-    }
-
-    // Crosslink data root is zero (to be removed in phase 1).
-    verify!(
-        attestation.data.crosslink_data_root == spec.zero_hash,
-        Invalid::ShardBlockRootNotZero
-    );
-
-    // Check signature and bitfields
-    let indexed_attestation = convert_to_indexed(state, attestation)?;
-    if verify_signature {
-        verify_indexed_attestation(state, &indexed_attestation, spec)?;
-    } else {
-        verify_indexed_attestation_without_signature(state, &indexed_attestation, spec)?;
-    }
-
-    Ok(())
-}
-
-/// Check target epoch, source epoch, source root, and source crosslink.
-///
-/// Spec v0.6.3
-fn verify_casper_ffg_vote<T: EthSpec>(
-    attestation: &Attestation,
-    state: &BeaconState<T>,
-) -> Result<(), Error> {
-    let data = &attestation.data;
-    if data.target_epoch == state.current_epoch() {
-        verify!(
-            data.source_epoch == state.current_justified_epoch,
-            Invalid::WrongJustifiedEpoch {
-                state: state.current_justified_epoch,
-                attestation: data.source_epoch,
-                is_current: true,
-            }
-        );
-        verify!(
-            data.source_root == state.current_justified_root,
-            Invalid::WrongJustifiedRoot {
-                state: state.current_justified_root,
-                attestation: data.source_root,
-                is_current: true,
-            }
-        );
-        verify!(
-            data.previous_crosslink_root
-                == Hash256::from_slice(&state.get_current_crosslink(data.shard)?.tree_hash_root()),
-            Invalid::BadPreviousCrosslink
-        );
-    } else if data.target_epoch == state.previous_epoch() {
-        verify!(
-            data.source_epoch == state.previous_justified_epoch,
-            Invalid::WrongJustifiedEpoch {
-                state: state.previous_justified_epoch,
-                attestation: data.source_epoch,
-                is_current: false,
-            }
-        );
-        verify!(
-            data.source_root == state.previous_justified_root,
-            Invalid::WrongJustifiedRoot {
-                state: state.previous_justified_root,
-                attestation: data.source_root,
-                is_current: false,
-            }
-        );
-        verify!(
-            data.previous_crosslink_root
-                == Hash256::from_slice(&state.get_previous_crosslink(data.shard)?.tree_hash_root()),
-            Invalid::BadPreviousCrosslink
-        );
-    } else {
-        invalid!(Invalid::BadTargetEpoch)
-    }
-    Ok(())
-}
@@ -0,0 +1,156 @@
+use super::errors::{AttestationInvalid as Invalid, AttestationValidationError as Error};
+use crate::common::get_indexed_attestation;
+use crate::per_block_processing::{
+    is_valid_indexed_attestation, is_valid_indexed_attestation_without_signature,
+};
+use tree_hash::TreeHash;
+use types::*;
+
+/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the
+/// given state.
+///
+/// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity.
+///
+/// Spec v0.8.0
+pub fn verify_attestation<T: EthSpec>(
+    state: &BeaconState<T>,
+    attestation: &Attestation<T>,
+    spec: &ChainSpec,
+) -> Result<(), Error> {
+    verify_attestation_parametric(state, attestation, spec, true, false)
+}
+
+/// Like `verify_attestation` but doesn't run checks which may become true in future states.
+pub fn verify_attestation_time_independent_only<T: EthSpec>(
+    state: &BeaconState<T>,
+    attestation: &Attestation<T>,
+    spec: &ChainSpec,
+) -> Result<(), Error> {
+    verify_attestation_parametric(state, attestation, spec, true, true)
+}
+
+/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the
+/// given state, without validating the aggregate signature.
+///
+/// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity.
+///
+/// Spec v0.8.0
+pub fn verify_attestation_without_signature<T: EthSpec>(
+    state: &BeaconState<T>,
+    attestation: &Attestation<T>,
+    spec: &ChainSpec,
+) -> Result<(), Error> {
+    verify_attestation_parametric(state, attestation, spec, false, false)
+}
+
+/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the
+/// given state, optionally validating the aggregate signature.
+///
+///
+/// Spec v0.8.0
+fn verify_attestation_parametric<T: EthSpec>(
+    state: &BeaconState<T>,
+    attestation: &Attestation<T>,
+    spec: &ChainSpec,
+    verify_signature: bool,
+    time_independent_only: bool,
+) -> Result<(), Error> {
+    let data = &attestation.data;
+    verify!(
+        data.crosslink.shard < T::ShardCount::to_u64(),
+        Invalid::BadShard
+    );
+
+    // Check attestation slot.
+    let attestation_slot = state.get_attestation_data_slot(&data)?;
+
+    verify!(
+        time_independent_only
+            || attestation_slot + spec.min_attestation_inclusion_delay <= state.slot,
+        Invalid::IncludedTooEarly {
+            state: state.slot,
+            delay: spec.min_attestation_inclusion_delay,
+            attestation: attestation_slot
+        }
+    );
+    verify!(
+        state.slot <= attestation_slot + T::slots_per_epoch(),
+        Invalid::IncludedTooLate {
+            state: state.slot,
+            attestation: attestation_slot
+        }
+    );
+
+    // Verify the Casper FFG vote and crosslink data.
+    if !time_independent_only {
+        let parent_crosslink = verify_casper_ffg_vote(attestation, state)?;
+
+        verify!(
+            data.crosslink.parent_root == Hash256::from_slice(&parent_crosslink.tree_hash_root()),
+            Invalid::BadParentCrosslinkHash
+        );
+        verify!(
+            data.crosslink.start_epoch == parent_crosslink.end_epoch,
+            Invalid::BadParentCrosslinkStartEpoch
+        );
+        verify!(
+            data.crosslink.end_epoch
+                == std::cmp::min(
+                    data.target.epoch,
+                    parent_crosslink.end_epoch + spec.max_epochs_per_crosslink
+                ),
+            Invalid::BadParentCrosslinkEndEpoch
+        );
+    }
+
+    // Crosslink data root is zero (to be removed in phase 1).
+    verify!(
+        attestation.data.crosslink.data_root == Hash256::zero(),
+        Invalid::ShardBlockRootNotZero
+    );
+
+    // Check signature and bitfields
+    let indexed_attestation = get_indexed_attestation(state, attestation)?;
+    if verify_signature {
+        is_valid_indexed_attestation(state, &indexed_attestation, spec)?;
+    } else {
+        is_valid_indexed_attestation_without_signature(state, &indexed_attestation, spec)?;
+    }
+
+    Ok(())
+}
+
+/// Check target epoch and source checkpoint.
+///
+/// Return the parent crosslink for further checks.
+///
+/// Spec v0.8.0
+fn verify_casper_ffg_vote<'a, T: EthSpec>(
+    attestation: &Attestation<T>,
+    state: &'a BeaconState<T>,
+) -> Result<&'a Crosslink, Error> {
+    let data = &attestation.data;
+    if data.target.epoch == state.current_epoch() {
+        verify!(
+            data.source == state.current_justified_checkpoint,
+            Invalid::WrongJustifiedCheckpoint {
+                state: state.current_justified_checkpoint.clone(),
+                attestation: data.source.clone(),
+                is_current: true,
+            }
+        );
+        Ok(state.get_current_crosslink(data.crosslink.shard)?)
+    } else if data.target.epoch == state.previous_epoch() {
+        verify!(
+            data.source == state.previous_justified_checkpoint,
+            Invalid::WrongJustifiedCheckpoint {
+                state: state.previous_justified_checkpoint.clone(),
+                attestation: data.source.clone(),
+                is_current: false,
+            }
+        );
+        Ok(state.get_previous_crosslink(data.crosslink.shard)?)
+    } else {
+        invalid!(Invalid::BadTargetEpoch)
+    }
+}
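The new `verify_casper_ffg_vote` dispatches on the target epoch: the current epoch selects the current justified checkpoint and crosslinks, the previous epoch the previous ones, and anything else is rejected. A sketch of the dispatch with plain `u64` epochs:

```rust
// Hedged sketch of the target-epoch dispatch above, on plain u64 epochs.
enum JustifiedSource {
    Current,
    Previous,
}

fn ffg_source_for(target_epoch: u64, current_epoch: u64) -> Option<JustifiedSource> {
    if target_epoch == current_epoch {
        Some(JustifiedSource::Current)
    } else if target_epoch + 1 == current_epoch {
        Some(JustifiedSource::Previous)
    } else {
        None // rejected as Invalid::BadTargetEpoch in the real code
    }
}
```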
@@ -1,5 +1,5 @@
 use super::errors::{AttesterSlashingInvalid as Invalid, AttesterSlashingValidationError as Error};
-use super::verify_indexed_attestation::verify_indexed_attestation;
+use super::is_valid_indexed_attestation::is_valid_indexed_attestation;
 use std::collections::BTreeSet;
 use types::*;

@@ -8,10 +8,10 @@ use types::*;
 ///
 /// Returns `Ok(())` if the `AttesterSlashing` is valid, otherwise indicates the reason for invalidity.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.1
 pub fn verify_attester_slashing<T: EthSpec>(
     state: &BeaconState<T>,
-    attester_slashing: &AttesterSlashing,
+    attester_slashing: &AttesterSlashing<T>,
     should_verify_indexed_attestations: bool,
     spec: &ChainSpec,
 ) -> Result<(), Error> {
@@ -26,9 +26,9 @@ pub fn verify_attester_slashing<T: EthSpec>(
     );

     if should_verify_indexed_attestations {
-        verify_indexed_attestation(state, &attestation_1, spec)
+        is_valid_indexed_attestation(state, &attestation_1, spec)
             .map_err(|e| Error::Invalid(Invalid::IndexedAttestation1Invalid(e.into())))?;
-        verify_indexed_attestation(state, &attestation_2, spec)
+        is_valid_indexed_attestation(state, &attestation_2, spec)
             .map_err(|e| Error::Invalid(Invalid::IndexedAttestation2Invalid(e.into())))?;
     }

@@ -39,10 +39,10 @@ pub fn verify_attester_slashing<T: EthSpec>(
 ///
 /// Returns Ok(indices) if `indices.len() > 0`.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.1
 pub fn get_slashable_indices<T: EthSpec>(
     state: &BeaconState<T>,
-    attester_slashing: &AttesterSlashing,
+    attester_slashing: &AttesterSlashing<T>,
 ) -> Result<Vec<u64>, Error> {
     get_slashable_indices_modular(state, attester_slashing, |_, validator| {
         validator.is_slashable_at(state.current_epoch())
@@ -53,7 +53,7 @@ pub fn get_slashable_indices<T: EthSpec>(
 /// for determining whether a given validator should be considered slashable.
 pub fn get_slashable_indices_modular<F, T: EthSpec>(
     state: &BeaconState<T>,
-    attester_slashing: &AttesterSlashing,
+    attester_slashing: &AttesterSlashing<T>,
     is_slashable: F,
 ) -> Result<Vec<u64>, Error>
 where
@@ -79,7 +79,7 @@ where

     for index in &attesting_indices_1 & &attesting_indices_2 {
         let validator = state
-            .validator_registry
+            .validators
             .get(index as usize)
             .ok_or_else(|| Error::Invalid(Invalid::UnknownValidator(index)))?;

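`get_slashable_indices_modular` slashes only validators that appear in both conflicting attestations; the `&attesting_indices_1 & &attesting_indices_2` expression is a `BTreeSet` intersection. A minimal sketch:

```rust
// Hedged sketch: the double-vote intersection used above, on plain BTreeSets
// of validator indices.
use std::collections::BTreeSet;

fn doubly_attesting(a: &BTreeSet<u64>, b: &BTreeSet<u64>) -> BTreeSet<u64> {
    // `&a & &b` is set intersection; only validators that signed both
    // conflicting attestations are candidates for slashing.
    a & b
}
```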
@ -1,47 +1,34 @@
|
|||||||
use super::errors::{DepositInvalid as Invalid, DepositValidationError as Error};
|
use super::errors::{DepositInvalid as Invalid, DepositValidationError as Error};
|
||||||
use merkle_proof::verify_merkle_proof;
|
use merkle_proof::verify_merkle_proof;
|
||||||
|
use std::convert::TryInto;
|
||||||
use tree_hash::{SignedRoot, TreeHash};
|
use tree_hash::{SignedRoot, TreeHash};
|
||||||
use types::*;
|
use types::*;
|
||||||
|
|
||||||
/// Verify `Deposit.pubkey` signed `Deposit.signature`.
|
/// Verify `Deposit.pubkey` signed `Deposit.signature`.
|
||||||
///
|
///
|
||||||
/// Spec v0.6.3
|
/// Spec v0.8.0
|
||||||
pub fn verify_deposit_signature<T: EthSpec>(
|
pub fn verify_deposit_signature<T: EthSpec>(
|
||||||
state: &BeaconState<T>,
|
state: &BeaconState<T>,
|
||||||
deposit: &Deposit,
|
deposit: &Deposit,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
|
pubkey: &PublicKey,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
// Note: Deposits are valid across forks, thus the deposit domain is computed
|
||||||
|
// with the fork zeroed.
|
||||||
|
let domain = spec.get_domain(state.current_epoch(), Domain::Deposit, &Fork::default());
|
||||||
|
let signature: Signature = (&deposit.data.signature)
|
||||||
|
.try_into()
|
||||||
|
.map_err(|_| Error::Invalid(Invalid::BadSignatureBytes))?;
|
||||||
|
|
||||||
verify!(
|
verify!(
|
||||||
deposit.data.signature.verify(
|
signature.verify(&deposit.data.signed_root(), domain, pubkey),
|
||||||
&deposit.data.signed_root(),
|
|
||||||
spec.get_domain(state.current_epoch(), Domain::Deposit, &state.fork),
|
|
||||||
&deposit.data.pubkey,
|
|
||||||
),
|
|
||||||
Invalid::BadSignature
|
Invalid::BadSignature
|
||||||
);
|
);
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
 
-/// Verify that the `Deposit` index is correct.
-///
-/// Spec v0.6.3
-pub fn verify_deposit_index<T: EthSpec>(
-    state: &BeaconState<T>,
-    deposit: &Deposit,
-) -> Result<(), Error> {
-    verify!(
-        deposit.index == state.deposit_index,
-        Invalid::BadIndex {
-            state: state.deposit_index,
-            deposit: deposit.index
-        }
-    );
-
-    Ok(())
-}
-
-/// Returns a `Some(validator index)` if a pubkey already exists in the `validator_registry`,
+/// Returns a `Some(validator index)` if a pubkey already exists in the `validators`,
 /// otherwise returns `None`.
 ///
 /// ## Errors
@@ -49,18 +36,22 @@ pub fn verify_deposit_index<T: EthSpec>(
 /// Errors if the state's `pubkey_cache` is not current.
 pub fn get_existing_validator_index<T: EthSpec>(
     state: &BeaconState<T>,
-    deposit: &Deposit,
+    pub_key: &PublicKey,
 ) -> Result<Option<u64>, Error> {
-    let validator_index = state.get_validator_index(&deposit.data.pubkey)?;
+    let validator_index = state.get_validator_index(pub_key)?;
     Ok(validator_index.map(|idx| idx as u64))
 }
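The removed `verify_deposit_index` check and the new `pub_key` parameter suggest the caller now decompresses the pubkey once and then decides between topping up an existing validator and creating a new one. A hedged sketch of that calling pattern; `top_up` and `create_validator` are hypothetical helpers, not functions from this crate:

```rust
// Hedged sketch of how a caller might use the result of
// `get_existing_validator_index` during deposit processing.
fn apply_deposit(existing_index: Option<u64>, amount_gwei: u64) {
    match existing_index {
        // The pubkey is already in `state.validators`: treat as a balance top-up.
        Some(index) => top_up(index, amount_gwei),
        // Unknown pubkey: append a new validator record.
        None => create_validator(amount_gwei),
    }
}

fn top_up(index: u64, amount_gwei: u64) {
    println!("top up validator {} by {} Gwei", index, amount_gwei);
}

fn create_validator(amount_gwei: u64) {
    println!("create validator with {} Gwei", amount_gwei);
}

fn main() {
    apply_deposit(Some(42), 32_000_000_000);
    apply_deposit(None, 32_000_000_000);
}
```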
 
 /// Verify that a deposit is included in the state's eth1 deposit root.
 ///
-/// Spec v0.6.3
+/// The deposit index is provided as a parameter so we can check proofs
+/// before they're due to be processed, and in parallel.
+///
+/// Spec v0.8.0
 pub fn verify_deposit_merkle_proof<T: EthSpec>(
     state: &BeaconState<T>,
     deposit: &Deposit,
+    deposit_index: u64,
     spec: &ChainSpec,
 ) -> Result<(), Error> {
     let leaf = deposit.data.tree_hash_root();
@@ -69,9 +60,9 @@ pub fn verify_deposit_merkle_proof<T: EthSpec>(
         verify_merkle_proof(
             Hash256::from_slice(&leaf),
             &deposit.proof[..],
-            spec.deposit_contract_tree_depth as usize,
-            deposit.index as usize,
-            state.latest_eth1_data.deposit_root,
+            spec.deposit_contract_tree_depth as usize + 1,
+            deposit_index as usize,
+            state.eth1_data.deposit_root,
         ),
         Invalid::BadMerkleProof
    );
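The proof length changes from `deposit_contract_tree_depth` to `deposit_contract_tree_depth + 1` because, as of spec v0.8, the deposit root mixes the deposit count into the tree as one extra hashing level, so every branch carries one extra node. A toy sketch of length-checked branch verification (the hash is a stand-in, not the deposit contract's real SHA-256 pairing):

```rust
// Toy branch verification showing why callers pass `depth + 1`.
fn hash_concat(a: &[u8; 32], b: &[u8; 32]) -> [u8; 32] {
    let mut out = [0u8; 32];
    for i in 0..32 {
        out[i] = a[i] ^ b[i].rotate_left(1); // illustration only, not a real hash
    }
    out
}

fn verify_branch(leaf: [u8; 32], branch: &[[u8; 32]], depth: usize, index: usize, root: [u8; 32]) -> bool {
    // Callers pass `deposit_contract_tree_depth + 1` here, matching the extra
    // length mix-in node appended to each proof.
    if branch.len() != depth {
        return false;
    }
    let mut node = leaf;
    for (height, sibling) in branch.iter().enumerate() {
        node = if (index >> height) & 1 == 1 {
            hash_concat(sibling, &node)
        } else {
            hash_concat(&node, sibling)
        };
    }
    node == root
}

fn main() {
    let leaf = [7u8; 32];
    let sibling = [9u8; 32];
    let length_mixin = [1u8; 32];
    // Depth-1 tree plus the length mix-in => a branch of 1 + 1 = 2 nodes.
    let root = hash_concat(&hash_concat(&leaf, &sibling), &length_mixin);
    assert!(verify_branch(leaf, &[sibling, length_mixin], 2, 0, root));
}
```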
@@ -7,7 +7,7 @@ use types::*;
 ///
 /// Returns `Ok(())` if the `Exit` is valid, otherwise indicates the reason for invalidity.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.0
 pub fn verify_exit<T: EthSpec>(
     state: &BeaconState<T>,
     exit: &VoluntaryExit,
@@ -18,7 +18,7 @@ pub fn verify_exit<T: EthSpec>(
 
 /// Like `verify_exit` but doesn't run checks which may become true in future states.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.0
 pub fn verify_exit_time_independent_only<T: EthSpec>(
     state: &BeaconState<T>,
     exit: &VoluntaryExit,
@@ -29,7 +29,7 @@ pub fn verify_exit_time_independent_only<T: EthSpec>(
 
 /// Parametric version of `verify_exit` that skips some checks if `time_independent_only` is true.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.0
 fn verify_exit_parametric<T: EthSpec>(
     state: &BeaconState<T>,
     exit: &VoluntaryExit,
@@ -37,7 +37,7 @@ fn verify_exit_parametric<T: EthSpec>(
     time_independent_only: bool,
 ) -> Result<(), Error> {
     let validator = state
-        .validator_registry
+        .validators
         .get(exit.validator_index as usize)
         .ok_or_else(|| Error::Invalid(Invalid::ValidatorUnknown(exit.validator_index)))?;
 
@@ -63,12 +63,11 @@ fn verify_exit_parametric<T: EthSpec>(
     );
 
     // Verify the validator has been active long enough.
-    let lifespan = state.current_epoch() - validator.activation_epoch;
     verify!(
-        lifespan >= spec.persistent_committee_period,
-        Invalid::TooYoungToLeave {
-            lifespan,
-            expected: spec.persistent_committee_period,
+        state.current_epoch() >= validator.activation_epoch + spec.persistent_committee_period,
+        Invalid::TooYoungToExit {
+            current_epoch: state.current_epoch(),
+            earliest_exit_epoch: validator.activation_epoch + spec.persistent_committee_period,
         }
     );
 
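The old form computed `state.current_epoch() - validator.activation_epoch` first, which underflows whenever the activation epoch lies in the future (e.g. a not-yet-activated validator whose activation epoch is still `FAR_FUTURE_EPOCH`). Rewriting the comparison as an addition on the right-hand side avoids the subtraction entirely. A sketch with plain `u64` epochs; 2048 is the spec v0.8 `persistent_committee_period`:

```rust
const PERSISTENT_COMMITTEE_PERIOD: u64 = 2048;

fn may_exit(current_epoch: u64, activation_epoch: u64) -> bool {
    // Addition on the right-hand side keeps the comparison well-defined for
    // future activation epochs (saturating to stay safe at the extreme).
    current_epoch >= activation_epoch.saturating_add(PERSISTENT_COMMITTEE_PERIOD)
}

fn main() {
    assert!(may_exit(4096, 0));
    // `current_epoch - activation_epoch` would underflow (and panic in debug
    // builds) here; the rewritten form simply returns false.
    assert!(!may_exit(100, std::u64::MAX));
}
```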
@@ -7,19 +7,20 @@ use types::*;
 ///
 /// Returns `Ok(())` if the `ProposerSlashing` is valid, otherwise indicates the reason for invalidity.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.0
 pub fn verify_proposer_slashing<T: EthSpec>(
     proposer_slashing: &ProposerSlashing,
     state: &BeaconState<T>,
     spec: &ChainSpec,
 ) -> Result<(), Error> {
     let proposer = state
-        .validator_registry
+        .validators
         .get(proposer_slashing.proposer_index as usize)
         .ok_or_else(|| {
             Error::Invalid(Invalid::ProposerUnknown(proposer_slashing.proposer_index))
         })?;
 
+    // Verify that the epoch is the same
     verify!(
         proposer_slashing.header_1.slot.epoch(T::slots_per_epoch())
             == proposer_slashing.header_2.slot.epoch(T::slots_per_epoch()),
@@ -29,11 +30,13 @@ pub fn verify_proposer_slashing<T: EthSpec>(
         )
     );
 
+    // But the headers are different
     verify!(
         proposer_slashing.header_1 != proposer_slashing.header_2,
         Invalid::ProposalsIdentical
     );
 
+    // Check proposer is slashable
     verify!(
         proposer.is_slashable_at(state.current_epoch()),
         Invalid::ProposerNotSlashable(proposer_slashing.proposer_index)
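The three new comments name the structural requirements for a proposer slashing: both headers fall in the same epoch, the headers are distinct, and the proposer is still slashable. A condensed restatement with a simplified header type standing in for `BeaconBlockHeader` and an illustrative `SLOTS_PER_EPOCH`:

```rust
#[derive(PartialEq)]
struct Header {
    slot: u64,
    body_root: [u8; 32],
}

const SLOTS_PER_EPOCH: u64 = 64; // illustrative value

fn proposer_slashing_structure_ok(h1: &Header, h2: &Header, proposer_slashable: bool) -> bool {
    // 1. Both headers fall in the same epoch...
    let same_epoch = h1.slot / SLOTS_PER_EPOCH == h2.slot / SLOTS_PER_EPOCH;
    // 2. ...but are not the identical proposal...
    let distinct = h1 != h2;
    // 3. ...and the proposer is still slashable at the current epoch.
    same_epoch && distinct && proposer_slashable
}

fn main() {
    let h1 = Header { slot: 65, body_root: [1; 32] };
    let h2 = Header { slot: 66, body_root: [2; 32] };
    assert!(proposer_slashing_structure_ok(&h1, &h2, true));
    assert!(!proposer_slashing_structure_ok(&h1, &h1, true)); // identical headers
}
```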
@@ -65,7 +68,7 @@ pub fn verify_proposer_slashing<T: EthSpec>(
 ///
 /// Returns `true` if the signature is valid.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.0
 fn verify_header_signature<T: EthSpec>(
     header: &BeaconBlockHeader,
     pubkey: &PublicKey,
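`verify_header_signature`'s body is elided in this diff. For context, a v0.8-style domain packs a 4-byte domain type together with a 4-byte fork version into a little-endian integer; the sketch below illustrates only that packing, and the constant names come from the spec rather than this file:

```rust
fn compute_domain(domain_type: u32, fork_version: [u8; 4]) -> u64 {
    // Little-endian concatenation of the domain type and the fork version.
    let mut bytes = [0u8; 8];
    bytes[..4].copy_from_slice(&domain_type.to_le_bytes());
    bytes[4..].copy_from_slice(&fork_version);
    u64::from_le_bytes(bytes)
}

fn main() {
    const DOMAIN_BEACON_PROPOSER: u32 = 0; // spec v0.8 value
    let domain = compute_domain(DOMAIN_BEACON_PROPOSER, [0, 0, 0, 0]);
    println!("proposer domain: {:#018x}", domain);
}
```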
@@ -8,7 +8,7 @@ use types::*;
 ///
 /// Returns `Ok(())` if the `Transfer` is valid, otherwise indicates the reason for invalidity.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.0
 pub fn verify_transfer<T: EthSpec>(
     state: &BeaconState<T>,
     transfer: &Transfer,
@@ -19,7 +19,7 @@ pub fn verify_transfer<T: EthSpec>(
 
 /// Like `verify_transfer` but doesn't run checks which may become true in future states.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.0
 pub fn verify_transfer_time_independent_only<T: EthSpec>(
     state: &BeaconState<T>,
     transfer: &Transfer,
@@ -37,7 +37,7 @@ pub fn verify_transfer_time_independent_only<T: EthSpec>(
 /// present or future.
 /// - Validator transfer eligibility (e.g., is withdrawable)
 ///
-/// Spec v0.6.3
+/// Spec v0.8.0
 fn verify_transfer_parametric<T: EthSpec>(
     state: &BeaconState<T>,
     transfer: &Transfer,
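As with the exit functions above, `verify_transfer` and `verify_transfer_time_independent_only` share one parametric implementation, with a flag that disables checks which may only become true in future states (useful when admitting operations to a pool ahead of inclusion). A minimal sketch of the pattern with a hypothetical operation type:

```rust
struct Op {
    expiry_slot: u64,
    well_formed: bool,
}

fn verify_parametric(op: &Op, current_slot: u64, time_independent_only: bool) -> Result<(), String> {
    // Structural checks always run.
    if !op.well_formed {
        return Err("malformed operation".to_string());
    }
    // Time-dependent checks are skipped when pre-validating ops destined for
    // a *future* state (e.g. while they sit in an op pool).
    if !time_independent_only && current_slot > op.expiry_slot {
        return Err("operation expired".to_string());
    }
    Ok(())
}

fn main() {
    let op = Op { expiry_slot: 10, well_formed: true };
    assert!(verify_parametric(&op, 20, true).is_ok()); // pool admission
    assert!(verify_parametric(&op, 20, false).is_err()); // block inclusion
}
```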
@@ -62,8 +62,8 @@ fn verify_transfer_parametric<T: EthSpec>(
 
     // Verify the sender has adequate balance.
     verify!(
-        time_independent_only || sender_balance >= transfer.amount,
-        Invalid::FromBalanceInsufficient(transfer.amount, sender_balance)
+        time_independent_only || sender_balance >= total_amount,
+        Invalid::FromBalanceInsufficient(total_amount, sender_balance)
     );
 
     // Verify sender balance will not be "dust" (i.e., greater than zero but less than the minimum deposit
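Comparing the balance against `total_amount` (the transfer's `amount` plus its `fee`, computed earlier in the function) closes the gap where a sender could cover the amount but not the fee. Since balances are `u64` Gwei values and the surrounding docs promise overflow checking, a hedged sketch with `checked_add`:

```rust
fn sender_can_afford(sender_balance: u64, amount: u64, fee: u64) -> bool {
    match amount.checked_add(fee) {
        Some(total_amount) => sender_balance >= total_amount,
        // Overflow means the requested transfer is certainly unaffordable.
        None => false,
    }
}

fn main() {
    assert!(sender_can_afford(32_000_000_000, 1_000_000_000, 10_000));
    assert!(!sender_can_afford(1_000_000_000, 1_000_000_000, 1)); // fee not covered
}
```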
@@ -97,24 +97,22 @@ fn verify_transfer_parametric<T: EthSpec>(
 
     // Load the sender `Validator` record from the state.
     let sender_validator = state
-        .validator_registry
+        .validators
         .get(transfer.sender as usize)
         .ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.sender)))?;
 
-    let epoch = state.slot.epoch(T::slots_per_epoch());
-
     // Ensure one of the following is met:
     //
     // - Time dependent checks are being ignored.
-    // - The sender has not been activated.
+    // - The sender has never been eligible for activation.
     // - The sender is withdrawable at the state's epoch.
     // - The transfer will not reduce the sender below the max effective balance.
     verify!(
         time_independent_only
             || sender_validator.activation_eligibility_epoch == spec.far_future_epoch
-            || sender_validator.is_withdrawable_at(epoch)
+            || sender_validator.is_withdrawable_at(state.current_epoch())
             || total_amount + spec.max_effective_balance <= sender_balance,
-        Invalid::FromValidatorIneligableForTransfer(transfer.sender)
+        Invalid::FromValidatorIneligibleForTransfer(transfer.sender)
     );
 
     // Ensure the withdrawal credentials generated from the sender's pubkey match those stored in
@@ -154,7 +152,7 @@ fn verify_transfer_parametric<T: EthSpec>(
 ///
 /// Does not check that the transfer is valid, however checks for overflow in all actions.
 ///
-/// Spec v0.6.3
+/// Spec v0.8.0
 pub fn execute_transfer<T: EthSpec>(
     state: &mut BeaconState<T>,
     transfer: &Transfer,
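The eligibility disjunction reads in isolation: the transfer is acceptable if time-dependent checks are skipped, the sender was never eligible for activation, the sender is withdrawable at the current epoch, or enough balance remains to keep the max effective balance intact. A compact restatement with plain `u64` values (`FAR_FUTURE_EPOCH` uses the spec's 2^64 - 1 sentinel):

```rust
const FAR_FUTURE_EPOCH: u64 = std::u64::MAX;

fn sender_eligible(
    time_independent_only: bool,
    activation_eligibility_epoch: u64,
    withdrawable_epoch: u64,
    current_epoch: u64,
    total_amount: u64,
    max_effective_balance: u64,
    sender_balance: u64,
) -> bool {
    time_independent_only
        // Never eligible for activation: the deposit can move freely.
        || activation_eligibility_epoch == FAR_FUTURE_EPOCH
        // Fully withdrawable at the current epoch.
        || current_epoch >= withdrawable_epoch
        // The transfer leaves at least the max effective balance behind
        // (checked addition, matching the "overflow in all actions" promise).
        || total_amount
            .checked_add(max_effective_balance)
            .map_or(false, |needed| needed <= sender_balance)
}

fn main() {
    assert!(sender_eligible(false, FAR_FUTURE_EPOCH, 100, 5, 1, 32, 40));
    assert!(!sender_eligible(false, 0, 100, 5, 10, 32, 40));
}
```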
Some files were not shown because too many files have changed in this diff.