Merge branch 'master' into update-readme

commit 15ec3fe390
@@ -154,7 +154,6 @@ If you'd like some background on Sigma Prime, please see the [Lighthouse Update
 - [`validator_client/`](validator_client/): the "Validator Client" binary and crates exclusively
   associated with it.
-
 
 ## Contributing
 
 **Lighthouse welcomes contributors.**

@@ -13,3 +13,4 @@ slog-async = "^2.3.0"
 validator_client = { path = "../validator_client" }
 types = { path = "../eth2/types" }
 eth2_config = { path = "../eth2/utils/eth2_config" }
+dirs = "2.0.1"

@@ -1,12 +1,12 @@
 use bls::Keypair;
 use clap::{App, Arg, SubCommand};
-use eth2_config::get_data_dir;
 use slog::{crit, debug, info, o, Drain};
+use std::fs;
 use std::path::PathBuf;
 use types::test_utils::generate_deterministic_keypair;
 use validator_client::Config as ValidatorClientConfig;
 
-pub const DEFAULT_DATA_DIR: &str = ".lighthouse-account-manager";
+pub const DEFAULT_DATA_DIR: &str = ".lighthouse-validator";
 pub const CLIENT_CONFIG_FILENAME: &str = "account-manager.toml";
 
 fn main() {

@@ -61,13 +61,33 @@ fn main() {
         )
         .get_matches();
 
-    let data_dir = match get_data_dir(&matches, PathBuf::from(DEFAULT_DATA_DIR)) {
-        Ok(dir) => dir,
-        Err(e) => {
-            crit!(log, "Failed to initialize data dir"; "error" => format!("{:?}", e));
-            return;
-        }
-    };
+    let data_dir = match matches
+        .value_of("datadir")
+        .and_then(|v| Some(PathBuf::from(v)))
+    {
+        Some(v) => v,
+        None => {
+            // use the default
+            let mut default_dir = match dirs::home_dir() {
+                Some(v) => v,
+                None => {
+                    crit!(log, "Failed to find a home directory");
+                    return;
+                }
+            };
+            default_dir.push(DEFAULT_DATA_DIR);
+            PathBuf::from(default_dir)
+        }
+    };
+
+    // create the directory if needed
+    match fs::create_dir_all(&data_dir) {
+        Ok(_) => {}
+        Err(e) => {
+            crit!(log, "Failed to initialize data dir"; "error" => format!("{}", e));
+            return;
+        }
+    }
 
     let mut client_config = ValidatorClientConfig::default();
 
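The account manager now resolves its data directory itself: `--datadir` wins, otherwise it falls back to `$HOME/.lighthouse-validator`, and the directory is created up front. A condensed sketch of the same resolution logic (the clap wiring is assumed, and the real code logs `crit!` and returns instead of propagating `None`):

```rust
// Condensed equivalent of the resolution above; `cli_value` comes from clap.
use std::fs;
use std::path::PathBuf;

fn resolve_data_dir(cli_value: Option<&str>) -> Option<PathBuf> {
    let dir = match cli_value {
        Some(v) => PathBuf::from(v),
        None => dirs::home_dir()?.join(".lighthouse-validator"),
    };
    // Ensure the directory exists before any config file is touched.
    fs::create_dir_all(&dir).ok()?;
    Some(dir)
}
```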
@@ -22,3 +22,5 @@ tokio-timer = "0.2.10"
 futures = "0.1.25"
 exit-future = "0.1.3"
 state_processing = { path = "../eth2/state_processing" }
+env_logger = "0.6.1"
+dirs = "2.0.1"

@@ -20,9 +20,12 @@ serde = "1.0"
 serde_derive = "1.0"
 serde_json = "1.0"
 slot_clock = { path = "../../eth2/utils/slot_clock" }
-ssz = { path = "../../eth2/utils/ssz" }
-ssz_derive = { path = "../../eth2/utils/ssz_derive" }
+eth2_ssz = { path = "../../eth2/utils/ssz" }
+eth2_ssz_derive = { path = "../../eth2/utils/ssz_derive" }
 state_processing = { path = "../../eth2/state_processing" }
 tree_hash = { path = "../../eth2/utils/tree_hash" }
 types = { path = "../../eth2/types" }
 lmd_ghost = { path = "../../eth2/lmd_ghost" }
+
+[dev-dependencies]
+rand = "0.5.5"

@@ -6,7 +6,7 @@ use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY};
 use lmd_ghost::LmdGhost;
 use log::trace;
 use operation_pool::DepositInsertStatus;
-use operation_pool::OperationPool;
+use operation_pool::{OperationPool, PersistedOperationPool};
 use parking_lot::{RwLock, RwLockReadGuard};
 use slot_clock::SlotClock;
 use state_processing::per_block_processing::errors::{

@@ -147,11 +147,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let last_finalized_root = p.canonical_head.beacon_state.finalized_root;
         let last_finalized_block = &p.canonical_head.beacon_block;
 
+        let op_pool = p.op_pool.into_operation_pool(&p.state, &spec);
+
         Ok(Some(BeaconChain {
             spec,
             slot_clock,
             fork_choice: ForkChoice::new(store.clone(), last_finalized_block, last_finalized_root),
-            op_pool: OperationPool::default(),
+            op_pool,
             canonical_head: RwLock::new(p.canonical_head),
             state: RwLock::new(p.state),
             genesis_block_root: p.genesis_block_root,

@@ -164,6 +166,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     pub fn persist(&self) -> Result<(), Error> {
         let p: PersistedBeaconChain<T> = PersistedBeaconChain {
             canonical_head: self.canonical_head.read().clone(),
+            op_pool: PersistedOperationPool::from_operation_pool(&self.op_pool),
             genesis_block_root: self.genesis_block_root,
             state: self.state.read().clone(),
         };

@@ -506,8 +509,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         &self,
         deposit: Deposit,
     ) -> Result<DepositInsertStatus, DepositValidationError> {
-        self.op_pool
-            .insert_deposit(deposit, &*self.state.read(), &self.spec)
+        self.op_pool.insert_deposit(deposit)
     }
 
     /// Accept some exit and queue it for inclusion in an appropriate block.

@@ -1,4 +1,5 @@
 use crate::{BeaconChainTypes, CheckPoint};
+use operation_pool::PersistedOperationPool;
 use ssz::{Decode, Encode};
 use ssz_derive::{Decode, Encode};
 use store::{DBColumn, Error as StoreError, StoreItem};

@@ -10,7 +11,7 @@ pub const BEACON_CHAIN_DB_KEY: &str = "PERSISTEDBEACONCHAINPERSISTEDBEA";
 #[derive(Encode, Decode)]
 pub struct PersistedBeaconChain<T: BeaconChainTypes> {
     pub canonical_head: CheckPoint<T::EthSpec>,
-    // TODO: operations pool.
+    pub op_pool: PersistedOperationPool,
     pub genesis_block_root: Hash256,
     pub state: BeaconState<T::EthSpec>,
 }

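Together, the `op_pool` hunks above mean the operation pool now survives restarts: `persist()` snapshots it into the SSZ-encoded `PersistedBeaconChain`, and start-up rebuilds it against the current state. A rough sketch of the round-trip, using only names visible in this diff (the `chain`, `state` and `spec` bindings are assumed to be in scope):

```rust
// Sketch only; `chain`, `state` and `spec` are assumed, not part of the commit.
use operation_pool::PersistedOperationPool;

// Persist: convert the live pool into its SSZ-encodable form.
let snapshot = PersistedOperationPool::from_operation_pool(&chain.op_pool);

// Restore: expand the snapshot back into a live pool, re-validating its
// contents against the current beacon state and chain spec.
let restored = snapshot.into_operation_pool(&state, &spec);

assert_eq!(chain.op_pool, restored); // mirrors `roundtrip_operation_pool` below
```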
@@ -14,6 +14,8 @@ use types::{
     Hash256, Keypair, RelativeEpoch, SecretKey, Signature, Slot,
 };
 
+pub use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY};
+
 /// Indicates how the `BeaconChainHarness` should produce blocks.
 #[derive(Clone, Copy, Debug)]
 pub enum BlockStrategy {

@@ -68,8 +70,8 @@ where
     E: EthSpec,
 {
     pub chain: BeaconChain<CommonTypes<L, E>>,
-    keypairs: Vec<Keypair>,
-    spec: ChainSpec,
+    pub keypairs: Vec<Keypair>,
+    pub spec: ChainSpec,
 }
 
 impl<L, E> BeaconChainHarness<L, E>

@@ -1,16 +1,21 @@
 #![cfg(not(debug_assertions))]
 
-use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy};
+use beacon_chain::test_utils::{
+    AttestationStrategy, BeaconChainHarness, BlockStrategy, CommonTypes, PersistedBeaconChain,
+    BEACON_CHAIN_DB_KEY,
+};
 use lmd_ghost::ThreadSafeReducedTree;
-use store::MemoryStore;
-use types::{EthSpec, MinimalEthSpec, Slot};
+use rand::Rng;
+use store::{MemoryStore, Store};
+use types::test_utils::{SeedableRng, TestRandom, XorShiftRng};
+use types::{Deposit, EthSpec, Hash256, MinimalEthSpec, Slot};
 
 // Should ideally be divisible by 3.
 pub const VALIDATOR_COUNT: usize = 24;
 
-fn get_harness(
-    validator_count: usize,
-) -> BeaconChainHarness<ThreadSafeReducedTree<MemoryStore, MinimalEthSpec>, MinimalEthSpec> {
+type TestForkChoice = ThreadSafeReducedTree<MemoryStore, MinimalEthSpec>;
+
+fn get_harness(validator_count: usize) -> BeaconChainHarness<TestForkChoice, MinimalEthSpec> {
     let harness = BeaconChainHarness::new(validator_count);
 
     // Move past the zero slot.

@@ -225,3 +230,38 @@ fn does_not_finalize_without_attestation() {
         "no epoch should have been finalized"
     );
 }
+
+#[test]
+fn roundtrip_operation_pool() {
+    let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5;
+
+    let harness = get_harness(VALIDATOR_COUNT);
+
+    // Add some attestations
+    harness.extend_chain(
+        num_blocks_produced as usize,
+        BlockStrategy::OnCanonicalHead,
+        AttestationStrategy::AllValidators,
+    );
+    assert!(harness.chain.op_pool.num_attestations() > 0);
+
+    // Add some deposits
+    let rng = &mut XorShiftRng::from_seed([66; 16]);
+    for _ in 0..rng.gen_range(1, VALIDATOR_COUNT) {
+        harness
+            .chain
+            .process_deposit(Deposit::random_for_test(rng))
+            .unwrap();
+    }
+
+    // TODO: could add some other operations
+    harness.chain.persist().unwrap();
+
+    let key = Hash256::from_slice(&BEACON_CHAIN_DB_KEY.as_bytes());
+    let p: PersistedBeaconChain<CommonTypes<TestForkChoice, MinimalEthSpec>> =
+        harness.chain.store.get(&key).unwrap().unwrap();
+
+    let restored_op_pool = p.op_pool.into_operation_pool(&p.state, &harness.spec);
+
+    assert_eq!(harness.chain.op_pool, restored_op_pool);
+}

@@ -9,17 +9,20 @@ beacon_chain = { path = "../beacon_chain" }
 network = { path = "../network" }
 store = { path = "../store" }
 http_server = { path = "../http_server" }
+eth2-libp2p = { path = "../eth2-libp2p" }
 rpc = { path = "../rpc" }
 prometheus = "^0.6"
 types = { path = "../../eth2/types" }
 tree_hash = { path = "../../eth2/utils/tree_hash" }
 eth2_config = { path = "../../eth2/utils/eth2_config" }
 slot_clock = { path = "../../eth2/utils/slot_clock" }
-serde = "1.0"
+serde = "1.0.93"
 serde_derive = "1.0"
 error-chain = "0.12.0"
-slog = "^2.2.3"
-ssz = { path = "../../eth2/utils/ssz" }
+eth2_ssz = { path = "../../eth2/utils/ssz" }
+slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_debug"] }
+slog-term = "^2.4.0"
+slog-async = "^2.3.0"
 tokio = "0.1.15"
 clap = "2.32.0"
 dirs = "1.0.3"

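This manifest pins `slog` with `max_level_trace` and `release_max_level_debug` (the libp2p crate below uses `release_max_level_trace`). These are compile-time level caps: log macros above the cap compile to no-ops rather than being filtered at runtime. A small illustration of the effect:

```rust
use slog::{debug, trace};

fn log_levels_demo(log: &slog::Logger, peers: usize) {
    // Kept in all builds under either feature set.
    debug!(log, "peer count"; "peers" => peers);
    // With `release_max_level_debug`, this statement is compiled out of
    // release builds entirely; `release_max_level_trace` would keep it.
    trace!(log, "expensive detail"; "peers" => peers);
}
```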
@@ -7,7 +7,7 @@ use std::path::PathBuf;
 
 /// The core configuration of a Lighthouse beacon node.
 #[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct ClientConfig {
+pub struct Config {
     pub data_dir: PathBuf,
     pub db_type: String,
     db_name: String,

@@ -16,7 +16,7 @@ pub struct ClientConfig {
     pub http: HttpServerConfig,
 }
 
-impl Default for ClientConfig {
+impl Default for Config {
     fn default() -> Self {
         Self {
             data_dir: PathBuf::from(".lighthouse"),

@@ -24,14 +24,14 @@ impl Default for ClientConfig {
             db_name: "chain_db".to_string(),
             // Note: there are no default bootnodes specified.
             // Once bootnodes are established, add them here.
-            network: NetworkConfig::new(vec![]),
+            network: NetworkConfig::new(),
             rpc: rpc::RPCConfig::default(),
             http: HttpServerConfig::default(),
         }
     }
 }
 
-impl ClientConfig {
+impl Config {
     /// Returns the path to which the client may initialize an on-disk database.
     pub fn db_path(&self) -> Option<PathBuf> {
         self.data_dir()

@@ -49,7 +49,7 @@ impl ClientConfig {
     ///
     /// Returns an error if arguments are obviously invalid. May succeed even if some values are
     /// invalid.
-    pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> {
+    pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), String> {
         if let Some(dir) = args.value_of("datadir") {
             self.data_dir = PathBuf::from(dir);
         };

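Both this `apply_cli_args` and the network one further down move the error type from `&'static str` to `String`, which lets parse failures embed the offending value. The pattern in isolation (the flag name and 9000 default are taken from this commit's diffs; the function itself is illustrative):

```rust
// Sketch of the owned-error pattern; not part of the commit itself.
fn parse_port(args: &clap::ArgMatches) -> Result<u16, String> {
    match args.value_of("port") {
        // A `String` error can carry the bad input, which a `&'static str` cannot.
        Some(s) => s.parse::<u16>().map_err(|_| format!("Invalid port: {}", s)),
        None => Ok(9000), // default libp2p port used elsewhere in this commit
    }
}
```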
@@ -1,7 +1,7 @@
 extern crate slog;
 
 mod beacon_chain_types;
-mod client_config;
+mod config;
 pub mod error;
 pub mod notifier;
 

@@ -21,7 +21,7 @@ use tokio::timer::Interval;
 pub use beacon_chain::BeaconChainTypes;
 pub use beacon_chain_types::ClientType;
 pub use beacon_chain_types::InitialiseBeaconChain;
-pub use client_config::ClientConfig;
+pub use config::Config as ClientConfig;
 pub use eth2_config::Eth2Config;
 
 /// Main beacon node client service. This provides the connection and initialisation of the clients

@@ -7,15 +7,18 @@ edition = "2018"
 [dependencies]
 beacon_chain = { path = "../beacon_chain" }
 clap = "2.32.0"
-# SigP repository until PR is merged
-libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "b3c32d9a821ae6cc89079499cc6e8a6bab0bffc3" }
+#SigP repository
+libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "be5710bbde69d8c5be732c13ba64239e2f370a7b" }
+enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "be5710bbde69d8c5be732c13ba64239e2f370a7b", features = ["serde"] }
 types = { path = "../../eth2/types" }
 serde = "1.0"
 serde_derive = "1.0"
-ssz = { path = "../../eth2/utils/ssz" }
-ssz_derive = { path = "../../eth2/utils/ssz_derive" }
-slog = "2.4.1"
+eth2_ssz = { path = "../../eth2/utils/ssz" }
+eth2_ssz_derive = { path = "../../eth2/utils/ssz_derive" }
+slog = { version = "^2.4.1" , features = ["max_level_trace", "release_max_level_trace"] }
 version = { path = "../version" }
 tokio = "0.1.16"
 futures = "0.1.25"
 error-chain = "0.12.0"
+tokio-timer = "0.2.10"
+dirs = "2.0.1"

@@ -1,45 +1,72 @@
+use crate::discovery::Discovery;
 use crate::rpc::{RPCEvent, RPCMessage, Rpc};
-use crate::NetworkConfig;
+use crate::{error, NetworkConfig};
+use crate::{Topic, TopicHash};
 use futures::prelude::*;
 use libp2p::{
     core::{
+        identity::Keypair,
         swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess},
-        PublicKey,
     },
+    discv5::Discv5Event,
     gossipsub::{Gossipsub, GossipsubEvent},
-    identify::{protocol::IdentifyInfo, Identify, IdentifyEvent},
-    ping::{Ping, PingEvent},
+    ping::{Ping, PingConfig, PingEvent},
     tokio_io::{AsyncRead, AsyncWrite},
     NetworkBehaviour, PeerId,
 };
-use slog::{debug, o, trace, warn};
+use slog::{o, trace, warn};
 use ssz::{ssz_encode, Decode, DecodeError, Encode};
+use std::num::NonZeroU32;
+use std::time::Duration;
 use types::{Attestation, BeaconBlock};
-use types::{Topic, TopicHash};
 
-/// Builds the network behaviour for the libp2p Swarm.
-/// Implements gossipsub message routing.
+/// Builds the network behaviour that manages the core protocols of eth2.
+/// This core behaviour is managed by `Behaviour` which adds peer management to all core
+/// behaviours.
 #[derive(NetworkBehaviour)]
 #[behaviour(out_event = "BehaviourEvent", poll_method = "poll")]
 pub struct Behaviour<TSubstream: AsyncRead + AsyncWrite> {
     /// The routing pub-sub mechanism for eth2.
     gossipsub: Gossipsub<TSubstream>,
-    // TODO: Add Kademlia for peer discovery
-    /// The events generated by this behaviour to be consumed in the swarm poll.
+    /// The serenity RPC specified in the wire-0 protocol.
     serenity_rpc: Rpc<TSubstream>,
-    /// Allows discovery of IP addresses for peers on the network.
-    identify: Identify<TSubstream>,
     /// Keep regular connection to peers and disconnect if absent.
-    // TODO: Keepalive, likely remove this later.
-    // TODO: Make the ping time customizeable.
     ping: Ping<TSubstream>,
+    /// Kademlia for peer discovery.
+    discovery: Discovery<TSubstream>,
     #[behaviour(ignore)]
+    /// The events generated by this behaviour to be consumed in the swarm poll.
     events: Vec<BehaviourEvent>,
     /// Logger for behaviour actions.
     #[behaviour(ignore)]
     log: slog::Logger,
 }
 
+impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
+    pub fn new(
+        local_key: &Keypair,
+        net_conf: &NetworkConfig,
+        log: &slog::Logger,
+    ) -> error::Result<Self> {
+        let local_peer_id = local_key.public().clone().into_peer_id();
+        let behaviour_log = log.new(o!());
+        let ping_config = PingConfig::new()
+            .with_timeout(Duration::from_secs(30))
+            .with_interval(Duration::from_secs(20))
+            .with_max_failures(NonZeroU32::new(2).expect("2 != 0"))
+            .with_keep_alive(false);
+
+        Ok(Behaviour {
+            serenity_rpc: Rpc::new(log),
+            gossipsub: Gossipsub::new(local_peer_id.clone(), net_conf.gs_config.clone()),
+            discovery: Discovery::new(local_key, net_conf, log)?,
+            ping: Ping::new(ping_config),
+            events: Vec::new(),
+            log: behaviour_log,
+        })
+    }
+}
+
 // Implement the NetworkBehaviourEventProcess trait so that we can derive NetworkBehaviour for Behaviour
 impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<GossipsubEvent>
     for Behaviour<TSubstream>

@@ -89,30 +116,6 @@ impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<RPCMessage
     }
 }
 
-impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<IdentifyEvent>
-    for Behaviour<TSubstream>
-{
-    fn inject_event(&mut self, event: IdentifyEvent) {
-        match event {
-            IdentifyEvent::Identified {
-                peer_id, mut info, ..
-            } => {
-                if info.listen_addrs.len() > 20 {
-                    debug!(
-                        self.log,
-                        "More than 20 peers have been identified, truncating"
-                    );
-                    info.listen_addrs.truncate(20);
-                }
-                self.events
-                    .push(BehaviourEvent::Identified(peer_id, Box::new(info)));
-            }
-            IdentifyEvent::Error { .. } => {}
-            IdentifyEvent::SendBack { .. } => {}
-        }
-    }
-}
-
 impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<PingEvent>
     for Behaviour<TSubstream>
 {

@@ -122,25 +125,6 @@ impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<PingEvent>
 }
 
 impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
-    pub fn new(local_public_key: PublicKey, net_conf: &NetworkConfig, log: &slog::Logger) -> Self {
-        let local_peer_id = local_public_key.clone().into_peer_id();
-        let identify_config = net_conf.identify_config.clone();
-        let behaviour_log = log.new(o!());
-
-        Behaviour {
-            gossipsub: Gossipsub::new(local_peer_id, net_conf.gs_config.clone()),
-            serenity_rpc: Rpc::new(log),
-            identify: Identify::new(
-                identify_config.version,
-                identify_config.user_agent,
-                local_public_key,
-            ),
-            ping: Ping::new(),
-            events: Vec::new(),
-            log: behaviour_log,
-        }
-    }
-
     /// Consumes the events list when polled.
     fn poll<TBehaviourIn>(
         &mut self,

@@ -153,18 +137,23 @@ impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
     }
 }
 
+impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<Discv5Event>
+    for Behaviour<TSubstream>
+{
+    fn inject_event(&mut self, _event: Discv5Event) {
+        // discv5 has no events to inject
+    }
+}
+
 /// Implements the combined behaviour for the libp2p service.
 impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
+    /* Pubsub behaviour functions */
+
     /// Subscribes to a gossipsub topic.
     pub fn subscribe(&mut self, topic: Topic) -> bool {
         self.gossipsub.subscribe(topic)
     }
 
-    /// Sends an RPC Request/Response via the RPC protocol.
-    pub fn send_rpc(&mut self, peer_id: PeerId, rpc_event: RPCEvent) {
-        self.serenity_rpc.send_rpc(peer_id, rpc_event);
-    }
-
     /// Publishes a message on the pubsub (gossipsub) behaviour.
     pub fn publish(&mut self, topics: Vec<Topic>, message: PubsubMessage) {
         let message_bytes = ssz_encode(&message);

@@ -172,14 +161,19 @@ impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
             self.gossipsub.publish(topic, message_bytes.clone());
         }
     }
+
+    /* Eth2 RPC behaviour functions */
+
+    /// Sends an RPC Request/Response via the RPC protocol.
+    pub fn send_rpc(&mut self, peer_id: PeerId, rpc_event: RPCEvent) {
+        self.serenity_rpc.send_rpc(peer_id, rpc_event);
+    }
 }
 
 /// The types of events than can be obtained from polling the behaviour.
 pub enum BehaviourEvent {
     RPC(PeerId, RPCEvent),
     PeerDialed(PeerId),
-    Identified(PeerId, Box<IdentifyInfo>),
-    // TODO: This is a stub at the moment
     GossipMessage {
         source: PeerId,
         topics: Vec<TopicHash>,

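These hunks replace `identify` with a custom `Discovery` behaviour and move construction into `Behaviour::new`, which now builds an explicit `PingConfig` (30 s timeout, 20 s interval, two allowed failures, no forced keep-alive). The `#[derive(NetworkBehaviour)]` pattern itself is worth spelling out: every field is treated as a sub-behaviour unless marked `#[behaviour(ignore)]`, and `poll_method = "poll"` lets the struct drain its own event queue. A stripped-down sketch of that shape (event and substream types are placeholders, not the real ones):

```rust
// Minimal shape of the derive pattern; `Gossipsub` and `Ping` stand in for
// the four sub-behaviours composed in the real struct.
#[derive(NetworkBehaviour)]
#[behaviour(out_event = "MyEvent", poll_method = "poll")]
struct Composed<TSubstream: AsyncRead + AsyncWrite> {
    gossipsub: Gossipsub<TSubstream>, // sub-behaviour: polled by the derive
    ping: Ping<TSubstream>,           // sub-behaviour: polled by the derive
    #[behaviour(ignore)]
    events: Vec<MyEvent>,             // plain state: skipped by the derive
}
// The struct must also implement `NetworkBehaviourEventProcess<E>` for each
// sub-behaviour's event type `E`, as the impls above do.
```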
@@ -1,89 +1,129 @@
 use clap::ArgMatches;
+use enr::Enr;
 use libp2p::gossipsub::{GossipsubConfig, GossipsubConfigBuilder};
 use serde_derive::{Deserialize, Serialize};
-use types::multiaddr::{Error as MultiaddrError, Multiaddr};
+use std::path::PathBuf;
+use std::time::Duration;
+
+/// The beacon node topic string to subscribe to.
+pub const BEACON_PUBSUB_TOPIC: &str = "beacon_block";
+pub const BEACON_ATTESTATION_TOPIC: &str = "beacon_attestation";
+pub const SHARD_TOPIC_PREFIX: &str = "shard";
 
 #[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(default)]
 /// Network configuration for lighthouse.
 pub struct Config {
+    /// Data directory where node's keyfile is stored
+    pub network_dir: PathBuf,
+
     /// IP address to listen on.
-    listen_addresses: Vec<String>,
+    pub listen_address: std::net::IpAddr,
+
+    /// The TCP port that libp2p listens on.
+    pub libp2p_port: u16,
+
+    /// The address to broadcast to peers about which address we are listening on.
+    pub discovery_address: std::net::IpAddr,
+
+    /// UDP port that discovery listens on.
+    pub discovery_port: u16,
+
+    /// Target number of connected peers.
+    pub max_peers: usize,
+
     /// Gossipsub configuration parameters.
     #[serde(skip)]
     pub gs_config: GossipsubConfig,
-    /// Configuration parameters for node identification protocol.
-    #[serde(skip)]
-    pub identify_config: IdentifyConfig,
     /// List of nodes to initially connect to.
-    boot_nodes: Vec<String>,
+    pub boot_nodes: Vec<Enr>,
+
     /// Client version
     pub client_version: String,
-    /// List of topics to subscribe to as strings
+
+    /// List of extra topics to initially subscribe to as strings.
     pub topics: Vec<String>,
 }
 
 impl Default for Config {
     /// Generate a default network configuration.
     fn default() -> Self {
+        let mut network_dir = dirs::home_dir().unwrap_or_else(|| PathBuf::from("."));
+        network_dir.push(".lighthouse");
+        network_dir.push("network");
         Config {
-            listen_addresses: vec!["/ip4/127.0.0.1/tcp/9000".to_string()],
+            network_dir,
+            listen_address: "127.0.0.1".parse().expect("vaild ip address"),
+            libp2p_port: 9000,
+            discovery_address: "127.0.0.1".parse().expect("valid ip address"),
+            discovery_port: 9000,
+            max_peers: 10,
+            //TODO: Set realistic values for production
             gs_config: GossipsubConfigBuilder::new()
                 .max_gossip_size(4_000_000)
+                .inactivity_timeout(Duration::from_secs(90))
+                .heartbeat_interval(Duration::from_secs(20))
                 .build(),
-            identify_config: IdentifyConfig::default(),
             boot_nodes: vec![],
             client_version: version::version(),
-            topics: vec![String::from("beacon_chain")],
+            topics: Vec::new(),
         }
     }
 }
 
+/// Generates a default Config.
 impl Config {
-    pub fn new(boot_nodes: Vec<String>) -> Self {
-        let mut conf = Config::default();
-        conf.boot_nodes = boot_nodes;
-
-        conf
+    pub fn new() -> Self {
+        Config::default()
     }
 
-    pub fn listen_addresses(&self) -> Result<Vec<Multiaddr>, MultiaddrError> {
-        self.listen_addresses.iter().map(|s| s.parse()).collect()
-    }
-
-    pub fn boot_nodes(&self) -> Result<Vec<Multiaddr>, MultiaddrError> {
-        self.boot_nodes.iter().map(|s| s.parse()).collect()
-    }
-
-    pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> {
+    pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), String> {
+        if let Some(dir) = args.value_of("datadir") {
+            self.network_dir = PathBuf::from(dir).join("network");
+        };
+
         if let Some(listen_address_str) = args.value_of("listen-address") {
-            let listen_addresses = listen_address_str.split(',').map(Into::into).collect();
-            self.listen_addresses = listen_addresses;
+            let listen_address = listen_address_str
+                .parse()
+                .map_err(|_| format!("Invalid listen address: {:?}", listen_address_str))?;
+            self.listen_address = listen_address;
+            self.discovery_address = listen_address;
         }
 
-        if let Some(boot_addresses_str) = args.value_of("boot-nodes") {
-            let boot_addresses = boot_addresses_str.split(',').map(Into::into).collect();
-            self.boot_nodes = boot_addresses;
+        if let Some(max_peers_str) = args.value_of("maxpeers") {
+            self.max_peers = max_peers_str
+                .parse::<usize>()
+                .map_err(|_| format!("Invalid number of max peers: {}", max_peers_str))?;
+        }
+
+        if let Some(port_str) = args.value_of("port") {
+            let port = port_str
+                .parse::<u16>()
+                .map_err(|_| format!("Invalid port: {}", port_str))?;
+            self.libp2p_port = port;
+            self.discovery_port = port;
+        }
+
+        if let Some(boot_enr_str) = args.value_of("boot-nodes") {
+            self.boot_nodes = boot_enr_str
+                .split(',')
+                .map(|enr| enr.parse().map_err(|_| format!("Invalid ENR: {}", enr)))
+                .collect::<Result<Vec<Enr>, _>>()?;
+        }
+
+        if let Some(discovery_address_str) = args.value_of("discovery-address") {
+            self.discovery_address = discovery_address_str
+                .parse()
+                .map_err(|_| format!("Invalid discovery address: {:?}", discovery_address_str))?
+        }
+
+        if let Some(disc_port_str) = args.value_of("disc-port") {
+            self.discovery_port = disc_port_str
+                .parse::<u16>()
+                .map_err(|_| format!("Invalid discovery port: {}", disc_port_str))?;
         }
 
         Ok(())
     }
 }
-
-/// The configuration parameters for the Identify protocol
-#[derive(Debug, Clone)]
-pub struct IdentifyConfig {
-    /// The protocol version to listen on.
-    pub version: String,
-    /// The client's name and version for identification.
-    pub user_agent: String,
-}
-
-impl Default for IdentifyConfig {
-    fn default() -> Self {
-        Self {
-            version: "/eth/serenity/1.0".to_string(),
-            user_agent: version::version(),
-        }
-    }
-}

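The rewritten `apply_cli_args` gives the network layer its own flags (`--datadir`, `--listen-address`, `--maxpeers`, `--port`, `--boot-nodes`, `--discovery-address`, `--disc-port`). A hedged sketch of how a caller might wire it up — the clap `App` here is illustrative, not the one in `beacon_node`:

```rust
// Illustrative wiring; flag names match the diff, the App itself is assumed.
use clap::{App, Arg};

fn network_config_from_cli() -> Result<NetworkConfig, String> {
    let matches = App::new("beacon_node")
        .arg(Arg::with_name("listen-address").long("listen-address").takes_value(true))
        .arg(Arg::with_name("port").long("port").takes_value(true))
        .arg(Arg::with_name("boot-nodes").long("boot-nodes").takes_value(true))
        .get_matches();

    let mut config = NetworkConfig::new(); // now identical to `::default()`
    // Per the diff: `--listen-address` also resets `discovery_address`, and
    // `--port` sets both the libp2p TCP port and the discovery UDP port.
    config.apply_cli_args(&matches)?;
    Ok(config)
}
```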
beacon_node/eth2-libp2p/src/discovery.rs (new file, 313 lines)
@@ -0,0 +1,313 @@
+use crate::{error, NetworkConfig};
+/// This manages the discovery and management of peers.
+///
+/// Currently using discv5 for peer discovery.
+///
+use futures::prelude::*;
+use libp2p::core::swarm::{
+    ConnectedPoint, NetworkBehaviour, NetworkBehaviourAction, PollParameters,
+};
+use libp2p::core::{identity::Keypair, Multiaddr, PeerId, ProtocolsHandler};
+use libp2p::discv5::{Discv5, Discv5Event};
+use libp2p::enr::{Enr, EnrBuilder, NodeId};
+use libp2p::multiaddr::Protocol;
+use slog::{debug, info, o, warn};
+use std::collections::HashSet;
+use std::fs::File;
+use std::io::prelude::*;
+use std::str::FromStr;
+use std::time::{Duration, Instant};
+use tokio::io::{AsyncRead, AsyncWrite};
+use tokio_timer::Delay;
+
+/// Maximum seconds before searching for extra peers.
+const MAX_TIME_BETWEEN_PEER_SEARCHES: u64 = 60;
+/// Initial delay between peer searches.
+const INITIAL_SEARCH_DELAY: u64 = 5;
+/// Local ENR storage filename.
+const ENR_FILENAME: &str = "enr.dat";
+
+/// Lighthouse discovery behaviour. This provides peer management and discovery using the Discv5
+/// libp2p protocol.
+pub struct Discovery<TSubstream> {
+    /// The peers currently connected to libp2p streams.
+    connected_peers: HashSet<PeerId>,
+
+    /// The target number of connected peers on the libp2p interface.
+    max_peers: usize,
+
+    /// The delay between peer discovery searches.
+    peer_discovery_delay: Delay,
+
+    /// Tracks the last discovery delay. The delay is doubled each round until the max
+    /// time is reached.
+    past_discovery_delay: u64,
+
+    /// The TCP port for libp2p. Used to convert an updated IP address to a multiaddr. Note: This
+    /// assumes that the external TCP port is the same as the internal TCP port if behind a NAT.
+    //TODO: Improve NAT handling limit the above restriction
+    tcp_port: u16,
+
+    /// The discovery behaviour used to discover new peers.
+    discovery: Discv5<TSubstream>,
+
+    /// Logger for the discovery behaviour.
+    log: slog::Logger,
+}
+
+impl<TSubstream> Discovery<TSubstream> {
+    pub fn new(
+        local_key: &Keypair,
+        config: &NetworkConfig,
+        log: &slog::Logger,
+    ) -> error::Result<Self> {
+        let log = log.new(o!("Service" => "Libp2p-Discovery"));
+
+        // checks if current ENR matches that found on disk
+        let local_enr = load_enr(local_key, config, &log)?;
+
+        info!(log, "Local ENR: {}", local_enr.to_base64());
+        debug!(log, "Local Node Id: {}", local_enr.node_id());
+
+        let mut discovery = Discv5::new(local_enr, local_key.clone(), config.listen_address)
+            .map_err(|e| format!("Discv5 service failed: {:?}", e))?;
+
+        // Add bootnodes to routing table
+        for bootnode_enr in config.boot_nodes.clone() {
+            debug!(
+                log,
+                "Adding node to routing table: {}",
+                bootnode_enr.node_id()
+            );
+            discovery.add_enr(bootnode_enr);
+        }
+
+        Ok(Self {
+            connected_peers: HashSet::new(),
+            max_peers: config.max_peers,
+            peer_discovery_delay: Delay::new(Instant::now()),
+            past_discovery_delay: INITIAL_SEARCH_DELAY,
+            tcp_port: config.libp2p_port,
+            discovery,
+            log,
+        })
+    }
+
+    /// Manually search for peers. This restarts the discovery round, sparking multiple rapid
+    /// queries.
+    pub fn discover_peers(&mut self) {
+        self.past_discovery_delay = INITIAL_SEARCH_DELAY;
+        self.find_peers();
+    }
+
+    /// Add an Enr to the routing table of the discovery mechanism.
+    pub fn add_enr(&mut self, enr: Enr) {
+        self.discovery.add_enr(enr);
+    }
+
+    /// Search for new peers using the underlying discovery mechanism.
+    fn find_peers(&mut self) {
+        // pick a random NodeId
+        let random_node = NodeId::random();
+        debug!(self.log, "Searching for peers...");
+        self.discovery.find_node(random_node);
+
+        // update the time until next discovery
+        let delay = {
+            if self.past_discovery_delay < MAX_TIME_BETWEEN_PEER_SEARCHES {
+                self.past_discovery_delay *= 2;
+                self.past_discovery_delay
+            } else {
+                MAX_TIME_BETWEEN_PEER_SEARCHES
+            }
+        };
+        self.peer_discovery_delay
+            .reset(Instant::now() + Duration::from_secs(delay));
+    }
+}
+
+// Redirect all behaviour events to underlying discovery behaviour.
+impl<TSubstream> NetworkBehaviour for Discovery<TSubstream>
+where
+    TSubstream: AsyncRead + AsyncWrite,
+{
+    type ProtocolsHandler = <Discv5<TSubstream> as NetworkBehaviour>::ProtocolsHandler;
+    type OutEvent = <Discv5<TSubstream> as NetworkBehaviour>::OutEvent;
+
+    fn new_handler(&mut self) -> Self::ProtocolsHandler {
+        NetworkBehaviour::new_handler(&mut self.discovery)
+    }
+
+    fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec<Multiaddr> {
+        // Let discovery track possible known peers.
+        self.discovery.addresses_of_peer(peer_id)
+    }
+
+    fn inject_connected(&mut self, peer_id: PeerId, _endpoint: ConnectedPoint) {
+        self.connected_peers.insert(peer_id);
+    }
+
+    fn inject_disconnected(&mut self, peer_id: &PeerId, _endpoint: ConnectedPoint) {
+        self.connected_peers.remove(peer_id);
+    }
+
+    fn inject_replaced(
+        &mut self,
+        _peer_id: PeerId,
+        _closed: ConnectedPoint,
+        _opened: ConnectedPoint,
+    ) {
+        // discv5 doesn't implement
+    }
+
+    fn inject_node_event(
+        &mut self,
+        _peer_id: PeerId,
+        _event: <Self::ProtocolsHandler as ProtocolsHandler>::OutEvent,
+    ) {
+        // discv5 doesn't implement
+    }
+
+    fn poll(
+        &mut self,
+        params: &mut impl PollParameters,
+    ) -> Async<
+        NetworkBehaviourAction<
+            <Self::ProtocolsHandler as ProtocolsHandler>::InEvent,
+            Self::OutEvent,
+        >,
+    > {
+        // search for peers if it is time
+        loop {
+            match self.peer_discovery_delay.poll() {
+                Ok(Async::Ready(_)) => {
+                    if self.connected_peers.len() < self.max_peers {
+                        self.find_peers();
+                    }
+                }
+                Ok(Async::NotReady) => break,
+                Err(e) => {
+                    warn!(self.log, "Discovery peer search failed: {:?}", e);
+                }
+            }
+        }
+
+        // Poll discovery
+        loop {
+            match self.discovery.poll(params) {
+                Async::Ready(NetworkBehaviourAction::GenerateEvent(event)) => {
+                    match event {
+                        Discv5Event::Discovered(_enr) => {
+                            // not concerned about FINDNODE results, rather the result of an entire
+                            // query.
+                        }
+                        Discv5Event::SocketUpdated(socket) => {
+                            info!(self.log, "Address updated"; "IP" => format!("{}",socket.ip()));
+                            let mut address = Multiaddr::from(socket.ip());
+                            address.push(Protocol::Tcp(self.tcp_port));
+                            return Async::Ready(NetworkBehaviourAction::ReportObservedAddr {
+                                address,
+                            });
+                        }
+                        Discv5Event::FindNodeResult { closer_peers, .. } => {
+                            debug!(self.log, "Discv5 query found {} peers", closer_peers.len());
+                            if closer_peers.is_empty() {
+                                debug!(self.log, "Discv5 random query yielded empty results");
+                            }
+                            for peer_id in closer_peers {
+                                // if we need more peers, attempt a connection
+                                if self.connected_peers.len() < self.max_peers
+                                    && self.connected_peers.get(&peer_id).is_none()
+                                {
+                                    debug!(self.log, "Discv5: Peer discovered"; "Peer"=> format!("{:?}", peer_id));
+                                    return Async::Ready(NetworkBehaviourAction::DialPeer {
+                                        peer_id,
+                                    });
+                                }
+                            }
+                        }
+                        _ => {}
+                    }
+                }
+                // discv5 does not output any other NetworkBehaviourAction
+                Async::Ready(_) => {}
+                Async::NotReady => break,
+            }
+        }
+        Async::NotReady
+    }
+}
+
+/// Loads an ENR from file if it exists and matches the current NodeId and sequence number. If none
+/// exists, generates a new one.
+///
+/// If an ENR exists, with the same NodeId and IP address, we use the disk-generated one as its
+/// ENR sequence will be equal or higher than a newly generated one.
+fn load_enr(
+    local_key: &Keypair,
+    config: &NetworkConfig,
+    log: &slog::Logger,
+) -> Result<Enr, String> {
+    // Build the local ENR.
+    // Note: Discovery should update the ENR record's IP to the external IP as seen by the
+    // majority of our peers.
+    let mut local_enr = EnrBuilder::new()
+        .ip(config.discovery_address.into())
+        .tcp(config.libp2p_port)
+        .udp(config.discovery_port)
+        .build(&local_key)
+        .map_err(|e| format!("Could not build Local ENR: {:?}", e))?;
+
+    let enr_f = config.network_dir.join(ENR_FILENAME);
+    if let Ok(mut enr_file) = File::open(enr_f.clone()) {
+        let mut enr_string = String::new();
+        match enr_file.read_to_string(&mut enr_string) {
+            Err(_) => debug!(log, "Could not read ENR from file"),
+            Ok(_) => {
+                match Enr::from_str(&enr_string) {
+                    Ok(enr) => {
+                        debug!(log, "ENR found in file: {:?}", enr_f);
+
+                        if enr.node_id() == local_enr.node_id() {
+                            if enr.ip() == config.discovery_address.into()
+                                && enr.tcp() == Some(config.libp2p_port)
+                                && enr.udp() == Some(config.discovery_port)
+                            {
+                                debug!(log, "ENR loaded from file");
+                                // the stored ENR has the same configuration, use it
+                                return Ok(enr);
+                            }
+
+                            // same node id, different configuration - update the sequence number
+                            let new_seq_no = enr.seq().checked_add(1).ok_or_else(|| "ENR sequence number on file is too large. Remove it to generate a new NodeId")?;
+                            local_enr.set_seq(new_seq_no, local_key).map_err(|e| {
+                                format!("Could not update ENR sequence number: {:?}", e)
+                            })?;
+                            debug!(log, "ENR sequence number increased to: {}", new_seq_no);
+                        }
+                    }
+                    Err(e) => {
+                        warn!(log, "ENR from file could not be decoded: {:?}", e);
+                    }
+                }
+            }
+        }
+    }
+
+    // write ENR to disk
+    let _ = std::fs::create_dir_all(&config.network_dir);
+    match File::create(enr_f.clone())
+        .and_then(|mut f| f.write_all(&local_enr.to_base64().as_bytes()))
+    {
+        Ok(_) => {
+            debug!(log, "ENR written to disk");
+        }
+        Err(e) => {
+            warn!(
+                log,
+                "Could not write ENR to file: {:?}. Error: {}", enr_f, e
+            );
+        }
+    }
+    Ok(local_enr)
+}

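One subtlety in `find_peers`: the stored delay is doubled before it is used, so starting from `INITIAL_SEARCH_DELAY = 5` the actual waits run 10, 20, 40, then overshoot to 80 s once before settling at the 60 s cap (the stored value never shrinks back below the cap; `discover_peers` resets it to 5). A standalone reproduction of that arithmetic:

```rust
// Reproduces the backoff schedule computed inside `find_peers`.
const MAX_TIME_BETWEEN_PEER_SEARCHES: u64 = 60;
const INITIAL_SEARCH_DELAY: u64 = 5;

fn next_delay(past_discovery_delay: &mut u64) -> u64 {
    if *past_discovery_delay < MAX_TIME_BETWEEN_PEER_SEARCHES {
        *past_discovery_delay *= 2;
        *past_discovery_delay
    } else {
        MAX_TIME_BETWEEN_PEER_SEARCHES
    }
}

fn main() {
    let mut past = INITIAL_SEARCH_DELAY;
    let waits: Vec<u64> = (0..6).map(|_| next_delay(&mut past)).collect();
    // Note the single 80 s overshoot before the cap takes hold.
    assert_eq!(waits, vec![10, 20, 40, 80, 60, 60]);
}
```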
@@ -4,12 +4,18 @@
 /// This crate builds and manages the libp2p services required by the beacon node.
 pub mod behaviour;
 mod config;
+mod discovery;
 pub mod error;
 pub mod rpc;
 mod service;
 
 pub use behaviour::PubsubMessage;
-pub use config::Config as NetworkConfig;
+pub use config::{
+    Config as NetworkConfig, BEACON_ATTESTATION_TOPIC, BEACON_PUBSUB_TOPIC, SHARD_TOPIC_PREFIX,
+};
+pub use libp2p::floodsub::{Topic, TopicBuilder, TopicHash};
+pub use libp2p::multiaddr;
+pub use libp2p::Multiaddr;
 pub use libp2p::{
     gossipsub::{GossipsubConfig, GossipsubConfigBuilder},
     PeerId,

@@ -17,5 +23,3 @@ pub use libp2p::{
 pub use rpc::RPCEvent;
 pub use service::Libp2pEvent;
 pub use service::Service;
-pub use types::multiaddr;
-pub use types::Multiaddr;

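The crate now re-exports `Multiaddr`, `multiaddr` and the floodsub topic types from `libp2p` rather than from `types`, alongside the new topic constants. A downstream import after this change might look like the following (illustrative, not from the commit):

```rust
// Illustrative downstream usage of the new re-exports.
use eth2_libp2p::{Topic, TopicBuilder, BEACON_PUBSUB_TOPIC};

fn beacon_block_topic() -> Topic {
    TopicBuilder::new(BEACON_PUBSUB_TOPIC.to_string()).build()
}
```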
@@ -94,7 +94,7 @@ where
 
     fn poll(
         &mut self,
-        _: &mut PollParameters<'_>,
+        _: &mut impl PollParameters,
     ) -> Async<
         NetworkBehaviourAction<
             <Self::ProtocolsHandler as ProtocolsHandler>::InEvent,

@@ -11,7 +11,6 @@ use tokio::io::{AsyncRead, AsyncWrite};
 const MAX_READ_SIZE: usize = 4_194_304; // 4M
 
 /// Implementation of the `ConnectionUpgrade` for the rpc protocol.
-
 #[derive(Debug, Clone)]
 pub struct RPCProtocol;
 

|
|||||||
use crate::multiaddr::Protocol;
|
use crate::multiaddr::Protocol;
|
||||||
use crate::rpc::RPCEvent;
|
use crate::rpc::RPCEvent;
|
||||||
use crate::NetworkConfig;
|
use crate::NetworkConfig;
|
||||||
|
use crate::{TopicBuilder, TopicHash};
|
||||||
|
use crate::{BEACON_ATTESTATION_TOPIC, BEACON_PUBSUB_TOPIC};
|
||||||
use futures::prelude::*;
|
use futures::prelude::*;
|
||||||
use futures::Stream;
|
use futures::Stream;
|
||||||
use libp2p::core::{
|
use libp2p::core::{
|
||||||
identity,
|
identity::Keypair,
|
||||||
|
multiaddr::Multiaddr,
|
||||||
muxing::StreamMuxerBox,
|
muxing::StreamMuxerBox,
|
||||||
nodes::Substream,
|
nodes::Substream,
|
||||||
transport::boxed::Boxed,
|
transport::boxed::Boxed,
|
||||||
upgrade::{InboundUpgradeExt, OutboundUpgradeExt},
|
upgrade::{InboundUpgradeExt, OutboundUpgradeExt},
|
||||||
};
|
};
|
||||||
use libp2p::identify::protocol::IdentifyInfo;
|
|
||||||
use libp2p::{core, secio, PeerId, Swarm, Transport};
|
use libp2p::{core, secio, PeerId, Swarm, Transport};
|
||||||
use slog::{debug, info, trace, warn};
|
use slog::{debug, info, trace, warn};
|
||||||
|
use std::fs::File;
|
||||||
|
use std::io::prelude::*;
|
||||||
use std::io::{Error, ErrorKind};
|
use std::io::{Error, ErrorKind};
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
use types::{TopicBuilder, TopicHash};
|
|
||||||
|
|
||||||
type Libp2pStream = Boxed<(PeerId, StreamMuxerBox), Error>;
|
type Libp2pStream = Boxed<(PeerId, StreamMuxerBox), Error>;
|
||||||
type Libp2pBehaviour = Behaviour<Substream<StreamMuxerBox>>;
|
type Libp2pBehaviour = Behaviour<Substream<StreamMuxerBox>>;
|
||||||
|
|
||||||
|
const NETWORK_KEY_FILENAME: &str = "key";
|
||||||
|
|
||||||
/// The configuration and state of the libp2p components for the beacon node.
|
/// The configuration and state of the libp2p components for the beacon node.
|
||||||
pub struct Service {
|
pub struct Service {
|
||||||
/// The libp2p Swarm handler.
|
/// The libp2p Swarm handler.
|
||||||
@@ -35,59 +40,52 @@ pub struct Service {
 
 impl Service {
     pub fn new(config: NetworkConfig, log: slog::Logger) -> error::Result<Self> {
-        debug!(log, "Libp2p Service starting");
+        debug!(log, "Network-libp2p Service starting");
 
-        // TODO: Currently using secp256k1 key pairs. Wire protocol specifies RSA. Waiting for this
-        // PR to be merged to generate RSA keys: https://github.com/briansmith/ring/pull/733
-        // TODO: Save and recover node key from disk
-        let local_private_key = identity::Keypair::generate_secp256k1();
-
-        let local_public_key = local_private_key.public();
+        // load the private key from CLI flag, disk or generate a new one
+        let local_private_key = load_private_key(&config, &log);
+
         let local_peer_id = PeerId::from(local_private_key.public());
         info!(log, "Local peer id: {:?}", local_peer_id);
 
         let mut swarm = {
-            // Set up the transport
-            let transport = build_transport(local_private_key);
-            // Set up gossipsub routing
-            let behaviour = Behaviour::new(local_public_key.clone(), &config, &log);
-            // Set up Topology
-            let topology = local_peer_id.clone();
-            Swarm::new(transport, behaviour, topology)
+            // Set up the transport - tcp/ws with secio and mplex/yamux
+            let transport = build_transport(local_private_key.clone());
+            // Lighthouse network behaviour
+            let behaviour = Behaviour::new(&local_private_key, &config, &log)?;
+            Swarm::new(transport, behaviour, local_peer_id.clone())
         };
 
-        // listen on all addresses
-        for address in config
-            .listen_addresses()
-            .map_err(|e| format!("Invalid listen multiaddr: {}", e))?
-        {
-            match Swarm::listen_on(&mut swarm, address.clone()) {
-                Ok(mut listen_addr) => {
-                    listen_addr.append(Protocol::P2p(local_peer_id.clone().into()));
-                    info!(log, "Listening on: {}", listen_addr);
-                }
-                Err(err) => warn!(log, "Cannot listen on: {} : {:?}", address, err),
-            };
-        }
-
-        // connect to boot nodes - these are currently stored as multiaddrs
-        // Once we have discovery, can set to peerId
-        for bootnode in config
-            .boot_nodes()
-            .map_err(|e| format!("Invalid boot node multiaddr: {:?}", e))?
-        {
-            match Swarm::dial_addr(&mut swarm, bootnode.clone()) {
-                Ok(()) => debug!(log, "Dialing bootnode: {}", bootnode),
-                Err(err) => debug!(
-                    log,
-                    "Could not connect to bootnode: {} error: {:?}", bootnode, err
-                ),
-            };
-        }
+        // listen on the specified address
+        let listen_multiaddr = {
+            let mut m = Multiaddr::from(config.listen_address);
+            m.push(Protocol::Tcp(config.libp2p_port));
+            m
+        };
+
+        match Swarm::listen_on(&mut swarm, listen_multiaddr.clone()) {
+            Ok(_) => {
+                let mut log_address = listen_multiaddr;
+                log_address.push(Protocol::P2p(local_peer_id.clone().into()));
+                info!(log, "Listening on: {}", log_address);
+            }
+            Err(err) => warn!(
+                log,
+                "Cannot listen on: {} because: {:?}", listen_multiaddr, err
+            ),
+        };
 
         // subscribe to default gossipsub topics
+        let mut topics = vec![];
+        //TODO: Handle multiple shard attestations. For now we simply use a separate topic for
+        //attestations
+        topics.push(BEACON_ATTESTATION_TOPIC.to_string());
+        topics.push(BEACON_PUBSUB_TOPIC.to_string());
+        topics.append(&mut config.topics.clone());
+
         let mut subscribed_topics = vec![];
-        for topic in config.topics {
-            let t = TopicBuilder::new(topic.to_string()).build();
+        for topic in topics {
+            let t = TopicBuilder::new(topic.clone()).build();
             if swarm.subscribe(t) {
                 trace!(log, "Subscribed to topic: {:?}", topic);
                 subscribed_topics.push(topic);

@@ -135,9 +133,6 @@ impl Stream for Service {
                     BehaviourEvent::PeerDialed(peer_id) => {
                         return Ok(Async::Ready(Some(Libp2pEvent::PeerDialed(peer_id))));
                     }
-                    BehaviourEvent::Identified(peer_id, info) => {
-                        return Ok(Async::Ready(Some(Libp2pEvent::Identified(peer_id, info))));
-                    }
                 },
                 Ok(Async::Ready(None)) => unreachable!("Swarm stream shouldn't end"),
                 Ok(Async::NotReady) => break,
@@ -150,7 +145,7 @@ impl Stream for Service {
 
 /// The implementation supports TCP/IP, WebSockets over TCP/IP, secio as the encryption layer, and
 /// mplex or yamux as the multiplexing layer.
-fn build_transport(local_private_key: identity::Keypair) -> Boxed<(PeerId, StreamMuxerBox), Error> {
+fn build_transport(local_private_key: Keypair) -> Boxed<(PeerId, StreamMuxerBox), Error> {
     // TODO: The Wire protocol currently doesn't specify encryption and this will need to be customised
     // in the future.
     let transport = libp2p::tcp::TcpConfig::new();
@@ -187,8 +182,6 @@ pub enum Libp2pEvent {
     RPC(PeerId, RPCEvent),
     /// Initiated the connection to a new peer.
     PeerDialed(PeerId),
-    /// Received information about a peer on the network.
-    Identified(PeerId, Box<IdentifyInfo>),
     /// Received pubsub message.
     PubsubMessage {
         source: PeerId,
@@ -196,3 +189,51 @@ pub enum Libp2pEvent {
         message: Box<PubsubMessage>,
     },
 }
+
+/// Loads a private key from disk. If this fails, a new key is
+/// generated and is then saved to disk.
+///
+/// Currently only secp256k1 keys are allowed, as these are the only keys supported by discv5.
+fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair {
+    // TODO: Currently using secp256k1 keypairs - currently required for discv5
+    // check for key from disk
+    let network_key_f = config.network_dir.join(NETWORK_KEY_FILENAME);
+    if let Ok(mut network_key_file) = File::open(network_key_f.clone()) {
+        let mut key_bytes: Vec<u8> = Vec::with_capacity(36);
+        match network_key_file.read_to_end(&mut key_bytes) {
+            Err(_) => debug!(log, "Could not read network key file"),
+            Ok(_) => {
+                // only accept secp256k1 keys for now
+                if let Ok(secret_key) =
+                    libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut key_bytes)
+                {
+                    let kp: libp2p::core::identity::secp256k1::Keypair = secret_key.into();
+                    debug!(log, "Loaded network key from disk.");
+                    return Keypair::Secp256k1(kp);
+                } else {
+                    debug!(log, "Network key file is not a valid secp256k1 key");
+                }
+            }
+        }
+    }
+
+    // if a key could not be loaded from disk, generate a new one and save it
+    let local_private_key = Keypair::generate_secp256k1();
+    if let Keypair::Secp256k1(key) = local_private_key.clone() {
+        let _ = std::fs::create_dir_all(&config.network_dir);
+        match File::create(network_key_f.clone())
+            .and_then(|mut f| f.write_all(&key.secret().to_bytes()))
+        {
+            Ok(_) => {
+                debug!(log, "New network key generated and written to disk");
+            }
+            Err(e) => {
+                warn!(
+                    log,
+                    "Could not write node key to file: {:?}. Error: {}", network_key_f, e
+                );
+            }
+        }
+    }
+    local_private_key
+}
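Note: `load_private_key` above writes the raw secp256k1 secret to `NETWORK_KEY_FILENAME` and reads it back on the next start. A minimal round-trip sketch of the property it relies on, assuming the same `libp2p` identity API the function itself calls (the test name and module are illustrative only):

```rust
#[cfg(test)]
mod network_key_round_trip {
    use libp2p::core::identity::{secp256k1, Keypair};

    #[test]
    fn saved_secret_decodes_back() {
        // Generate a key exactly as `load_private_key` does on first run.
        if let Keypair::Secp256k1(kp) = Keypair::generate_secp256k1() {
            // These are the bytes the node writes to NETWORK_KEY_FILENAME...
            let mut bytes = kp.secret().to_bytes().to_vec();
            // ...and this is how it decodes them on the next start-up.
            let decoded = secp256k1::SecretKey::from_bytes(&mut bytes)
                .expect("a freshly written secret key should decode");
            let decoded: secp256k1::Keypair = decoded.into();
            assert_eq!(
                kp.public().encode().to_vec(),
                decoded.public().encode().to_vec()
            );
        }
    }
}
```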
@@ -13,7 +13,7 @@ network = { path = "../network" }
 eth2-libp2p = { path = "../eth2-libp2p" }
 version = { path = "../version" }
 types = { path = "../../eth2/types" }
-ssz = { path = "../../eth2/utils/ssz" }
+eth2_ssz = { path = "../../eth2/utils/ssz" }
 slot_clock = { path = "../../eth2/utils/slot_clock" }
 protos = { path = "../../protos" }
 grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] }
@@ -13,8 +13,8 @@ store = { path = "../store" }
 eth2-libp2p = { path = "../eth2-libp2p" }
 version = { path = "../version" }
 types = { path = "../../eth2/types" }
-slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_debug"] }
-ssz = { path = "../../eth2/utils/ssz" }
+slog = { version = "^2.2.3" }
+eth2_ssz = { path = "../../eth2/utils/ssz" }
 tree_hash = { path = "../../eth2/utils/tree_hash" }
 futures = "0.1.25"
 error-chain = "0.12.0"
@@ -4,6 +4,7 @@ use crate::NetworkConfig;
 use beacon_chain::{BeaconChain, BeaconChainTypes};
 use crossbeam_channel::{unbounded as channel, Sender, TryRecvError};
 use eth2_libp2p::Service as LibP2PService;
+use eth2_libp2p::Topic;
 use eth2_libp2p::{Libp2pEvent, PeerId};
 use eth2_libp2p::{PubsubMessage, RPCEvent};
 use futures::prelude::*;
@@ -13,7 +14,6 @@ use slog::{debug, info, o, trace};
 use std::marker::PhantomData;
 use std::sync::Arc;
 use tokio::runtime::TaskExecutor;
-use types::Topic;
 
 /// Service that handles communication between internal services and the eth2_libp2p network service.
 pub struct Service<T: BeaconChainTypes> {
@@ -126,12 +126,6 @@ fn network_service(
                     .send(HandlerMessage::PeerDialed(peer_id))
                     .map_err(|_| "failed to send rpc to handler")?;
             }
-            Libp2pEvent::Identified(peer_id, info) => {
-                debug!(
-                    log,
-                    "We have identified peer: {:?} with {:?}", peer_id, info
-                );
-            }
             Libp2pEvent::PubsubMessage {
                 source, message, ..
             } => {
@@ -1,7 +1,8 @@
 use beacon_chain::{BeaconChain, BeaconChainTypes};
 use eth2_libp2p::rpc::methods::*;
 use eth2_libp2p::PeerId;
-use slog::{debug, error};
+use slog::error;
+use std::collections::HashMap;
 use std::sync::Arc;
 use std::time::{Duration, Instant};
 use tree_hash::TreeHash;
@@ -22,7 +23,7 @@ use types::{BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Hash256, Slot};
 pub struct ImportQueue<T: BeaconChainTypes> {
     pub chain: Arc<BeaconChain<T>>,
     /// Partially imported blocks, keyed by the root of `BeaconBlockBody`.
-    pub partials: Vec<PartialBeaconBlock>,
+    partials: HashMap<Hash256, PartialBeaconBlock>,
     /// Time before a queue entry is considered state.
     pub stale_time: Duration,
     /// Logging
@@ -34,7 +35,7 @@ impl<T: BeaconChainTypes> ImportQueue<T> {
     pub fn new(chain: Arc<BeaconChain<T>>, stale_time: Duration, log: slog::Logger) -> Self {
         Self {
             chain,
-            partials: vec![],
+            partials: HashMap::new(),
             stale_time,
             log,
         }
@@ -52,7 +53,7 @@ impl<T: BeaconChainTypes> ImportQueue<T> {
         let mut complete: Vec<(Hash256, BeaconBlock, PeerId)> = self
             .partials
             .iter()
-            .filter_map(|partial| partial.clone().complete())
+            .filter_map(|(_, partial)| partial.clone().complete())
             .collect();
 
         // Sort the completable partials to be in ascending slot order.
@@ -61,14 +62,14 @@ impl<T: BeaconChainTypes> ImportQueue<T> {
         complete
     }
 
+    pub fn contains_block_root(&self, block_root: Hash256) -> bool {
+        self.partials.contains_key(&block_root)
+    }
+
     /// Removes the first `PartialBeaconBlock` with a matching `block_root`, returning the partial
     /// if it exists.
     pub fn remove(&mut self, block_root: Hash256) -> Option<PartialBeaconBlock> {
-        let position = self
-            .partials
-            .iter()
-            .position(|p| p.block_root == block_root)?;
-        Some(self.partials.remove(position))
+        self.partials.remove(&block_root)
     }
 
     /// Flushes all stale entries from the queue.
@@ -76,31 +77,10 @@ impl<T: BeaconChainTypes> ImportQueue<T> {
     /// An entry is stale if it has as a `inserted` time that is more than `self.stale_time` in the
     /// past.
     pub fn remove_stale(&mut self) {
-        let stale_indices: Vec<usize> = self
-            .partials
-            .iter()
-            .enumerate()
-            .filter_map(|(i, partial)| {
-                if partial.inserted + self.stale_time <= Instant::now() {
-                    Some(i)
-                } else {
-                    None
-                }
-            })
-            .collect();
-
-        if !stale_indices.is_empty() {
-            debug!(
-                self.log,
-                "ImportQueue removing stale entries";
-                "stale_items" => stale_indices.len(),
-                "stale_time_seconds" => self.stale_time.as_secs()
-            );
-        }
-
-        stale_indices.iter().for_each(|&i| {
-            self.partials.remove(i);
-        });
+        let stale_time = self.stale_time;
+
+        self.partials
+            .retain(|_, partial| partial.inserted + stale_time > Instant::now())
     }
 
     /// Returns `true` if `self.chain` has not yet processed this block.
@@ -122,17 +102,19 @@ impl<T: BeaconChainTypes> ImportQueue<T> {
         block_roots: &[BlockRootSlot],
         sender: PeerId,
     ) -> Vec<BlockRootSlot> {
-        let new_roots: Vec<BlockRootSlot> = block_roots
+        let new_block_root_slots: Vec<BlockRootSlot> = block_roots
             .iter()
+            // Ignore any roots already stored in the queue.
+            .filter(|brs| !self.contains_block_root(brs.block_root))
             // Ignore any roots already processed by the chain.
            .filter(|brs| self.chain_has_not_seen_block(&brs.block_root))
-            // Ignore any roots already stored in the queue.
-            .filter(|brs| !self.partials.iter().any(|p| p.block_root == brs.block_root))
             .cloned()
             .collect();
 
-        new_roots.iter().for_each(|brs| {
-            self.partials.push(PartialBeaconBlock {
+        self.partials.extend(
+            new_block_root_slots
+                .iter()
+                .map(|brs| PartialBeaconBlock {
                     slot: brs.slot,
                     block_root: brs.block_root,
                     sender: sender.clone(),
@@ -140,9 +122,10 @@ impl<T: BeaconChainTypes> ImportQueue<T> {
                     body: None,
                     inserted: Instant::now(),
                 })
-        });
+                .map(|partial| (partial.block_root, partial)),
+        );
 
-        new_roots
+        new_block_root_slots
     }
 
     /// Adds the `headers` to the `partials` queue. Returns a list of `Hash256` block roots for
@@ -170,7 +153,7 @@ impl<T: BeaconChainTypes> ImportQueue<T> {
 
             if self.chain_has_not_seen_block(&block_root) {
                 self.insert_header(block_root, header, sender.clone());
-                required_bodies.push(block_root)
+                required_bodies.push(block_root);
             }
         }
 
@@ -197,31 +180,20 @@ impl<T: BeaconChainTypes> ImportQueue<T> {
     /// If the header already exists, the `inserted` time is set to `now` and not other
     /// modifications are made.
     fn insert_header(&mut self, block_root: Hash256, header: BeaconBlockHeader, sender: PeerId) {
-        if let Some(i) = self
-            .partials
-            .iter()
-            .position(|p| p.block_root == block_root)
-        {
-            // Case 1: there already exists a partial with a matching block root.
-            //
-            // The `inserted` time is set to now and the header is replaced, regardless of whether
-            // it existed or not.
-            self.partials[i].header = Some(header);
-            self.partials[i].inserted = Instant::now();
-        } else {
-            // Case 2: there was no partial with a matching block root.
-            //
-            // A new partial is added. This case permits adding a header without already known the
-            // root.
-            self.partials.push(PartialBeaconBlock {
+        self.partials
+            .entry(block_root)
+            .and_modify(|partial| {
+                partial.header = Some(header.clone());
+                partial.inserted = Instant::now();
+            })
+            .or_insert_with(|| PartialBeaconBlock {
                 slot: header.slot,
                 block_root,
                 header: Some(header),
                 body: None,
                 inserted: Instant::now(),
                 sender,
-            })
-        }
+            });
     }
 
     /// Updates an existing partial with the `body`.
@@ -232,7 +204,7 @@ impl<T: BeaconChainTypes> ImportQueue<T> {
     fn insert_body(&mut self, body: BeaconBlockBody, sender: PeerId) {
         let body_root = Hash256::from_slice(&body.tree_hash_root()[..]);
 
-        self.partials.iter_mut().for_each(|mut p| {
+        self.partials.iter_mut().for_each(|(_, mut p)| {
             if let Some(header) = &mut p.header {
                 if body_root == header.block_body_root {
                     p.inserted = Instant::now();
@@ -261,15 +233,10 @@ impl<T: BeaconChainTypes> ImportQueue<T> {
             sender,
         };
 
-        if let Some(i) = self
-            .partials
-            .iter()
-            .position(|p| p.block_root == block_root)
-        {
-            self.partials[i] = partial;
-        } else {
-            self.partials.push(partial)
-        }
+        self.partials
+            .entry(block_root)
+            .and_modify(|existing_partial| *existing_partial = partial.clone())
+            .or_insert(partial);
     }
 }
 
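Note: the `Vec` → `HashMap` migration above replaces three hand-rolled linear scans (`position`, index-based `remove`, and the stale-index pass) with the standard `entry` and `retain` APIs. A self-contained sketch of those two calls doing the same work (toy types, not the real `PartialBeaconBlock`):

```rust
use std::collections::HashMap;
use std::time::{Duration, Instant};

struct Partial {
    inserted: Instant,
    header: Option<&'static str>,
}

fn main() {
    let mut partials: HashMap<u64, Partial> = HashMap::new();
    let stale_time = Duration::from_secs(6);

    // The upsert used by `insert_header`: refresh an existing entry,
    // or create a new one if the block root is unseen.
    partials
        .entry(42)
        .and_modify(|p| {
            p.header = Some("replaced");
            p.inserted = Instant::now();
        })
        .or_insert_with(|| Partial {
            inserted: Instant::now(),
            header: Some("new"),
        });

    // The whole of the new `remove_stale`: keep only fresh entries.
    partials.retain(|_, p| p.inserted + stale_time > Instant::now());

    assert!(partials.contains_key(&42));
}
```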
@@ -4,7 +4,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome};
 use eth2_libp2p::rpc::methods::*;
 use eth2_libp2p::rpc::{RPCRequest, RPCResponse, RequestId};
 use eth2_libp2p::PeerId;
-use slog::{debug, error, info, o, warn};
+use slog::{debug, error, info, o, trace, warn};
 use std::collections::HashMap;
 use std::sync::Arc;
 use std::time::Duration;
@@ -17,7 +17,7 @@ use types::{
 const SLOT_IMPORT_TOLERANCE: u64 = 100;
 
 /// The amount of seconds a block (or partial block) may exist in the import queue.
-const QUEUE_STALE_SECS: u64 = 600;
+const QUEUE_STALE_SECS: u64 = 6;
 
 /// If a block is more than `FUTURE_SLOT_TOLERANCE` slots ahead of our slot clock, we drop it.
 /// Otherwise we queue it.
@@ -72,7 +72,6 @@ pub struct SimpleSync<T: BeaconChainTypes> {
     import_queue: ImportQueue<T>,
     /// The current state of the syncing protocol.
     state: SyncState,
-    /// Sync logger.
     log: slog::Logger,
 }
 
@@ -160,94 +159,98 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
         hello: HelloMessage,
         network: &mut NetworkContext,
     ) {
-        let spec = &self.chain.spec;
 
         let remote = PeerSyncInfo::from(hello);
         let local = PeerSyncInfo::from(&self.chain);
 
-        // Disconnect nodes who are on a different network.
+        let start_slot = |epoch: Epoch| epoch.start_slot(T::EthSpec::slots_per_epoch());
+
         if local.network_id != remote.network_id {
+            // The node is on a different network, disconnect them.
             info!(
                 self.log, "HandshakeFailure";
                 "peer" => format!("{:?}", peer_id),
                 "reason" => "network_id"
             );
 
             network.disconnect(peer_id.clone(), GoodbyeReason::IrreleventNetwork);
-        // Disconnect nodes if our finalized epoch is greater than thieirs, and their finalized
-        // epoch is not in our chain. Viz., they are on another chain.
-        //
-        // If the local or remote have a `latest_finalized_root == ZERO_HASH`, skips checks about
-        // the finalized_root. The logic is akward and I think we're better without it.
-        } else if (local.latest_finalized_epoch >= remote.latest_finalized_epoch)
-            && (!self
-                .chain
-                .rev_iter_block_roots(local.best_slot)
-                .any(|(root, _slot)| root == remote.latest_finalized_root))
-            && (local.latest_finalized_root != spec.zero_hash)
-            && (remote.latest_finalized_root != spec.zero_hash)
+        } else if remote.latest_finalized_epoch <= local.latest_finalized_epoch
+            && remote.latest_finalized_root != self.chain.spec.zero_hash
+            && local.latest_finalized_root != self.chain.spec.zero_hash
+            && (self.root_at_slot(start_slot(remote.latest_finalized_epoch))
+                != Some(remote.latest_finalized_root))
         {
+            // The remotes finalized epoch is less than or greater than ours, but the block root is
+            // different to the one in our chain.
+            //
+            // Therefore, the node is on a different chain and we should not communicate with them.
             info!(
                 self.log, "HandshakeFailure";
                 "peer" => format!("{:?}", peer_id),
-                "reason" => "wrong_finalized_chain"
+                "reason" => "different finalized chain"
             );
             network.disconnect(peer_id.clone(), GoodbyeReason::IrreleventNetwork);
-        // Process handshakes from peers that seem to be on our chain.
-        } else {
-            info!(self.log, "HandshakeSuccess"; "peer" => format!("{:?}", peer_id));
-            self.known_peers.insert(peer_id.clone(), remote);
-
-            // If we have equal or better finalized epochs and best slots, we require nothing else from
-            // this peer.
-            //
-            // We make an exception when our best slot is 0. Best slot does not indicate wether or
-            // not there is a block at slot zero.
-            if (remote.latest_finalized_epoch <= local.latest_finalized_epoch)
-                && (remote.best_slot <= local.best_slot)
-                && (local.best_slot > 0)
-            {
-                debug!(self.log, "Peer is naive"; "peer" => format!("{:?}", peer_id));
-                return;
-            }
-
-            // If the remote has a higher finalized epoch, request all block roots from our finalized
-            // epoch through to its best slot.
-            if remote.latest_finalized_epoch > local.latest_finalized_epoch {
-                debug!(self.log, "Peer has high finalized epoch"; "peer" => format!("{:?}", peer_id));
-                let start_slot = local
-                    .latest_finalized_epoch
-                    .start_slot(T::EthSpec::slots_per_epoch());
-                let required_slots = remote.best_slot - start_slot;
-
-                self.request_block_roots(
-                    peer_id,
-                    BeaconBlockRootsRequest {
-                        start_slot,
-                        count: required_slots.into(),
-                    },
-                    network,
-                );
-            // If the remote has a greater best slot, request the roots between our best slot and their
-            // best slot.
-            } else if remote.best_slot > local.best_slot {
-                debug!(self.log, "Peer has higher best slot"; "peer" => format!("{:?}", peer_id));
-                let start_slot = local
-                    .latest_finalized_epoch
-                    .start_slot(T::EthSpec::slots_per_epoch());
-                let required_slots = remote.best_slot - start_slot;
-
-                self.request_block_roots(
-                    peer_id,
-                    BeaconBlockRootsRequest {
-                        start_slot,
-                        count: required_slots.into(),
-                    },
-                    network,
-                );
-            } else {
-                debug!(self.log, "Nothing to request from peer"; "peer" => format!("{:?}", peer_id));
-            }
+        } else if remote.latest_finalized_epoch < local.latest_finalized_epoch {
+            // The node has a lower finalized epoch, their chain is not useful to us. There are two
+            // cases where a node can have a lower finalized epoch:
+            //
+            // ## The node is on the same chain
+            //
+            // If a node is on the same chain but has a lower finalized epoch, their head must be
+            // lower than ours. Therefore, we have nothing to request from them.
+            //
+            // ## The node is on a fork
+            //
+            // If a node is on a fork that has a lower finalized epoch, switching to that fork would
+            // cause us to revert a finalized block. This is not permitted, therefore we have no
+            // interest in their blocks.
+            debug!(
+                self.log,
+                "NaivePeer";
+                "peer" => format!("{:?}", peer_id),
+                "reason" => "lower finalized epoch"
+            );
+        } else if self
+            .chain
+            .store
+            .exists::<BeaconBlock>(&remote.best_root)
+            .unwrap_or_else(|_| false)
+        {
+            // If the node's best-block is already known to us, we have nothing to request.
+            debug!(
+                self.log,
+                "NaivePeer";
+                "peer" => format!("{:?}", peer_id),
+                "reason" => "best block is known"
+            );
+        } else {
+            // The remote node has an equal or great finalized epoch and we don't know it's head.
+            //
+            // Therefore, there are some blocks between the local finalized epoch and the remote
+            // head that are worth downloading.
+            debug!(self.log, "UsefulPeer"; "peer" => format!("{:?}", peer_id));
+
+            let start_slot = local
+                .latest_finalized_epoch
+                .start_slot(T::EthSpec::slots_per_epoch());
+            let required_slots = remote.best_slot - start_slot;
+
+            self.request_block_roots(
+                peer_id,
+                BeaconBlockRootsRequest {
+                    start_slot,
+                    count: required_slots.into(),
+                },
+                network,
+            );
         }
     }
 
+    fn root_at_slot(&self, target_slot: Slot) -> Option<Hash256> {
+        self.chain
+            .rev_iter_block_roots(target_slot)
+            .take(1)
+            .find(|(_root, slot)| *slot == target_slot)
+            .map(|(root, _slot)| root)
+    }
+
     /// Handle a `BeaconBlockRoots` request from the peer.
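Note: the rewritten handshake is a strict five-way classification, and branch order matters because each arm assumes the earlier checks failed. A hypothetical condensation of the decision (the enum and names are illustrative, not part of the diff):

```rust
// Evaluated top to bottom, mirroring the if/else-if chain above.
enum HelloOutcome {
    /// `network_id` mismatch: disconnect.
    DifferentNetwork,
    /// Their finalized root is not our root at that slot: disconnect.
    DifferentFinalizedChain,
    /// Lower finalized epoch: either behind us on our own chain, or on a
    /// non-viable fork; nothing to request either way.
    NaiveLowerFinalizedEpoch,
    /// We already store their best block: nothing to request.
    NaiveBestBlockKnown,
    /// Equal-or-better finalized epoch with an unknown head: request
    /// block roots from our finalized epoch up to their best slot.
    UsefulPeer,
}
```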
@@ -275,11 +278,13 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
             .collect();
 
         if roots.len() as u64 != req.count {
-            debug!(
+            warn!(
                 self.log,
                 "BlockRootsRequest";
                 "peer" => format!("{:?}", peer_id),
                 "msg" => "Failed to return all requested hashes",
+                "start_slot" => req.start_slot,
+                "current_slot" => self.chain.current_state().slot,
                 "requested" => req.count,
                 "returned" => roots.len(),
             );
@@ -351,7 +356,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
             BeaconBlockHeadersRequest {
                 start_root: first.block_root,
                 start_slot: first.slot,
-                max_headers: (last.slot - first.slot + 1).as_u64(),
+                max_headers: (last.slot - first.slot).as_u64(),
                 skip_slots: 0,
             },
             network,
@@ -433,8 +438,10 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
             .import_queue
             .enqueue_headers(res.headers, peer_id.clone());
 
+        if !block_roots.is_empty() {
             self.request_block_bodies(peer_id, BeaconBlockBodiesRequest { block_roots }, network);
         }
+    }
 
     /// Handle a `BeaconBlockBodies` request from the peer.
     pub fn on_beacon_block_bodies_request(
@@ -518,9 +525,32 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
         {
             match outcome {
                 BlockProcessingOutcome::Processed { .. } => SHOULD_FORWARD_GOSSIP_BLOCK,
-                BlockProcessingOutcome::ParentUnknown { .. } => {
+                BlockProcessingOutcome::ParentUnknown { parent } => {
+                    // Clean the stale entries from the queue.
+                    self.import_queue.remove_stale();
+
+                    // Add this block to the queue
                     self.import_queue
                         .enqueue_full_blocks(vec![block], peer_id.clone());
+                    trace!(
+                        self.log,
+                        "NewGossipBlock";
+                        "peer" => format!("{:?}", peer_id),
+                    );
+
+                    // Unless the parent is in the queue, request the parent block from the peer.
+                    //
+                    // It is likely that this is duplicate work, given we already send a hello
+                    // request. However, I believe there are some edge-cases where the hello
+                    // message doesn't suffice, so we perform this request as well.
+                    if !self.import_queue.contains_block_root(parent) {
+                        // Send a hello to learn of the clients best slot so we can then sync the required
+                        // parent(s).
+                        network.send_rpc_request(
+                            peer_id.clone(),
+                            RPCRequest::Hello(hello_message(&self.chain)),
+                        );
+                    }
+
                     SHOULD_FORWARD_GOSSIP_BLOCK
                 }
@@ -696,7 +726,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
         if let Ok(outcome) = processing_result {
             match outcome {
                 BlockProcessingOutcome::Processed { block_root } => {
-                    info!(
+                    debug!(
                         self.log, "Imported block from network";
                         "source" => source,
                         "slot" => block.slot,
@@ -713,28 +743,19 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
                         "peer" => format!("{:?}", peer_id),
                     );
 
+                    // Unless the parent is in the queue, request the parent block from the peer.
+                    //
+                    // It is likely that this is duplicate work, given we already send a hello
+                    // request. However, I believe there are some edge-cases where the hello
+                    // message doesn't suffice, so we perform this request as well.
+                    if !self.import_queue.contains_block_root(parent) {
                         // Send a hello to learn of the clients best slot so we can then sync the require
                         // parent(s).
                         network.send_rpc_request(
                             peer_id.clone(),
                             RPCRequest::Hello(hello_message(&self.chain)),
                         );
+                    }
-
-                    // Explicitly request the parent block from the peer.
-                    //
-                    // It is likely that this is duplicate work, given we already send a hello
-                    // request. However, I believe there are some edge-cases where the hello
-                    // message doesn't suffice, so we perform this request as well.
-                    self.request_block_headers(
-                        peer_id,
-                        BeaconBlockHeadersRequest {
-                            start_root: parent,
-                            start_slot: block.slot - 1,
-                            max_headers: 1,
-                            skip_slots: 0,
-                        },
-                        network,
-                    )
                 }
                 BlockProcessingOutcome::FutureSlot {
                     present_slot,
@@ -11,7 +11,7 @@ network = { path = "../network" }
 eth2-libp2p = { path = "../eth2-libp2p" }
 version = { path = "../version" }
 types = { path = "../../eth2/types" }
-ssz = { path = "../../eth2/utils/ssz" }
+eth2_ssz = { path = "../../eth2/utils/ssz" }
 slot_clock = { path = "../../eth2/utils/slot_clock" }
 protos = { path = "../../protos" }
 grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] }
@@ -1,5 +1,7 @@
 use beacon_chain::{BeaconChain, BeaconChainTypes};
 use eth2_libp2p::PubsubMessage;
+use eth2_libp2p::TopicBuilder;
+use eth2_libp2p::SHARD_TOPIC_PREFIX;
 use futures::Future;
 use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink};
 use network::NetworkMessage;
@@ -136,11 +138,10 @@ impl<T: BeaconChainTypes> AttestationService for AttestationServiceInstance<T> {
                     "type" => "valid_attestation",
                 );
 
-                // TODO: Obtain topics from the network service properly.
-                let topic = types::TopicBuilder::new("beacon_chain".to_string()).build();
+                // valid attestation, propagate to the network
+                let topic = TopicBuilder::new(SHARD_TOPIC_PREFIX).build();
                 let message = PubsubMessage::Attestation(attestation);
 
-                // Publish the attestation to the p2p network via gossipsub.
                 self.network_chan
                     .send(NetworkMessage::Publish {
                         topics: vec![topic],
@@ -150,7 +151,7 @@ impl<T: BeaconChainTypes> AttestationService for AttestationServiceInstance<T> {
                         error!(
                             self.log,
                             "PublishAttestation";
-                            "type" => "failed to publish to gossipsub",
+                            "type" => "failed to publish attestation to gossipsub",
                             "error" => format!("{:?}", e)
                         );
                     });
@@ -1,6 +1,7 @@
 use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome};
 use crossbeam_channel;
-use eth2_libp2p::PubsubMessage;
+use eth2_libp2p::BEACON_PUBSUB_TOPIC;
+use eth2_libp2p::{PubsubMessage, TopicBuilder};
 use futures::Future;
 use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink};
 use network::NetworkMessage;
@@ -104,9 +105,8 @@ impl<T: BeaconChainTypes> BeaconBlockService for BeaconBlockServiceInstance<T> {
                     "block_root" => format!("{}", block_root),
                 );
 
-                // TODO: Obtain topics from the network service properly.
-                let topic =
-                    types::TopicBuilder::new("beacon_chain".to_string()).build();
+                // get the network topic to send on
+                let topic = TopicBuilder::new(BEACON_PUBSUB_TOPIC).build();
                 let message = PubsubMessage::Block(block);
 
                 // Publish the block to the p2p network via gossipsub.
@@ -60,8 +60,8 @@ pub fn start_server<T: BeaconChainTypes + Clone + 'static>(
     };
     let attestation_service = {
         let instance = AttestationServiceInstance {
-            chain: beacon_chain.clone(),
             network_chan,
+            chain: beacon_chain.clone(),
             log: log.clone(),
         };
         create_attestation_service(instance)
@@ -1,11 +1,11 @@
-extern crate slog;
-
 mod run;
 
 use clap::{App, Arg};
 use client::{ClientConfig, Eth2Config};
-use eth2_config::{get_data_dir, read_from_file, write_to_file};
-use slog::{crit, o, Drain};
+use env_logger::{Builder, Env};
+use eth2_config::{read_from_file, write_to_file};
+use slog::{crit, o, Drain, Level};
+use std::fs;
 use std::path::PathBuf;
 
 pub const DEFAULT_DATA_DIR: &str = ".lighthouse";
@@ -14,10 +14,8 @@ pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml";
 pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml";
 
 fn main() {
-    let decorator = slog_term::TermDecorator::new().build();
-    let drain = slog_term::CompactFormat::new(decorator).build().fuse();
-    let drain = slog_async::Async::new(drain).build().fuse();
-    let logger = slog::Logger::root(drain, o!());
+    // debugging output for libp2p and external crates
+    Builder::from_env(Env::default()).init();
 
     let matches = App::new("Lighthouse")
         .version(version::version().as_str())
@@ -30,21 +28,48 @@ fn main() {
                 .value_name("DIR")
                 .help("Data directory for keys and databases.")
                 .takes_value(true)
-                .default_value(DEFAULT_DATA_DIR),
         )
         // network related arguments
         .arg(
             Arg::with_name("listen-address")
                 .long("listen-address")
-                .value_name("Listen Address")
-                .help("One or more comma-delimited multi-addresses to listen for p2p connections.")
+                .value_name("Address")
+                .help("The address lighthouse will listen for UDP and TCP connections. (default 127.0.0.1).")
+                .takes_value(true),
+        )
+        .arg(
+            Arg::with_name("maxpeers")
+                .long("maxpeers")
+                .help("The maximum number of peers (default 10).")
                 .takes_value(true),
         )
         .arg(
             Arg::with_name("boot-nodes")
                 .long("boot-nodes")
+                .allow_hyphen_values(true)
                 .value_name("BOOTNODES")
-                .help("One or more comma-delimited multi-addresses to bootstrap the p2p network.")
+                .help("One or more comma-delimited base64-encoded ENR's to bootstrap the p2p network.")
+                .takes_value(true),
+        )
+        .arg(
+            Arg::with_name("port")
+                .long("port")
+                .value_name("Lighthouse Port")
+                .help("The TCP/UDP port to listen on. The UDP port can be modified by the --discovery-port flag.")
+                .takes_value(true),
+        )
+        .arg(
+            Arg::with_name("discovery-port")
+                .long("disc-port")
+                .value_name("DiscoveryPort")
+                .help("The discovery UDP port.")
+                .takes_value(true),
+        )
+        .arg(
+            Arg::with_name("discovery-address")
+                .long("discovery-address")
+                .value_name("Address")
+                .help("The IP address to broadcast to other peers on how to reach this node.")
                 .takes_value(true),
         )
         // rpc related arguments
@@ -58,14 +83,13 @@ fn main() {
         .arg(
             Arg::with_name("rpc-address")
                 .long("rpc-address")
-                .value_name("RPCADDRESS")
+                .value_name("Address")
                 .help("Listen address for RPC endpoint.")
                 .takes_value(true),
         )
         .arg(
             Arg::with_name("rpc-port")
                 .long("rpc-port")
-                .value_name("RPCPORT")
                 .help("Listen port for RPC endpoint.")
                 .takes_value(true),
         )
@@ -73,21 +97,19 @@ fn main() {
         .arg(
             Arg::with_name("http")
                 .long("http")
-                .value_name("HTTP")
                 .help("Enable the HTTP server.")
                 .takes_value(false),
         )
         .arg(
             Arg::with_name("http-address")
                 .long("http-address")
-                .value_name("HTTPADDRESS")
+                .value_name("Address")
                 .help("Listen address for the HTTP server.")
                 .takes_value(true),
         )
         .arg(
             Arg::with_name("http-port")
                 .long("http-port")
-                .value_name("HTTPPORT")
                 .help("Listen port for the HTTP server.")
                 .takes_value(true),
         )
@@ -116,19 +138,60 @@ fn main() {
                 .short("r")
                 .help("When present, genesis will be within 30 minutes prior. Only for testing"),
         )
+        .arg(
+            Arg::with_name("verbosity")
+                .short("v")
+                .multiple(true)
+                .help("Sets the verbosity level")
+                .takes_value(true),
+        )
         .get_matches();
 
-    let data_dir = match get_data_dir(&matches, PathBuf::from(DEFAULT_DATA_DIR)) {
-        Ok(dir) => dir,
-        Err(e) => {
-            crit!(logger, "Failed to initialize data dir"; "error" => format!("{:?}", e));
+    // build the initial logger
+    let decorator = slog_term::TermDecorator::new().build();
+    let drain = slog_term::CompactFormat::new(decorator).build().fuse();
+    let drain = slog_async::Async::new(drain).build();
+
+    let drain = match matches.occurrences_of("verbosity") {
+        0 => drain.filter_level(Level::Info),
+        1 => drain.filter_level(Level::Debug),
+        2 => drain.filter_level(Level::Trace),
+        _ => drain.filter_level(Level::Info),
+    };
+
+    let log = slog::Logger::root(drain.fuse(), o!());
+
+    let data_dir = match matches
+        .value_of("datadir")
+        .and_then(|v| Some(PathBuf::from(v)))
+    {
+        Some(v) => v,
+        None => {
+            // use the default
+            let mut default_dir = match dirs::home_dir() {
+                Some(v) => v,
+                None => {
+                    crit!(log, "Failed to find a home directory");
                     return;
                 }
             };
+            default_dir.push(DEFAULT_DATA_DIR);
+            PathBuf::from(default_dir)
+        }
+    };
+
+    // create the directory if needed
+    match fs::create_dir_all(&data_dir) {
+        Ok(_) => {}
+        Err(e) => {
+            crit!(log, "Failed to initialize data dir"; "error" => format!("{}", e));
+            return;
+        }
+    }
 
     let client_config_path = data_dir.join(CLIENT_CONFIG_FILENAME);
 
-    // Attempt to lead the `ClientConfig` from disk.
+    // Attempt to load the `ClientConfig` from disk.
     //
     // If file doesn't exist, create a new, default one.
     let mut client_config = match read_from_file::<ClientConfig>(client_config_path.clone()) {
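Note: the data-directory logic above (and its twin in the account manager) is: take the CLI value, else fall back to `$HOME/.lighthouse`, then `create_dir_all`. As a sketch, the same flow extracted into a helper (`resolve_data_dir` is a hypothetical name; the PR keeps the logic inline in `main`):

```rust
use std::fs;
use std::path::PathBuf;

fn resolve_data_dir(cli_value: Option<&str>, default_name: &str) -> Option<PathBuf> {
    let dir = match cli_value {
        Some(v) => PathBuf::from(v),
        None => {
            // Fall back to `$HOME/<default_name>`, e.g. `~/.lighthouse`.
            let mut home = dirs::home_dir()?;
            home.push(default_name);
            home
        }
    };
    // Create the directory eagerly so later config reads/writes cannot
    // fail on a missing path.
    fs::create_dir_all(&dir).ok()?;
    Some(dir)
}
```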
@@ -136,13 +199,13 @@ fn main() {
         Ok(None) => {
             let default = ClientConfig::default();
             if let Err(e) = write_to_file(client_config_path, &default) {
-                crit!(logger, "Failed to write default ClientConfig to file"; "error" => format!("{:?}", e));
+                crit!(log, "Failed to write default ClientConfig to file"; "error" => format!("{:?}", e));
                 return;
             }
             default
         }
         Err(e) => {
-            crit!(logger, "Failed to load a ChainConfig file"; "error" => format!("{:?}", e));
+            crit!(log, "Failed to load a ChainConfig file"; "error" => format!("{:?}", e));
             return;
         }
     };
@@ -154,7 +217,7 @@ fn main() {
     match client_config.apply_cli_args(&matches) {
         Ok(()) => (),
         Err(s) => {
-            crit!(logger, "Failed to parse ClientConfig CLI arguments"; "error" => s);
+            crit!(log, "Failed to parse ClientConfig CLI arguments"; "error" => s);
             return;
         }
     };
@@ -173,13 +236,13 @@ fn main() {
             _ => unreachable!(), // Guarded by slog.
         };
         if let Err(e) = write_to_file(eth2_config_path, &default) {
-            crit!(logger, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e));
+            crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e));
             return;
         }
         default
     }
     Err(e) => {
-        crit!(logger, "Failed to load/generate an Eth2Config"; "error" => format!("{:?}", e));
+        crit!(log, "Failed to load/generate an Eth2Config"; "error" => format!("{:?}", e));
         return;
     }
 };
@@ -188,13 +251,13 @@ fn main() {
     match eth2_config.apply_cli_args(&matches) {
         Ok(()) => (),
         Err(s) => {
-            crit!(logger, "Failed to parse Eth2Config CLI arguments"; "error" => s);
+            crit!(log, "Failed to parse Eth2Config CLI arguments"; "error" => s);
             return;
         }
     };
 
-    match run::run_beacon_node(client_config, eth2_config, &logger) {
+    match run::run_beacon_node(client_config, eth2_config, &log) {
         Ok(_) => {}
-        Err(e) => crit!(logger, "Beacon node failed to start"; "reason" => format!("{:}", e)),
+        Err(e) => crit!(log, "Beacon node failed to start"; "reason" => format!("{:}", e)),
     }
 }
|
@ -41,6 +41,15 @@ pub fn run_beacon_node(
|
|||||||
"This software is EXPERIMENTAL and provides no guarantees or warranties."
|
"This software is EXPERIMENTAL and provides no guarantees or warranties."
|
||||||
);
|
);
|
||||||
|
|
||||||
|
info!(
|
||||||
|
log,
|
||||||
|
"Starting beacon node";
|
||||||
|
"p2p_listen_address" => format!("{:?}", &other_client_config.network.listen_address),
|
||||||
|
"data_dir" => format!("{:?}", other_client_config.data_dir()),
|
||||||
|
"spec_constants" => &spec_constants,
|
||||||
|
"db_type" => &other_client_config.db_type,
|
||||||
|
);
|
||||||
|
|
||||||
let result = match (db_type.as_str(), spec_constants.as_str()) {
|
let result = match (db_type.as_str(), spec_constants.as_str()) {
|
||||||
("disk", "minimal") => run::<ClientType<DiskStore, MinimalEthSpec>>(
|
("disk", "minimal") => run::<ClientType<DiskStore, MinimalEthSpec>>(
|
||||||
&db_path,
|
&db_path,
|
||||||
@@ -80,17 +89,6 @@ pub fn run_beacon_node(
         }
     };
 
-    if result.is_ok() {
-        info!(
-            log,
-            "Started beacon node";
-            "p2p_listen_addresses" => format!("{:?}", &other_client_config.network.listen_addresses()),
-            "data_dir" => format!("{:?}", other_client_config.data_dir()),
-            "spec_constants" => &spec_constants,
-            "db_type" => &other_client_config.db_type,
-        );
-    }
-
     result
 }
 
@@ -14,7 +14,7 @@ bytes = "0.4.10"
 db-key = "0.0.5"
 leveldb = "0.8.4"
 parking_lot = "0.7"
-ssz = { path = "../../eth2/utils/ssz" }
-ssz_derive = { path = "../../eth2/utils/ssz_derive" }
+eth2_ssz = { path = "../../eth2/utils/ssz" }
+eth2_ssz_derive = { path = "../../eth2/utils/ssz_derive" }
 tree_hash = { path = "../../eth2/utils/tree_hash" }
 types = { path = "../../eth2/types" }
@@ -139,8 +139,7 @@ impl<'a, T: EthSpec, U: Store> Iterator for BlockRootsIterator<'a, T, U> {
             Err(BeaconStateError::SlotOutOfBounds) => {
                 // Read a `BeaconState` from the store that has access to prior historical root.
                 let beacon_state: BeaconState<T> = {
-                    // Load the earlier state from disk. Skip forward one slot, because a state
-                    // doesn't return it's own state root.
+                    // Load the earliest state from disk.
                     let new_state_root = self.beacon_state.get_oldest_state_root().ok()?;
 
                     self.store.get(&new_state_root).ok()?
@@ -7,7 +7,7 @@ edition = "2018"
 [dependencies]
 parking_lot = "0.7"
 store = { path = "../../beacon_node/store" }
-ssz = { path = "../utils/ssz" }
+eth2_ssz = { path = "../utils/ssz" }
 state_processing = { path = "../state_processing" }
 types = { path = "../types" }
 log = "0.4.6"
@@ -5,9 +5,11 @@ authors = ["Michael Sproul <michael@sigmaprime.io>"]
 edition = "2018"
 
 [dependencies]
+boolean-bitfield = { path = "../utils/boolean-bitfield" }
 int_to_bytes = { path = "../utils/int_to_bytes" }
 itertools = "0.8"
 parking_lot = "0.7"
 types = { path = "../types" }
 state_processing = { path = "../state_processing" }
-ssz = { path = "../utils/ssz" }
+eth2_ssz = { path = "../utils/ssz" }
+eth2_ssz_derive = { path = "../utils/ssz_derive" }

eth2/operation_pool/src/attestation.rs (new file, 91 lines)
@@ -0,0 +1,91 @@
+use crate::max_cover::MaxCover;
+use boolean_bitfield::BooleanBitfield;
+use types::{Attestation, BeaconState, EthSpec};
+
+pub struct AttMaxCover<'a> {
+    /// Underlying attestation.
+    att: &'a Attestation,
+    /// Bitfield of validators that are covered by this attestation.
+    fresh_validators: BooleanBitfield,
+}
+
+impl<'a> AttMaxCover<'a> {
+    pub fn new(att: &'a Attestation, fresh_validators: BooleanBitfield) -> Self {
+        Self {
+            att,
+            fresh_validators,
+        }
+    }
+}
+
+impl<'a> MaxCover for AttMaxCover<'a> {
+    type Object = Attestation;
+    type Set = BooleanBitfield;
+
+    fn object(&self) -> Attestation {
+        self.att.clone()
+    }
+
+    fn covering_set(&self) -> &BooleanBitfield {
+        &self.fresh_validators
+    }
+
+    /// Sneaky: we keep all the attestations together in one bucket, even though
+    /// their aggregation bitfields refer to different committees. In order to avoid
+    /// confusing committees when updating covering sets, we update only those attestations
+    /// whose shard and epoch match the attestation being included in the solution, by the logic
+    /// that a shard and epoch uniquely identify a committee.
+    fn update_covering_set(
+        &mut self,
+        best_att: &Attestation,
+        covered_validators: &BooleanBitfield,
+    ) {
+        if self.att.data.shard == best_att.data.shard
+            && self.att.data.target_epoch == best_att.data.target_epoch
+        {
+            self.fresh_validators.difference_inplace(covered_validators);
+        }
+    }
+
+    fn score(&self) -> usize {
+        self.fresh_validators.num_set_bits()
+    }
+}
+
+/// Extract the validators for which `attestation` would be their earliest in the epoch.
+///
+/// The reward paid to a proposer for including an attestation is proportional to the number
+/// of validators for which the included attestation is their first in the epoch. The attestation
+/// is judged against the state's `current_epoch_attestations` or `previous_epoch_attestations`
+/// depending on when it was created, and all those validators who have already attested are
+/// removed from the `aggregation_bitfield` before returning it.
+// TODO: This could be optimised with a map from validator index to whether that validator has
+// attested in each of the current and previous epochs. Currently quadratic in number of validators.
+pub fn earliest_attestation_validators<T: EthSpec>(
+    attestation: &Attestation,
+    state: &BeaconState<T>,
+) -> BooleanBitfield {
+    // Bitfield of validators whose attestations are new/fresh.
+    let mut new_validators = attestation.aggregation_bitfield.clone();
+
+    let state_attestations = if attestation.data.target_epoch == state.current_epoch() {
+        &state.current_epoch_attestations
+    } else if attestation.data.target_epoch == state.previous_epoch() {
+        &state.previous_epoch_attestations
+    } else {
+        return BooleanBitfield::from_elem(attestation.aggregation_bitfield.len(), false);
+    };
+
+    state_attestations
+        .iter()
+        // In a single epoch, an attester should only be attesting for one shard.
+        // TODO: we avoid including slashable attestations in the state here,
+        // but maybe we should do something else with them (like construct slashings).
+        .filter(|existing_attestation| existing_attestation.data.shard == attestation.data.shard)
+        .for_each(|existing_attestation| {
+            // Remove the validators who have signed the existing attestation (they are not new)
+            new_validators.difference_inplace(&existing_attestation.aggregation_bitfield);
+        });
+
+    new_validators
+}
|
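To see the freshness computation above in isolation: a minimal sketch, assuming plain `Vec<bool>` in place of `BooleanBitfield` (the `difference_inplace` helper below is a hand-rolled stand-in mirroring the crate's method name, not its API):

// Incoming aggregate covers validators 0, 1 and 3; an attestation already in
// the state covers 1 and 2; only 0 and 3 remain "fresh", so the score is 2.
fn difference_inplace(lhs: &mut [bool], rhs: &[bool]) {
    // Clear every bit of `lhs` that is already set in `rhs`.
    for (l, r) in lhs.iter_mut().zip(rhs) {
        if *r {
            *l = false;
        }
    }
}

fn main() {
    let mut fresh = vec![true, true, false, true];
    let existing = vec![false, true, true, false];

    difference_inplace(&mut fresh, &existing);

    assert_eq!(fresh.iter().filter(|b| **b).count(), 2);
}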
eth2/operation_pool/src/attestation_id.rs (new file, 38 lines)
@@ -0,0 +1,38 @@
+use int_to_bytes::int_to_bytes8;
+use ssz::ssz_encode;
+use ssz_derive::{Decode, Encode};
+use types::{AttestationData, BeaconState, ChainSpec, Domain, Epoch, EthSpec};
+
+/// Serialized `AttestationData` augmented with a domain to encode the fork info.
+#[derive(PartialEq, Eq, Clone, Hash, Debug, PartialOrd, Ord, Encode, Decode)]
+pub struct AttestationId {
+    v: Vec<u8>,
+}
+
+/// Number of domain bytes that the end of an attestation ID is padded with.
+const DOMAIN_BYTES_LEN: usize = 8;
+
+impl AttestationId {
+    pub fn from_data<T: EthSpec>(
+        attestation: &AttestationData,
+        state: &BeaconState<T>,
+        spec: &ChainSpec,
+    ) -> Self {
+        let mut bytes = ssz_encode(attestation);
+        let epoch = attestation.target_epoch;
+        bytes.extend_from_slice(&AttestationId::compute_domain_bytes(epoch, state, spec));
+        AttestationId { v: bytes }
+    }
+
+    pub fn compute_domain_bytes<T: EthSpec>(
+        epoch: Epoch,
+        state: &BeaconState<T>,
+        spec: &ChainSpec,
+    ) -> Vec<u8> {
+        int_to_bytes8(spec.get_domain(epoch, Domain::Attestation, &state.fork))
+    }
+
+    pub fn domain_bytes_match(&self, domain_bytes: &[u8]) -> bool {
+        &self.v[self.v.len() - DOMAIN_BYTES_LEN..] == domain_bytes
+    }
+}
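The ID scheme above reduces to "payload bytes plus an 8-byte domain suffix, matched by comparing the tail of the buffer". A self-contained sketch, assuming `u64` little-endian bytes in place of `int_to_bytes8` and a raw byte payload in place of SSZ-encoded `AttestationData`:

const DOMAIN_BYTES_LEN: usize = 8;

fn make_id(payload: &[u8], domain: u64) -> Vec<u8> {
    // Append the domain bytes after the serialized payload.
    let mut id = payload.to_vec();
    id.extend_from_slice(&domain.to_le_bytes());
    id
}

fn domain_bytes_match(id: &[u8], domain_bytes: &[u8]) -> bool {
    // Compare only the fixed-length suffix.
    &id[id.len() - DOMAIN_BYTES_LEN..] == domain_bytes
}

fn main() {
    let id = make_id(b"attestation-data", 42);
    assert!(domain_bytes_match(&id, &42u64.to_le_bytes()));
    assert!(!domain_bytes_match(&id, &7u64.to_le_bytes()));
}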
@@ -1,13 +1,19 @@
-use int_to_bytes::int_to_bytes8;
+mod attestation;
+mod attestation_id;
+mod max_cover;
+mod persistence;
+
+pub use persistence::PersistedOperationPool;
+
+use attestation::{earliest_attestation_validators, AttMaxCover};
+use attestation_id::AttestationId;
 use itertools::Itertools;
+use max_cover::maximum_cover;
 use parking_lot::RwLock;
-use ssz::ssz_encode;
 use state_processing::per_block_processing::errors::{
     AttestationValidationError, AttesterSlashingValidationError, DepositValidationError,
     ExitValidationError, ProposerSlashingValidationError, TransferValidationError,
 };
-#[cfg(not(test))]
-use state_processing::per_block_processing::verify_deposit_merkle_proof;
 use state_processing::per_block_processing::{
     get_slashable_indices_modular, validate_attestation,
     validate_attestation_time_independent_only, verify_attester_slashing, verify_exit,
@@ -16,13 +22,12 @@ use state_processing::per_block_processing::{
 };
 use std::collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet};
 use std::marker::PhantomData;
-use types::chain_spec::Domain;
 use types::{
-    Attestation, AttestationData, AttesterSlashing, BeaconState, ChainSpec, Deposit, Epoch,
-    EthSpec, ProposerSlashing, Transfer, Validator, VoluntaryExit,
+    Attestation, AttesterSlashing, BeaconState, ChainSpec, Deposit, EthSpec, ProposerSlashing,
+    Transfer, Validator, VoluntaryExit,
 };
 
-#[derive(Default)]
+#[derive(Default, Debug)]
 pub struct OperationPool<T: EthSpec + Default> {
     /// Map from attestation ID (see below) to vectors of attestations.
     attestations: RwLock<HashMap<AttestationId, Vec<Attestation>>>,
@@ -43,71 +48,6 @@ pub struct OperationPool<T: EthSpec + Default> {
     _phantom: PhantomData<T>,
 }
 
-/// Serialized `AttestationData` augmented with a domain to encode the fork info.
-#[derive(PartialEq, Eq, Clone, Hash, Debug)]
-struct AttestationId(Vec<u8>);
-
-/// Number of domain bytes that the end of an attestation ID is padded with.
-const DOMAIN_BYTES_LEN: usize = 8;
-
-impl AttestationId {
-    fn from_data<T: EthSpec>(
-        attestation: &AttestationData,
-        state: &BeaconState<T>,
-        spec: &ChainSpec,
-    ) -> Self {
-        let mut bytes = ssz_encode(attestation);
-        let epoch = attestation.target_epoch;
-        bytes.extend_from_slice(&AttestationId::compute_domain_bytes(epoch, state, spec));
-        AttestationId(bytes)
-    }
-
-    fn compute_domain_bytes<T: EthSpec>(
-        epoch: Epoch,
-        state: &BeaconState<T>,
-        spec: &ChainSpec,
-    ) -> Vec<u8> {
-        int_to_bytes8(spec.get_domain(epoch, Domain::Attestation, &state.fork))
-    }
-
-    fn domain_bytes_match(&self, domain_bytes: &[u8]) -> bool {
-        &self.0[self.0.len() - DOMAIN_BYTES_LEN..] == domain_bytes
-    }
-}
-
-/// Compute a fitness score for an attestation.
-///
-/// The score is calculated by determining the number of *new* attestations that
-/// the aggregate attestation introduces, and is proportional to the size of the reward we will
-/// receive for including it in a block.
-// TODO: this could be optimised with a map from validator index to whether that validator has
-// attested in each of the current and previous epochs. Currently quadratic in number of validators.
-fn attestation_score<T: EthSpec>(attestation: &Attestation, state: &BeaconState<T>) -> usize {
-    // Bitfield of validators whose attestations are new/fresh.
-    let mut new_validators = attestation.aggregation_bitfield.clone();
-
-    let state_attestations = if attestation.data.target_epoch == state.current_epoch() {
-        &state.current_epoch_attestations
-    } else if attestation.data.target_epoch == state.previous_epoch() {
-        &state.previous_epoch_attestations
-    } else {
-        return 0;
-    };
-
-    state_attestations
-        .iter()
-        // In a single epoch, an attester should only be attesting for one shard.
-        // TODO: we avoid including slashable attestations in the state here,
-        // but maybe we should do something else with them (like construct slashings).
-        .filter(|current_attestation| current_attestation.data.shard == attestation.data.shard)
-        .for_each(|current_attestation| {
-            // Remove the validators who have signed the existing attestation (they are not new)
-            new_validators.difference_inplace(&current_attestation.aggregation_bitfield);
-        });
-
-    new_validators.num_set_bits()
-}
-
 #[derive(Debug, PartialEq, Clone)]
 pub enum DepositInsertStatus {
     /// The deposit was not already in the pool.
@@ -176,29 +116,19 @@ impl<T: EthSpec> OperationPool<T> {
         let current_epoch = state.current_epoch();
         let prev_domain_bytes = AttestationId::compute_domain_bytes(prev_epoch, state, spec);
         let curr_domain_bytes = AttestationId::compute_domain_bytes(current_epoch, state, spec);
-        self.attestations
-            .read()
+        let reader = self.attestations.read();
+        let valid_attestations = reader
            .iter()
            .filter(|(key, _)| {
                key.domain_bytes_match(&prev_domain_bytes)
                    || key.domain_bytes_match(&curr_domain_bytes)
            })
            .flat_map(|(_, attestations)| attestations)
-            // That are not superseded by an attestation included in the state...
-            .filter(|attestation| !superior_attestation_exists_in_state(state, attestation))
            // That are valid...
            .filter(|attestation| validate_attestation(state, attestation, spec).is_ok())
-            // Scored by the number of new attestations they introduce (descending)
-            // TODO: need to consider attestations introduced in THIS block
-            .map(|att| (att, attestation_score(att, state)))
-            // Don't include any useless attestations (score 0)
-            .filter(|&(_, score)| score != 0)
-            .sorted_by_key(|&(_, score)| std::cmp::Reverse(score))
-            // Limited to the maximum number of attestations per block
-            .take(spec.max_attestations as usize)
-            .map(|(att, _)| att)
-            .cloned()
-            .collect()
+            .map(|att| AttMaxCover::new(att, earliest_attestation_validators(att, state)));
+
+        maximum_cover(valid_attestations, spec.max_attestations as usize)
     }
 
     /// Remove attestations which are too old to be included in a block.
@@ -219,20 +149,14 @@ impl<T: EthSpec> OperationPool<T> {
     /// Add a deposit to the pool.
     ///
     /// No two distinct deposits should be added with the same index.
-    #[cfg_attr(test, allow(unused_variables))]
     pub fn insert_deposit(
         &self,
         deposit: Deposit,
-        state: &BeaconState<T>,
-        spec: &ChainSpec,
     ) -> Result<DepositInsertStatus, DepositValidationError> {
         use DepositInsertStatus::*;
 
         match self.deposits.write().entry(deposit.index) {
             Entry::Vacant(entry) => {
-                // TODO: fix tests to generate valid merkle proofs
-                #[cfg(not(test))]
-                verify_deposit_merkle_proof(state, &deposit, spec)?;
                 entry.insert(deposit);
                 Ok(Fresh)
             }
@@ -240,9 +164,6 @@ impl<T: EthSpec> OperationPool<T> {
                 if entry.get() == &deposit {
                     Ok(Duplicate)
                 } else {
-                    // TODO: fix tests to generate valid merkle proofs
-                    #[cfg(not(test))]
-                    verify_deposit_merkle_proof(state, &deposit, spec)?;
                     Ok(Replaced(Box::new(entry.insert(deposit))))
                 }
             }
@@ -253,7 +174,9 @@ impl<T: EthSpec> OperationPool<T> {
     ///
     /// Take at most the maximum number of deposits, beginning from the current deposit index.
     pub fn get_deposits(&self, state: &BeaconState<T>, spec: &ChainSpec) -> Vec<Deposit> {
-        // TODO: might want to re-check the Merkle proof to account for Eth1 forking
+        // TODO: We need to update the Merkle proofs for existing deposits as more deposits
+        // are added. It probably makes sense to construct the proofs from scratch when forming
+        // a block, using fresh info from the ETH1 chain for the current deposit root.
         let start_idx = state.deposit_index;
         (start_idx..start_idx + spec.max_deposits)
             .map(|idx| self.deposits.read().get(&idx).cloned())
@@ -484,34 +407,6 @@ impl<T: EthSpec> OperationPool<T> {
     }
 }
 
-/// Returns `true` if the state already contains a `PendingAttestation` that is superior to the
-/// given `attestation`.
-///
-/// A validator has nothing to gain from re-including an attestation and it adds load to the
-/// network.
-///
-/// An existing `PendingAttestation` is superior to an existing `attestation` if:
-///
-/// - Their `AttestationData` is equal.
-/// - `attestation` does not contain any signatures that `PendingAttestation` does not have.
-fn superior_attestation_exists_in_state<T: EthSpec>(
-    state: &BeaconState<T>,
-    attestation: &Attestation,
-) -> bool {
-    state
-        .current_epoch_attestations
-        .iter()
-        .chain(state.previous_epoch_attestations.iter())
-        .any(|existing_attestation| {
-            let bitfield = &attestation.aggregation_bitfield;
-            let existing_bitfield = &existing_attestation.aggregation_bitfield;
-
-            existing_attestation.data == attestation.data
-                && bitfield.intersection(existing_bitfield).num_set_bits()
-                    == bitfield.num_set_bits()
-        })
-}
-
 /// Filter up to a maximum number of operations out of an iterator.
 fn filter_limit_operations<'a, T: 'a, I, F>(operations: I, filter: F, limit: u64) -> Vec<T>
 where
@@ -547,6 +442,18 @@ fn prune_validator_hash_map<T, F, E: EthSpec>(
     });
 }
 
+/// Compare two operation pools.
+impl<T: EthSpec + Default> PartialEq for OperationPool<T> {
+    fn eq(&self, other: &Self) -> bool {
+        *self.attestations.read() == *other.attestations.read()
+            && *self.deposits.read() == *other.deposits.read()
+            && *self.attester_slashings.read() == *other.attester_slashings.read()
+            && *self.proposer_slashings.read() == *other.proposer_slashings.read()
+            && *self.voluntary_exits.read() == *other.voluntary_exits.read()
+            && *self.transfers.read() == *other.transfers.read()
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::DepositInsertStatus::*;
@@ -557,22 +464,15 @@ mod tests {
     #[test]
     fn insert_deposit() {
         let rng = &mut XorShiftRng::from_seed([42; 16]);
-        let (ref spec, ref state) = test_state(rng);
-        let op_pool = OperationPool::new();
+        let op_pool = OperationPool::<MinimalEthSpec>::new();
         let deposit1 = make_deposit(rng);
         let mut deposit2 = make_deposit(rng);
        deposit2.index = deposit1.index;
 
+        assert_eq!(op_pool.insert_deposit(deposit1.clone()), Ok(Fresh));
+        assert_eq!(op_pool.insert_deposit(deposit1.clone()), Ok(Duplicate));
         assert_eq!(
-            op_pool.insert_deposit(deposit1.clone(), state, spec),
-            Ok(Fresh)
-        );
-        assert_eq!(
-            op_pool.insert_deposit(deposit1.clone(), state, spec),
-            Ok(Duplicate)
-        );
-        assert_eq!(
-            op_pool.insert_deposit(deposit2, state, spec),
+            op_pool.insert_deposit(deposit2),
             Ok(Replaced(Box::new(deposit1)))
         );
     }
@@ -591,10 +491,7 @@ mod tests {
         let deposits = dummy_deposits(rng, start, max_deposits + extra);
 
         for deposit in &deposits {
-            assert_eq!(
-                op_pool.insert_deposit(deposit.clone(), &state, &spec),
-                Ok(Fresh)
-            );
+            assert_eq!(op_pool.insert_deposit(deposit.clone()), Ok(Fresh));
         }
 
         state.deposit_index = start + offset;
@@ -610,8 +507,7 @@ mod tests {
     #[test]
     fn prune_deposits() {
         let rng = &mut XorShiftRng::from_seed([42; 16]);
-        let (spec, state) = test_state(rng);
-        let op_pool = OperationPool::new();
+        let op_pool = OperationPool::<MinimalEthSpec>::new();
 
         let start1 = 100;
         // test is super slow in debug mode if this parameter is too high
@@ -623,7 +519,7 @@ mod tests {
         let deposits2 = dummy_deposits(rng, start2, count);
 
         for d in deposits1.into_iter().chain(deposits2) {
-            assert!(op_pool.insert_deposit(d, &state, &spec).is_ok());
+            assert!(op_pool.insert_deposit(d).is_ok());
         }
 
         assert_eq!(op_pool.num_deposits(), 2 * count as usize);
@@ -734,15 +630,13 @@ mod tests {
         state_builder.teleport_to_slot(slot);
         state_builder.build_caches(&spec).unwrap();
         let (state, keypairs) = state_builder.build();
 
         (state, keypairs, MainnetEthSpec::default_spec())
     }
 
     #[test]
-    fn test_attestation_score() {
+    fn test_earliest_attestation() {
         let (ref mut state, ref keypairs, ref spec) =
             attestation_test_state::<MainnetEthSpec>(1);
 
         let slot = state.slot - 1;
         let committees = state
             .get_crosslink_committees_at_slot(slot)
@@ -775,9 +669,8 @@ mod tests {
 
         assert_eq!(
             att1.aggregation_bitfield.num_set_bits(),
-            attestation_score(&att1, state)
+            earliest_attestation_validators(&att1, state).num_set_bits()
         );
 
         state.current_epoch_attestations.push(PendingAttestation {
             aggregation_bitfield: att1.aggregation_bitfield.clone(),
             data: att1.data.clone(),
@@ -785,7 +678,10 @@ mod tests {
             proposer_index: 0,
         });
 
-        assert_eq!(cc.committee.len() - 2, attestation_score(&att2, state));
+        assert_eq!(
+            cc.committee.len() - 2,
+            earliest_attestation_validators(&att2, state).num_set_bits()
+        );
     }
 }
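The motivation for replacing the sort-by-score pipeline above with maximum cover: picking the top-scoring attestations up front can double-count validators whose aggregation bits overlap. A toy model, with `HashSet<usize>` standing in for aggregation bitfields (illustrative only, not the pool's types):

use std::collections::HashSet;

fn main() {
    let a: HashSet<usize> = [1, 2, 3, 4].iter().copied().collect();
    let b: HashSet<usize> = [1, 2, 3].iter().copied().collect(); // subset of `a`
    let c: HashSet<usize> = [5, 6].iter().copied().collect();

    // Sorting by initial score and taking 2 picks `a` then `b`: 4 distinct validators.
    let sorted_pick: HashSet<usize> = a.union(&b).copied().collect();
    assert_eq!(sorted_pick.len(), 4);

    // Greedy max cover re-scores after each pick: `b` drops to 0, so it picks
    // `a` then `c`: 6 distinct validators rewarded.
    let cover_pick: HashSet<usize> = a.union(&c).copied().collect();
    assert_eq!(cover_pick.len(), 6);
}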
eth2/operation_pool/src/max_cover.rs (new file, 189 lines)
@@ -0,0 +1,189 @@
+/// Trait for types that we can compute a maximum cover for.
+///
+/// Terminology:
+/// * `item`: something that implements this trait
+/// * `element`: something contained in a set, and covered by the covering set of an item
+/// * `object`: something extracted from an item in order to comprise a solution
+/// See: https://en.wikipedia.org/wiki/Maximum_coverage_problem
+pub trait MaxCover {
+    /// The result type, of which we would eventually like a collection of maximal quality.
+    type Object;
+    /// The type used to represent sets.
+    type Set: Clone;
+
+    /// Extract an object for inclusion in a solution.
+    fn object(&self) -> Self::Object;
+
+    /// Get the set of elements covered.
+    fn covering_set(&self) -> &Self::Set;
+    /// Update the set of items covered, for the inclusion of some object in the solution.
+    fn update_covering_set(&mut self, max_obj: &Self::Object, max_set: &Self::Set);
+    /// The quality of this item's covering set, usually its cardinality.
+    fn score(&self) -> usize;
+}
+
+/// Helper struct to track which items of the input are still available for inclusion.
+/// Saves removing elements from the work vector.
+struct MaxCoverItem<T> {
+    item: T,
+    available: bool,
+}
+
+impl<T> MaxCoverItem<T> {
+    fn new(item: T) -> Self {
+        MaxCoverItem {
+            item,
+            available: true,
+        }
+    }
+}
+
+/// Compute an approximate maximum cover using a greedy algorithm.
+///
+/// * Time complexity: `O(limit * items_iter.len())`
+/// * Space complexity: `O(item_iter.len())`
+pub fn maximum_cover<'a, I, T>(items_iter: I, limit: usize) -> Vec<T::Object>
+where
+    I: IntoIterator<Item = T>,
+    T: MaxCover,
+{
+    // Construct an initial vec of all items, marked available.
+    let mut all_items: Vec<_> = items_iter
+        .into_iter()
+        .map(MaxCoverItem::new)
+        .filter(|x| x.item.score() != 0)
+        .collect();
+
+    let mut result = vec![];
+
+    for _ in 0..limit {
+        // Select the item with the maximum score.
+        let (best_item, best_cover) = match all_items
+            .iter_mut()
+            .filter(|x| x.available && x.item.score() != 0)
+            .max_by_key(|x| x.item.score())
+        {
+            Some(x) => {
+                x.available = false;
+                (x.item.object(), x.item.covering_set().clone())
+            }
+            None => return result,
+        };
+
+        // Update the covering sets of the other items, for the inclusion of the selected item.
+        // Items covered by the selected item can't be re-covered.
+        all_items
+            .iter_mut()
+            .filter(|x| x.available && x.item.score() != 0)
+            .for_each(|x| x.item.update_covering_set(&best_item, &best_cover));
+
+        result.push(best_item);
+    }
+
+    result
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use std::iter::FromIterator;
+    use std::{collections::HashSet, hash::Hash};
+
+    impl<T> MaxCover for HashSet<T>
+    where
+        T: Clone + Eq + Hash,
+    {
+        type Object = Self;
+        type Set = Self;
+
+        fn object(&self) -> Self {
+            self.clone()
+        }
+
+        fn covering_set(&self) -> &Self {
+            &self
+        }
+
+        fn update_covering_set(&mut self, _: &Self, other: &Self) {
+            let mut difference = &*self - other;
+            std::mem::swap(self, &mut difference);
+        }
+
+        fn score(&self) -> usize {
+            self.len()
+        }
+    }
+
+    fn example_system() -> Vec<HashSet<usize>> {
+        vec![
+            HashSet::from_iter(vec![3]),
+            HashSet::from_iter(vec![1, 2, 4, 5]),
+            HashSet::from_iter(vec![1, 2, 4, 5]),
+            HashSet::from_iter(vec![1]),
+            HashSet::from_iter(vec![2, 4, 5]),
+        ]
+    }
+
+    #[test]
+    fn zero_limit() {
+        let cover = maximum_cover(example_system(), 0);
+        assert_eq!(cover.len(), 0);
+    }
+
+    #[test]
+    fn one_limit() {
+        let sets = example_system();
+        let cover = maximum_cover(sets.clone(), 1);
+        assert_eq!(cover.len(), 1);
+        assert_eq!(cover[0], sets[1]);
+    }
+
+    // Check that even if the limit provides room, we don't include useless items in the soln.
+    #[test]
+    fn exclude_zero_score() {
+        let sets = example_system();
+        for k in 2..10 {
+            let cover = maximum_cover(sets.clone(), k);
+            assert_eq!(cover.len(), 2);
+            assert_eq!(cover[0], sets[1]);
+            assert_eq!(cover[1], sets[0]);
+        }
+    }
+
+    fn quality<T: Eq + Hash>(solution: &[HashSet<T>]) -> usize {
+        solution.iter().map(HashSet::len).sum()
+    }
+
+    // Optimal solution is the first three sets (quality 15) but our greedy algorithm
+    // will select the last three (quality 11). The comment at the end of each line
+    // shows that set's score at each iteration, with a * indicating that it will be chosen.
+    #[test]
+    fn suboptimal() {
+        let sets = vec![
+            HashSet::from_iter(vec![0, 1, 8, 11, 14]), // 5, 3, 2
+            HashSet::from_iter(vec![2, 3, 7, 9, 10]),  // 5, 3, 2
+            HashSet::from_iter(vec![4, 5, 6, 12, 13]), // 5, 4, 2
+            HashSet::from_iter(vec![9, 10]),           // 4, 4, 2*
+            HashSet::from_iter(vec![5, 6, 7, 8]),      // 4, 4*
+            HashSet::from_iter(vec![0, 1, 2, 3, 4]),   // 5*
+        ];
+        let cover = maximum_cover(sets.clone(), 3);
+        assert_eq!(quality(&cover), 11);
+    }
+
+    #[test]
+    fn intersecting_ok() {
+        let sets = vec![
+            HashSet::from_iter(vec![1, 2, 3, 4, 5, 6, 7, 8]),
+            HashSet::from_iter(vec![1, 2, 3, 9, 10, 11]),
+            HashSet::from_iter(vec![4, 5, 6, 12, 13, 14]),
+            HashSet::from_iter(vec![7, 8, 15, 16, 17, 18]),
+            HashSet::from_iter(vec![1, 2, 9, 10]),
+            HashSet::from_iter(vec![1, 5, 6, 8]),
+            HashSet::from_iter(vec![1, 7, 11, 19]),
+        ];
+        let cover = maximum_cover(sets.clone(), 5);
+        assert_eq!(quality(&cover), 19);
+        assert_eq!(cover.len(), 5);
+    }
+}
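The same greedy loop, rendered without the trait machinery so the iteration-by-iteration re-scoring in the `suboptimal` test is easy to follow. A standalone sketch; it relies on `max_by_key` returning the last maximal element on ties, which matches the score annotations above:

use std::collections::HashSet;

fn greedy_cover(mut sets: Vec<HashSet<usize>>, limit: usize) -> Vec<HashSet<usize>> {
    let mut solution = Vec::new();
    for _ in 0..limit {
        // Pick the set covering the most still-uncovered elements.
        let best = match sets.iter().enumerate().max_by_key(|(_, s)| s.len()) {
            Some((i, s)) if !s.is_empty() => i,
            _ => break,
        };
        let chosen = sets.swap_remove(best);
        // Re-score: remove the chosen elements from every remaining set.
        for s in &mut sets {
            *s = &*s - &chosen;
        }
        solution.push(chosen);
    }
    solution
}

fn main() {
    let sets: Vec<HashSet<usize>> = vec![
        [0, 1, 8, 11, 14].iter().copied().collect(),
        [2, 3, 7, 9, 10].iter().copied().collect(),
        [4, 5, 6, 12, 13].iter().copied().collect(),
        [9, 10].iter().copied().collect(),
        [5, 6, 7, 8].iter().copied().collect(),
        [0, 1, 2, 3, 4].iter().copied().collect(),
    ];
    let cover = greedy_cover(sets, 3);
    // Greedy total is 5 + 4 + 2 = 11, below the optimum of 15, matching the
    // `suboptimal` test above.
    assert_eq!(cover.iter().map(HashSet::len).sum::<usize>(), 11);
}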
eth2/operation_pool/src/persistence.rs (new file, 121 lines)
@@ -0,0 +1,121 @@
+use crate::attestation_id::AttestationId;
+use crate::OperationPool;
+use parking_lot::RwLock;
+use ssz_derive::{Decode, Encode};
+use types::*;
+
+/// SSZ-serializable version of `OperationPool`.
+///
+/// Operations are stored in arbitrary order, so it's not a good idea to compare instances
+/// of this type (or its encoded form) for equality. Convert back to an `OperationPool` first.
+#[derive(Encode, Decode)]
+pub struct PersistedOperationPool {
+    /// Mapping from attestation ID to attestation mappings.
+    // We could save space by not storing the attestation ID, but it might
+    // be difficult to make that roundtrip due to eager aggregation.
+    attestations: Vec<(AttestationId, Vec<Attestation>)>,
+    deposits: Vec<Deposit>,
+    /// Attester slashings.
+    attester_slashings: Vec<AttesterSlashing>,
+    /// Proposer slashings.
+    proposer_slashings: Vec<ProposerSlashing>,
+    /// Voluntary exits.
+    voluntary_exits: Vec<VoluntaryExit>,
+    /// Transfers.
+    transfers: Vec<Transfer>,
+}
+
+impl PersistedOperationPool {
+    /// Convert an `OperationPool` into serializable form.
+    pub fn from_operation_pool<T: EthSpec>(operation_pool: &OperationPool<T>) -> Self {
+        let attestations = operation_pool
+            .attestations
+            .read()
+            .iter()
+            .map(|(att_id, att)| (att_id.clone(), att.clone()))
+            .collect();
+
+        let deposits = operation_pool
+            .deposits
+            .read()
+            .iter()
+            .map(|(_, d)| d.clone())
+            .collect();
+
+        let attester_slashings = operation_pool
+            .attester_slashings
+            .read()
+            .iter()
+            .map(|(_, slashing)| slashing.clone())
+            .collect();
+
+        let proposer_slashings = operation_pool
+            .proposer_slashings
+            .read()
+            .iter()
+            .map(|(_, slashing)| slashing.clone())
+            .collect();
+
+        let voluntary_exits = operation_pool
+            .voluntary_exits
+            .read()
+            .iter()
+            .map(|(_, exit)| exit.clone())
+            .collect();
+
+        let transfers = operation_pool.transfers.read().iter().cloned().collect();
+
+        Self {
+            attestations,
+            deposits,
+            attester_slashings,
+            proposer_slashings,
+            voluntary_exits,
+            transfers,
+        }
+    }
+
+    /// Reconstruct an `OperationPool`.
+    pub fn into_operation_pool<T: EthSpec>(
+        self,
+        state: &BeaconState<T>,
+        spec: &ChainSpec,
+    ) -> OperationPool<T> {
+        let attestations = RwLock::new(self.attestations.into_iter().collect());
+        let deposits = RwLock::new(self.deposits.into_iter().map(|d| (d.index, d)).collect());
+        let attester_slashings = RwLock::new(
+            self.attester_slashings
+                .into_iter()
+                .map(|slashing| {
+                    (
+                        OperationPool::attester_slashing_id(&slashing, state, spec),
+                        slashing,
+                    )
+                })
+                .collect(),
+        );
+        let proposer_slashings = RwLock::new(
+            self.proposer_slashings
+                .into_iter()
+                .map(|slashing| (slashing.proposer_index, slashing))
+                .collect(),
+        );
+        let voluntary_exits = RwLock::new(
+            self.voluntary_exits
+                .into_iter()
+                .map(|exit| (exit.validator_index, exit))
+                .collect(),
+        );
+        let transfers = RwLock::new(self.transfers.into_iter().collect());
+
+        OperationPool {
+            attestations,
+            deposits,
+            attester_slashings,
+            proposer_slashings,
+            voluntary_exits,
+            transfers,
+            _phantom: Default::default(),
+        }
+    }
+}
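A sketch of the persistence round trip with std types only: flattening a keyed map to a `Vec` makes iteration order arbitrary, which is why the doc comment above warns against comparing the serialized form directly; rebuilding the map restores a comparable value:

use std::collections::HashMap;

fn main() {
    let pool: HashMap<u64, &str> = [(2, "b"), (1, "a")].iter().copied().collect();

    // "Persist": iteration order over a HashMap is arbitrary, so two equal
    // pools can produce differently-ordered vectors.
    let persisted: Vec<(u64, &str)> = pool.iter().map(|(k, v)| (*k, *v)).collect();

    // "Reload": rebuilding the map restores a canonical, comparable value.
    let reloaded: HashMap<u64, &str> = persisted.into_iter().collect();
    assert_eq!(pool, reloaded);
}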
@@ -24,8 +24,8 @@ integer-sqrt = "0.1"
 itertools = "0.8"
 log = "0.4"
 merkle_proof = { path = "../utils/merkle_proof" }
-ssz = { path = "../utils/ssz" }
-ssz_derive = { path = "../utils/ssz_derive" }
+eth2_ssz = { path = "../utils/ssz" }
+eth2_ssz_derive = { path = "../utils/ssz_derive" }
 tree_hash = { path = "../utils/tree_hash" }
 tree_hash_derive = { path = "../utils/tree_hash_derive" }
 types = { path = "../types" }
@@ -26,13 +26,12 @@ serde_derive = "1.0"
 serde_json = "1.0"
 serde_yaml = "0.8"
 slog = "^2.2.3"
-ssz = { path = "../utils/ssz" }
-ssz_derive = { path = "../utils/ssz_derive" }
+eth2_ssz = { path = "../utils/ssz" }
+eth2_ssz_derive = { path = "../utils/ssz_derive" }
 swap_or_not_shuffle = { path = "../utils/swap_or_not_shuffle" }
 test_random_derive = { path = "../utils/test_random_derive" }
 tree_hash = { path = "../utils/tree_hash" }
 tree_hash_derive = { path = "../utils/tree_hash_derive" }
-libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "b3c32d9a821ae6cc89079499cc6e8a6bab0bffc3" }
 
 [dev-dependencies]
 env_logger = "0.6.0"
@@ -104,11 +104,7 @@ pub struct ChainSpec {
     domain_voluntary_exit: u32,
     domain_transfer: u32,
 
-    /*
-     * Network specific parameters
-     *
-     */
-    pub boot_nodes: Vec<Multiaddr>,
+    pub boot_nodes: Vec<String>,
     pub chain_id: u8,
 }
 
@@ -216,7 +212,7 @@ impl ChainSpec {
             domain_transfer: 5,
 
             /*
-             * Boot nodes
+             * Network specific
             */
            boot_nodes: vec![],
            chain_id: 1, // mainnet chain id
@@ -231,12 +227,8 @@ impl ChainSpec {
     pub fn minimal() -> Self {
         let genesis_slot = Slot::new(0);
 
-        // Note: these bootnodes are placeholders.
-        //
-        // Should be updated once static bootnodes exist.
-        let boot_nodes = vec!["/ip4/127.0.0.1/tcp/9000"
-            .parse()
-            .expect("correct multiaddr")];
+        // Note: bootnodes to be updated when static nodes exist.
+        let boot_nodes = vec![];
 
         Self {
             target_committee_size: 4,
@@ -82,6 +82,3 @@ pub type ProposerMap = HashMap<u64, usize>;
 
 pub use bls::{AggregatePublicKey, AggregateSignature, Keypair, PublicKey, SecretKey, Signature};
 pub use fixed_len_vec::{typenum, typenum::Unsigned, FixedLenVec};
-pub use libp2p::floodsub::{Topic, TopicBuilder, TopicHash};
-pub use libp2p::multiaddr;
-pub use libp2p::Multiaddr;
@@ -13,7 +13,7 @@ rand = "^0.5"
 serde = "1.0"
 serde_derive = "1.0"
 serde_hex = { path = "../serde_hex" }
-ssz = { path = "../ssz" }
+eth2_ssz = { path = "../ssz" }
 tree_hash = { path = "../tree_hash" }
 
 [features]
@@ -7,7 +7,7 @@ edition = "2018"
 [dependencies]
 cached_tree_hash = { path = "../cached_tree_hash" }
 serde_hex = { path = "../serde_hex" }
-ssz = { path = "../ssz" }
+eth2_ssz = { path = "../ssz" }
 bit-vec = "0.5.0"
 bit_reverse = "0.1"
 serde = "1.0"
@@ -9,7 +9,7 @@ publish = false
 cargo-fuzz = true
 
 [dependencies]
-ssz = { path = "../../ssz" }
+eth2_ssz = { path = "../../ssz" }
 
 [dependencies.boolean-bitfield]
 path = ".."
@@ -13,7 +13,7 @@ use std::default;
 
 /// A BooleanBitfield represents a set of booleans compactly stored as a vector of bits.
 /// The BooleanBitfield is given a fixed size during construction. Reads outside of the current size return an out-of-bounds error. Writes outside of the current size expand the size of the set.
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Hash)]
 pub struct BooleanBitfield(BitVec);
 
 /// Error represents some reason a request against a bitfield was not satisfied
@@ -170,6 +170,7 @@ impl cmp::PartialEq for BooleanBitfield {
         ssz::ssz_encode(self) == ssz::ssz_encode(other)
     }
 }
+impl Eq for BooleanBitfield {}
 
 /// Create a new bitfield that is a union of two other bitfields.
 ///
@@ -1,6 +1,5 @@
 use clap::ArgMatches;
 use serde_derive::{Deserialize, Serialize};
-use std::fs;
 use std::fs::File;
 use std::io::prelude::*;
 use std::path::PathBuf;
@@ -105,15 +104,3 @@ where
         Ok(None)
     }
 }
-
-pub fn get_data_dir(args: &ArgMatches, default_data_dir: PathBuf) -> Result<PathBuf, &'static str> {
-    if let Some(data_dir) = args.value_of("data_dir") {
-        Ok(PathBuf::from(data_dir))
-    } else {
-        let path = dirs::home_dir()
-            .ok_or_else(|| "Unable to locate home directory")?
-            .join(&default_data_dir);
-        fs::create_dir_all(&path).map_err(|_| "Unable to create data_dir")?;
-        Ok(path)
-    }
-}
@@ -9,5 +9,5 @@ cached_tree_hash = { path = "../cached_tree_hash" }
 tree_hash = { path = "../tree_hash" }
 serde = "1.0"
 serde_derive = "1.0"
-ssz = { path = "../ssz" }
+eth2_ssz = { path = "../ssz" }
 typenum = "1.10"
eth2/utils/hashing/.cargo/config (new file, 2 lines)
@@ -0,0 +1,2 @@
+[target.wasm32-unknown-unknown]
+runner = 'wasm-bindgen-test-runner'
@@ -4,5 +4,14 @@ version = "0.1.0"
 authors = ["Paul Hauner <paul@paulhauner.com>"]
 edition = "2018"
 
-[dependencies]
+[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
 ring = "0.14.6"
+
+[target.'cfg(target_arch = "wasm32")'.dependencies]
+sha2 = "0.8.0"
+
+[dev-dependencies]
+rustc-hex = "2.0.1"
+
+[target.'cfg(target_arch = "wasm32")'.dev-dependencies]
+wasm-bindgen-test = "0.2.47"
@@ -1,7 +1,17 @@
+#[cfg(not(target_arch = "wasm32"))]
 use ring::digest::{digest, SHA256};
 
+#[cfg(target_arch = "wasm32")]
+use sha2::{Digest, Sha256};
+
 pub fn hash(input: &[u8]) -> Vec<u8> {
-    digest(&SHA256, input).as_ref().into()
+    #[cfg(not(target_arch = "wasm32"))]
+    let h = digest(&SHA256, input).as_ref().into();
+
+    #[cfg(target_arch = "wasm32")]
+    let h = Sha256::digest(input).as_ref().into();
+
+    h
 }
 
 /// Get merkle root of some hashed values - the input leaf nodes is expected to already be hashed
|
|||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
use ring::test;
|
use rustc_hex::FromHex;
|
||||||
|
|
||||||
#[test]
|
#[cfg(target_arch = "wasm32")]
|
||||||
|
use wasm_bindgen_test::*;
|
||||||
|
|
||||||
|
#[cfg_attr(not(target_arch = "wasm32"), test)]
|
||||||
|
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
|
||||||
fn test_hashing() {
|
fn test_hashing() {
|
||||||
let input: Vec<u8> = b"hello world".as_ref().into();
|
let input: Vec<u8> = b"hello world".as_ref().into();
|
||||||
|
|
||||||
let output = hash(input.as_ref());
|
let output = hash(input.as_ref());
|
||||||
let expected_hex = "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9";
|
let expected_hex = "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9";
|
||||||
let expected: Vec<u8> = test::from_hex(expected_hex).unwrap();
|
let expected: Vec<u8> = expected_hex.from_hex().unwrap();
|
||||||
assert_eq!(expected, output);
|
assert_eq!(expected, output);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[cfg_attr(not(target_arch = "wasm32"), test)]
|
||||||
|
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
|
||||||
fn test_merkle_root() {
|
fn test_merkle_root() {
|
||||||
// hash the leaf nodes
|
// hash the leaf nodes
|
||||||
let mut input = vec![
|
let mut input = vec![
|
||||||
@ -79,13 +94,17 @@ mod tests {
|
|||||||
|
|
||||||
assert_eq!(&expected[..], output.unwrap().as_slice());
|
assert_eq!(&expected[..], output.unwrap().as_slice());
|
||||||
}
|
}
|
||||||
#[test]
|
|
||||||
|
#[cfg_attr(not(target_arch = "wasm32"), test)]
|
||||||
|
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
|
||||||
fn test_empty_input_merkle_root() {
|
fn test_empty_input_merkle_root() {
|
||||||
let input = vec![];
|
let input = vec![];
|
||||||
let output = merkle_root(&input[..]);
|
let output = merkle_root(&input[..]);
|
||||||
assert_eq!(None, output);
|
assert_eq!(None, output);
|
||||||
}
|
}
|
||||||
#[test]
|
|
||||||
|
#[cfg_attr(not(target_arch = "wasm32"), test)]
|
||||||
|
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
|
||||||
fn test_odd_leaf_merkle_root() {
|
fn test_odd_leaf_merkle_root() {
|
||||||
let input = vec![
|
let input = vec![
|
||||||
hash("a".as_bytes()),
|
hash("a".as_bytes()),
|
||||||
|
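The dual-attribute trick in isolation: `cfg_attr` expands to plain `#[test]` on native targets and disappears on wasm32, where the real code layers on `wasm_bindgen_test` so one body serves both harnesses. A minimal native-only sketch:

#[cfg(test)]
mod example {
    // On native targets this expands to `#[test]`; on wasm32 the attribute
    // vanishes (the real code adds the `wasm_bindgen_test` arm instead).
    #[cfg_attr(not(target_arch = "wasm32"), test)]
    fn runs_under_the_native_harness() {
        assert_eq!(2 + 2, 4);
    }
}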
@@ -1,8 +1,12 @@
 [package]
-name = "ssz"
+name = "eth2_ssz"
 version = "0.1.0"
-authors = ["Paul Hauner <paul@paulhauner.com>"]
+authors = ["Paul Hauner <paul@sigmaprime.io>"]
 edition = "2018"
+description = "SimpleSerialize (SSZ) as used in Ethereum 2.0"
+
+[lib]
+name = "ssz"
 
 [[bench]]
 name = "benches"
@@ -10,7 +14,7 @@ harness = false
 
 [dev-dependencies]
 criterion = "0.2"
-ssz_derive = { path = "../ssz_derive" }
+eth2_ssz_derive = { path = "../ssz_derive" }
 
 [dependencies]
 bytes = "0.4.9"
@@ -34,8 +34,160 @@ impl_decodable_for_uint!(u8, 8);
 impl_decodable_for_uint!(u16, 16);
 impl_decodable_for_uint!(u32, 32);
 impl_decodable_for_uint!(u64, 64);
+
+#[cfg(target_pointer_width = "32")]
+impl_decodable_for_uint!(usize, 32);
+
+#[cfg(target_pointer_width = "64")]
 impl_decodable_for_uint!(usize, 64);
+
+macro_rules! impl_decode_for_tuples {
+    ($(
+        $Tuple:ident {
+            $(($idx:tt) -> $T:ident)+
+        }
+    )+) => {
+        $(
+            impl<$($T: Decode),+> Decode for ($($T,)+) {
+                fn is_ssz_fixed_len() -> bool {
+                    $(
+                        <$T as Decode>::is_ssz_fixed_len() &&
+                    )*
+                    true
+                }
+
+                fn ssz_fixed_len() -> usize {
+                    if <Self as Decode>::is_ssz_fixed_len() {
+                        $(
+                            <$T as Decode>::ssz_fixed_len() +
+                        )*
+                        0
+                    } else {
+                        BYTES_PER_LENGTH_OFFSET
+                    }
+                }
+
+                fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError> {
+                    let mut builder = SszDecoderBuilder::new(bytes);
+
+                    $(
+                        builder.register_type::<$T>()?;
+                    )*
+
+                    let mut decoder = builder.build()?;
+
+                    Ok(($(
+                        decoder.decode_next::<$T>()?,
+                    )*
+                    ))
+                }
+            }
+        )+
+    }
+}
+
+impl_decode_for_tuples! {
+    Tuple2 {
+        (0) -> A
+        (1) -> B
+    }
+    Tuple3 {
+        (0) -> A
+        (1) -> B
+        (2) -> C
+    }
+    Tuple4 {
+        (0) -> A
+        (1) -> B
+        (2) -> C
+        (3) -> D
+    }
+    Tuple5 {
+        (0) -> A
+        (1) -> B
+        (2) -> C
+        (3) -> D
+        (4) -> E
+    }
+    Tuple6 {
+        (0) -> A
+        (1) -> B
+        (2) -> C
+        (3) -> D
+        (4) -> E
+        (5) -> F
+    }
+    Tuple7 {
+        (0) -> A
+        (1) -> B
+        (2) -> C
+        (3) -> D
+        (4) -> E
+        (5) -> F
+        (6) -> G
+    }
+    Tuple8 {
+        (0) -> A
+        (1) -> B
+        (2) -> C
+        (3) -> D
+        (4) -> E
+        (5) -> F
+        (6) -> G
+        (7) -> H
+    }
+    Tuple9 {
+        (0) -> A
+        (1) -> B
+        (2) -> C
+        (3) -> D
+        (4) -> E
+        (5) -> F
+        (6) -> G
+        (7) -> H
+        (8) -> I
+    }
+    Tuple10 {
+        (0) -> A
+        (1) -> B
+        (2) -> C
+        (3) -> D
+        (4) -> E
+        (5) -> F
+        (6) -> G
+        (7) -> H
+        (8) -> I
+        (9) -> J
+    }
+    Tuple11 {
+        (0) -> A
+        (1) -> B
+        (2) -> C
+        (3) -> D
+        (4) -> E
+        (5) -> F
+        (6) -> G
+        (7) -> H
+        (8) -> I
+        (9) -> J
+        (10) -> K
+    }
+    Tuple12 {
+        (0) -> A
+        (1) -> B
+        (2) -> C
+        (3) -> D
+        (4) -> E
+        (5) -> F
+        (6) -> G
+        (7) -> H
+        (8) -> I
+        (9) -> J
+        (10) -> K
+        (11) -> L
+    }
+}
+
 impl Decode for bool {
     fn is_ssz_fixed_len() -> bool {
         true
@@ -515,4 +667,15 @@
         })
     );
     }
 
+    #[test]
+    fn tuple() {
+        assert_eq!(<(u16, u16)>::from_ssz_bytes(&[0, 0, 0, 0]), Ok((0, 0)));
+        assert_eq!(<(u16, u16)>::from_ssz_bytes(&[16, 0, 17, 0]), Ok((16, 17)));
+        assert_eq!(<(u16, u16)>::from_ssz_bytes(&[0, 1, 2, 0]), Ok((256, 2)));
+        assert_eq!(
+            <(u16, u16)>::from_ssz_bytes(&[255, 255, 0, 0]),
+            Ok((65535, 0))
+        );
+    }
 }
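What the new `tuple` test asserts, worked by hand: fixed-size SSZ fields are little-endian and simply concatenated, so a `(u16, u16)` is two adjacent 2-byte fields. A std-only sketch (`from_ssz_bytes` is the crate's API; this helper is not):

fn decode_u16_pair(bytes: &[u8; 4]) -> (u16, u16) {
    (
        u16::from_le_bytes([bytes[0], bytes[1]]),
        u16::from_le_bytes([bytes[2], bytes[3]]),
    )
}

fn main() {
    // [0, 1] little-endian is 0x0100 = 256, matching `Ok((256, 2))` above.
    assert_eq!(decode_u16_pair(&[0, 1, 2, 0]), (256, 2));
    assert_eq!(decode_u16_pair(&[255, 255, 0, 0]), (65535, 0));
}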
@@ -24,8 +24,161 @@ impl_encodable_for_uint!(u8, 8);
 impl_encodable_for_uint!(u16, 16);
 impl_encodable_for_uint!(u32, 32);
 impl_encodable_for_uint!(u64, 64);
+
+#[cfg(target_pointer_width = "32")]
+impl_encodable_for_uint!(usize, 32);
+
+#[cfg(target_pointer_width = "64")]
 impl_encodable_for_uint!(usize, 64);
+
+// Based on the `tuple_impls` macro from the standard library.
+macro_rules! impl_encode_for_tuples {
+    ($(
+        $Tuple:ident {
+            $(($idx:tt) -> $T:ident)+
+        }
+    )+) => {
+        $(
+            impl<$($T: Encode),+> Encode for ($($T,)+) {
+                fn is_ssz_fixed_len() -> bool {
+                    $(
+                        <$T as Encode>::is_ssz_fixed_len() &&
+                    )*
+                    true
+                }
+
+                fn ssz_fixed_len() -> usize {
+                    if <Self as Encode>::is_ssz_fixed_len() {
+                        $(
+                            <$T as Encode>::ssz_fixed_len() +
+                        )*
+                        0
+                    } else {
+                        BYTES_PER_LENGTH_OFFSET
+                    }
+                }
+
+                fn ssz_append(&self, buf: &mut Vec<u8>) {
+                    let offset = $(
+                        <$T as Encode>::ssz_fixed_len() +
+                    )*
+                    0;
+
+                    let mut encoder = SszEncoder::container(buf, offset);
+
+                    $(
+                        encoder.append(&self.$idx);
+                    )*
+
+                    encoder.finalize();
+                }
+            }
+        )+
+    }
+}
+
+impl_encode_for_tuples! {
+    Tuple2 {
+        (0) -> A
+        (1) -> B
+    }
+    Tuple3 {
+        (0) -> A
+        (1) -> B
+        (2) -> C
+    }
+    Tuple4 {
+        (0) -> A
+        (1) -> B
+        (2) -> C
+        (3) -> D
+    }
+    Tuple5 {
+        (0) -> A
+        (1) -> B
+        (2) -> C
+        (3) -> D
+        (4) -> E
+    }
+    Tuple6 {
+        (0) -> A
+        (1) -> B
+        (2) -> C
+        (3) -> D
+        (4) -> E
+        (5) -> F
+    }
+    Tuple7 {
+        (0) -> A
+        (1) -> B
+        (2) -> C
+        (3) -> D
+        (4) -> E
+        (5) -> F
+        (6) -> G
+    }
+    Tuple8 {
+        (0) -> A
+        (1) -> B
+        (2) -> C
+        (3) -> D
+        (4) -> E
+        (5) -> F
+        (6) -> G
+        (7) -> H
+    }
+    Tuple9 {
+        (0) -> A
+        (1) -> B
+        (2) -> C
+        (3) -> D
+        (4) -> E
+        (5) -> F
+        (6) -> G
+        (7) -> H
+        (8) -> I
+    }
+    Tuple10 {
+        (0) -> A
+        (1) -> B
+        (2) -> C
+        (3) -> D
+        (4) -> E
+        (5) -> F
+        (6) -> G
+        (7) -> H
+        (8) -> I
+        (9) -> J
+    }
+    Tuple11 {
+        (0) -> A
+        (1) -> B
+        (2) -> C
+        (3) -> D
+        (4) -> E
+        (5) -> F
+        (6) -> G
+        (7) -> H
+        (8) -> I
+        (9) -> J
+        (10) -> K
+    }
+    Tuple12 {
+        (0) -> A
+        (1) -> B
+        (2) -> C
+        (3) -> D
+        (4) -> E
+        (5) -> F
+        (6) -> G
+        (7) -> H
+        (8) -> I
+        (9) -> J
+        (10) -> K
+        (11) -> L
+    }
+}
+
 /// The SSZ "union" type.
 impl<T: Encode> Encode for Option<T> {
     fn is_ssz_fixed_len() -> bool {
@@ -287,4 +440,11 @@
     assert_eq!([1, 0, 0, 0].as_ssz_bytes(), vec![1, 0, 0, 0]);
     assert_eq!([1, 2, 3, 4].as_ssz_bytes(), vec![1, 2, 3, 4]);
     }
 
+    #[test]
+    fn tuple() {
+        assert_eq!((10u8, 11u8).as_ssz_bytes(), vec![10, 11]);
+        assert_eq!((10u32, 11u8).as_ssz_bytes(), vec![10, 0, 0, 0, 11]);
+        assert_eq!((10u8, 11u8, 12u8).as_ssz_bytes(), vec![10, 11, 12]);
+    }
 }
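The encode side of the same layout: fixed-size fields are appended in order, so `(10u32, 11u8)` becomes four little-endian bytes of the `u32` followed by the single byte of the `u8`, exactly as the test above asserts. A std-only sketch:

fn encode_u32_u8(a: u32, b: u8) -> Vec<u8> {
    let mut buf = Vec::with_capacity(5);
    buf.extend_from_slice(&a.to_le_bytes()); // 4-byte fixed part
    buf.push(b); // 1-byte fixed part
    buf
}

fn main() {
    assert_eq!(encode_u32_u8(10, 11), vec![10, 0, 0, 0, 11]);
}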
@ -2,7 +2,7 @@
|
|||||||
//! format designed for use in Ethereum 2.0.
|
//! format designed for use in Ethereum 2.0.
|
||||||
//!
|
//!
|
||||||
//! Conforms to
|
//! Conforms to
|
||||||
//! [v0.6.1](https://github.com/ethereum/eth2.0-specs/blob/v0.6.1/specs/simple-serialize.md) of the
|
//! [v0.7.1](https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/simple-serialize.md) of the
|
||||||
//! Ethereum 2.0 specification.
|
//! Ethereum 2.0 specification.
|
||||||
//!
|
//!
|
||||||
//! ## Example
|
//! ## Example
|
||||||
@ -46,7 +46,10 @@ pub use encode::{Encode, SszEncoder};
 /// The number of bytes used to represent an offset.
 pub const BYTES_PER_LENGTH_OFFSET: usize = 4;
 /// The maximum value that can be represented using `BYTES_PER_LENGTH_OFFSET`.
-pub const MAX_LENGTH_VALUE: usize = (1 << (BYTES_PER_LENGTH_OFFSET * 8)) - 1;
+#[cfg(target_pointer_width = "32")]
+pub const MAX_LENGTH_VALUE: usize = (std::u32::MAX >> 8 * (4 - BYTES_PER_LENGTH_OFFSET)) as usize;
+#[cfg(target_pointer_width = "64")]
+pub const MAX_LENGTH_VALUE: usize = (std::u64::MAX >> 8 * (8 - BYTES_PER_LENGTH_OFFSET)) as usize;
 
 /// Convenience function to SSZ encode an object supporting ssz::Encode.
 ///
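The `cfg`-gated rewrite of `MAX_LENGTH_VALUE` sidesteps an overflow in the old expression: on a 32-bit target, `usize` is 32 bits wide, so `1 << (BYTES_PER_LENGTH_OFFSET * 8)` is `1 << 32` and overflows before the `- 1` is ever applied. Shifting the type's maximum value down instead never overflows. On 64-bit targets the two formulations agree, as this sketch checks:

fn main() {
    const BYTES_PER_LENGTH_OFFSET: usize = 4;

    // Old formulation, computed in u64 so it cannot overflow here.
    let old = (1u64 << (BYTES_PER_LENGTH_OFFSET * 8)) - 1;
    // New formulation for 64-bit targets.
    let new = std::u64::MAX >> (8 * (8 - BYTES_PER_LENGTH_OFFSET));

    assert_eq!(old, new); // both 4_294_967_295 (0xFFFF_FFFF)
}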
@ -346,4 +346,34 @@ mod round_trip {
 
         round_trip(vec);
     }
+
+    #[test]
+    fn tuple_u8_u16() {
+        let vec: Vec<(u8, u16)> = vec![
+            (0, 0),
+            (0, 1),
+            (1, 0),
+            (u8::max_value(), u16::max_value()),
+            (0, u16::max_value()),
+            (u8::max_value(), 0),
+            (42, 12301),
+        ];
+
+        round_trip(vec);
+    }
+
+    #[test]
+    fn tuple_vec_vec() {
+        let vec: Vec<(u64, Vec<u8>, Vec<Vec<u16>>)> = vec![
+            (0, vec![], vec![vec![]]),
+            (99, vec![101], vec![vec![], vec![]]),
+            (
+                42,
+                vec![12, 13, 14],
+                vec![vec![99, 98, 97, 96], vec![42, 44, 46, 48, 50]],
+            ),
+        ];
+
+        round_trip(vec);
+    }
 }
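Both new tests lean on a `round_trip` helper defined earlier in the `round_trip` module and not shown in this hunk. Its presumable shape is encode-then-decode with an equality check; the exact signature in the module may differ:

use ssz::{Decode, Encode};

// Hypothetical reconstruction of the helper the tests call: every item
// must survive a serialize/deserialize cycle unchanged.
fn round_trip<T: Encode + Decode + PartialEq + std::fmt::Debug>(items: Vec<T>) {
    for item in items {
        let encoded = item.as_ssz_bytes();
        assert_eq!(T::from_ssz_bytes(&encoded), Ok(item));
    }
}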
@ -1,14 +1,15 @@
 [package]
-name = "ssz_derive"
+name = "eth2_ssz_derive"
 version = "0.1.0"
-authors = ["Paul Hauner <paul@paulhauner.com>"]
+authors = ["Paul Hauner <paul@sigmaprime.io>"]
 edition = "2018"
-description = "Procedural derive macros for SSZ encoding and decoding."
+description = "Procedural derive macros to accompany the eth2_ssz crate."
 
 [lib]
+name = "ssz_derive"
 proc-macro = true
 
 [dependencies]
 syn = "0.15"
 quote = "0.6"
-ssz = { path = "../ssz" }
+eth2_ssz = { path = "../ssz" }
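Note how the rename is kept source-compatible: the package is now published as `eth2_ssz_derive`, but the re-added `[lib] name = "ssz_derive"` keeps the library target, and therefore the import path, unchanged. Downstream code should keep compiling as-is, e.g.:

// Imports use the library target name, not the package name.
use ssz_derive::{Decode, Encode};

#[derive(Encode, Decode, PartialEq, Debug)]
struct Example {
    a: u64,
    b: Vec<u8>,
}

The same presumably holds for the `ssz` to `eth2_ssz` dependency renames in the hunks below, provided the `eth2_ssz` package's own `[lib]` name is still `ssz`.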
@ -7,5 +7,5 @@ edition = "2018"
 [dependencies]
 bytes = "0.4.10"
 hashing = { path = "../utils/hashing" }
-ssz = { path = "../utils/ssz" }
+eth2_ssz = { path = "../utils/ssz" }
 types = { path = "../types" }
@ -17,7 +17,7 @@ serde = "1.0"
 serde_derive = "1.0"
 serde_repr = "0.1"
 serde_yaml = "0.8"
-ssz = { path = "../../eth2/utils/ssz" }
+eth2_ssz = { path = "../../eth2/utils/ssz" }
 tree_hash = { path = "../../eth2/utils/tree_hash" }
 cached_tree_hash = { path = "../../eth2/utils/cached_tree_hash" }
 state_processing = { path = "../../eth2/state_processing" }
@ -14,7 +14,7 @@ path = "src/lib.rs"
 
 [dependencies]
 bls = { path = "../eth2/utils/bls" }
-ssz = { path = "../eth2/utils/ssz" }
+eth2_ssz = { path = "../eth2/utils/ssz" }
 eth2_config = { path = "../eth2/utils/eth2_config" }
 tree_hash = { path = "../eth2/utils/tree_hash" }
 clap = "2.32.0"
@ -34,3 +34,4 @@ toml = "^0.5"
 error-chain = "0.12.0"
 bincode = "^1.1.2"
 futures = "0.1.25"
+dirs = "2.0.1"
@ -9,9 +9,10 @@ mod signer;
 use crate::config::Config as ValidatorClientConfig;
 use crate::service::Service as ValidatorService;
 use clap::{App, Arg};
-use eth2_config::{get_data_dir, read_from_file, write_to_file, Eth2Config};
+use eth2_config::{read_from_file, write_to_file, Eth2Config};
 use protos::services_grpc::ValidatorServiceClient;
 use slog::{crit, error, info, o, Drain};
+use std::fs;
 use std::path::PathBuf;
 use types::{Keypair, MainnetEthSpec, MinimalEthSpec};
 
@ -35,6 +36,7 @@ fn main() {
         .arg(
             Arg::with_name("datadir")
                 .long("datadir")
+                .short("d")
                 .value_name("DIR")
                 .help("Data directory for keys and databases.")
                 .takes_value(true),
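The only change in this hunk is the new `.short("d")`, which lets the flag be passed as `-d DIR` in addition to `--datadir DIR`. A minimal standalone sketch with clap 2.x (the version pinned in the manifest above):

use clap::{App, Arg};

fn main() {
    let matches = App::new("demo")
        .arg(
            Arg::with_name("datadir")
                .long("datadir")
                .short("d") // `-d DIR` now works too
                .value_name("DIR")
                .help("Data directory for keys and databases.")
                .takes_value(true),
        )
        .get_matches();

    if let Some(dir) = matches.value_of("datadir") {
        println!("datadir = {}", dir);
    }
}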
@ -66,13 +68,33 @@ fn main() {
         )
         .get_matches();
 
-    let data_dir = match get_data_dir(&matches, PathBuf::from(DEFAULT_DATA_DIR)) {
-        Ok(dir) => dir,
-        Err(e) => {
-            crit!(log, "Failed to initialize data dir"; "error" => format!("{:?}", e));
+    let data_dir = match matches
+        .value_of("datadir")
+        .and_then(|v| Some(PathBuf::from(v)))
+    {
+        Some(v) => v,
+        None => {
+            // use the default
+            let mut default_dir = match dirs::home_dir() {
+                Some(v) => v,
+                None => {
+                    crit!(log, "Failed to find a home directory");
                     return;
                 }
             };
+            default_dir.push(DEFAULT_DATA_DIR);
+            PathBuf::from(default_dir)
+        }
+    };
+
+    // create the directory if needed
+    match fs::create_dir_all(&data_dir) {
+        Ok(_) => {}
+        Err(e) => {
+            crit!(log, "Failed to initialize data dir"; "error" => format!("{}", e));
+            return;
+        }
+    }
 
     let client_config_path = data_dir.join(CLIENT_CONFIG_FILENAME);
 
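Two small wrinkles in the new code: `.and_then(|v| Some(PathBuf::from(v)))` is just `.map(PathBuf::from)`, and `PathBuf::from(default_dir)` is a no-op because `default_dir` is already a `PathBuf`. A self-contained sketch of the same resolution logic in that tighter form, with `cli_value` standing in for `matches.value_of("datadir")` and using the `dirs` crate added to the manifest above:

use std::path::PathBuf;

fn resolve_data_dir(cli_value: Option<&str>, default_dir: &str) -> Option<PathBuf> {
    cli_value.map(PathBuf::from).or_else(|| {
        // Fall back to $HOME/<default_dir>; None if no home dir exists.
        dirs::home_dir().map(|mut home| {
            home.push(default_dir);
            home
        })
    })
}

fn main() {
    match resolve_data_dir(None, ".lighthouse-validator") {
        Some(dir) => println!("data dir: {}", dir.display()),
        None => eprintln!("Failed to find a home directory"),
    }
}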