From 2763f7bc00123b399f1431fd2d58b3e545818fec Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 3 Oct 2018 13:13:51 +1000 Subject: [PATCH 01/14] Move shuffling to its own crate, update hash fn Previously blake2s-256 was being used, now blake2b-512[:32] is being used. --- Cargo.toml | 1 + beacon_chain/utils/shuffling/Cargo.toml | 7 + .../src/common => utils}/shuffling/README.md | 0 .../mod.rs => utils/shuffling/src/lib.rs} | 14 +- .../shuffling => utils/shuffling/src}/rng.rs | 30 ++-- .../test_vectors/shuffle_test_vectors.yaml | 162 ++++++++++++++++++ 6 files changed, 185 insertions(+), 29 deletions(-) create mode 100644 beacon_chain/utils/shuffling/Cargo.toml rename beacon_chain/{types/src/common => utils}/shuffling/README.md (100%) rename beacon_chain/{types/src/common/shuffling/mod.rs => utils/shuffling/src/lib.rs} (72%) rename beacon_chain/{types/src/common/shuffling => utils/shuffling/src}/rng.rs (75%) create mode 100644 beacon_chain/utils/shuffling/test_vectors/shuffle_test_vectors.yaml diff --git a/Cargo.toml b/Cargo.toml index d9f9d14bc..de69506de 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,6 +37,7 @@ members = [ "beacon_chain/utils/bls", "beacon_chain/utils/boolean-bitfield", "beacon_chain/utils/hashing", + "beacon_chain/utils/shuffling", "beacon_chain/utils/ssz", "beacon_chain/utils/ssz_helpers", "lighthouse/db", diff --git a/beacon_chain/utils/shuffling/Cargo.toml b/beacon_chain/utils/shuffling/Cargo.toml new file mode 100644 index 000000000..df0a108ee --- /dev/null +++ b/beacon_chain/utils/shuffling/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "shuffling" +version = "0.1.0" +authors = ["Paul Hauner "] + +[dependencies] +hashing = { path = "../hashing" } diff --git a/beacon_chain/types/src/common/shuffling/README.md b/beacon_chain/utils/shuffling/README.md similarity index 100% rename from beacon_chain/types/src/common/shuffling/README.md rename to beacon_chain/utils/shuffling/README.md diff --git a/beacon_chain/types/src/common/shuffling/mod.rs b/beacon_chain/utils/shuffling/src/lib.rs similarity index 72% rename from beacon_chain/types/src/common/shuffling/mod.rs rename to beacon_chain/utils/shuffling/src/lib.rs index 432f38ff4..b99d400a1 100644 --- a/beacon_chain/types/src/common/shuffling/mod.rs +++ b/beacon_chain/utils/shuffling/src/lib.rs @@ -1,4 +1,4 @@ -extern crate blake2_rfc; +extern crate hashing; mod rng; @@ -33,20 +33,16 @@ pub fn shuffle( #[cfg(test)] mod tests { use super::*; - use super::blake2_rfc::blake2s::{ blake2s, Blake2sResult }; - - fn hash(seed: &[u8]) -> Blake2sResult { - blake2s(32, &[], seed) - } + use super::hashing::canonical_hash; #[test] fn test_shuffling() { - let seed = hash(b"4kn4driuctg8"); + let seed = canonical_hash(b"4kn4driuctg8"); let list: Vec = (0..12).collect(); - let s = shuffle(seed.as_bytes(), list).unwrap(); + let s = shuffle(&seed, list).unwrap(); assert_eq!( s, - vec![7, 4, 8, 6, 5, 3, 0, 11, 1, 2, 10, 9], + vec![7, 3, 2, 5, 11, 9, 1, 0, 4, 6, 10, 8], ) } } diff --git a/beacon_chain/types/src/common/shuffling/rng.rs b/beacon_chain/utils/shuffling/src/rng.rs similarity index 75% rename from beacon_chain/types/src/common/shuffling/rng.rs rename to beacon_chain/utils/shuffling/src/rng.rs index e43c582ff..7ca6f1866 100644 --- a/beacon_chain/types/src/common/shuffling/rng.rs +++ b/beacon_chain/utils/shuffling/src/rng.rs @@ -1,4 +1,4 @@ -use super::blake2_rfc::blake2s::{ Blake2s, Blake2sResult }; +use super::hashing::canonical_hash; const SEED_SIZE_BYTES: usize = 32; const RAND_BYTES: usize = 3; // 24 / 8 @@ -7,7 +7,7 @@ const 
RAND_MAX: u32 = 16_777_216; // 2**24 /// A pseudo-random number generator which given a seed /// uses successive blake2s hashing to generate "entropy". pub struct ShuffleRng { - seed: Blake2sResult, + seed: Vec, idx: usize, pub rand_max: u32, } @@ -16,7 +16,7 @@ impl ShuffleRng { /// Create a new instance given some "seed" bytes. pub fn new(initial_seed: &[u8]) -> Self { Self { - seed: hash(initial_seed), + seed: canonical_hash(initial_seed), idx: 0, rand_max: RAND_MAX, } @@ -24,7 +24,7 @@ impl ShuffleRng { /// "Regenerates" the seed by hashing it. fn rehash_seed(&mut self) { - self.seed = hash(self.seed.as_bytes()); + self.seed = canonical_hash(&self.seed); self.idx = 0; } @@ -36,7 +36,7 @@ impl ShuffleRng { self.rand() } else { int_from_byte_slice( - self.seed.as_bytes(), + &self.seed, self.idx - RAND_BYTES, ) } @@ -68,13 +68,6 @@ fn int_from_byte_slice(source: &[u8], offset: usize) -> u32 { ) } -/// Peform a blake2s hash on the given bytes. -fn hash(bytes: &[u8]) -> Blake2sResult { - let mut hasher = Blake2s::new(SEED_SIZE_BYTES); - hasher.update(bytes); - hasher.finalize() -} - #[cfg(test)] mod tests { @@ -115,15 +108,12 @@ mod tests { #[test] fn test_shuffling_hash_fn() { - let digest = hash(hash(b"4kn4driuctg8").as_bytes()); // double-hash is intentional - let digest_bytes = digest.as_bytes(); + let digest = canonical_hash(&canonical_hash(&"4kn4driuctg8".as_bytes())); // double-hash is intentional let expected = [ - 0xff, 0xff, 0xff, 0x8f, 0xbb, 0xc7, 0xab, 0x64, 0x43, 0x9a, - 0xe5, 0x12, 0x44, 0xd8, 0x70, 0xcf, 0xe5, 0x79, 0xf6, 0x55, - 0x6b, 0xbd, 0x81, 0x43, 0xc5, 0xcd, 0x70, 0x2b, 0xbe, 0xe3, - 0x87, 0xc7, + 103, 21, 99, 143, 60, 75, 116, 81, 248, 175, 190, 114, 54, 65, 23, 8, 3, 116, + 160, 178, 7, 75, 63, 47, 180, 239, 191, 247, 57, 194, 144, 88 ]; - assert_eq!(digest_bytes.len(), expected.len()); - assert_eq!(digest_bytes, expected) + assert_eq!(digest.len(), expected.len()); + assert_eq!(digest, expected) } } diff --git a/beacon_chain/utils/shuffling/test_vectors/shuffle_test_vectors.yaml b/beacon_chain/utils/shuffling/test_vectors/shuffle_test_vectors.yaml new file mode 100644 index 000000000..e96f9e2d0 --- /dev/null +++ b/beacon_chain/utils/shuffling/test_vectors/shuffle_test_vectors.yaml @@ -0,0 +1,162 @@ +- input: [] + output: [] + seed: !!binary "" +- input: [0] + output: [0] + seed: !!binary "" +- input: [255] + output: [255] + seed: !!binary "" +- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [1, 6, 4, 1, 6, 6, 2, 2, 4, 5] + seed: !!binary "" +- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] + output: [4, 7, 10, 13, 3, 1, 2, 9, 12, 6, 11, 8, 5] + seed: !!binary "" +- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [1, 6, 65, 1, 6, 6, 2, 2, 4, 5] + seed: !!binary "" +- input: [] + output: [] + seed: !!binary | + JlAYJ5H2j8g7PLiPHZI/rTS1uAvKiieOrifPN6Moso0= +- input: [0] + output: [0] + seed: !!binary | + JlAYJ5H2j8g7PLiPHZI/rTS1uAvKiieOrifPN6Moso0= +- input: [255] + output: [255] + seed: !!binary | + JlAYJ5H2j8g7PLiPHZI/rTS1uAvKiieOrifPN6Moso0= +- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [6, 4, 2, 5, 4, 2, 6, 6, 1, 1] + seed: !!binary | + JlAYJ5H2j8g7PLiPHZI/rTS1uAvKiieOrifPN6Moso0= +- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] + output: [13, 1, 9, 8, 3, 10, 6, 2, 5, 12, 11, 4, 7] + seed: !!binary | + JlAYJ5H2j8g7PLiPHZI/rTS1uAvKiieOrifPN6Moso0= +- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [6, 65, 2, 5, 4, 2, 6, 6, 1, 1] + seed: !!binary | + JlAYJ5H2j8g7PLiPHZI/rTS1uAvKiieOrifPN6Moso0= +- input: [] + output: [] + seed: !!binary | + 
Jlpt1sTgPixEqKaQaX4rmZgxnh48U7igzJK1EjAkK0Q= +- input: [0] + output: [0] + seed: !!binary | + Jlpt1sTgPixEqKaQaX4rmZgxnh48U7igzJK1EjAkK0Q= +- input: [255] + output: [255] + seed: !!binary | + Jlpt1sTgPixEqKaQaX4rmZgxnh48U7igzJK1EjAkK0Q= +- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [6, 2, 5, 1, 6, 4, 1, 2, 4, 6] + seed: !!binary | + Jlpt1sTgPixEqKaQaX4rmZgxnh48U7igzJK1EjAkK0Q= +- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] + output: [3, 8, 10, 4, 7, 11, 6, 1, 2, 5, 13, 9, 12] + seed: !!binary | + Jlpt1sTgPixEqKaQaX4rmZgxnh48U7igzJK1EjAkK0Q= +- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [6, 2, 5, 1, 6, 4, 1, 2, 65, 6] + seed: !!binary | + Jlpt1sTgPixEqKaQaX4rmZgxnh48U7igzJK1EjAkK0Q= +- input: [] + output: [] + seed: !!binary | + zoulKyFpZkZG9AaUh1zxxcQ+unrNxwux10Svi4eBMmA= +- input: [0] + output: [0] + seed: !!binary | + zoulKyFpZkZG9AaUh1zxxcQ+unrNxwux10Svi4eBMmA= +- input: [255] + output: [255] + seed: !!binary | + zoulKyFpZkZG9AaUh1zxxcQ+unrNxwux10Svi4eBMmA= +- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [5, 6, 2, 1, 6, 4, 6, 4, 1, 2] + seed: !!binary | + zoulKyFpZkZG9AaUh1zxxcQ+unrNxwux10Svi4eBMmA= +- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] + output: [12, 4, 11, 6, 13, 10, 9, 2, 3, 7, 8, 1, 5] + seed: !!binary | + zoulKyFpZkZG9AaUh1zxxcQ+unrNxwux10Svi4eBMmA= +- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [5, 6, 2, 1, 6, 65, 6, 4, 1, 2] + seed: !!binary | + zoulKyFpZkZG9AaUh1zxxcQ+unrNxwux10Svi4eBMmA= +- input: [] + output: [] + seed: !!binary | + 3gEwWl7gY5XRPg5iZgowSp9hyk4Sf/u92B+t88JdAEw= +- input: [0] + output: [0] + seed: !!binary | + 3gEwWl7gY5XRPg5iZgowSp9hyk4Sf/u92B+t88JdAEw= +- input: [255] + output: [255] + seed: !!binary | + 3gEwWl7gY5XRPg5iZgowSp9hyk4Sf/u92B+t88JdAEw= +- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [6, 2, 6, 5, 4, 4, 1, 6, 2, 1] + seed: !!binary | + 3gEwWl7gY5XRPg5iZgowSp9hyk4Sf/u92B+t88JdAEw= +- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] + output: [10, 12, 13, 3, 7, 11, 2, 4, 9, 8, 6, 5, 1] + seed: !!binary | + 3gEwWl7gY5XRPg5iZgowSp9hyk4Sf/u92B+t88JdAEw= +- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [6, 2, 6, 5, 65, 4, 1, 6, 2, 1] + seed: !!binary | + 3gEwWl7gY5XRPg5iZgowSp9hyk4Sf/u92B+t88JdAEw= +- input: [] + output: [] + seed: !!binary | + mHQhqYMuvHeBZNa6VLLs3lZiqD1jWKP0cRzntvM5N+U= +- input: [0] + output: [0] + seed: !!binary | + mHQhqYMuvHeBZNa6VLLs3lZiqD1jWKP0cRzntvM5N+U= +- input: [255] + output: [255] + seed: !!binary | + mHQhqYMuvHeBZNa6VLLs3lZiqD1jWKP0cRzntvM5N+U= +- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [6, 2, 4, 1, 2, 6, 5, 1, 6, 4] + seed: !!binary | + mHQhqYMuvHeBZNa6VLLs3lZiqD1jWKP0cRzntvM5N+U= +- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] + output: [11, 8, 12, 9, 2, 1, 10, 4, 13, 5, 7, 3, 6] + seed: !!binary | + mHQhqYMuvHeBZNa6VLLs3lZiqD1jWKP0cRzntvM5N+U= +- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [6, 2, 65, 1, 2, 6, 5, 1, 6, 4] + seed: !!binary | + mHQhqYMuvHeBZNa6VLLs3lZiqD1jWKP0cRzntvM5N+U= +- input: [] + output: [] + seed: !!binary | + /1mYGDx96+OeS3HV1avIeE/pPQROpuhSjAj+t+H59DE= +- input: [0] + output: [0] + seed: !!binary | + /1mYGDx96+OeS3HV1avIeE/pPQROpuhSjAj+t+H59DE= +- input: [255] + output: [255] + seed: !!binary | + /1mYGDx96+OeS3HV1avIeE/pPQROpuhSjAj+t+H59DE= +- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [2, 5, 1, 6, 1, 2, 6, 6, 4, 4] + seed: !!binary | + /1mYGDx96+OeS3HV1avIeE/pPQROpuhSjAj+t+H59DE= +- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] + output: [5, 13, 9, 7, 11, 10, 12, 2, 6, 8, 3, 1, 4] + seed: !!binary | + 
/1mYGDx96+OeS3HV1avIeE/pPQROpuhSjAj+t+H59DE= +- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5] + output: [2, 5, 1, 6, 1, 2, 6, 6, 65, 4] + seed: !!binary | + /1mYGDx96+OeS3HV1avIeE/pPQROpuhSjAj+t+H59DE= From 6d4a3bba11ce0f9b2feb5ace47986e4c8fe102ad Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 3 Oct 2018 13:43:46 +1000 Subject: [PATCH 02/14] Update shuffling comments --- beacon_chain/utils/shuffling/README.md | 2 -- beacon_chain/utils/shuffling/src/lib.rs | 15 +++++++++++---- 2 files changed, 11 insertions(+), 6 deletions(-) delete mode 100644 beacon_chain/utils/shuffling/README.md diff --git a/beacon_chain/utils/shuffling/README.md b/beacon_chain/utils/shuffling/README.md deleted file mode 100644 index b0f4f5245..000000000 --- a/beacon_chain/utils/shuffling/README.md +++ /dev/null @@ -1,2 +0,0 @@ -This module includes the fundamental shuffling function. It does not do the -full validator delegation amongst slots. diff --git a/beacon_chain/utils/shuffling/src/lib.rs b/beacon_chain/utils/shuffling/src/lib.rs index b99d400a1..7acb7408a 100644 --- a/beacon_chain/utils/shuffling/src/lib.rs +++ b/beacon_chain/utils/shuffling/src/lib.rs @@ -1,3 +1,7 @@ +/// A library for performing deterministic, pseudo-random shuffling on a vector. +/// +/// This library is designed to confirm to the Ethereum 2.0 specification. + extern crate hashing; mod rng; @@ -9,13 +13,16 @@ pub enum ShuffleErr { ExceedsListLength, } -/// Performs a deterministic, in-place shuffle of a vector of bytes. +/// Performs a deterministic, in-place shuffle of a vector. +/// /// The final order of the shuffle is determined by successive hashes /// of the supplied `seed`. -pub fn shuffle( +/// +/// This is a Fisher-Yates-Durtstenfeld shuffle. +pub fn shuffle( seed: &[u8], - mut list: Vec) - -> Result, ShuffleErr> + mut list: Vec) + -> Result, ShuffleErr> { let mut rng = ShuffleRng::new(seed); if list.len() > rng.rand_max as usize { From eca4448207700c0fe59497b41318b6f284f8e2ab Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 3 Oct 2018 13:45:57 +1000 Subject: [PATCH 03/14] Remove test vectors These will be added in a future PR. 
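Taken together, PATCH 01 and PATCH 02 leave the `shuffling` crate as a Fisher-Yates-Durstenfeld pass driven by a 24-bit, hash-based RNG: the seed is hashed, consumed three bytes at a time, and re-hashed once exhausted. A minimal, self-contained sketch of how those pieces fit together is below. It is illustrative only: `canonical_hash` is stubbed here (the real `hashing` crate wraps blake2b-512 truncated to 32 bytes), and both the big-endian read in `int_from_byte_slice` and the `rand_range` rejection-sampling helper are assumptions, since neither body appears in full in the diffs above.

```rust
// Illustrative sketch, not the crate itself. Assumed: big-endian byte
// order in `int_from_byte_slice` and a `rand_range` rejection-sampling
// helper; `canonical_hash` is a toy stand-in for blake2b-512[:32].
const SEED_SIZE_BYTES: usize = 32;
const RAND_BYTES: usize = 3; // 24 / 8
const RAND_MAX: u32 = 16_777_216; // 2**24

fn canonical_hash(bytes: &[u8]) -> Vec<u8> {
    // Toy hash so the example runs stand-alone; NOT blake2b.
    let mut out = vec![0u8; SEED_SIZE_BYTES];
    for (i, b) in bytes.iter().enumerate() {
        out[i % SEED_SIZE_BYTES] = out[i % SEED_SIZE_BYTES]
            .wrapping_add(*b)
            .rotate_left(3);
    }
    out
}

#[derive(Debug)]
pub enum ShuffleErr {
    ExceedsListLength,
}

pub struct ShuffleRng {
    seed: Vec<u8>,
    idx: usize,
    pub rand_max: u32,
}

impl ShuffleRng {
    pub fn new(initial_seed: &[u8]) -> Self {
        Self {
            seed: canonical_hash(initial_seed),
            idx: 0,
            rand_max: RAND_MAX,
        }
    }

    /// "Regenerates" the seed by hashing it, as in the diff above.
    fn rehash_seed(&mut self) {
        self.seed = canonical_hash(&self.seed);
        self.idx = 0;
    }

    /// Reads the next 24 bits of "entropy", re-hashing the seed once
    /// its bytes are exhausted.
    fn rand(&mut self) -> u32 {
        self.idx += RAND_BYTES;
        if self.idx >= SEED_SIZE_BYTES {
            self.rehash_seed();
            self.rand()
        } else {
            int_from_byte_slice(&self.seed, self.idx - RAND_BYTES)
        }
    }

    /// Assumed helper: a uniform integer in [0, n), using rejection
    /// sampling to avoid modulo bias.
    fn rand_range(&mut self, n: u32) -> u32 {
        loop {
            let x = self.rand();
            if x < self.rand_max - (self.rand_max % n) {
                return x % n;
            }
        }
    }
}

/// Assumed big-endian interpretation of three bytes at `offset`.
fn int_from_byte_slice(source: &[u8], offset: usize) -> u32 {
    (u32::from(source[offset]) << 16)
        | (u32::from(source[offset + 1]) << 8)
        | u32::from(source[offset + 2])
}

/// Fisher-Yates-Durstenfeld: swap each element with one drawn
/// uniformly from the remainder of the list.
pub fn shuffle<T>(seed: &[u8], mut list: Vec<T>) -> Result<Vec<T>, ShuffleErr> {
    let mut rng = ShuffleRng::new(seed);
    if list.len() > rng.rand_max as usize {
        return Err(ShuffleErr::ExceedsListLength);
    }
    for i in 0..list.len().saturating_sub(1) {
        let remaining = (list.len() - i) as u32;
        let j = i + rng.rand_range(remaining) as usize;
        list.swap(i, j);
    }
    Ok(list)
}

fn main() {
    let list: Vec<usize> = (0..12).collect();
    // Deterministic for a fixed seed (values differ from the real crate
    // because the hash above is a stub).
    println!("{:?}", shuffle(b"4kn4driuctg8", list).unwrap());
}
```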
--- .../test_vectors/shuffle_test_vectors.yaml | 162 ------------------ 1 file changed, 162 deletions(-) delete mode 100644 beacon_chain/utils/shuffling/test_vectors/shuffle_test_vectors.yaml diff --git a/beacon_chain/utils/shuffling/test_vectors/shuffle_test_vectors.yaml b/beacon_chain/utils/shuffling/test_vectors/shuffle_test_vectors.yaml deleted file mode 100644 index e96f9e2d0..000000000 --- a/beacon_chain/utils/shuffling/test_vectors/shuffle_test_vectors.yaml +++ /dev/null @@ -1,162 +0,0 @@ -- input: [] - output: [] - seed: !!binary "" -- input: [0] - output: [0] - seed: !!binary "" -- input: [255] - output: [255] - seed: !!binary "" -- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5] - output: [1, 6, 4, 1, 6, 6, 2, 2, 4, 5] - seed: !!binary "" -- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] - output: [4, 7, 10, 13, 3, 1, 2, 9, 12, 6, 11, 8, 5] - seed: !!binary "" -- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5] - output: [1, 6, 65, 1, 6, 6, 2, 2, 4, 5] - seed: !!binary "" -- input: [] - output: [] - seed: !!binary | - JlAYJ5H2j8g7PLiPHZI/rTS1uAvKiieOrifPN6Moso0= -- input: [0] - output: [0] - seed: !!binary | - JlAYJ5H2j8g7PLiPHZI/rTS1uAvKiieOrifPN6Moso0= -- input: [255] - output: [255] - seed: !!binary | - JlAYJ5H2j8g7PLiPHZI/rTS1uAvKiieOrifPN6Moso0= -- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5] - output: [6, 4, 2, 5, 4, 2, 6, 6, 1, 1] - seed: !!binary | - JlAYJ5H2j8g7PLiPHZI/rTS1uAvKiieOrifPN6Moso0= -- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] - output: [13, 1, 9, 8, 3, 10, 6, 2, 5, 12, 11, 4, 7] - seed: !!binary | - JlAYJ5H2j8g7PLiPHZI/rTS1uAvKiieOrifPN6Moso0= -- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5] - output: [6, 65, 2, 5, 4, 2, 6, 6, 1, 1] - seed: !!binary | - JlAYJ5H2j8g7PLiPHZI/rTS1uAvKiieOrifPN6Moso0= -- input: [] - output: [] - seed: !!binary | - Jlpt1sTgPixEqKaQaX4rmZgxnh48U7igzJK1EjAkK0Q= -- input: [0] - output: [0] - seed: !!binary | - Jlpt1sTgPixEqKaQaX4rmZgxnh48U7igzJK1EjAkK0Q= -- input: [255] - output: [255] - seed: !!binary | - Jlpt1sTgPixEqKaQaX4rmZgxnh48U7igzJK1EjAkK0Q= -- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5] - output: [6, 2, 5, 1, 6, 4, 1, 2, 4, 6] - seed: !!binary | - Jlpt1sTgPixEqKaQaX4rmZgxnh48U7igzJK1EjAkK0Q= -- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] - output: [3, 8, 10, 4, 7, 11, 6, 1, 2, 5, 13, 9, 12] - seed: !!binary | - Jlpt1sTgPixEqKaQaX4rmZgxnh48U7igzJK1EjAkK0Q= -- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5] - output: [6, 2, 5, 1, 6, 4, 1, 2, 65, 6] - seed: !!binary | - Jlpt1sTgPixEqKaQaX4rmZgxnh48U7igzJK1EjAkK0Q= -- input: [] - output: [] - seed: !!binary | - zoulKyFpZkZG9AaUh1zxxcQ+unrNxwux10Svi4eBMmA= -- input: [0] - output: [0] - seed: !!binary | - zoulKyFpZkZG9AaUh1zxxcQ+unrNxwux10Svi4eBMmA= -- input: [255] - output: [255] - seed: !!binary | - zoulKyFpZkZG9AaUh1zxxcQ+unrNxwux10Svi4eBMmA= -- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5] - output: [5, 6, 2, 1, 6, 4, 6, 4, 1, 2] - seed: !!binary | - zoulKyFpZkZG9AaUh1zxxcQ+unrNxwux10Svi4eBMmA= -- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] - output: [12, 4, 11, 6, 13, 10, 9, 2, 3, 7, 8, 1, 5] - seed: !!binary | - zoulKyFpZkZG9AaUh1zxxcQ+unrNxwux10Svi4eBMmA= -- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5] - output: [5, 6, 2, 1, 6, 65, 6, 4, 1, 2] - seed: !!binary | - zoulKyFpZkZG9AaUh1zxxcQ+unrNxwux10Svi4eBMmA= -- input: [] - output: [] - seed: !!binary | - 3gEwWl7gY5XRPg5iZgowSp9hyk4Sf/u92B+t88JdAEw= -- input: [0] - output: [0] - seed: !!binary | - 3gEwWl7gY5XRPg5iZgowSp9hyk4Sf/u92B+t88JdAEw= -- input: [255] - output: [255] - seed: !!binary | - 3gEwWl7gY5XRPg5iZgowSp9hyk4Sf/u92B+t88JdAEw= -- input: [4, 6, 2, 
6, 1, 4, 6, 2, 1, 5] - output: [6, 2, 6, 5, 4, 4, 1, 6, 2, 1] - seed: !!binary | - 3gEwWl7gY5XRPg5iZgowSp9hyk4Sf/u92B+t88JdAEw= -- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] - output: [10, 12, 13, 3, 7, 11, 2, 4, 9, 8, 6, 5, 1] - seed: !!binary | - 3gEwWl7gY5XRPg5iZgowSp9hyk4Sf/u92B+t88JdAEw= -- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5] - output: [6, 2, 6, 5, 65, 4, 1, 6, 2, 1] - seed: !!binary | - 3gEwWl7gY5XRPg5iZgowSp9hyk4Sf/u92B+t88JdAEw= -- input: [] - output: [] - seed: !!binary | - mHQhqYMuvHeBZNa6VLLs3lZiqD1jWKP0cRzntvM5N+U= -- input: [0] - output: [0] - seed: !!binary | - mHQhqYMuvHeBZNa6VLLs3lZiqD1jWKP0cRzntvM5N+U= -- input: [255] - output: [255] - seed: !!binary | - mHQhqYMuvHeBZNa6VLLs3lZiqD1jWKP0cRzntvM5N+U= -- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5] - output: [6, 2, 4, 1, 2, 6, 5, 1, 6, 4] - seed: !!binary | - mHQhqYMuvHeBZNa6VLLs3lZiqD1jWKP0cRzntvM5N+U= -- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] - output: [11, 8, 12, 9, 2, 1, 10, 4, 13, 5, 7, 3, 6] - seed: !!binary | - mHQhqYMuvHeBZNa6VLLs3lZiqD1jWKP0cRzntvM5N+U= -- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5] - output: [6, 2, 65, 1, 2, 6, 5, 1, 6, 4] - seed: !!binary | - mHQhqYMuvHeBZNa6VLLs3lZiqD1jWKP0cRzntvM5N+U= -- input: [] - output: [] - seed: !!binary | - /1mYGDx96+OeS3HV1avIeE/pPQROpuhSjAj+t+H59DE= -- input: [0] - output: [0] - seed: !!binary | - /1mYGDx96+OeS3HV1avIeE/pPQROpuhSjAj+t+H59DE= -- input: [255] - output: [255] - seed: !!binary | - /1mYGDx96+OeS3HV1avIeE/pPQROpuhSjAj+t+H59DE= -- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5] - output: [2, 5, 1, 6, 1, 2, 6, 6, 4, 4] - seed: !!binary | - /1mYGDx96+OeS3HV1avIeE/pPQROpuhSjAj+t+H59DE= -- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] - output: [5, 13, 9, 7, 11, 10, 12, 2, 6, 8, 3, 1, 4] - seed: !!binary | - /1mYGDx96+OeS3HV1avIeE/pPQROpuhSjAj+t+H59DE= -- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5] - output: [2, 5, 1, 6, 1, 2, 6, 6, 65, 4] - seed: !!binary | - /1mYGDx96+OeS3HV1avIeE/pPQROpuhSjAj+t+H59DE= From 8ae3c9adc17253df9e0d7a8f91ebcfd2fffed6f3 Mon Sep 17 00:00:00 2001 From: Age Date: Wed, 3 Oct 2018 13:51:05 +1000 Subject: [PATCH 04/14] small clippy fixes --- lighthouse/client/mod.rs | 6 +++--- lighthouse/sync/network.rs | 1 - lighthouse/sync/wire_protocol.rs | 5 ++++- network-libp2p/src/service.rs | 6 +++--- network-libp2p/src/state.rs | 26 +++++++++++++------------- 5 files changed, 23 insertions(+), 21 deletions(-) diff --git a/lighthouse/client/mod.rs b/lighthouse/client/mod.rs index 8c65da1a5..4f8a8377a 100644 --- a/lighthouse/client/mod.rs +++ b/lighthouse/client/mod.rs @@ -38,7 +38,7 @@ impl Client { // Start the network thread let network_state = NetworkState::new( &config.data_dir, - &config.p2p_listen_port, + config.p2p_listen_port, &log).expect("Network setup failed"); let (network_thread, network_tx, network_rx) = { let (message_sender, message_receiver) = unbounded(); let (event_sender, event_receiver) = unbounded(); @@ -46,9 +46,9 @@ impl Client { let thread = thread::spawn(move || { network_listen( network_state, - event_sender, + &event_sender, message_receiver, - network_log, + &network_log, ); }); (thread, message_sender, event_receiver) diff --git a/lighthouse/sync/network.rs b/lighthouse/sync/network.rs index 1204a2093..45035c84d 100644 --- a/lighthouse/sync/network.rs +++ b/lighthouse/sync/network.rs @@ -70,7 +70,6 @@ fn handle_network_message( ); Ok(()) } - _ => Ok(()) } } Err(_) => { diff --git a/lighthouse/sync/wire_protocol.rs b/lighthouse/sync/wire_protocol.rs index 8f9c8bd57..e5dd75d30 100644 --- a/lighthouse/sync/wire_protocol.rs 
+++ b/lighthouse/sync/wire_protocol.rs @@ -4,13 +4,16 @@ pub enum WireMessageDecodeError { } pub enum WireMessageHeader { + Blocks, + /* + // Leave out until used Status, NewBlockHashes, GetBlockHashes, BlockHashes, GetBlocks, - Blocks, NewBlock, + */ } pub struct WireMessage<'a> { diff --git a/network-libp2p/src/service.rs b/network-libp2p/src/service.rs index 5994ef0d5..f9b062f85 100644 --- a/network-libp2p/src/service.rs +++ b/network-libp2p/src/service.rs @@ -37,9 +37,9 @@ use self::bytes::Bytes; pub use self::libp2p_floodsub::Message; pub fn listen(state: NetworkState, - events_to_app: UnboundedSender, + events_to_app: &UnboundedSender, raw_rx: UnboundedReceiver, - log: Logger) + log: &Logger) { let peer_store = state.peer_store; let peer_id = state.peer_id; @@ -83,7 +83,7 @@ pub fn listen(state: NetworkState, let kad_config = libp2p_kad::KademliaConfig { parallelism: 3, record_store: (), - peer_store: peer_store, + peer_store, local_peer_id: peer_id.clone(), timeout: Duration::from_secs(2) }; diff --git a/network-libp2p/src/state.rs b/network-libp2p/src/state.rs index 713def77c..e45741fe6 100644 --- a/network-libp2p/src/state.rs +++ b/network-libp2p/src/state.rs @@ -31,17 +31,17 @@ pub struct NetworkState { } impl NetworkState { - /// Create a new libp2p network state. Used to initialize + /// Create a new libp2p network state. Used to initialize /// network service. pub fn new( - // config: LighthouseConfig, + // config: LighthouseConfig, base_dir: &Path, - listen_port: &u16, - log: &Logger) + listen_port: u16, + log: &Logger) -> Result > { let curve = Secp256k1::new(); - let seckey = match + let seckey = match NetworkState::load_secret_key_from_pem_file(base_dir, &curve) { Ok(k) => k, @@ -71,23 +71,23 @@ impl NetworkState { /// Return a TCP multiaddress on 0.0.0.0 for a given port. pub fn multiaddr_on_port(port: &str) -> Multiaddr { - return format!("/ip4/0.0.0.0/tcp/{}", port) + format!("/ip4/0.0.0.0/tcp/{}", port) .parse::().unwrap() } pub fn add_peer(&mut self, - peer_id: PeerId, + peer_id: &PeerId, multiaddr: Multiaddr, duration_secs: u64) { self.peer_store.peer_or_create(&peer_id) .add_addr(multiaddr, Duration::from_secs(duration_secs)); } - /// Instantiate a SecretKey from a .pem file on disk. + /// Instantiate a SecretKey from a .pem file on disk. pub fn load_secret_key_from_pem_file( base_dir: &Path, curve: &Secp256k1) - -> Result> + -> Result> { let path = base_dir.join(LOCAL_PEM_FILE); let mut contents = String::new(); @@ -97,12 +97,12 @@ impl NetworkState { let key = SecretKey::from_slice(curve, &pem_key.contents)?; Ok(key) } - - /// Generate a new SecretKey and store it on disk as a .pem file. + + /// Generate a new SecretKey and store it on disk as a .pem file. 
pub fn generate_new_secret_key( base_dir: &Path, curve: &Secp256k1) - -> Result> + -> Result> { let mut rng = rand::thread_rng(); let sk = SecretKey::new(&curve, &mut rng); @@ -113,7 +113,7 @@ impl NetworkState { let s_string = pem::encode(&pem_key); let path = base_dir.join(LOCAL_PEM_FILE); let mut s_file = File::create(path)?; - s_file.write(s_string.as_bytes())?; + s_file.write_all(s_string.as_bytes())?; Ok(sk) } } From 95213609e8899ed7ee6a6798ed8d68c9d6ee9ddc Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 3 Oct 2018 18:37:28 +1000 Subject: [PATCH 05/14] Re-write readme --- README.md | 274 +++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 230 insertions(+), 44 deletions(-) diff --git a/README.md b/README.md index a3e82b162..ef3052c20 100644 --- a/README.md +++ b/README.md @@ -1,59 +1,245 @@ -# Lighthouse: a (future) Ethereum 2.0 client +# Lighthouse: an Ethereum 2.0 client [![Build Status](https://travis-ci.org/sigp/lighthouse.svg?branch=master)](https://travis-ci.org/sigp/lighthouse) [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/sigp/lighthouse?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) -A **work-in-progress** implementation of the Ethereum 2.0 Beacon Chain in Rust. +A work-in-progress, open-source implementation of the Ethereum 2.0 Beacon Chain, maintained +by Sigma Prime. -It is an implementation of the [Full PoS Casper chain -v2](https://notes.ethereum.org/SCIg8AH5SA-O4C1G1LYZHQ?view) spec and is also -largely based upon the -[ethereum/beacon_chain](https://github.com/ethereum/beacon_chain) repo. +## Introduction -**NOTE: the cryptography libraries used in this implementation are very +Lighthouse is an open-source Ethereum 2.0 client, in development. It is an +Ethereum 2.0-_only_ client, meaning it won't re-implement the present +proof-of-work protocol. This will help it stay focussed on 2.0 without +reproducing the work that other clients are already doing very well. + +This readme is split into two major sections: + +- [Lighthouse Client](#lighthouse-client): information about this + implemenation. +- [What is Ethereum 2.0](#what-is-ethereum-20): an introduction to Ethereum 2.0. + +If you'd like some background on Sigma Prime, please see the [Lighthouse Update +\#00](https://lighthouse.sigmaprime.io/update-00.html) blog post. + +## Lighthouse Client + + +### Goals + +We aim to contribute to the research and development of a secure, efficient and +decentralised Ethereum protocol through the development of an open-source +Ethereum 2.0 client. + +We aim to provide a secure and efficient open-source client. As well as +building an implementation, we seek to help maintain and improve the protocol +wherever possible. + +### Components + +Lighthouse is presently focussed on the Beacon Chain implementation. Here are +some of the components actively under development by the team: + +- **BLS cryptography**: we presently use the [Apache + Milagro](https://milagro.apache.org/) cryptography library to create and +verify BLS aggregate signatures. These signatures are core to Eth 2.0 and allow +the signatures of many validators to be compressed into a constant 96 bytes and +verified efficiently. We're presently maintaining our own [BLS aggregates +library](https://github.com/sigp/signature-schemes), gratefully forked from +@lovesh. +- **DoS-resistant block pre-processing**: processing blocks in proof-of-stake + is more resource intensive than proof-of-work. 
As such, clients need to +ensure that bad blocks can be rejected as efficiently as possible. We can +presently process a block with 10 million ETH staked in 0.006 seconds and +reject valid blocks even quicker. See the +[issue](https://github.com/ethereum/beacon_chain/issues/103) on +[ethereum/beacon_chain](https://github.com/ethereum/beacon_chain) +. +- **P2P networking**: Eth 2.0 is likely use the [libp2p + framework](https://libp2p.io/), lighthouse hopes to work alongside +[Parity](https://www.parity.io/) to get +[libp2p-rust](https://github.com/libp2p/rust-libp2p) fit-for-purpose. +- **Validator duties** : providing "validator" services to users who wish to + stake ETH. This involves maintaining a consistent view of the chain and +voting upon shard and beacon chain blocks. +- **New serialization formats**: lighthouse is working alongside the EF + researchers to develop "simpleserialize" a purpose-built serialization format +for sending information across the network. Check our our [SSZ +implementation](https://github.com/sigp/lighthouse/tree/master/beacon_chain/utils/ssz) +and our +[research](https://github.com/sigp/serialization_sandbox/blob/report/report/serialization_report.md) +on serialization formats. +- **Casper FFG fork-choice**: the [Casper + FFG](https://arxiv.org/abs/1710.09437) fork-choice rules allow the chain to +select a canonical chain in the case of a fork. +- **Efficient state transition logic**: "state transition" logic deals with + updating the validator set as valiators log in/out, penalising/rewarding +validators, rotating validators across shards, and many other core tasks. +- **Fuzzing and testing environments**: we preparing to implement lab + environments with CI work-flows to provide automated security analysis. + +On top of these components we're also working on database schemas, +RPC frameworks, specification development, database optimizations (e.g., +bloom-filters) and tons of other interesting stuff (at least we think so). + +### Contributing + +**Lighthouse welcomes contributors with open-arms.** + +Layer-1 infrastructure is a critical component of the ecosystem and relies +heavily on community contribution. Building Ethereum 2.0 is a huge task and we +refuse to "do an ICO" or charge licensing fees. Instead, we fund development +through grants and support from Sigma Prime. + +If you would like to learn more about Ethereum 2.0 and/or +[Rust](https://www.rust-lang.org/), we would be more than happy to on-board you +and assign you to some tasks. We aim to be as accepting and understanding as +possible; we are more than happy to up-skill contributors in exchange for their +help on the project. + +Alternatively, if you an ETH/Rust veteran we'd love to have your input. We're +always looking for the best way to do things and will always consider any +respectfully presented criticism. + +If you'd like to contribute, try having a look through the [open +issues](https://github.com/sigp/lighthouse/issues) (tip: look for the [good +first +issue](https://github.com/sigp/lighthouse/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) +tag) and ping us on the [gitter](https://gitter.im/sigp/lighthouse). We need +your support! + +### Running + +**NOTE: the cryptography libraries used in this implementation are experimental and as such all cryptography should be assumed to be insecure.** -## Motivation +The code-base is still under-development and does not provide any user-facing +functionality. 
However, there are a lot of tests and some benchmarks if that +sort of thing interests you. -The objective of this project is to build a purely Ethereum 2.0 client from -the ground up. - -As such, the early days of Lighthouse will be very much a research effort -- it -will be evolving on the bleeding-edge of specification without requiring to -maintain prod-grade stability or backwards-compatibility for the existing PoW -chain. - -Whilst the Beacon Chain relies upon the PoW chain for block hashes, Lighthouse -will need to run alongside an existing client (e.g., Geth, Parity Ethereum), -only being able to stand by itself once the PoW chain has been deprecated. - -Lighthouse aims to assist in advancing the progress of the following Ethereum -technologies: - - - Proof-of-Stake - - Sharding - - EVM alternatives (e.g., WASM) - - Scalable, topic-based P2P networks (e.g., libp2p-gossipsub) - - Scalable signature schemes (e.g, BLS aggregates) - -## Progress - -As of 02/08/2018, there is a basic libp2p implementation alongside a series of -state objects and state transition functions. There are no syncing capabilities. - -## Usage - -You can run the tests like this: +To run the tests, use ``` -$ git clone -$ cd rust_beacon_chain -$ cargo test +$ cargo test --all ``` +To run benchmarks, use + +``` +$ cargo bench --all +``` + +Lighthouse presently runs on Rust `stable`, however benchmarks require the +`nightly` version. + +### Engineering Ethos + +Lighthouse aims to produce many small, easily-tested components, each separated +into individual crates wherever possible. + +Generally, tests can be kept in the same file, as is typical in Rust. +Integration tests should be placed in the `tests` directory in the crates root. +Particularity large (line-count) tests should be separated into another file. + +A function is not complete until it is tested. We produce tests to help +ourselves and others understand others code and to provide protection from +regression (breaking stuff accidentally). + +Each PR is to be reviewed by at-least one "core developer" (i.e., someone with +write-access to the repository). This helps to detect bugs, improve consistency +and relieves any one individual of the responsibility of an error. + +Discussion should be respectful and intellectual. Have fun, make jokes but +respect other peoples limits. + +### Directory Structure + +Here we provide an overview of the directory structure: + +- `\beacon_chain`: contains logic derived directly from the specification. + E.g., shuffling algorithms, state transition logic and structs, block +validation, BLS crypto, etc. +- `\lighthouse`: contains logic specific to this client implementation. E.g., + CLI parsing, RPC end-points, databases, etc. +- `\network-libp2p`: contains a proof-of-concept libp2p implementation. Will be + replaced once research around p2p has been finalized. + ## Contact -This repo is presently authored by Paul Hauner as a -[Sigma Prime](https://github.com/sigp) project. +The best place for discussion is the [sigp/lighthouse](https://gitter.im/sigp/lighthouse) gitter. +Ping @paulhauner or @AgeManning to get the quickest response. + + +# What is Ethereum 2.0 + +Ethereum 2.0 is the name that has been given to a new blockchain being +developed by the Ethereum Foundation and the Ethereum community. Ethereum 2.0 +consists of 1,025 proof-of-stake blockchains; the "beacon chain" and 1,024 +"shard chains". 
+ +## Beacon Chain + +The Beacon Chain is a little different to more common blockchains like Bitcoin +and present-Ethereum in that it doesn't process "transactions", per say. +Instead, it maintains a set of bonded (staked) validators and co-ordinates them +to provide services to a static set of "sub-blockchains" (shards). These shards +then process the normal "5 ETH from A to B" transactions in _parallel_ whilst +deferring consensus to the Beacon Chain. + +Here are some of the major services that the beacon chain provides to its +shards: + +- A source of entropy, likely using a [RANDAO + VDF + scheme](https://ethresear.ch/t/minimal-vdf-randomness-beacon/3566). +- Valdidator management, including: + - Inducting and ejecting validators. + - Delegating randomly-shuffled subsets of validators to validate shards. + - Penalising and rewarding validators. +- Proof-of-stake consensus for shard chain blocks. + +## Shard Chains + +Shards can be thought of like CPU cores, they're a lane where transactions can +execute in series (one-after-another). Presently, Ethereum is single-core and +can only _fully_ process one transaction at a time. Sharding allows multiple +transactions to happen in parallel, greatly increasing the per-second +transaction capacity of Ethereum. + +Each shard is proof-of-stake and shares its validators (stakers) with the other +shards as the beacon chain rotates validators pseudo-randomly across shards. +Shards will likely be the basis of very interesting layer-2 transaction +processing schemes, however we won't get into that here. + +## The Proof-of-Work Chain + +The proof-of-work chain will hold a contract to allow addresses to deposit 32 +ETH, a BLS public key and some [other +parameters](https://github.com/ethereum/eth2.0-specs/blob/master/specs/casper_sharding_v2.1.md#pow-chain-changes) +to allow them to become Beacon Chain validators. Each Beacon Chain will +reference a PoW block hash allowing PoW clients to use the Beacon Chain as a +source of [Casper FFG finality](https://arxiv.org/abs/1710.09437), if they +desire. + +## Ethereum 2.0 Progress + +Ethereum 2.0 is not fully specified and there's no working implementation. Some +teams have demos available, however these demos indicate _progress_, not +completion. We look forward to providing user functionality once we are ready +to provide a minimum-viable user experience. + +The work-in-progress specification lives +[here](https://github.com/ethereum/eth2.0-specs/blob/master/specs/casper_sharding_v2.1.md) +in the [ethereum/eth2.0-specs](https://github.com/ethereum/eth2.0-specs) +repository. It is still in a draft phase, however there are several teams +already implementing it. This spec is being developed by Ethereum Foundation +researchers, most notably Vitalik Buterin. There is active discussion about the +spec in the [ethereum/sharding](https://gitter.im/ethereum/sharding) gitter +channel. There is a proof-of-concept implementation in Python at +[ethereum/beacon_chain](https://github.com/ethereum/beacon_chain). + +Presently, the spec almost exclusively defines the Beacon Chain as it +is the focus of present development efforts. Progress on shard chain +specification will soon follow. + + + -The best place for discussion is probably the [ethereum/sharding -gitter](https://gitter.im/ethereum/sharding). 
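The Beacon Chain section above lists "a source of entropy, likely using a RANDAO + VDF scheme" among the services provided to shards. The commit-reveal half of that idea is simple to sketch: each validator commits to the hash of a secret, later reveals the secret, and the chain folds valid reveals into an entropy mix. The sketch below is purely conceptual — the VDF step is omitted, the `hash` is a toy stand-in, and all names are invented for illustration; it is not the specification's construction.

```rust
// Conceptual commit-reveal sketch of RANDAO-style entropy. All names
// and the toy `hash` are illustrative assumptions, not the Eth 2.0 spec.
use std::collections::HashMap;

fn hash(bytes: &[u8]) -> [u8; 32] {
    // Toy stand-in so the example runs; a real chain uses a
    // cryptographic hash function.
    let mut out = [0u8; 32];
    for (i, b) in bytes.iter().enumerate() {
        out[i % 32] ^= b.rotate_left((i % 7) as u32);
    }
    out
}

#[derive(Default)]
struct RandaoRound {
    /// Validator index -> hash(secret), supplied in advance.
    commitments: HashMap<u64, [u8; 32]>,
    /// Running XOR of all validly revealed secrets.
    mix: [u8; 32],
}

impl RandaoRound {
    fn commit(&mut self, validator: u64, commitment: [u8; 32]) {
        self.commitments.insert(validator, commitment);
    }

    /// Accept a reveal only if it matches the prior commitment, then
    /// fold it into the mix. Each commitment may be revealed once.
    fn reveal(&mut self, validator: u64, secret: &[u8]) -> bool {
        if self.commitments.get(&validator).copied() == Some(hash(secret)) {
            self.commitments.remove(&validator);
            for (m, s) in self.mix.iter_mut().zip(hash(secret).iter()) {
                *m ^= s;
            }
            true
        } else {
            false
        }
    }
}

fn main() {
    let mut round = RandaoRound::default();
    round.commit(0, hash(b"validator-0-secret"));
    round.commit(1, hash(b"validator-1-secret"));
    assert!(round.reveal(0, b"validator-0-secret"));
    assert!(!round.reveal(1, b"wrong-secret")); // mismatched reveal rejected
    println!("entropy mix: {:?}", round.mix);
}
```

In the full scheme a verifiable delay function is applied on top of the mix, so the last revealer cannot grind the output by choosing whether or not to reveal.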
From f765f4e08d84346a936e77859b2cb91110a9c007 Mon Sep 17 00:00:00 2001 From: mehdi Date: Wed, 3 Oct 2018 20:29:39 +1000 Subject: [PATCH 06/14] Minor typo fixes --- README.md | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index ef3052c20..2e01f5b2b 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ reproducing the work that other clients are already doing very well. This readme is split into two major sections: - [Lighthouse Client](#lighthouse-client): information about this - implemenation. + implementation. - [What is Ethereum 2.0](#what-is-ethereum-20): an introduction to Ethereum 2.0. If you'd like some background on Sigma Prime, please see the [Lighthouse Update @@ -54,7 +54,7 @@ reject valid blocks even quicker. See the [issue](https://github.com/ethereum/beacon_chain/issues/103) on [ethereum/beacon_chain](https://github.com/ethereum/beacon_chain) . -- **P2P networking**: Eth 2.0 is likely use the [libp2p +- **P2P networking**: Eth 2.0 is likely to use the [libp2p framework](https://libp2p.io/), lighthouse hopes to work alongside [Parity](https://www.parity.io/) to get [libp2p-rust](https://github.com/libp2p/rust-libp2p) fit-for-purpose. @@ -72,9 +72,9 @@ on serialization formats. FFG](https://arxiv.org/abs/1710.09437) fork-choice rules allow the chain to select a canonical chain in the case of a fork. - **Efficient state transition logic**: "state transition" logic deals with - updating the validator set as valiators log in/out, penalising/rewarding + updating the validator set as validators log in/out, penalising/rewarding validators, rotating validators across shards, and many other core tasks. -- **Fuzzing and testing environments**: we preparing to implement lab +- **Fuzzing and testing environments**: we are preparing to implement lab environments with CI work-flows to provide automated security analysis. On top of these components we're also working on database schemas, @@ -149,7 +149,7 @@ write-access to the repository). This helps to detect bugs, improve consistency and relieves any one individual of the responsibility of an error. Discussion should be respectful and intellectual. Have fun, make jokes but -respect other peoples limits. +respect other people's limits. ### Directory Structure @@ -190,7 +190,7 @@ shards: - A source of entropy, likely using a [RANDAO + VDF scheme](https://ethresear.ch/t/minimal-vdf-randomness-beacon/3566). -- Valdidator management, including: +- Validator management, including: - Inducting and ejecting validators. - Delegating randomly-shuffled subsets of validators to validate shards. - Penalising and rewarding validators. @@ -239,7 +239,3 @@ channel. There is a proof-of-concept implementation in Python at Presently, the spec almost exclusively defines the Beacon Chain as it is the focus of present development efforts. Progress on shard chain specification will soon follow. - - - - From dde6353efdb9b48f22914bbb076c72fb755d4ae7 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 4 Oct 2018 10:11:02 +1000 Subject: [PATCH 07/14] Implement @peanutFactory's comments --- README.md | 135 ++++++++++++++++++++++++++---------------------------- 1 file changed, 65 insertions(+), 70 deletions(-) diff --git a/README.md b/README.md index ef3052c20..c5c5809b2 100644 --- a/README.md +++ b/README.md @@ -7,10 +7,11 @@ by Sigma Prime. ## Introduction -Lighthouse is an open-source Ethereum 2.0 client, in development. 
It is an -Ethereum 2.0-_only_ client, meaning it won't re-implement the present -proof-of-work protocol. This will help it stay focussed on 2.0 without -reproducing the work that other clients are already doing very well. +Lighthouse is an open-source Ethereum 2.0 client, in development. Designed as +an Ethereum 2.0-only client, Lighthouse will not re-implement the existing +proof-of-work protocol. Maintaining a forward-focus on Ethereum 2.0 ensures +that Lighthouse will avoid reproducing the high-quality work already undertaken +by existing clients. This readme is split into two major sections: @@ -30,40 +31,40 @@ We aim to contribute to the research and development of a secure, efficient and decentralised Ethereum protocol through the development of an open-source Ethereum 2.0 client. -We aim to provide a secure and efficient open-source client. As well as -building an implementation, we seek to help maintain and improve the protocol -wherever possible. +In addition to building an implementation, we seek to help maintain and improve +the protocol wherever possible. ### Components -Lighthouse is presently focussed on the Beacon Chain implementation. Here are -some of the components actively under development by the team: +The following list describes some of the components actively under development +by the team: - **BLS cryptography**: we presently use the [Apache Milagro](https://milagro.apache.org/) cryptography library to create and -verify BLS aggregate signatures. These signatures are core to Eth 2.0 and allow -the signatures of many validators to be compressed into a constant 96 bytes and -verified efficiently. We're presently maintaining our own [BLS aggregates -library](https://github.com/sigp/signature-schemes), gratefully forked from -@lovesh. +verify BLS aggregate signatures. BLS signatures are core to Eth 2.0 as they +allow the signatures of many validators to be compressed into a constant 96 +bytes and verified efficiently.. We're presently maintaining our own [BLS +aggregates library](https://github.com/sigp/signature-schemes), gratefully +forked from @lovesh. - **DoS-resistant block pre-processing**: processing blocks in proof-of-stake is more resource intensive than proof-of-work. As such, clients need to ensure that bad blocks can be rejected as efficiently as possible. We can presently process a block with 10 million ETH staked in 0.006 seconds and -reject valid blocks even quicker. See the +reject invalid blocks even quicker. See the [issue](https://github.com/ethereum/beacon_chain/issues/103) on [ethereum/beacon_chain](https://github.com/ethereum/beacon_chain) . -- **P2P networking**: Eth 2.0 is likely use the [libp2p - framework](https://libp2p.io/), lighthouse hopes to work alongside +- **P2P networking**: Eth 2.0 will likely use the [libp2p + framework](https://libp2p.io/). Lighthouse aims to work alongside [Parity](https://www.parity.io/) to get [libp2p-rust](https://github.com/libp2p/rust-libp2p) fit-for-purpose. -- **Validator duties** : providing "validator" services to users who wish to - stake ETH. This involves maintaining a consistent view of the chain and -voting upon shard and beacon chain blocks. -- **New serialization formats**: lighthouse is working alongside the EF - researchers to develop "simpleserialize" a purpose-built serialization format -for sending information across the network. Check our our [SSZ +- **Validator duties** : the project involves the development of "validator" + services for users who wish to stake ETH. 
To fulfil their duties, validators +require a consistent view of the chain and the ability to vote upon both shard +and beacon chain blocks.. +- **New serialization formats**: lighthouse is working alongside EF researchers + to develop "simpleserialize" a purpose-built serialization format for sending +information across the network. Check out our [SSZ implementation](https://github.com/sigp/lighthouse/tree/master/beacon_chain/utils/ssz) and our [research](https://github.com/sigp/serialization_sandbox/blob/report/report/serialization_report.md) @@ -71,14 +72,14 @@ on serialization formats. - **Casper FFG fork-choice**: the [Casper FFG](https://arxiv.org/abs/1710.09437) fork-choice rules allow the chain to select a canonical chain in the case of a fork. -- **Efficient state transition logic**: "state transition" logic deals with - updating the validator set as valiators log in/out, penalising/rewarding -validators, rotating validators across shards, and many other core tasks. -- **Fuzzing and testing environments**: we preparing to implement lab - environments with CI work-flows to provide automated security analysis. +- **Efficient state transition logic**: "state transition" logic governs + updates to the validator set as validators log in/out, penalises/rewards +validators, rotates validators across shards, and implements other core tasks. +- **Fuzzing and testing environments**: we are preparing to implement lab +environments with CI work-flows to provide automated security analysis.. -On top of these components we're also working on database schemas, -RPC frameworks, specification development, database optimizations (e.g., +In addition to these components we're also working on database schemas, RPC +frameworks, specification development, database optimizations (e.g., bloom-filters) and tons of other interesting stuff (at least we think so). ### Contributing @@ -97,8 +98,8 @@ possible; we are more than happy to up-skill contributors in exchange for their help on the project. Alternatively, if you an ETH/Rust veteran we'd love to have your input. We're -always looking for the best way to do things and will always consider any -respectfully presented criticism. +always looking for the best way to implement things and will consider any +respectful criticism. If you'd like to contribute, try having a look through the [open issues](https://github.com/sigp/lighthouse/issues) (tip: look for the [good @@ -113,10 +114,10 @@ your support! experimental and as such all cryptography should be assumed to be insecure.** The code-base is still under-development and does not provide any user-facing -functionality. However, there are a lot of tests and some benchmarks if that -sort of thing interests you. +functionality. For developers and researchers, there are tests and benchmarks +which could be of interest. -To run the tests, use +To run tests, use ``` $ cargo test --all @@ -128,7 +129,7 @@ To run benchmarks, use $ cargo bench --all ``` -Lighthouse presently runs on Rust `stable`, however benchmarks require the +Lighthouse presently runs on Rust `stable`, however, benchmarks require the `nightly` version. ### Engineering Ethos @@ -140,9 +141,9 @@ Generally, tests can be kept in the same file, as is typical in Rust. Integration tests should be placed in the `tests` directory in the crates root. Particularity large (line-count) tests should be separated into another file. -A function is not complete until it is tested. 
We produce tests to help -ourselves and others understand others code and to provide protection from -regression (breaking stuff accidentally). +A function is not complete until it is tested. We produce tests to protect +against regression (accidentally breaking things) and to help those who read +our code to understand how the function should (or shouldn't) be used. Each PR is to be reviewed by at-least one "core developer" (i.e., someone with write-access to the repository). This helps to detect bugs, improve consistency @@ -171,22 +172,21 @@ Ping @paulhauner or @AgeManning to get the quickest response. # What is Ethereum 2.0 -Ethereum 2.0 is the name that has been given to a new blockchain being -developed by the Ethereum Foundation and the Ethereum community. Ethereum 2.0 +Ethereum 2.0 refers to a new blockchain currently under development +by the Ethereum Foundation and the Ethereum community. The Ethereum 2.0 blockchain consists of 1,025 proof-of-stake blockchains; the "beacon chain" and 1,024 "shard chains". ## Beacon Chain -The Beacon Chain is a little different to more common blockchains like Bitcoin -and present-Ethereum in that it doesn't process "transactions", per say. -Instead, it maintains a set of bonded (staked) validators and co-ordinates them -to provide services to a static set of "sub-blockchains" (shards). These shards -then process the normal "5 ETH from A to B" transactions in _parallel_ whilst -deferring consensus to the Beacon Chain. +The Beacon Chain differs from existing blockchains such as Bitcoin and +Ethereum, in that it doesn't process "transactions", per say. Instead, it +maintains a set of bonded (staked) validators and co-ordinates these to provide +services to a static set of "sub-blockchains" (shards). These shards process +normal transactions, such as "5 ETH from A to B", in parallel whilst deferring +consensus to the Beacon Chain. -Here are some of the major services that the beacon chain provides to its -shards: +Major services provided by the beacon chain to its shards include the following: - A source of entropy, likely using a [RANDAO + VDF scheme](https://ethresear.ch/t/minimal-vdf-randomness-beacon/3566). @@ -198,48 +198,43 @@ shards: ## Shard Chains -Shards can be thought of like CPU cores, they're a lane where transactions can +Shards can be thought of like CPU cores - they're a lane where transactions can execute in series (one-after-another). Presently, Ethereum is single-core and can only _fully_ process one transaction at a time. Sharding allows multiple transactions to happen in parallel, greatly increasing the per-second transaction capacity of Ethereum. -Each shard is proof-of-stake and shares its validators (stakers) with the other +Each shard uses proof-of-stake and shares its validators (stakers) with the other shards as the beacon chain rotates validators pseudo-randomly across shards. Shards will likely be the basis of very interesting layer-2 transaction -processing schemes, however we won't get into that here. +processing schemes, however, we won't get into that here. ## The Proof-of-Work Chain -The proof-of-work chain will hold a contract to allow addresses to deposit 32 +The proof-of-work chain will hold a contract that allows accounts to deposit 32 ETH, a BLS public key and some [other parameters](https://github.com/ethereum/eth2.0-specs/blob/master/specs/casper_sharding_v2.1.md#pow-chain-changes) to allow them to become Beacon Chain validators. 
Each Beacon Chain will reference a PoW block hash allowing PoW clients to use the Beacon Chain as a -source of [Casper FFG finality](https://arxiv.org/abs/1710.09437), if they -desire. +source of [Casper FFG finality](https://arxiv.org/abs/1710.09437), if desired. ## Ethereum 2.0 Progress Ethereum 2.0 is not fully specified and there's no working implementation. Some -teams have demos available, however these demos indicate _progress_, not -completion. We look forward to providing user functionality once we are ready -to provide a minimum-viable user experience. +teams have demos available which indicate progress, but not a complete product. +We look forward to providing user functionality once we are ready to provide a +minimum-viable user experience. The work-in-progress specification lives [here](https://github.com/ethereum/eth2.0-specs/blob/master/specs/casper_sharding_v2.1.md) in the [ethereum/eth2.0-specs](https://github.com/ethereum/eth2.0-specs) -repository. It is still in a draft phase, however there are several teams -already implementing it. This spec is being developed by Ethereum Foundation -researchers, most notably Vitalik Buterin. There is active discussion about the -spec in the [ethereum/sharding](https://gitter.im/ethereum/sharding) gitter -channel. There is a proof-of-concept implementation in Python at +repository. The spec is still in a draft phase, however there are several teams +already implementing it whilst the Ethereum Foundation research team fill in +the gaps. There is active discussion about the spec in the +[ethereum/sharding](https://gitter.im/ethereum/sharding) gitter channel. A +proof-of-concept implementation in Python is available at [ethereum/beacon_chain](https://github.com/ethereum/beacon_chain). -Presently, the spec almost exclusively defines the Beacon Chain as it -is the focus of present development efforts. Progress on shard chain -specification will soon follow. - - - - +Presently, the spec almost exclusively defines the Beacon Chain, which is the +focus of present development efforts. Progress on shard chain specification +will soon follow. From 277503a559ef61fe4e22d1c2ea619c269f013748 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 4 Oct 2018 10:18:21 +1000 Subject: [PATCH 08/14] Fix missed merge conflict (Doh!) --- README.md | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/README.md b/README.md index 02f98e933..5a2637dd5 100644 --- a/README.md +++ b/README.md @@ -54,13 +54,8 @@ reject invalid blocks even quicker. See the [issue](https://github.com/ethereum/beacon_chain/issues/103) on [ethereum/beacon_chain](https://github.com/ethereum/beacon_chain) . -<<<<<<< HEAD - **P2P networking**: Eth 2.0 will likely use the [libp2p framework](https://libp2p.io/). Lighthouse aims to work alongside -======= -- **P2P networking**: Eth 2.0 is likely to use the [libp2p - framework](https://libp2p.io/), lighthouse hopes to work alongside ->>>>>>> f765f4e08d84346a936e77859b2cb91110a9c007 [Parity](https://www.parity.io/) to get [libp2p-rust](https://github.com/libp2p/rust-libp2p) fit-for-purpose. - **Validator duties** : the project involves the development of "validator" @@ -77,7 +72,6 @@ on serialization formats. - **Casper FFG fork-choice**: the [Casper FFG](https://arxiv.org/abs/1710.09437) fork-choice rules allow the chain to select a canonical chain in the case of a fork. 
-<<<<<<< HEAD - **Efficient state transition logic**: "state transition" logic governs updates to the validator set as validators log in/out, penalises/rewards validators, rotates validators across shards, and implements other core tasks. @@ -86,16 +80,6 @@ environments with CI work-flows to provide automated security analysis.. In addition to these components we're also working on database schemas, RPC frameworks, specification development, database optimizations (e.g., -======= -- **Efficient state transition logic**: "state transition" logic deals with - updating the validator set as validators log in/out, penalising/rewarding -validators, rotating validators across shards, and many other core tasks. -- **Fuzzing and testing environments**: we are preparing to implement lab - environments with CI work-flows to provide automated security analysis. - -On top of these components we're also working on database schemas, -RPC frameworks, specification development, database optimizations (e.g., ->>>>>>> f765f4e08d84346a936e77859b2cb91110a9c007 bloom-filters) and tons of other interesting stuff (at least we think so). ### Contributing From 1d0d6a1b0c2226a5f25ae51abc8e5d1a474b8aa8 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 4 Oct 2018 10:49:28 +1000 Subject: [PATCH 09/14] Implement MZ's comments --- README.md | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 5a2637dd5..a3d11b19b 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,10 @@ Lighthouse is an open-source Ethereum 2.0 client, in development. Designed as an Ethereum 2.0-only client, Lighthouse will not re-implement the existing proof-of-work protocol. Maintaining a forward-focus on Ethereum 2.0 ensures that Lighthouse will avoid reproducing the high-quality work already undertaken -by existing clients. +by existing clients. For present-Ethereum functionality, Lighthouse will +connect to existing clients like +[Geth](https://github.com/ethereum/go-ethereum) or +[Parity-Ethereum](https://github.com/paritytech/parity-ethereum) via RPC. This readme is split into two major sections: @@ -20,7 +23,8 @@ This readme is split into two major sections: - [What is Ethereum 2.0](#what-is-ethereum-20): an introduction to Ethereum 2.0. If you'd like some background on Sigma Prime, please see the [Lighthouse Update -\#00](https://lighthouse.sigmaprime.io/update-00.html) blog post. +\#00](https://lighthouse.sigmaprime.io/update-00.html) blog post or our +[website](https://sigmaprime.io). ## Lighthouse Client @@ -88,8 +92,8 @@ bloom-filters) and tons of other interesting stuff (at least we think so). Layer-1 infrastructure is a critical component of the ecosystem and relies heavily on community contribution. Building Ethereum 2.0 is a huge task and we -refuse to "do an ICO" or charge licensing fees. Instead, we fund development -through grants and support from Sigma Prime. +refuse to conduct an inappropriate ICO or charge licensing fees. Instead, we +fund development through grants and support from Sigma Prime. If you would like to learn more about Ethereum 2.0 and/or [Rust](https://www.rust-lang.org/), we would be more than happy to on-board you @@ -218,6 +222,10 @@ to allow them to become Beacon Chain validators. Each Beacon Chain will reference a PoW block hash allowing PoW clients to use the Beacon Chain as a source of [Casper FFG finality](https://arxiv.org/abs/1710.09437), if desired. 
+It is a requirement that ETH can move freely between shard chains and between +Eth 2.0 and present-Ethereum. The exact mechanics of these transfers are still +a topic of research and their details are yet to be confirmed. + ## Ethereum 2.0 Progress Ethereum 2.0 is not fully specified and there's no working implementation. Some From 175c19d8bf56292deebaa467f142c6af31f6120c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 4 Oct 2018 10:54:59 +1000 Subject: [PATCH 10/14] Update readme --- README.md | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index a3d11b19b..01ce24b59 100644 --- a/README.md +++ b/README.md @@ -7,15 +7,6 @@ by Sigma Prime. ## Introduction -Lighthouse is an open-source Ethereum 2.0 client, in development. Designed as -an Ethereum 2.0-only client, Lighthouse will not re-implement the existing -proof-of-work protocol. Maintaining a forward-focus on Ethereum 2.0 ensures -that Lighthouse will avoid reproducing the high-quality work already undertaken -by existing clients. For present-Ethereum functionality, Lighthouse will -connect to existing clients like -[Geth](https://github.com/ethereum/go-ethereum) or -[Parity-Ethereum](https://github.com/paritytech/parity-ethereum) via RPC. - This readme is split into two major sections: - [Lighthouse Client](#lighthouse-client): information about this @@ -28,6 +19,14 @@ If you'd like some background on Sigma Prime, please see the [Lighthouse Update ## Lighthouse Client +Lighthouse is an open-source Ethereum 2.0 client, in development. Designed as +an Ethereum 2.0-only client, Lighthouse will not re-implement the existing +proof-of-work protocol. Maintaining a forward-focus on Ethereum 2.0 ensures +that Lighthouse will avoid reproducing the high-quality work already undertaken +by existing clients. For present-Ethereum functionality, Lighthouse will +connect to existing clients like +[Geth](https://github.com/ethereum/go-ethereum) or +[Parity-Ethereum](https://github.com/paritytech/parity-ethereum) via RPC. ### Goals From fd0de57aa729646f6eff674ae78786efc91702e6 Mon Sep 17 00:00:00 2001 From: Age Date: Thu, 4 Oct 2018 14:51:27 +1000 Subject: [PATCH 11/14] Remove dead code --- beacon_chain/types/src/lib.rs | 8 -------- beacon_chain/types/src/mod.rs | 20 -------------------- 2 files changed, 28 deletions(-) delete mode 100644 beacon_chain/types/src/mod.rs diff --git a/beacon_chain/types/src/lib.rs b/beacon_chain/types/src/lib.rs index 683560ab0..4b2d91f9d 100644 --- a/beacon_chain/types/src/lib.rs +++ b/beacon_chain/types/src/lib.rs @@ -39,11 +39,3 @@ pub type AttesterMap = HashMap<(u64, u16), Vec>; /// Maps a slot to a block proposer. 
pub type ProposerMap = HashMap; - -#[cfg(test)] -mod tests { - #[test] - fn it_works() { - assert_eq!(2 + 2, 4); - } -} diff --git a/beacon_chain/types/src/mod.rs b/beacon_chain/types/src/mod.rs deleted file mode 100644 index b49ab52cf..000000000 --- a/beacon_chain/types/src/mod.rs +++ /dev/null @@ -1,20 +0,0 @@ -extern crate rlp; -extern crate ethereum_types; -extern crate blake2_rfc as blake2; -extern crate bytes; -extern crate ssz; - -mod common; - -pub mod active_state; -pub mod attestation_record; -pub mod crystallized_state; -pub mod chain_config; -pub mod block; -pub mod crosslink_record; -pub mod shard_and_committee; -pub mod validator_record; - -use super::bls; -use super::db; -use super::utils; From 2770cb4490d03c2585bff554f67f34ead156694c Mon Sep 17 00:00:00 2001 From: John Omar Date: Fri, 5 Oct 2018 17:19:16 -0700 Subject: [PATCH 12/14] create onboarding.md Added some reading material for people new to Rust and/or Ethereum. My goal is to expand the onboarding docs to include specific guides on getting started on Lighthouse. --- docs/onboarding.md | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 docs/onboarding.md diff --git a/docs/onboarding.md b/docs/onboarding.md new file mode 100644 index 000000000..2600770a7 --- /dev/null +++ b/docs/onboarding.md @@ -0,0 +1,33 @@ +# Learn how to contribute to ETH 2.0! + +Lighthouse is an Ethereum 2.0 client built in Rust. + +If you are interested in contributing to the Ethereum ecosystem, and you want to learn Rust, Lighthouse is a great project to work on. + +Initially this wiki will contain reading material to help get you started in Rust and Ethereum. Eventually it will have guides specific to Lighthouse. + +## Learn Rust + +* [The Rust Programming Language](https://doc.rust-lang.org/book/2018-edition/index.html) + +## Learn Ethereum + +#### General Ethereum Resources +* [What is Ethereum](http://ethdocs.org/en/latest/introduction/what-is-ethereum.html) +* [Ethereum Introduction](https://github.com/ethereum/wiki/wiki/Ethereum-introduction) + +#### Ethereum 2.0 +* [Ethereum 2.0 Spec - Casper and Sharding](https://github.com/ethereum/eth2.0-specs/blob/master/specs/beacon-chain.md) + +#### Sharding + +* [How to Scale Ethereum: Sharding Explained](https://medium.com/prysmatic-labs/how-to-scale-ethereum-sharding-explained-ba2e283b7fce) + +#### Casper + +* [Proof of Stake - Casper FFG](https://www.youtube.com/watch?v=uQ3IqLDf-oo) +* [Beacon Casper Chain](https://www.youtube.com/watch?v=GAywmwGToUI) + +### TODO +- add reading material as we discover. +- start developing guides specific to lighthouse. From a49143f1f24a1fac97c7a147828aeed85c63cc7c Mon Sep 17 00:00:00 2001 From: John Omar Date: Fri, 5 Oct 2018 17:20:53 -0700 Subject: [PATCH 13/14] Typo on word usage --- docs/onboarding.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/onboarding.md b/docs/onboarding.md index 2600770a7..47fb140f1 100644 --- a/docs/onboarding.md +++ b/docs/onboarding.md @@ -4,7 +4,7 @@ Lighthouse is an Ethereum 2.0 client built in Rust. If you are interested in contributing to the Ethereum ecosystem, and you want to learn Rust, Lighthouse is a great project to work on. -Initially this wiki will contain reading material to help get you started in Rust and Ethereum. Eventually it will have guides specific to Lighthouse. +Initially this doc will contain reading material to help get you started in Rust and Ethereum. Eventually it will have guides specific to Lighthouse. 
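Note on patch 11 above: the generic parameters of the two type aliases kept in
`beacon_chain/types/src/lib.rs` (`AttesterMap` and `ProposerMap`) have been
lost from this text. A plausible reconstruction, inferred from their doc
comments, is sketched below; the `usize` validator-index and `(u64, u16)` key
types are assumptions.

```rust
use std::collections::HashMap;

// Plausible reconstruction of the aliases; type parameters are inferred.
/// Maps a (slot, shard) pair to the indices of its attesting validators.
pub type AttesterMap = HashMap<(u64, u16), Vec<usize>>;

/// Maps a slot to the validator index of its block proposer.
pub type ProposerMap = HashMap<u64, usize>;

fn main() {
    let mut proposers: ProposerMap = HashMap::new();
    proposers.insert(0, 42); // slot 0 is proposed by validator 42
    assert_eq!(proposers.get(&0), Some(&42));
}
```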
## Learn Rust From a65531ba950e41f5262222f4fd965778da81e321 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 9 Oct 2018 13:36:54 +1100 Subject: [PATCH 14/14] Remove all libp2p and syncing code This will all need to be rebuilt in the future. This code will be available at a "legacy_libp2p" branch. --- Cargo.toml | 1 - lighthouse/client/mod.rs | 83 -------- lighthouse/main.rs | 8 +- lighthouse/sync/mod.rs | 12 -- lighthouse/sync/network.rs | 86 --------- lighthouse/sync/sync_future.rs | 48 ----- lighthouse/sync/wire_protocol.rs | 92 --------- network-libp2p/Cargo.toml | 24 --- network-libp2p/README.md | 7 - network-libp2p/src/lib.rs | 10 - network-libp2p/src/message.rs | 18 -- network-libp2p/src/service.rs | 315 ------------------------------- network-libp2p/src/state.rs | 119 ------------ 13 files changed, 2 insertions(+), 821 deletions(-) delete mode 100644 lighthouse/client/mod.rs delete mode 100644 lighthouse/sync/mod.rs delete mode 100644 lighthouse/sync/network.rs delete mode 100644 lighthouse/sync/sync_future.rs delete mode 100644 lighthouse/sync/wire_protocol.rs delete mode 100644 network-libp2p/Cargo.toml delete mode 100644 network-libp2p/README.md delete mode 100644 network-libp2p/src/lib.rs delete mode 100644 network-libp2p/src/message.rs delete mode 100644 network-libp2p/src/service.rs delete mode 100644 network-libp2p/src/state.rs diff --git a/Cargo.toml b/Cargo.toml index de69506de..fa6728a29 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,7 +12,6 @@ clap = "2.32.0" db = { path = "lighthouse/db" } dirs = "1.0.3" futures = "0.1.23" -network-libp2p = { path = "network-libp2p" } rand = "0.3" rlp = { git = "https://github.com/paritytech/parity-common" } slog = "^2.2.3" diff --git a/lighthouse/client/mod.rs b/lighthouse/client/mod.rs deleted file mode 100644 index 4f8a8377a..000000000 --- a/lighthouse/client/mod.rs +++ /dev/null @@ -1,83 +0,0 @@ -use std::sync::Arc; -use std::thread; -use super::db::{ DiskDB }; -use super::config::LighthouseConfig; -use super::futures::sync::mpsc::{ - unbounded, -}; -use super::network_libp2p::service::listen as network_listen; -use super::network_libp2p::state::NetworkState; -use super::slog::Logger; -use super::sync::run_sync_future; - -use super::db::ClientDB; - -/// Represents the co-ordination of the -/// networking, syncing and RPC (not-yet-implemented) threads. -pub struct Client { - pub db: Arc, - pub network_thread: thread::JoinHandle<()>, - pub sync_thread: thread::JoinHandle<()>, -} - -impl Client { - /// Instantiates a new "Client". - /// - /// Presently, this means starting network and sync threads - /// and plumbing them together. 
- pub fn new(config: &LighthouseConfig, - log: &Logger) - -> Self - { - // Open the local db - let db = { - let db = DiskDB::open(&config.data_dir, None); - Arc::new(db) - }; - - // Start the network thread - let network_state = NetworkState::new( - &config.data_dir, - config.p2p_listen_port, - &log).expect("Network setup failed"); let (network_thread, network_tx, network_rx) = { - let (message_sender, message_receiver) = unbounded(); - let (event_sender, event_receiver) = unbounded(); - let network_log = log.new(o!()); - let thread = thread::spawn(move || { - network_listen( - network_state, - &event_sender, - message_receiver, - &network_log, - ); - }); - (thread, message_sender, event_receiver) - }; - - // Start the sync thread - let (sync_thread, _sync_tx, _sync_rx) = { - let (sync_out_sender, sync_out_receiver) = unbounded(); - let (sync_in_sender, sync_in_receiver) = unbounded(); - let sync_log = log.new(o!()); - let sync_db = db.clone(); - let thread = thread::spawn(move || { - run_sync_future( - sync_db, - network_tx.clone(), - network_rx, - &sync_out_sender, - &sync_in_receiver, - sync_log, - ); - }); - (thread, sync_in_sender, sync_out_receiver) - }; - - // Return the client struct - Self { - db, - network_thread, - sync_thread, - } - } -} diff --git a/lighthouse/main.rs b/lighthouse/main.rs index ee90649f5..9cfbe45e4 100644 --- a/lighthouse/main.rs +++ b/lighthouse/main.rs @@ -4,13 +4,10 @@ extern crate slog_term; extern crate slog_async; // extern crate ssz; extern crate clap; -extern crate network_libp2p; extern crate futures; extern crate db; -mod client; -mod sync; mod config; use std::path::PathBuf; @@ -18,7 +15,6 @@ use std::path::PathBuf; use slog::Drain; use clap::{ Arg, App }; use config::LighthouseConfig; -use client::Client; fn main() { let decorator = slog_term::TermDecorator::new().build(); @@ -64,8 +60,8 @@ fn main() { "data_dir" => &config.data_dir.to_str(), "port" => &config.p2p_listen_port); - let client = Client::new(&config, &log); - client.sync_thread.join().unwrap(); + error!(log, + "Lighthouse under development and does not provide a user demo."); info!(log, "Exiting."); } diff --git a/lighthouse/sync/mod.rs b/lighthouse/sync/mod.rs deleted file mode 100644 index 6e1f0be11..000000000 --- a/lighthouse/sync/mod.rs +++ /dev/null @@ -1,12 +0,0 @@ -extern crate futures; -extern crate slog; -extern crate tokio; -extern crate network_libp2p; - -pub mod network; -pub mod sync_future; -pub mod wire_protocol; - -pub use self::sync_future::run_sync_future; - -use super::db; diff --git a/lighthouse/sync/network.rs b/lighthouse/sync/network.rs deleted file mode 100644 index 45035c84d..000000000 --- a/lighthouse/sync/network.rs +++ /dev/null @@ -1,86 +0,0 @@ -use std::sync::Arc; -use super::db::ClientDB; -use slog::Logger; - -use super::network_libp2p::message::{ - NetworkEvent, - OutgoingMessage, - NetworkEventType, -}; - -use super::wire_protocol::{ - WireMessage, - WireMessageHeader, -}; - -use super::futures::sync::mpsc::{ - UnboundedSender, -}; - -/// Accept a network event and perform all required processing. -/// -/// This function should be called whenever an underlying network -/// (e.g., libp2p) has an event to push up to the sync process. 
-pub fn handle_network_event( - event: NetworkEvent, - db: &Arc, - network_tx: &UnboundedSender, - log: &Logger) - -> Result<(), ()> -{ - debug!(&log, ""; - "network_event" => format!("{:?}", &event)); - match event.event { - NetworkEventType::PeerConnect => Ok(()), - NetworkEventType::PeerDrop => Ok(()), - NetworkEventType::Message => { - if let Some(data) = event.data { - handle_network_message( - &data, - &db, - &network_tx, - &log) - } else { - Ok(()) - } - } - } -} - -/// Accept a message from the network and perform all required -/// processing. -/// -/// This function should be called whenever a peer from a network -/// (e.g., libp2p) has sent a message to us. -fn handle_network_message( - message: &[u8], - db: &Arc, - _network_tx: &UnboundedSender, - log: &Logger) - -> Result<(), ()> -{ - match WireMessage::decode(&message) { - Ok(msg) => { - match msg.header { - WireMessageHeader::Blocks => { - process_unverified_blocks( - msg.body, - &db, - &log - ); - Ok(()) - } - } - } - Err(_) => { - Ok(()) // No need to pass the error back - } - } -} - -fn process_unverified_blocks(_message: &[u8], - _db: &Arc, - _log: &Logger) -{ - // -} diff --git a/lighthouse/sync/sync_future.rs b/lighthouse/sync/sync_future.rs deleted file mode 100644 index 31cc933ca..000000000 --- a/lighthouse/sync/sync_future.rs +++ /dev/null @@ -1,48 +0,0 @@ -use super::tokio; -use super::futures::{ Future, Stream }; -use super::futures::sync::mpsc::{ - UnboundedReceiver, - UnboundedSender, -}; -use super::network_libp2p::message::{ - NetworkEvent, - OutgoingMessage, -}; -use super::network::handle_network_event; -use std::sync::Arc; -use super::db::ClientDB; -use slog::Logger; - -type NetworkSender = UnboundedSender; -type NetworkReceiver = UnboundedReceiver; - -type SyncSender = UnboundedSender>; -type SyncReceiver = UnboundedReceiver>; - -/// Start a syncing tokio future. -/// -/// Uses green-threading to process messages -/// from the network and the RPC and update -/// the state. 
-pub fn run_sync_future( - db: Arc, - network_tx: NetworkSender, - network_rx: NetworkReceiver, - _sync_tx: &SyncSender, - _sync_rx: &SyncReceiver, - log: Logger) -{ - let network_future = { - network_rx - .for_each(move |event| { - handle_network_event( - event, - &db.clone(), - &network_tx.clone(), - &log.clone()) - }) - .map_err(|_| panic!("rx failed")) - }; - - tokio::run(network_future); -} diff --git a/lighthouse/sync/wire_protocol.rs b/lighthouse/sync/wire_protocol.rs deleted file mode 100644 index e5dd75d30..000000000 --- a/lighthouse/sync/wire_protocol.rs +++ /dev/null @@ -1,92 +0,0 @@ -pub enum WireMessageDecodeError { - TooShort, - UnknownType, -} - -pub enum WireMessageHeader { - Blocks, - /* - // Leave out until used - Status, - NewBlockHashes, - GetBlockHashes, - BlockHashes, - GetBlocks, - NewBlock, - */ -} - -pub struct WireMessage<'a> { - pub header: WireMessageHeader, - pub body: &'a [u8], -} - -impl<'a> WireMessage<'a> { - pub fn decode(bytes: &'a [u8]) - -> Result - { - if let Some((header_byte, body)) = bytes.split_first() { - let header = match header_byte { - 0x06 => Some(WireMessageHeader::Blocks), - _ => None - }; - match header { - Some(header) => Ok(Self{header, body}), - None => Err(WireMessageDecodeError::UnknownType) - } - } else { - Err(WireMessageDecodeError::TooShort) - } - } -} - -/* -pub fn decode_wire_message(bytes: &[u8]) - -> Result -{ - if let Some((header_byte, body)) = bytes.split_first() { - let header = match header_byte { - 0x06 => Some(WireMessageType::Blocks), - _ => None - }; - match header { - Some(header) => Ok((header, body)), - None => Err(WireMessageDecodeError::UnknownType) - } - } else { - Err(WireMessageDecodeError::TooShort) - } -} - - -/// Determines the message type of some given -/// message. -/// -/// Does not check the validity of the message data, -/// it just reads the first byte. 
-pub fn message_type(message: &Vec) - -> Option -{ - match message.get(0) { - Some(0x06) => Some(WireMessageType::Blocks), - _ => None - } -} - -pub fn identify_wire_protocol_message(message: &Vec) - -> Result<(WireMessageType, &[u8]), WireMessageDecodeError> -{ - fn strip_header(v: &Vec) -> &[u8] { - match v.get(1..v.len()) { - None => &vec![], - Some(s) => s - } - } - - match message.get(0) { - Some(0x06) => Ok((WireMessageType::Blocks, strip_header(message))), - None => Err(WireMessageDecodeError::TooShort), - _ => Err(WireMessageDecodeError::UnknownType), - } -} -*/ diff --git a/network-libp2p/Cargo.toml b/network-libp2p/Cargo.toml deleted file mode 100644 index de8077ac6..000000000 --- a/network-libp2p/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "network-libp2p" -version = "0.1.0" -authors = ["Paul Hauner "] - -[dependencies] -bigint = "4.2" -bytes = "" -eth-secp256k1 = { git = "https://github.com/paritytech/rust-secp256k1" } -futures = "0.1.23" -libp2p-peerstore = { git = "https://github.com/sigp/libp2p-rs", branch ="zksummit" } -libp2p-core = { git = "https://github.com/sigp/libp2p-rs", branch ="zksummit" } -libp2p-mplex = { git = "https://github.com/sigp/libp2p-rs", branch ="zksummit" } -libp2p-tcp-transport = { git = "https://github.com/sigp/libp2p-rs", branch ="zksummit" } -libp2p-floodsub = { git = "https://github.com/sigp/libp2p-rs", branch ="zksummit" } -libp2p-identify = { git = "https://github.com/sigp/libp2p-rs", branch ="zksummit" } -libp2p-kad = { git = "https://github.com/sigp/libp2p-rs", branch ="zksummit" } -pem = "0.5.0" -rand = "0.3" -slog = "^2.2.3" -tokio-core = "0.1" -tokio-io = "0.1" -tokio-stdin = "0.1" -tokio-timer = "0.1" diff --git a/network-libp2p/README.md b/network-libp2p/README.md deleted file mode 100644 index dd3c68997..000000000 --- a/network-libp2p/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# libp2p Network - -This is a fairly scrappy implementation of libp2p floodsub for the following -reasons: - - - There is not presently a gossip-sub implementation for Rust libp2p. - - The networking layer for the beacon_chain is not yet finalized. 
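The deleted wire protocol frames every message with a single leading type byte
(0x06 for a "Blocks" message), with the remaining bytes as the body. The
round-trip sketch below mirrors that framing; the `encode_blocks` helper is an
assumption added for illustration and was not part of the deleted module.

```rust
/// Frame a body as a "Blocks" message: a 0x06 type byte followed by the body.
/// (`encode_blocks` is illustrative; only decoding existed in the old code.)
fn encode_blocks(body: &[u8]) -> Vec<u8> {
    let mut msg = Vec::with_capacity(1 + body.len());
    msg.push(0x06);
    msg.extend_from_slice(body);
    msg
}

/// Split the leading type byte from the body, as `WireMessage::decode` did.
fn decode(bytes: &[u8]) -> Result<(&'static str, &[u8]), &'static str> {
    match bytes.split_first() {
        Some((&0x06, body)) => Ok(("Blocks", body)),
        Some(_) => Err("unknown message type"),
        None => Err("message too short"),
    }
}

fn main() {
    let msg = encode_blocks(b"ssz-encoded blocks would go here");
    assert_eq!(
        decode(&msg),
        Ok(("Blocks", &b"ssz-encoded blocks would go here"[..]))
    );
}
```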
diff --git a/network-libp2p/src/lib.rs b/network-libp2p/src/lib.rs deleted file mode 100644 index bbec78c08..000000000 --- a/network-libp2p/src/lib.rs +++ /dev/null @@ -1,10 +0,0 @@ -extern crate libp2p_core; -extern crate libp2p_peerstore; -extern crate pem; -extern crate secp256k1; -#[macro_use] -extern crate slog; - -pub mod message; -pub mod service; -pub mod state; diff --git a/network-libp2p/src/message.rs b/network-libp2p/src/message.rs deleted file mode 100644 index aa3ef54f0..000000000 --- a/network-libp2p/src/message.rs +++ /dev/null @@ -1,18 +0,0 @@ -#[derive(Debug)] -pub enum NetworkEventType { - PeerConnect, - PeerDrop, - Message, -} - -#[derive(Debug)] -pub struct NetworkEvent { - pub event: NetworkEventType, - pub data: Option>, -} - -#[derive(Debug)] -pub struct OutgoingMessage { - pub peer: Option, - pub data: Vec, -} diff --git a/network-libp2p/src/service.rs b/network-libp2p/src/service.rs deleted file mode 100644 index f9b062f85..000000000 --- a/network-libp2p/src/service.rs +++ /dev/null @@ -1,315 +0,0 @@ -extern crate bigint; -extern crate bytes; -extern crate futures; -extern crate libp2p_peerstore; -extern crate libp2p_floodsub; -extern crate libp2p_identify; -extern crate libp2p_core; -extern crate libp2p_mplex; -extern crate libp2p_tcp_transport; -extern crate libp2p_kad; -extern crate slog; -extern crate tokio_core; -extern crate tokio_io; -extern crate tokio_timer; -extern crate tokio_stdin; - -use super::state::NetworkState; -use super::message::{ NetworkEvent, NetworkEventType, OutgoingMessage }; -use self::bigint::U512; -use self::futures::{ Future, Stream, Poll }; -use self::futures::sync::mpsc::{ - UnboundedSender, UnboundedReceiver -}; -use self::libp2p_core::{ AddrComponent, Endpoint, Multiaddr, - Transport, ConnectionUpgrade }; -use self::libp2p_kad::{ KademliaUpgrade, KademliaProcessingFuture}; -use self::libp2p_floodsub::{ FloodSubFuture, FloodSubUpgrade }; -use self::libp2p_identify::{ IdentifyInfo, IdentifyTransport, IdentifyOutput }; -use self::slog::Logger; -use std::sync::{ Arc, RwLock }; -use std::time::{ Duration, Instant }; -use std::ops::Deref; -use std::io::Error as IoError; -use self::tokio_io::{ AsyncRead, AsyncWrite }; -use self::bytes::Bytes; - -pub use self::libp2p_floodsub::Message; - -pub fn listen(state: NetworkState, - events_to_app: &UnboundedSender, - raw_rx: UnboundedReceiver, - log: &Logger) -{ - let peer_store = state.peer_store; - let peer_id = state.peer_id; - let listen_multiaddr = state.listen_multiaddr; - let listened_addrs = Arc::new(RwLock::new(vec![])); - let rx = ApplicationReciever{ inner: raw_rx }; - - // Build a tokio core - let mut core = tokio_core::reactor::Core::new().expect("tokio failure."); - // Build a base TCP libp2p transport - let transport = libp2p_tcp_transport::TcpConfig::new(core.handle()) - .with_upgrade(libp2p_core::upgrade::PlainTextConfig) - .with_upgrade(libp2p_mplex::BufferedMultiplexConfig::<[_; 256]>::new()) - .into_connection_reuse(); - - // Build an identify transport to allow identification and negotiation - // of layers running atop the TCP transport (e.g., kad) - let identify_transport = { - let listened_addrs = listened_addrs.clone(); - let listen_multiaddr = listen_multiaddr.clone(); - IdentifyTransport::new(transport.clone(), peer_store.clone()) - // Managed NAT'ed connections - ensuring the external IP - // is stored not the internal addr. 
- .map(move |out, _, _| { - if let(Some(ref observed), ref listen_multiaddr) = - (out.observed_addr, listen_multiaddr) - { - if let Some(viewed_from_outside) = - transport.nat_traversal(listen_multiaddr, observed) - { - listened_addrs.write().unwrap() - .push(viewed_from_outside); - } - } - out.socket - }) - }; - - // Configure and build a Kademlia upgrade to be applied - // to the base TCP transport. - let kad_config = libp2p_kad::KademliaConfig { - parallelism: 3, - record_store: (), - peer_store, - local_peer_id: peer_id.clone(), - timeout: Duration::from_secs(2) - }; - let kad_ctl_proto = libp2p_kad:: - KademliaControllerPrototype::new(kad_config); - let kad_upgrade = libp2p_kad:: - KademliaUpgrade::from_prototype(&kad_ctl_proto); - - // Build a floodsub upgrade to allow pushing topic'ed - // messages across the network. - let (floodsub_upgrade, floodsub_rx) = - FloodSubUpgrade::new(peer_id.clone()); - - // Combine the Kademlia and Identify upgrades into a single - // upgrader struct. - let upgrade = ConnectionUpgrader { - kad: kad_upgrade.clone(), - floodsub: floodsub_upgrade.clone(), - identify: libp2p_identify::IdentifyProtocolConfig, - }; - - // Build a Swarm to manage upgrading connections to peers. - let swarm_listened_addrs = listened_addrs.clone(); - let swarm_peer_id = peer_id.clone(); - let (swarm_ctl, swarm_future) = libp2p_core::swarm( - identify_transport.clone().with_upgrade(upgrade), - move |upgrade, client_addr| match upgrade { - FinalUpgrade::Kad(kad) => Box::new(kad) as Box<_>, - FinalUpgrade::FloodSub(future) => Box::new(future) as Box<_>, - FinalUpgrade::Identify(IdentifyOutput::Sender { sender, .. }) => sender.send( - IdentifyInfo { - public_key: swarm_peer_id.clone().into_bytes(), - agent_version: "lighthouse/1.0.0".to_owned(), - protocol_version: "rust-libp2p/1.0.0".to_owned(), - listen_addrs: swarm_listened_addrs.read().unwrap().to_vec(), - protocols: vec![ - "/ipfs/kad/1.0.0".to_owned(), - "/ipfs/id/1.0.0".to_owned(), - "/floodsub/1.0.0".to_owned(), - ] - }, - &client_addr - ), - FinalUpgrade::Identify(IdentifyOutput::RemoteInfo { .. }) => { - unreachable!("Never dial with the identify protocol.") - } - }, - ); - - // Start the Swarm controller listening on the local machine - let actual_addr = swarm_ctl - .listen_on(listen_multiaddr) - .expect("Failed to listen on multiaddr"); - info!(log, "libp2p listening"; "listen_addr" => actual_addr.to_string()); - - // Convert the kad prototype into a controller by providing it the - // newly built swarm. - let (kad_ctl, kad_init) = kad_ctl_proto.start( - swarm_ctl.clone(), - identify_transport.clone().with_upgrade(kad_upgrade.clone())); - - // Create a new floodsub controller using a specific topic - let topic = libp2p_floodsub::TopicBuilder::new("beacon_chain").build(); - let floodsub_ctl = libp2p_floodsub::FloodSubController::new(&floodsub_upgrade); - floodsub_ctl.subscribe(&topic); - - // Generate a tokio timer "wheel" future that sends kad FIND_NODE at - // a routine interval. 
- let kad_poll_log = log.new(o!()); - let kad_poll_event_tx = events_to_app.clone(); - let kad_poll = { - let polling_peer_id = peer_id.clone(); - tokio_timer::wheel() - .build() - .interval_at(Instant::now(), Duration::from_secs(30)) - .map_err(|_| -> IoError { unreachable!() }) - .and_then(move |()| kad_ctl.find_node(peer_id.clone())) - .for_each(move |peers| { - let local_hash = U512::from(polling_peer_id.hash()); - for peer in peers { - let peer_hash = U512::from(peer.hash()); - let distance = 512 - (local_hash ^ peer_hash).leading_zeros(); - info!(kad_poll_log, "Discovered peer"; - "distance" => distance, - "peer_id" => peer.to_base58()); - let peer_addr = AddrComponent::P2P(peer.into_bytes()).into(); - let dial_result = swarm_ctl.dial( - peer_addr, - identify_transport.clone().with_upgrade(floodsub_upgrade.clone()) - ); - if let Err(err) = dial_result { - warn!(kad_poll_log, "Dialling {:?} failed.", err) - }; - let event = NetworkEvent { - event: NetworkEventType::PeerConnect, - data: None, - }; - kad_poll_event_tx.unbounded_send(event) - .expect("Network unable to contact application."); - }; - Ok(()) - }) - }; - - // Create a future to handle message recieved from the network - let floodsub_rx = floodsub_rx.for_each(|msg| { - debug!(&log, "Network receive"; "msg" => format!("{:?}", msg)); - let event = NetworkEvent { - event: NetworkEventType::Message, - data: Some(msg.data), - }; - events_to_app.unbounded_send(event) - .expect("Network unable to contact application."); - Ok(()) - }); - - // Create a future to handle messages recieved from the application - let app_rx = rx.for_each(|msg| { - debug!(&log, "Network publish"; "msg" => format!("{:?}", msg)); - floodsub_ctl.publish(&topic, msg.data); - Ok(()) - }); - - // Generate a full future - let final_future = swarm_future - .select(floodsub_rx).map_err(|(err, _)| err).map(|((), _)| ()) - .select(app_rx).map_err(|(err, _)| err).map(|((), _)| ()) - .select(kad_poll).map_err(|(err, _)| err).map(|((), _)| ()) - .select(kad_init).map_err(|(err, _)| err).and_then(|((), n)| n); - - core.run(final_future).unwrap(); -} - -struct ApplicationReciever { - inner: UnboundedReceiver, -} - -impl Stream for ApplicationReciever { - type Item = OutgoingMessage; - type Error = IoError; - - fn poll(&mut self) -> Poll, Self::Error> { - self.inner - .poll() - .map_err(|_| unreachable!()) - } -} - -#[derive(Clone)] -struct ConnectionUpgrader { - kad: KademliaUpgrade, - identify: libp2p_identify::IdentifyProtocolConfig, - floodsub: FloodSubUpgrade, -} - -impl ConnectionUpgrade for ConnectionUpgrader -where - C: AsyncRead + AsyncWrite + 'static, - P: Deref + Clone + 'static, - for<'r> &'r Pc: libp2p_peerstore::Peerstore, - R: 'static -{ - type NamesIter = ::std::vec::IntoIter<(Bytes, usize)>; - type UpgradeIdentifier = usize; - type Output = FinalUpgrade; - type Future = Box, Error = IoError>>; - - #[inline] - fn protocol_names(&self) -> Self::NamesIter { - vec![ - (Bytes::from("/ipfs/kad/1.0.0"), 0), - (Bytes::from("/ipfs/id/1.0.0"), 1), - (Bytes::from("/floodsub/1.0.0"), 2), - ].into_iter() - } - - fn upgrade( - self, - socket: C, - id: Self::UpgradeIdentifier, - ty: Endpoint, - remote_addr: &Multiaddr) - -> Self::Future - { - match id { - 0 => Box::new( - self.kad - .upgrade(socket, (), ty, remote_addr) - .map(|upg| upg.into())), - 1 => Box::new( - self.identify - .upgrade(socket, (), ty, remote_addr) - .map(|upg| upg.into())), - 2 => Box::new( - self.floodsub - .upgrade(socket, (), ty, remote_addr) - .map(|upg| upg.into()), - ), - _ => unreachable!() - 
} - - } -} - -enum FinalUpgrade { - Kad(KademliaProcessingFuture), - Identify(IdentifyOutput), - FloodSub(FloodSubFuture), -} - -impl From for FinalUpgrade { #[inline] - fn from(upgrade: libp2p_kad::KademliaProcessingFuture) -> Self { - FinalUpgrade::Kad(upgrade) - } -} - -impl From> for FinalUpgrade { - #[inline] - fn from(upgrade: IdentifyOutput) -> Self { - FinalUpgrade::Identify(upgrade) - } -} - -impl From for FinalUpgrade { - #[inline] - fn from(upgr: FloodSubFuture) -> Self { - FinalUpgrade::FloodSub(upgr) - } -} diff --git a/network-libp2p/src/state.rs b/network-libp2p/src/state.rs deleted file mode 100644 index e45741fe6..000000000 --- a/network-libp2p/src/state.rs +++ /dev/null @@ -1,119 +0,0 @@ -extern crate rand; - -use std::io::{ Read, Write }; -use std::error::Error; -use std::fs::File; -use std::path::{ Path, PathBuf }; -use std::sync::Arc; -use std::time::Duration; - -use super::libp2p_core::Multiaddr; -use super::libp2p_peerstore::{ Peerstore, PeerAccess, PeerId }; -use super::libp2p_peerstore::json_peerstore::JsonPeerstore; -use super::pem; -use super::secp256k1::Secp256k1; -use super::secp256k1::key::{ SecretKey, PublicKey }; -use super::slog::Logger; - -/// Location of the libp2p peerstore inside the Network base dir. -const PEERS_FILE: &str = "peerstore.json"; -/// Location of the libp2p local peer secret key inside the Network base dir. -const LOCAL_PEM_FILE: &str = "local_peer_id.pem"; - -/// Represents the present state of a libp2p network. -pub struct NetworkState { - pub base_dir: PathBuf, - pub pubkey: PublicKey, - pub seckey: SecretKey, - pub peer_id: PeerId, - pub listen_multiaddr: Multiaddr, - pub peer_store: Arc, -} - -impl NetworkState { - /// Create a new libp2p network state. Used to initialize - /// network service. - pub fn new( - // config: LighthouseConfig, - base_dir: &Path, - listen_port: u16, - log: &Logger) - -> Result > - { - let curve = Secp256k1::new(); - let seckey = match - NetworkState::load_secret_key_from_pem_file(base_dir, &curve) - { - Ok(k) => k, - _ => NetworkState::generate_new_secret_key(base_dir, &curve)? - }; - let pubkey = PublicKey::from_secret_key(&curve, &seckey)?; - let peer_id = PeerId::from_public_key( - &pubkey.serialize_vec(&curve, false)); - info!(log, "Loaded keys"; "peer_id" => &peer_id.to_base58()); - let peer_store = { - let path = base_dir.join(PEERS_FILE); - let base = JsonPeerstore::new(path)?; - Arc::new(base) - }; - info!(log, "Loaded peerstore"; "peer_count" => &peer_store.peers().count()); - let listen_multiaddr = - NetworkState::multiaddr_on_port(&listen_port.to_string()); - Ok(Self { - base_dir: PathBuf::from(base_dir), - seckey, - pubkey, - peer_id, - listen_multiaddr, - peer_store, - }) - } - - /// Return a TCP multiaddress on 0.0.0.0 for a given port. - pub fn multiaddr_on_port(port: &str) -> Multiaddr { - format!("/ip4/0.0.0.0/tcp/{}", port) - .parse::().unwrap() - } - - pub fn add_peer(&mut self, - peer_id: &PeerId, - multiaddr: Multiaddr, - duration_secs: u64) { - self.peer_store.peer_or_create(&peer_id) - .add_addr(multiaddr, Duration::from_secs(duration_secs)); - } - - /// Instantiate a SecretKey from a .pem file on disk. 
-    pub fn load_secret_key_from_pem_file(
-        base_dir: &Path,
-        curve: &Secp256k1)
-        -> Result<SecretKey, Box<Error>>
-    {
-        let path = base_dir.join(LOCAL_PEM_FILE);
-        let mut contents = String::new();
-        let mut file = File::open(path)?;
-        file.read_to_string(&mut contents)?;
-        let pem_key = pem::parse(contents)?;
-        let key = SecretKey::from_slice(curve, &pem_key.contents)?;
-        Ok(key)
-    }
-
-    /// Generate a new SecretKey and store it on disk as a .pem file.
-    pub fn generate_new_secret_key(
-        base_dir: &Path,
-        curve: &Secp256k1)
-        -> Result<SecretKey, Box<Error>>
-    {
-        let mut rng = rand::thread_rng();
-        let sk = SecretKey::new(&curve, &mut rng);
-        let pem_key = pem::Pem {
-            tag: String::from("EC PRIVATE KEY"),
-            contents: sk[..].to_vec()
-        };
-        let s_string = pem::encode(&pem_key);
-        let path = base_dir.join(LOCAL_PEM_FILE);
-        let mut s_file = File::create(path)?;
-        s_file.write_all(s_string.as_bytes())?;
-        Ok(sk)
-    }
-}
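The removed `NetworkState` methods above amount to a load-or-generate pattern
for the local peer key: try to read the key from disk, and fall back to
creating and persisting a fresh one. Below is a minimal sketch of that
pattern, with a fixed byte vector standing in for real secp256k1 key
generation and PEM encoding (both simplified away here).

```rust
use std::fs;
use std::io;
use std::path::Path;

/// Load key material from `path`, or persist and return a fresh key on any
/// failure. The fixed 32-byte value is a stand-in for `SecretKey::new(...)`.
fn load_or_generate_key(path: &Path) -> io::Result<Vec<u8>> {
    match fs::read(path) {
        Ok(bytes) if bytes.len() == 32 => Ok(bytes),
        _ => {
            let fresh = vec![7u8; 32]; // stand-in, not real randomness
            fs::write(path, &fresh)?;
            Ok(fresh)
        }
    }
}

fn main() -> io::Result<()> {
    let key = load_or_generate_key(Path::new("local_peer_id.key"))?;
    assert_eq!(key.len(), 32);
    Ok(())
}
```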