From 3517ef6513d1a9585c309f9b099a3345f4b856b0 Mon Sep 17 00:00:00 2001 From: Kirk Baird Date: Wed, 20 Feb 2019 14:16:07 +1100 Subject: [PATCH 001/154] Initialise fuzzing for ssz --- eth2/utils/ssz/fuzz/.gitignore | 4 ++++ eth2/utils/ssz/fuzz/Cargo.toml | 22 +++++++++++++++++++ .../ssz/fuzz/fuzz_targets/fuzz_target_1.rs | 7 ++++++ 3 files changed, 33 insertions(+) create mode 100644 eth2/utils/ssz/fuzz/.gitignore create mode 100644 eth2/utils/ssz/fuzz/Cargo.toml create mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_1.rs diff --git a/eth2/utils/ssz/fuzz/.gitignore b/eth2/utils/ssz/fuzz/.gitignore new file mode 100644 index 000000000..572e03bdf --- /dev/null +++ b/eth2/utils/ssz/fuzz/.gitignore @@ -0,0 +1,4 @@ + +target +corpus +artifacts diff --git a/eth2/utils/ssz/fuzz/Cargo.toml b/eth2/utils/ssz/fuzz/Cargo.toml new file mode 100644 index 000000000..9c0a17f0d --- /dev/null +++ b/eth2/utils/ssz/fuzz/Cargo.toml @@ -0,0 +1,22 @@ + +[package] +name = "ssz-fuzz" +version = "0.0.1" +authors = ["Automatically generated"] +publish = false + +[package.metadata] +cargo-fuzz = true + +[dependencies.ssz] +path = ".." +[dependencies.libfuzzer-sys] +git = "https://github.com/rust-fuzz/libfuzzer-sys.git" + +# Prevent this from interfering with workspaces +[workspace] +members = ["."] + +[[bin]] +name = "fuzz_target_1" +path = "fuzz_targets/fuzz_target_1.rs" diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_1.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_1.rs new file mode 100644 index 000000000..1ca6f957d --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_1.rs @@ -0,0 +1,7 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +fuzz_target!(|data: &[u8]| { + // fuzzed code goes here +}); From 52347d8e6d4c21f7c4ffebfa9b8e35a58c69ef2d Mon Sep 17 00:00:00 2001 From: Kirk Baird Date: Wed, 20 Feb 2019 14:46:25 +1100 Subject: [PATCH 002/154] Write a fuzz test --- eth2/utils/ssz/fuzz/Cargo.toml | 4 ++-- eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_1.rs | 7 ------- eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8.rs | 10 ++++++++++ 3 files changed, 12 insertions(+), 9 deletions(-) delete mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_1.rs create mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8.rs diff --git a/eth2/utils/ssz/fuzz/Cargo.toml b/eth2/utils/ssz/fuzz/Cargo.toml index 9c0a17f0d..9ffff016c 100644 --- a/eth2/utils/ssz/fuzz/Cargo.toml +++ b/eth2/utils/ssz/fuzz/Cargo.toml @@ -18,5 +18,5 @@ git = "https://github.com/rust-fuzz/libfuzzer-sys.git" members = ["."] [[bin]] -name = "fuzz_target_1" -path = "fuzz_targets/fuzz_target_1.rs" +name = "fuzz_target_u8" +path = "fuzz_targets/fuzz_target_u8.rs" diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_1.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_1.rs deleted file mode 100644 index 1ca6f957d..000000000 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_1.rs +++ /dev/null @@ -1,7 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate ssz; - -fuzz_target!(|data: &[u8]| { - // fuzzed code goes here -}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8.rs new file mode 100644 index 000000000..6a8fd7673 --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8.rs @@ -0,0 +1,10 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::{DecodeError, Decodable, Encodable}; + +// Fuzz ssz_decode(u8) +fuzz_target!(|data: &[u8]| { + let result: Result<(u8, 
usize), DecodeError> = Decodable::ssz_decode(data, 0); +}); From 38abcc4a240f3878c44203c783af57c66303838e Mon Sep 17 00:00:00 2001 From: Kirk Baird Date: Wed, 20 Feb 2019 15:03:32 +1100 Subject: [PATCH 003/154] Fuzz test for u8 fails --- eth2/utils/ssz/fuzz/Cargo.toml | 4 ++++ .../ssz/fuzz/fuzz_targets/fuzz_target_u16.rs | 19 +++++++++++++++++++ .../ssz/fuzz/fuzz_targets/fuzz_target_u8.rs | 9 +++++++++ 3 files changed, 32 insertions(+) create mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16.rs diff --git a/eth2/utils/ssz/fuzz/Cargo.toml b/eth2/utils/ssz/fuzz/Cargo.toml index 9ffff016c..b640cc5f0 100644 --- a/eth2/utils/ssz/fuzz/Cargo.toml +++ b/eth2/utils/ssz/fuzz/Cargo.toml @@ -20,3 +20,7 @@ members = ["."] [[bin]] name = "fuzz_target_u8" path = "fuzz_targets/fuzz_target_u8.rs" + +[[bin]] +name = "fuzz_target_u16" +path = "fuzz_targets/fuzz_target_u16.rs" diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16.rs new file mode 100644 index 000000000..8bf2be8a4 --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16.rs @@ -0,0 +1,19 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::{DecodeError, Decodable, Encodable}; + +// Fuzz ssz_decode(u8) +fuzz_target!(|data: &[u8]| { + let result: Result<(u16, usize), DecodeError> = Decodable::ssz_decode(data, 0); + if data.len() > 1 { + // Valid result + let (number_u16, index) = result.unwrap(); + assert_eq!(index, 2); + // TODO: add test for number? + } else { + // Length of 0 or 1 should return error + assert_eq!(result, Err(DecodeError::TooShort)); + } +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8.rs index 6a8fd7673..afab5eab5 100644 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8.rs +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8.rs @@ -7,4 +7,13 @@ use ssz::{DecodeError, Decodable, Encodable}; // Fuzz ssz_decode(u8) fuzz_target!(|data: &[u8]| { let result: Result<(u8, usize), DecodeError> = Decodable::ssz_decode(data, 0); + if data.len() > 0 { + // Should have valid result + let (number_u8, index) = result.unwrap(); + assert_eq!(number_u8, data[0]); + assert_eq!(index, 2); + } else { + // Length of 0 should return error + assert_eq!(result, Err(DecodeError::TooShort)); + } }); From 532a854f8efbb313c75ec236acd74a283290ede3 Mon Sep 17 00:00:00 2001 From: mehdi Date: Wed, 20 Feb 2019 15:30:58 +1100 Subject: [PATCH 004/154] Fixing minor bug in assert statement --- eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8.rs index afab5eab5..0320b7c10 100644 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8.rs +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8.rs @@ -11,7 +11,7 @@ fuzz_target!(|data: &[u8]| { // Should have valid result let (number_u8, index) = result.unwrap(); assert_eq!(number_u8, data[0]); - assert_eq!(index, 2); + assert_eq!(index, 1); } else { // Length of 0 should return error assert_eq!(result, Err(DecodeError::TooShort)); From d5c4771f0a3810d50334cadcaef75606065974f3 Mon Sep 17 00:00:00 2001 From: Kirk Baird Date: Wed, 20 Feb 2019 15:34:15 +1100 Subject: [PATCH 005/154] Fuzz test decodes from u8 to u64 --- eth2/utils/ssz/fuzz/Cargo.toml | 16 +++++++--- ...arget_u16.rs => fuzz_target_u16_decode.rs} | 11 ++++--- .../fuzz_targets/fuzz_target_u32_decode.rs | 
22 +++++++++++++ .../fuzz_targets/fuzz_target_u64_decode.rs | 31 +++++++++++++++++++ ..._target_u8.rs => fuzz_target_u8_decode.rs} | 10 +++--- 5 files changed, 78 insertions(+), 12 deletions(-) rename eth2/utils/ssz/fuzz/fuzz_targets/{fuzz_target_u16.rs => fuzz_target_u16_decode.rs} (59%) create mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u32_decode.rs create mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_decode.rs rename eth2/utils/ssz/fuzz/fuzz_targets/{fuzz_target_u8.rs => fuzz_target_u8_decode.rs} (66%) diff --git a/eth2/utils/ssz/fuzz/Cargo.toml b/eth2/utils/ssz/fuzz/Cargo.toml index b640cc5f0..d0455a556 100644 --- a/eth2/utils/ssz/fuzz/Cargo.toml +++ b/eth2/utils/ssz/fuzz/Cargo.toml @@ -18,9 +18,17 @@ git = "https://github.com/rust-fuzz/libfuzzer-sys.git" members = ["."] [[bin]] -name = "fuzz_target_u8" -path = "fuzz_targets/fuzz_target_u8.rs" +name = "fuzz_target_u8_decode" +path = "fuzz_targets/fuzz_target_u8_decode.rs" [[bin]] -name = "fuzz_target_u16" -path = "fuzz_targets/fuzz_target_u16.rs" +name = "fuzz_target_u16_decode" +path = "fuzz_targets/fuzz_target_u16_decode.rs" + +[[bin]] +name = "fuzz_target_u32_decode" +path = "fuzz_targets/fuzz_target_u32_decode.rs" + +[[bin]] +name = "fuzz_target_u64_decode" +path = "fuzz_targets/fuzz_target_u64_decode.rs" diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16_decode.rs similarity index 59% rename from eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16.rs rename to eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16_decode.rs index 8bf2be8a4..73395f3af 100644 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16.rs +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16_decode.rs @@ -2,16 +2,19 @@ #[macro_use] extern crate libfuzzer_sys; extern crate ssz; -use ssz::{DecodeError, Decodable, Encodable}; +use ssz::{DecodeError, Decodable}; -// Fuzz ssz_decode(u8) +// Fuzz ssz_decode() fuzz_target!(|data: &[u8]| { let result: Result<(u16, usize), DecodeError> = Decodable::ssz_decode(data, 0); - if data.len() > 1 { + if data.len() >= 2 { // Valid result let (number_u16, index) = result.unwrap(); assert_eq!(index, 2); - // TODO: add test for number? 
+ // TODO: change to little endian bytes + // https://github.com/sigp/lighthouse/issues/215 + let val = u16::from_be_bytes([data[0], data[1]]); + assert_eq!(number_u16, val); } else { // Length of 0 or 1 should return error assert_eq!(result, Err(DecodeError::TooShort)); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u32_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u32_decode.rs new file mode 100644 index 000000000..e99bf2fad --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u32_decode.rs @@ -0,0 +1,22 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::{DecodeError, Decodable}; + +// Fuzz ssz_decode() +fuzz_target!(|data: &[u8]| { + let result: Result<(u32, usize), DecodeError> = Decodable::ssz_decode(data, 0); + if data.len() >= 4 { + // Valid result + let (number_u32, index) = result.unwrap(); + assert_eq!(index, 4); + // TODO: change to little endian bytes + // https://github.com/sigp/lighthouse/issues/215 + let val = u32::from_be_bytes([data[0], data[1], data[2], data[3]]); + assert_eq!(number_u32, val); + } else { + // Length less then 4 should return error + assert_eq!(result, Err(DecodeError::TooShort)); + } +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_decode.rs new file mode 100644 index 000000000..9e13ab604 --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_decode.rs @@ -0,0 +1,31 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::{DecodeError, Decodable}; + +// Fuzz ssz_decode() +fuzz_target!(|data: &[u8]| { + let result: Result<(u64, usize), DecodeError> = Decodable::ssz_decode(data, 0); + if data.len() >= 8 { + // Valid result + let (number_u64, index) = result.unwrap(); + assert_eq!(index, 8); + // TODO: change to little endian bytes + // https://github.com/sigp/lighthouse/issues/215 + let val = u64::from_be_bytes([ + data[0], + data[1], + data[2], + data[3], + data[4], + data[5], + data[6], + data[7], + ]); + assert_eq!(number_u64, val); + } else { + // Length less then 4 should return error + assert_eq!(result, Err(DecodeError::TooShort)); + } +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_decode.rs similarity index 66% rename from eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8.rs rename to eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_decode.rs index afab5eab5..296b6fa3d 100644 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8.rs +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_decode.rs @@ -2,16 +2,18 @@ #[macro_use] extern crate libfuzzer_sys; extern crate ssz; -use ssz::{DecodeError, Decodable, Encodable}; +use ssz::{DecodeError, Decodable}; -// Fuzz ssz_decode(u8) +// Fuzz ssz_decode() fuzz_target!(|data: &[u8]| { let result: Result<(u8, usize), DecodeError> = Decodable::ssz_decode(data, 0); - if data.len() > 0 { + if data.len() >= 1 { // Should have valid result let (number_u8, index) = result.unwrap(); + // TODO: change to little endian bytes + // https://github.com/sigp/lighthouse/issues/215 assert_eq!(number_u8, data[0]); - assert_eq!(index, 2); + assert_eq!(index, 1); } else { // Length of 0 should return error assert_eq!(result, Err(DecodeError::TooShort)); From b98db3773ed560bb12da54df787da620136c328b Mon Sep 17 00:00:00 2001 From: Kirk Baird Date: Wed, 20 Feb 2019 16:15:30 +1100 Subject: [PATCH 006/154] Fuzz test ssz_encode for u8 to u64 --- 
eth2/utils/ssz/fuzz/Cargo.toml | 16 ++++++++ .../fuzz_targets/fuzz_target_u16_encode.rs | 22 ++++++++++ .../fuzz_targets/fuzz_target_u32_encode.rs | 22 ++++++++++ .../fuzz_targets/fuzz_target_u64_decode.rs | 2 +- .../fuzz_targets/fuzz_target_u64_encode.rs | 40 +++++++++++++++++++ .../fuzz_targets/fuzz_target_u8_decode.rs | 2 +- .../fuzz_targets/fuzz_target_u8_encode.rs | 22 ++++++++++ 7 files changed, 124 insertions(+), 2 deletions(-) create mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16_encode.rs create mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u32_encode.rs create mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_encode.rs create mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_encode.rs diff --git a/eth2/utils/ssz/fuzz/Cargo.toml b/eth2/utils/ssz/fuzz/Cargo.toml index d0455a556..5af3275cd 100644 --- a/eth2/utils/ssz/fuzz/Cargo.toml +++ b/eth2/utils/ssz/fuzz/Cargo.toml @@ -21,14 +21,30 @@ members = ["."] name = "fuzz_target_u8_decode" path = "fuzz_targets/fuzz_target_u8_decode.rs" +[[bin]] +name = "fuzz_target_u8_encode" +path = "fuzz_targets/fuzz_target_u8_encode.rs" + [[bin]] name = "fuzz_target_u16_decode" path = "fuzz_targets/fuzz_target_u16_decode.rs" +[[bin]] +name = "fuzz_target_u16_encode" +path = "fuzz_targets/fuzz_target_u16_encode.rs" + [[bin]] name = "fuzz_target_u32_decode" path = "fuzz_targets/fuzz_target_u32_decode.rs" +[[bin]] +name = "fuzz_target_u32_encode" +path = "fuzz_targets/fuzz_target_u32_encode.rs" + [[bin]] name = "fuzz_target_u64_decode" path = "fuzz_targets/fuzz_target_u64_decode.rs" + +[[bin]] +name = "fuzz_target_u64_encode" +path = "fuzz_targets/fuzz_target_u64_encode.rs" diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16_encode.rs new file mode 100644 index 000000000..ce8a51845 --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16_encode.rs @@ -0,0 +1,22 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::SszStream; + +// Fuzz ssz_encode (via ssz_append) +fuzz_target!(|data: &[u8]| { + let mut ssz = SszStream::new(); + let mut number_u16 = 0; + if data.len() >= 2 { + number_u16 = u16::from_be_bytes([data[0], data[1]]); + } + + ssz.append(&number_u16); + let ssz = ssz.drain(); + + // TODO: change to little endian bytes + // https://github.com/sigp/lighthouse/issues/215 + assert_eq!(ssz.len(), 2); + assert_eq!(number_u16, u16::from_be_bytes([ssz[0], ssz[1]])); +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u32_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u32_encode.rs new file mode 100644 index 000000000..c71bcecaf --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u32_encode.rs @@ -0,0 +1,22 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::SszStream; + +// Fuzz ssz_encode (via ssz_append) +fuzz_target!(|data: &[u8]| { + let mut ssz = SszStream::new(); + let mut number_u32 = 0; + if data.len() >= 4 { + number_u32 = u32::from_be_bytes([data[0], data[1], data[2], data[3]]); + } + + ssz.append(&number_u32); + let ssz = ssz.drain(); + + // TODO: change to little endian bytes + // https://github.com/sigp/lighthouse/issues/215 + assert_eq!(ssz.len(), 4); + assert_eq!(number_u32, u32::from_be_bytes([ssz[0], ssz[1], ssz[2], ssz[3]])); +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_decode.rs index 9e13ab604..63eb60f55 100644 --- 
a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_decode.rs +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_decode.rs @@ -25,7 +25,7 @@ fuzz_target!(|data: &[u8]| { ]); assert_eq!(number_u64, val); } else { - // Length less then 4 should return error + // Length less then 8 should return error assert_eq!(result, Err(DecodeError::TooShort)); } }); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_encode.rs new file mode 100644 index 000000000..68616e0da --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_encode.rs @@ -0,0 +1,40 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::SszStream; + +// Fuzz ssz_encode (via ssz_append) +fuzz_target!(|data: &[u8]| { + let mut ssz = SszStream::new(); + let mut number_u64 = 0; + if data.len() >= 8 { + number_u64 = u64::from_be_bytes([ + data[0], + data[1], + data[2], + data[3], + data[4], + data[5], + data[6], + data[7], + ]); + } + + ssz.append(&number_u64); + let ssz = ssz.drain(); + + // TODO: change to little endian bytes + // https://github.com/sigp/lighthouse/issues/215 + assert_eq!(ssz.len(), 8); + assert_eq!(number_u64, u64::from_be_bytes([ + ssz[0], + ssz[1], + ssz[2], + ssz[3], + ssz[4], + ssz[5], + ssz[6], + ssz[7], + ])); +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_decode.rs index 296b6fa3d..6f17a4c85 100644 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_decode.rs +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_decode.rs @@ -12,8 +12,8 @@ fuzz_target!(|data: &[u8]| { let (number_u8, index) = result.unwrap(); // TODO: change to little endian bytes // https://github.com/sigp/lighthouse/issues/215 - assert_eq!(number_u8, data[0]); assert_eq!(index, 1); + assert_eq!(number_u8, data[0]); } else { // Length of 0 should return error assert_eq!(result, Err(DecodeError::TooShort)); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_encode.rs new file mode 100644 index 000000000..a135f2cd5 --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_encode.rs @@ -0,0 +1,22 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::SszStream; + +// Fuzz ssz_encode (via ssz_append) +fuzz_target!(|data: &[u8]| { + let mut ssz = SszStream::new(); + let mut number_u8 = 0; + if data.len() >= 1 { + number_u8 = data[0]; + } + + ssz.append(&number_u8); + let ssz = ssz.drain(); + + // TODO: change to little endian bytes + // https://github.com/sigp/lighthouse/issues/215 + assert_eq!(number_u8, ssz[0]); + assert_eq!(ssz.len(), 1); +}); From 00e5b571662cb2015d6977df1b812d0a5b17d876 Mon Sep 17 00:00:00 2001 From: Kirk Baird Date: Wed, 20 Feb 2019 16:43:30 +1100 Subject: [PATCH 007/154] Fuzz test ssz_encode and ssz_decode for usize --- eth2/utils/ssz/fuzz/Cargo.toml | 8 ++++ .../fuzz_targets/fuzz_target_usize_decode.rs | 32 +++++++++++++++ .../fuzz_targets/fuzz_target_usize_encode.rs | 40 +++++++++++++++++++ 3 files changed, 80 insertions(+) create mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_usize_decode.rs create mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_usize_encode.rs diff --git a/eth2/utils/ssz/fuzz/Cargo.toml b/eth2/utils/ssz/fuzz/Cargo.toml index 5af3275cd..6a65fb5e2 100644 --- a/eth2/utils/ssz/fuzz/Cargo.toml +++ b/eth2/utils/ssz/fuzz/Cargo.toml @@ -48,3 +48,11 @@ path = 
"fuzz_targets/fuzz_target_u64_decode.rs" [[bin]] name = "fuzz_target_u64_encode" path = "fuzz_targets/fuzz_target_u64_encode.rs" + +[[bin]] +name = "fuzz_target_usize_decode" +path = "fuzz_targets/fuzz_target_usize_decode.rs" + +[[bin]] +name = "fuzz_target_usize_encode" +path = "fuzz_targets/fuzz_target_usize_encode.rs" diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_usize_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_usize_decode.rs new file mode 100644 index 000000000..1458bfae9 --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_usize_decode.rs @@ -0,0 +1,32 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::{DecodeError, Decodable}; + +// Fuzz ssz_decode() +fuzz_target!(|data: &[u8]| { + // Note: we assume architecture is 64 bit -> usize == 64 bits + let result: Result<(usize, usize), DecodeError> = Decodable::ssz_decode(data, 0); + if data.len() >= 8 { + // Valid result + let (number_usize, index) = result.unwrap(); + assert_eq!(index, 8); + // TODO: change to little endian bytes + // https://github.com/sigp/lighthouse/issues/215 + let val = u64::from_be_bytes([ + data[0], + data[1], + data[2], + data[3], + data[4], + data[5], + data[6], + data[7], + ]); + assert_eq!(number_usize, val as usize); + } else { + // Length less then 8 should return error + assert_eq!(result, Err(DecodeError::TooShort)); + } +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_usize_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_usize_encode.rs new file mode 100644 index 000000000..d5aa9751f --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_usize_encode.rs @@ -0,0 +1,40 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::SszStream; + +// Fuzz ssz_encode (via ssz_append) +fuzz_target!(|data: &[u8]| { + let mut ssz = SszStream::new(); + let mut number_usize = 0; + if data.len() >= 8 { + number_usize = u64::from_be_bytes([ + data[0], + data[1], + data[2], + data[3], + data[4], + data[5], + data[6], + data[7], + ]) as usize; + } + + ssz.append(&number_usize); + let ssz = ssz.drain(); + + // TODO: change to little endian bytes + // https://github.com/sigp/lighthouse/issues/215 + assert_eq!(ssz.len(), 8); + assert_eq!(number_usize, u64::from_be_bytes([ + ssz[0], + ssz[1], + ssz[2], + ssz[3], + ssz[4], + ssz[5], + ssz[6], + ssz[7], + ]) as usize); +}); From 274bdd491dcfa0d74b6ceb51f0b63d03cb14cce3 Mon Sep 17 00:00:00 2001 From: Kirk Baird Date: Thu, 21 Feb 2019 13:43:09 +1100 Subject: [PATCH 008/154] Fuzz for address and Hash256 --- eth2/utils/ssz/fuzz/Cargo.toml | 27 ++++++++++++++++++ .../fuzz_target_address_decode.rs | 21 ++++++++++++++ .../fuzz_target_address_encode.rs | 20 +++++++++++++ .../fuzz_targets/fuzz_target_bool_decode.rs | 28 +++++++++++++++++++ .../fuzz_targets/fuzz_target_bool_encode.rs | 22 +++++++++++++++ .../fuzz_target_hash256_decode.rs | 21 ++++++++++++++ .../fuzz_target_hash256_encode.rs | 20 +++++++++++++ 7 files changed, 159 insertions(+) create mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_address_decode.rs create mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_address_encode.rs create mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_bool_decode.rs create mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_bool_encode.rs create mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_hash256_decode.rs create mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_hash256_encode.rs diff --git 
a/eth2/utils/ssz/fuzz/Cargo.toml b/eth2/utils/ssz/fuzz/Cargo.toml index 6a65fb5e2..081afdcb9 100644 --- a/eth2/utils/ssz/fuzz/Cargo.toml +++ b/eth2/utils/ssz/fuzz/Cargo.toml @@ -8,6 +8,9 @@ publish = false [package.metadata] cargo-fuzz = true +[dependencies] +ethereum-types = "0.4.0" + [dependencies.ssz] path = ".." [dependencies.libfuzzer-sys] @@ -17,6 +20,14 @@ git = "https://github.com/rust-fuzz/libfuzzer-sys.git" [workspace] members = ["."] +[[bin]] +name = "fuzz_target_bool_decode" +path = "fuzz_targets/fuzz_target_bool_decode.rs" + +[[bin]] +name = "fuzz_target_bool_encode" +path = "fuzz_targets/fuzz_target_bool_encode.rs" + [[bin]] name = "fuzz_target_u8_decode" path = "fuzz_targets/fuzz_target_u8_decode.rs" @@ -56,3 +67,19 @@ path = "fuzz_targets/fuzz_target_usize_decode.rs" [[bin]] name = "fuzz_target_usize_encode" path = "fuzz_targets/fuzz_target_usize_encode.rs" + +[[bin]] +name = "fuzz_target_hash256_decode" +path = "fuzz_targets/fuzz_target_hash256_decode.rs" + +[[bin]] +name = "fuzz_target_hash256_encode" +path = "fuzz_targets/fuzz_target_hash256_encode.rs" + +[[bin]] +name = "fuzz_target_address_decode" +path = "fuzz_targets/fuzz_target_address_decode.rs" + +[[bin]] +name = "fuzz_target_address_encode" +path = "fuzz_targets/fuzz_target_address_encode.rs" diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_address_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_address_decode.rs new file mode 100644 index 000000000..c49be500a --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_address_decode.rs @@ -0,0 +1,21 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ethereum_types; +extern crate ssz; + +use ethereum_types::Address; +use ssz::{DecodeError, Decodable}; + +// Fuzz ssz_decode() +fuzz_target!(|data: &[u8]| { + let result: Result<(Address, usize), DecodeError> = Decodable::ssz_decode(data, 0); + if data.len() >= 20 { + // Should have valid result + let (address, index) = result.unwrap(); + assert_eq!(index, 20); + assert_eq!(address, Address::from_slice(&data[..20])); + } else { + // Length of less than 32 should return error + assert_eq!(result, Err(DecodeError::TooShort)); + } +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_address_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_address_encode.rs new file mode 100644 index 000000000..0e51e00ac --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_address_encode.rs @@ -0,0 +1,20 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ethereum_types; +extern crate ssz; + +use ethereum_types::Address; +use ssz::SszStream; + +// Fuzz ssz_encode (via ssz_append) +fuzz_target!(|data: &[u8]| { + let mut ssz = SszStream::new(); + if data.len() >= 20 { + let hash = Address::from_slice(&data[..20]); + ssz.append(&hash); + let ssz = ssz.drain(); + + assert_eq!(data[..20], ssz[..20]); + assert_eq!(ssz.len(), 20); + } +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_bool_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_bool_decode.rs new file mode 100644 index 000000000..4fb1052b1 --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_bool_decode.rs @@ -0,0 +1,28 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::{DecodeError, Decodable}; + +// Fuzz ssz_decode() +fuzz_target!(|data: &[u8]| { + let result: Result<(bool, usize), DecodeError> = Decodable::ssz_decode(data, 0); + if data.len() >= 1 { + // TODO: change to little endian bytes + // 
https://github.com/sigp/lighthouse/issues/215 + if data[0] == u8::pow(2,7) { + let (val_bool, index) = result.unwrap(); + assert!(val_bool); + assert_eq!(index, 1); + } else if data[0] == 0 { + let (val_bool, index) = result.unwrap(); + assert!(!val_bool); + assert_eq!(index, 1); + } else { + assert_eq!(result, Err(DecodeError::Invalid)); + } + } else { + // Length of 0 should return error + assert_eq!(result, Err(DecodeError::TooShort)); + } +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_bool_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_bool_encode.rs new file mode 100644 index 000000000..4f344cb7d --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_bool_encode.rs @@ -0,0 +1,22 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::SszStream; + +// Fuzz ssz_encode (via ssz_append) +fuzz_target!(|data: &[u8]| { + let mut ssz = SszStream::new(); + let mut val_bool = 0; + if data.len() >= 1 { + val_bool = data[0] % u8::pow(2, 6); + } + + ssz.append(&val_bool); + let ssz = ssz.drain(); + + // TODO: change to little endian bytes + // https://github.com/sigp/lighthouse/issues/215 + assert_eq!(val_bool, ssz[0] % u8::pow(2, 6)); + assert_eq!(ssz.len(), 1); +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_hash256_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_hash256_decode.rs new file mode 100644 index 000000000..e4ccc56a4 --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_hash256_decode.rs @@ -0,0 +1,21 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ethereum_types; +extern crate ssz; + +use ethereum_types::H256; +use ssz::{DecodeError, Decodable}; + +// Fuzz ssz_decode() +fuzz_target!(|data: &[u8]| { + let result: Result<(H256, usize), DecodeError> = Decodable::ssz_decode(data, 0); + if data.len() >= 32 { + // Should have valid result + let (hash, index) = result.unwrap(); + assert_eq!(index, 32); + assert_eq!(hash, H256::from_slice(&data[..32])); + } else { + // Length of less than 32 should return error + assert_eq!(result, Err(DecodeError::TooShort)); + } +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_hash256_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_hash256_encode.rs new file mode 100644 index 000000000..537d9cdf9 --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_hash256_encode.rs @@ -0,0 +1,20 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ethereum_types; +extern crate ssz; + +use ethereum_types::H256; +use ssz::SszStream; + +// Fuzz ssz_encode (via ssz_append) +fuzz_target!(|data: &[u8]| { + let mut ssz = SszStream::new(); + if data.len() >= 32 { + let hash = H256::from_slice(&data[..32]); + ssz.append(&hash); + let ssz = ssz.drain(); + + assert_eq!(data[..32], ssz[..32]); + assert_eq!(ssz.len(), 32); + } +}); From 68017b66fdbcc1ea5b19876252ce813d20cab60c Mon Sep 17 00:00:00 2001 From: Kirk Baird Date: Thu, 21 Feb 2019 14:23:21 +1100 Subject: [PATCH 009/154] Fuzzing for Vec --- eth2/utils/ssz/fuzz/Cargo.toml | 8 +++++++ .../fuzz_targets/fuzz_target_vec_decode.rs | 21 +++++++++++++++++++ .../fuzz_targets/fuzz_target_vec_encode.rs | 15 +++++++++++++ 3 files changed, 44 insertions(+) create mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_decode.rs create mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_encode.rs diff --git a/eth2/utils/ssz/fuzz/Cargo.toml b/eth2/utils/ssz/fuzz/Cargo.toml index 081afdcb9..c76cbbbde 100644 --- a/eth2/utils/ssz/fuzz/Cargo.toml +++ 
b/eth2/utils/ssz/fuzz/Cargo.toml @@ -83,3 +83,11 @@ path = "fuzz_targets/fuzz_target_address_decode.rs" [[bin]] name = "fuzz_target_address_encode" path = "fuzz_targets/fuzz_target_address_encode.rs" + +[[bin]] +name = "fuzz_target_vec_decode" +path = "fuzz_targets/fuzz_target_vec_decode.rs" + +[[bin]] +name = "fuzz_target_vec_encode" +path = "fuzz_targets/fuzz_target_vec_encode.rs" diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_decode.rs new file mode 100644 index 000000000..048d19ee5 --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_decode.rs @@ -0,0 +1,21 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ethereum_types; +extern crate ssz; + +use ethereum_types::{Address, H256}; +use ssz::{DecodeError, Decodable}; + +// Fuzz ssz_decode() +fuzz_target!(|data: &[u8]| { + let _result: Result<(Vec<u8>, usize), DecodeError> = Decodable::ssz_decode(data, 0); + /* + let _result: Result<(Vec<u16>, usize), DecodeError> = Decodable::ssz_decode(data, 0); + let _result: Result<(Vec<u32>, usize), DecodeError> = Decodable::ssz_decode(data, 0); + let _result: Result<(Vec<u64>, usize), DecodeError> = Decodable::ssz_decode(data, 0); + let _result: Result<(Vec<usize>, usize), DecodeError> = Decodable::ssz_decode(data, 0); + let _result: Result<(Vec<bool>
, usize), DecodeError> = Decodable::ssz_decode(data, 0); + let _result: Result<(Vec, usize), DecodeError> = Decodable::ssz_decode(data, 0); + let _result: Result<(Vec, usize), DecodeError> = Decodable::ssz_decode(data, 0); + */ +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_encode.rs new file mode 100644 index 000000000..6980e5d20 --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_encode.rs @@ -0,0 +1,15 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ethereum_types; +extern crate ssz; + +use ethereum_types::{Address, H256}; +use ssz::SszStream; + +// Fuzz ssz_decode() +fuzz_target!(|data: &[u8]| { + + let mut ssz = SszStream::new(); + let data_vec = data.to_vec(); + ssz.append(&data_vec); +}); From ab1dc7bfceceb45a8d272bdfdeab870531fce285 Mon Sep 17 00:00:00 2001 From: Kirk Baird Date: Fri, 22 Feb 2019 16:50:14 +1100 Subject: [PATCH 010/154] Add simple fuzz tests for hashing and boolean-bitfield --- eth2/utils/boolean-bitfield/fuzz/.gitignore | 4 +++ eth2/utils/boolean-bitfield/fuzz/Cargo.toml | 33 +++++++++++++++++++ .../fuzz_targets/fuzz_target_from_bytes.rs | 9 +++++ .../fuzz_targets/fuzz_target_ssz_decode.rs | 11 +++++++ .../fuzz_targets/fuzz_target_ssz_encode.rs | 13 ++++++++ eth2/utils/hashing/fuzz/.gitignore | 4 +++ eth2/utils/hashing/fuzz/Cargo.toml | 22 +++++++++++++ .../fuzz/fuzz_targets/fuzz_target_hash.rs | 9 +++++ 8 files changed, 105 insertions(+) create mode 100644 eth2/utils/boolean-bitfield/fuzz/.gitignore create mode 100644 eth2/utils/boolean-bitfield/fuzz/Cargo.toml create mode 100644 eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_from_bytes.rs create mode 100644 eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_ssz_decode.rs create mode 100644 eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_ssz_encode.rs create mode 100644 eth2/utils/hashing/fuzz/.gitignore create mode 100644 eth2/utils/hashing/fuzz/Cargo.toml create mode 100644 eth2/utils/hashing/fuzz/fuzz_targets/fuzz_target_hash.rs diff --git a/eth2/utils/boolean-bitfield/fuzz/.gitignore b/eth2/utils/boolean-bitfield/fuzz/.gitignore new file mode 100644 index 000000000..572e03bdf --- /dev/null +++ b/eth2/utils/boolean-bitfield/fuzz/.gitignore @@ -0,0 +1,4 @@ + +target +corpus +artifacts diff --git a/eth2/utils/boolean-bitfield/fuzz/Cargo.toml b/eth2/utils/boolean-bitfield/fuzz/Cargo.toml new file mode 100644 index 000000000..9769fc50e --- /dev/null +++ b/eth2/utils/boolean-bitfield/fuzz/Cargo.toml @@ -0,0 +1,33 @@ + +[package] +name = "boolean-bitfield-fuzz" +version = "0.0.1" +authors = ["Automatically generated"] +publish = false + +[package.metadata] +cargo-fuzz = true + +[dependencies] +ssz = { path = "../../ssz" } + +[dependencies.boolean-bitfield] +path = ".." 
+[dependencies.libfuzzer-sys] +git = "https://github.com/rust-fuzz/libfuzzer-sys.git" + +# Prevent this from interfering with workspaces +[workspace] +members = ["."] + +[[bin]] +name = "fuzz_target_from_bytes" +path = "fuzz_targets/fuzz_target_from_bytes.rs" + +[[bin]] +name = "fuzz_target_ssz_decode" +path = "fuzz_targets/fuzz_target_ssz_decode.rs" + +[[bin]] +name = "fuzz_target_ssz_encode" +path = "fuzz_targets/fuzz_target_ssz_encode.rs" diff --git a/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_from_bytes.rs b/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_from_bytes.rs new file mode 100644 index 000000000..0c71c6d68 --- /dev/null +++ b/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_from_bytes.rs @@ -0,0 +1,9 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate boolean_bitfield; + +use boolean_bitfield::BooleanBitfield; + +fuzz_target!(|data: &[u8]| { + let _result = BooleanBitfield::from_bytes(data); +}); diff --git a/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_ssz_decode.rs b/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_ssz_decode.rs new file mode 100644 index 000000000..14ddbb0a9 --- /dev/null +++ b/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_ssz_decode.rs @@ -0,0 +1,11 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate boolean_bitfield; +extern crate ssz; + +use boolean_bitfield::BooleanBitfield; +use ssz::{Decodable, DecodeError}; + +fuzz_target!(|data: &[u8]| { + let result: Result<(BooleanBitfield, usize), DecodeError> = <_>::ssz_decode(data, 0); +}); diff --git a/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_ssz_encode.rs b/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_ssz_encode.rs new file mode 100644 index 000000000..0626e5db7 --- /dev/null +++ b/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_ssz_encode.rs @@ -0,0 +1,13 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate boolean_bitfield; +extern crate ssz; + +use boolean_bitfield::BooleanBitfield; +use ssz::SszStream; + +fuzz_target!(|data: &[u8]| { + let bitfield = BooleanBitfield::from_bytes(data); + let mut ssz = SszStream::new(); + ssz.append(&bitfield); +}); diff --git a/eth2/utils/hashing/fuzz/.gitignore b/eth2/utils/hashing/fuzz/.gitignore new file mode 100644 index 000000000..572e03bdf --- /dev/null +++ b/eth2/utils/hashing/fuzz/.gitignore @@ -0,0 +1,4 @@ + +target +corpus +artifacts diff --git a/eth2/utils/hashing/fuzz/Cargo.toml b/eth2/utils/hashing/fuzz/Cargo.toml new file mode 100644 index 000000000..57e0172eb --- /dev/null +++ b/eth2/utils/hashing/fuzz/Cargo.toml @@ -0,0 +1,22 @@ + +[package] +name = "hashing-fuzz" +version = "0.0.1" +authors = ["Automatically generated"] +publish = false + +[package.metadata] +cargo-fuzz = true + +[dependencies.hashing] +path = ".." 
+[dependencies.libfuzzer-sys] +git = "https://github.com/rust-fuzz/libfuzzer-sys.git" + +# Prevent this from interfering with workspaces +[workspace] +members = ["."] + +[[bin]] +name = "fuzz_target_hash" +path = "fuzz_targets/fuzz_target_hash.rs" diff --git a/eth2/utils/hashing/fuzz/fuzz_targets/fuzz_target_hash.rs b/eth2/utils/hashing/fuzz/fuzz_targets/fuzz_target_hash.rs new file mode 100644 index 000000000..dd78d1ac8 --- /dev/null +++ b/eth2/utils/hashing/fuzz/fuzz_targets/fuzz_target_hash.rs @@ -0,0 +1,9 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate hashing; + +use hashing::hash; + +fuzz_target!(|data: &[u8]| { + let _result = hash(data); +}); From 19a64f906e14e72f205e3c4d391446d4069aac9b Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 28 Feb 2019 10:24:27 +1100 Subject: [PATCH 011/154] Initial beacon node setup. - Add network crate. - Add sync crate. - Add version crate. - Add lighthouse configuration. - Add network configuration. --- Cargo.toml | 3 + beacon_node/Cargo.toml | 9 +- beacon_node/db/src/lib.rs | 7 ++ .../db/src/stores/beacon_block_store.rs | 2 +- beacon_node/network/Cargo.toml | 10 ++ beacon_node/network/src/lib.rs | 4 + .../network/src/network_configuration.rs | 39 ++++++ beacon_node/src/config.rs | 85 +++++++++++++ beacon_node/src/config/mod.rs | 30 ----- beacon_node/src/error.rs | 8 ++ beacon_node/src/main.rs | 119 +++--------------- beacon_node/src/run.rs | 54 ++++++++ beacon_node/sync/Cargo.toml | 8 ++ beacon_node/sync/src/lib.rs | 68 ++++++++++ beacon_node/version/Cargo.toml | 8 ++ beacon_node/version/src/lib.rs | 25 ++++ eth2/fork_choice/src/lib.rs | 1 + 17 files changed, 345 insertions(+), 135 deletions(-) create mode 100644 beacon_node/network/Cargo.toml create mode 100644 beacon_node/network/src/lib.rs create mode 100644 beacon_node/network/src/network_configuration.rs create mode 100644 beacon_node/src/config.rs delete mode 100644 beacon_node/src/config/mod.rs create mode 100644 beacon_node/src/error.rs create mode 100644 beacon_node/src/run.rs create mode 100644 beacon_node/sync/Cargo.toml create mode 100644 beacon_node/sync/src/lib.rs create mode 100644 beacon_node/version/Cargo.toml create mode 100644 beacon_node/version/src/lib.rs diff --git a/Cargo.toml b/Cargo.toml index 42d69489b..1090a9b6f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,6 +18,9 @@ members = [ "eth2/utils/test_random_derive", "beacon_node", "beacon_node/db", + "beacon_node/network", + "beacon_node/sync", + "beacon_node/version", "beacon_node/beacon_chain", "beacon_node/beacon_chain/test_harness", "protos", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index a4804e07e..f909d5103 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "beacon_node" version = "0.1.0" -authors = ["Paul Hauner "] +authors = ["Paul Hauner ", "Age Manning BeaconBlockStore { } } - /// Retuns an object implementing `BeaconBlockReader`, or `None` (if hash not known). + /// Returns an object implementing `BeaconBlockReader`, or `None` (if hash not known). /// /// Note: Presently, this function fully deserializes a `BeaconBlock` and returns that. 
In the /// future, it would be ideal to return an object capable of reading directly from serialized diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml new file mode 100644 index 000000000..57f75e273 --- /dev/null +++ b/beacon_node/network/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "network" +version = "0.1.0" +authors = ["Age Manning "] +edition = "2018" + +[dependencies] +# SigP repository until PR is merged +libp2p = { git = "https://github.com/SigP/rust-libp2p", branch = "gossipsub" } +version = { path = "../version" } diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs new file mode 100644 index 000000000..1dc56ec4f --- /dev/null +++ b/beacon_node/network/src/lib.rs @@ -0,0 +1,4 @@ +/// This crate provides the network server for Lighthouse. +mod network_configuration; + +pub use network_configuration::NetworkConfiguration; diff --git a/beacon_node/network/src/network_configuration.rs b/beacon_node/network/src/network_configuration.rs new file mode 100644 index 000000000..64d763287 --- /dev/null +++ b/beacon_node/network/src/network_configuration.rs @@ -0,0 +1,39 @@ +use libp2p::gossipsub::{GossipsubConfig, GossipsubConfigBuilder}; +use std::net::IpAddr; +use version; + +#[derive(Debug, Clone)] +/// Network configuration for lighthouse. +pub struct NetworkConfiguration { + //TODO: stubbing networking initial params, change in the future + /// IP address to listen on. + pub listen_address: Option, + /// Listen port UDP/TCP. + pub listen_port: Option, + /// Gossipsub configuration parameters. + pub gs_config: GossipsubConfig, + /// List of nodes to initially connect to. + pub boot_nodes: Vec, + /// Client version + pub client_version: String, + //TODO: more to be added +} + +impl Default for NetworkConfiguration { + /// Generate a default network configuration. + fn default() -> Self { + NetworkConfiguration { + listen_address: None, + listen_port: None, + gs_config: GossipsubConfigBuilder::new().build(), + boot_nodes: Vec::new(), + client_version: version::version(), + } + } +} + +impl NetworkConfiguration { + pub fn new() -> Self { + NetworkConfiguration::default() + } +} diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs new file mode 100644 index 000000000..c7fa57909 --- /dev/null +++ b/beacon_node/src/config.rs @@ -0,0 +1,85 @@ +use clap::ArgMatches; +use db::DBType; +use fork_choice::ForkChoiceAlgorithm; +use network::NetworkConfiguration; +use slog::error; +use std::fs; +use std::net::IpAddr; +use std::path::PathBuf; +use types::ChainSpec; + +/// Stores the core configuration for this Lighthouse instance. +/// This struct is general, other components may implement more +/// specialized configuration structs. +#[derive(Debug, Clone)] +pub struct Config { + pub data_dir: PathBuf, + pub spec: ChainSpec, + pub net_conf: network::NetworkConfiguration, + pub fork_choice: ForkChoiceAlgorithm, + pub db_type: DBType, + pub db_name: PathBuf, + //pub rpc_conf: + //pub ipc_conf: +} + +impl Default for Config { + /// Build a new lighthouse configuration from defaults. 
+ fn default() -> Self { + let data_dir = { + let home = dirs::home_dir().expect("Unable to determine home dir."); + home.join(".lighthouse/") + }; + fs::create_dir_all(&data_dir) + .unwrap_or_else(|_| panic!("Unable to create {:?}", &data_dir)); + Self { + data_dir: data_dir.clone(), + // default to foundation for chain specs + spec: ChainSpec::foundation(), + net_conf: NetworkConfiguration::default(), + // default to bitwise LMD Ghost + fork_choice: ForkChoiceAlgorithm::BitwiseLMDGhost, + // default to memory db for now + db_type: DBType::Memory, + // default db name for disk-based dbs + db_name: data_dir.join("chain.db"), + } + } +} + +impl Config { + /// Parses the CLI arguments into a `Config` struct. + pub fn parse_args(args: ArgMatches, log: &slog::Logger) -> Result { + let mut config = Config::default(); + + // Network related args + + // Custom listening address ipv4/ipv6 + if let Some(listen_address_str) = args.value_of("listen_address") { + if let Ok(listen_address) = listen_address_str.parse::() { + config.net_conf.listen_address = Some(listen_address); + } else { + error!(log, "Invalid Ip Address"; "Address" => listen_address_str); + return Err("Invalid Ip Address"); + } + } + // Custom p2p listen port + if let Some(port_str) = args.value_of("port") { + if let Ok(port) = port_str.parse::() { + config.net_conf.listen_port = Some(port); + } else { + error!(log, "Invalid port"; "port" => port_str); + return Err("Invalid port"); + } + } + + // filesystem args + + // Custom datadir + if let Some(dir) = args.value_of("datadir") { + config.data_dir = PathBuf::from(dir.to_string()); + }; + + Ok(config) + } +} diff --git a/beacon_node/src/config/mod.rs b/beacon_node/src/config/mod.rs deleted file mode 100644 index 5c94e300c..000000000 --- a/beacon_node/src/config/mod.rs +++ /dev/null @@ -1,30 +0,0 @@ -use std::fs; -use std::path::PathBuf; - -/// Stores the core configuration for this Lighthouse instance. -/// This struct is general, other components may implement more -/// specialized config structs. -#[derive(Clone)] -pub struct LighthouseConfig { - pub data_dir: PathBuf, - pub p2p_listen_port: u16, -} - -const DEFAULT_LIGHTHOUSE_DIR: &str = ".lighthouse"; - -impl LighthouseConfig { - /// Build a new lighthouse configuration from defaults. - pub fn default() -> Self { - let data_dir = { - let home = dirs::home_dir().expect("Unable to determine home dir."); - home.join(DEFAULT_LIGHTHOUSE_DIR) - }; - fs::create_dir_all(&data_dir) - .unwrap_or_else(|_| panic!("Unable to create {:?}", &data_dir)); - let p2p_listen_port = 0; - Self { - data_dir, - p2p_listen_port, - } - } -} diff --git a/beacon_node/src/error.rs b/beacon_node/src/error.rs new file mode 100644 index 000000000..163fe575d --- /dev/null +++ b/beacon_node/src/error.rs @@ -0,0 +1,8 @@ +// generates error types + +use error_chain::{ + error_chain, error_chain_processing, impl_error_chain_kind, impl_error_chain_processed, + impl_extract_backtrace, +}; + +error_chain! 
{} diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index b9ef2c8a7..ed26a55fe 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -1,34 +1,23 @@ extern crate slog; mod config; +mod error; mod rpc; +mod run; -use std::path::PathBuf; - -use crate::config::LighthouseConfig; -use crate::rpc::start_server; -use beacon_chain::BeaconChain; -use bls::create_proof_of_possession; use clap::{App, Arg}; -use db::{ - stores::{BeaconBlockStore, BeaconStateStore}, - MemoryDB, -}; -use fork_choice::BitwiseLMDGhost; -use slog::{error, info, o, Drain}; -use slot_clock::SystemTimeSlotClock; -use std::sync::Arc; -use types::{ChainSpec, Deposit, DepositData, DepositInput, Eth1Data, Hash256, Keypair}; +use config::Config; +use slog::{o, Drain}; fn main() { let decorator = slog_term::TermDecorator::new().build(); let drain = slog_term::CompactFormat::new(decorator).build().fuse(); let drain = slog_async::Async::new(drain).build().fuse(); - let log = slog::Logger::root(drain, o!()); + let logger = slog::Logger::root(drain, o!()); let matches = App::new("Lighthouse") - .version("0.0.1") - .author("Sigma Prime ") + .version(version::version().as_str()) + .author("Sigma Prime ") .about("Eth 2.0 Client") .arg( Arg::with_name("datadir") @@ -37,6 +26,13 @@ fn main() { .help("Data directory for keys and databases.") .takes_value(true), ) + .arg( + Arg::with_name("listen_address") + .long("listen_address") + .value_name("Listen Address") + .help("The Network address to listen for p2p connections.") + .takes_value(true), + ) .arg( Arg::with_name("port") .long("port") @@ -46,89 +42,8 @@ fn main() { ) .get_matches(); - let mut config = LighthouseConfig::default(); + // invalid arguments, panic + let config = Config::parse_args(matches, &logger).unwrap(); - // Custom datadir - if let Some(dir) = matches.value_of("datadir") { - config.data_dir = PathBuf::from(dir.to_string()); - } - - // Custom p2p listen port - if let Some(port_str) = matches.value_of("port") { - if let Ok(port) = port_str.parse::() { - config.p2p_listen_port = port; - } else { - error!(log, "Invalid port"; "port" => port_str); - return; - } - } - - // Log configuration - info!(log, ""; - "data_dir" => &config.data_dir.to_str(), - "port" => &config.p2p_listen_port); - - // Specification (presently fixed to foundation). - let spec = ChainSpec::foundation(); - - // Database (presently in-memory) - let db = Arc::new(MemoryDB::open()); - let block_store = Arc::new(BeaconBlockStore::new(db.clone())); - let state_store = Arc::new(BeaconStateStore::new(db.clone())); - - // Slot clock - let genesis_time = 1_549_935_547; // 12th Feb 2018 (arbitrary value in the past). - let slot_clock = SystemTimeSlotClock::new(genesis_time, spec.slot_duration) - .expect("Unable to load SystemTimeSlotClock"); - // Choose the fork choice - let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone()); - - /* - * Generate some random data to start a chain with. - * - * This is will need to be replace for production usage. - */ - let latest_eth1_data = Eth1Data { - deposit_root: Hash256::zero(), - block_hash: Hash256::zero(), - }; - let keypairs: Vec = (0..10) - .collect::>() - .iter() - .map(|_| Keypair::random()) - .collect(); - let initial_validator_deposits = keypairs - .iter() - .map(|keypair| Deposit { - branch: vec![], // branch verification is not specified. - index: 0, // index verification is not specified. 
- deposit_data: DepositData { - amount: 32_000_000_000, // 32 ETH (in Gwei) - timestamp: genesis_time - 1, - deposit_input: DepositInput { - pubkey: keypair.pk.clone(), - withdrawal_credentials: Hash256::zero(), // Withdrawal not possible. - proof_of_possession: create_proof_of_possession(&keypair), - }, - }, - }) - .collect(); - - // Genesis chain - let _chain_result = BeaconChain::genesis( - state_store.clone(), - block_store.clone(), - slot_clock, - genesis_time, - latest_eth1_data, - initial_validator_deposits, - spec, - fork_choice, - ); - - let _server = start_server(log.clone()); - - loop { - std::thread::sleep(std::time::Duration::from_secs(1)); - } + run::run_beacon_node(config, &logger); } diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs new file mode 100644 index 000000000..18c4c3fe0 --- /dev/null +++ b/beacon_node/src/run.rs @@ -0,0 +1,54 @@ +use crate::config::Config; +use crate::error; +use crate::rpc::start_server; +use beacon_chain::BeaconChain; +use bls::create_proof_of_possession; +use db::{ + stores::{BeaconBlockStore, BeaconStateStore}, + ClientDB, DBType, DiskDB, MemoryDB, +}; +use fork_choice::{BitwiseLMDGhost, ForkChoiceAlgorithm}; +use futures::sync::oneshot; +use network::NetworkConfiguration; +use slog::{error, info}; +use slot_clock::SystemTimeSlotClock; +use std::cell::RefCell; +use std::sync::Arc; +use tokio::runtime::{Builder, Runtime, TaskExecutor}; +use types::{ChainSpec, Deposit, DepositData, DepositInput, Eth1Data, Hash256, Keypair}; + +pub fn run_beacon_node(config: Config, log: &slog::Logger) -> error::Result<()> { + let mut runtime = Builder::new() + .name_prefix("main-") + .build() + .map_err(|e| format!("{:?}", e))?; + + // Log configuration + info!(log, ""; + "data_dir" => &config.data_dir.to_str(), + "port" => &config.net_conf.listen_port); + + // run service until ctrl-c + let (ctrlc_send, ctrlc) = oneshot::channel(); + let ctrlc_send_c = RefCell::new(Some(ctrlc_send)); + ctrlc::set_handler(move || { + if let Some(ctrlc_send) = ctrlc_send_c.try_borrow_mut().unwrap().take() { + ctrlc_send + .send(()) + .expect("Error sending termination message"); + } + }); + + let executor = runtime.executor(); + + start(config, log, executor); + + runtime.block_on(ctrlc); + + info!(log, "Shutting down."); + //TODO: handle shutdown of processes gracefully + + Ok(()) +} + +fn start(config: Config, log: &slog::Logger, executor: TaskExecutor) {} diff --git a/beacon_node/sync/Cargo.toml b/beacon_node/sync/Cargo.toml new file mode 100644 index 000000000..347506bf0 --- /dev/null +++ b/beacon_node/sync/Cargo.toml @@ -0,0 +1,8 @@ +[package] +name = "sync" +version = "0.1.0" +authors = ["Age Manning "] +edition = "2018" + +[dependencies] + diff --git a/beacon_node/sync/src/lib.rs b/beacon_node/sync/src/lib.rs new file mode 100644 index 000000000..f520e9e09 --- /dev/null +++ b/beacon_node/sync/src/lib.rs @@ -0,0 +1,68 @@ +// /// Syncing for lighthouse. + +/* +// for initial testing and setup, to be replaced. +pub fn sync_server(config: Config) { + // Set up database + let db = match config.db_type { + _ => Arc::new(MemoryDB::open()), + //TODO: Box db + //DBType::Memory => Arc::new(Box::new(MemoryDB::open())), + //DBType::RocksDB => Arc::new(Box::new(DiskDB::open(&config.db_name, None))), + }; + + // build block + let block_store = Arc::new(BeaconBlockStore::new(db.clone())); + let state_store = Arc::new(BeaconStateStore::new(db.clone())); + + // Slot clock + let genesis_time = 1_549_935_547; // 12th Feb 2018 (arbitrary value in the past). 
+ let slot_clock = SystemTimeSlotClock::new(genesis_time, spec.slot_duration) + .expect("Unable to load SystemTimeSlotClock"); + // Choose the fork choice + let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone()); + + /* + * Generate some random data to start a chain with. + * + * This is will need to be replace for production usage. + */ +let latest_eth1_data = Eth1Data { +deposit_root: Hash256::zero(), +block_hash: Hash256::zero(), +}; +let keypairs: Vec = (0..10) +.collect::>() +.iter() +.map(|_| Keypair::random()) +.collect(); +let initial_validator_deposits = keypairs +.iter() +.map(|keypair| Deposit { +branch: vec![], // branch verification is not specified. +index: 0, // index verification is not specified. +deposit_data: DepositData { +amount: 32_000_000_000, // 32 ETH (in Gwei) +timestamp: genesis_time - 1, +deposit_input: DepositInput { +pubkey: keypair.pk.clone(), +withdrawal_credentials: Hash256::zero(), // Withdrawal not possible. +proof_of_possession: create_proof_of_possession(&keypair), +}, +}, +}) +.collect(); + +// Genesis chain +let _chain_result = BeaconChain::genesis( +state_store.clone(), +block_store.clone(), +slot_clock, +genesis_time, +latest_eth1_data, +initial_validator_deposits, +spec, +fork_choice, +); +} +*/ diff --git a/beacon_node/version/Cargo.toml b/beacon_node/version/Cargo.toml new file mode 100644 index 000000000..0497408f1 --- /dev/null +++ b/beacon_node/version/Cargo.toml @@ -0,0 +1,8 @@ +[package] +name = "version" +version = "0.1.0" +authors = ["Age Manning "] +edition = "2018" + +[dependencies] +target_info = "0.1.0" diff --git a/beacon_node/version/src/lib.rs b/beacon_node/version/src/lib.rs new file mode 100644 index 000000000..628186aa0 --- /dev/null +++ b/beacon_node/version/src/lib.rs @@ -0,0 +1,25 @@ +//TODO: Build the version and hash of the built lighthouse binary + +/// Version information for the Lighthouse beacon node. +// currently only supports unstable release +extern crate target_info; + +use target_info::Target; + +const TRACK: &'static str = "unstable"; + +/// Provides the current platform +pub fn platform() -> String { + format!("{}-{}", Target::arch(), Target::os()) +} + +/// Version of the beacon node. +// TODO: Find the sha3 hash, date and rust version used to build the beacon_node binary +pub fn version() -> String { + format!( + "Lighthouse/v{}-{}/{}", + env!("CARGO_PKG_VERSION"), + TRACK, + platform() + ) +} diff --git a/eth2/fork_choice/src/lib.rs b/eth2/fork_choice/src/lib.rs index 6062c19b1..318ac99a8 100644 --- a/eth2/fork_choice/src/lib.rs +++ b/eth2/fork_choice/src/lib.rs @@ -94,6 +94,7 @@ impl From for ForkChoiceError { } /// Fork choice options that are currently implemented. +#[derive(Debug, Clone)] pub enum ForkChoiceAlgorithm { /// Chooses the longest chain becomes the head. Not for production. LongestChain, From 2e020a3efaac9361fed1dede756731e081156c33 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 1 Mar 2019 12:45:01 +1100 Subject: [PATCH 012/154] Implement the basic structure of the beacon node. 
--- Cargo.toml | 2 + beacon_node/Cargo.toml | 23 +++------ beacon_node/client/Cargo.toml | 22 ++++++++ .../config.rs => client/src/client_config.rs} | 12 ++--- beacon_node/client/src/client_types.rs | 25 ++++++++++ beacon_node/{ => client}/src/error.rs | 0 beacon_node/client/src/lib.rs | 50 +++++++++++++++++++ beacon_node/client/src/notifier.rs | 35 +++++++++++++ beacon_node/rpc/Cargo.toml | 23 +++++++++ .../{src/rpc => rpc/src}/beacon_block.rs | 0 .../{src/rpc/mod.rs => rpc/src/lib.rs} | 0 beacon_node/{src/rpc => rpc/src}/validator.rs | 0 beacon_node/src/main.rs | 9 ++-- beacon_node/src/run.rs | 43 +++++++--------- 14 files changed, 188 insertions(+), 56 deletions(-) create mode 100644 beacon_node/client/Cargo.toml rename beacon_node/{src/config.rs => client/src/client_config.rs} (90%) create mode 100644 beacon_node/client/src/client_types.rs rename beacon_node/{ => client}/src/error.rs (100%) create mode 100644 beacon_node/client/src/lib.rs create mode 100755 beacon_node/client/src/notifier.rs create mode 100644 beacon_node/rpc/Cargo.toml rename beacon_node/{src/rpc => rpc/src}/beacon_block.rs (100%) rename beacon_node/{src/rpc/mod.rs => rpc/src/lib.rs} (100%) rename beacon_node/{src/rpc => rpc/src}/validator.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index 1090a9b6f..6cd11c438 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,7 +18,9 @@ members = [ "eth2/utils/test_random_derive", "beacon_node", "beacon_node/db", + "beacon_node/client", "beacon_node/network", + "beacon_node/rpc", "beacon_node/sync", "beacon_node/version", "beacon_node/beacon_chain", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index f909d5103..8b2641786 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -5,25 +5,14 @@ authors = ["Paul Hauner ", "Age Manning "] +edition = "2018" + +[dependencies] +beacon_chain = { path = "../beacon_chain" } +network = { path = "../network" } +sync = { path = "../sync" } +db = { path = "../db" } +fork_choice = { path = "../../eth2/fork_choice" } +types = { path = "../../eth2/types" } +slot_clock = { path = "../../eth2/utils/slot_clock" } + +error-chain = "0.12.0" +slog = "^2.2.3" +tokio = "0.1.15" +clap = "2.32.0" +dirs = "1.0.3" +exit-future = "0.1.3" +futures = "0.1.25" diff --git a/beacon_node/src/config.rs b/beacon_node/client/src/client_config.rs similarity index 90% rename from beacon_node/src/config.rs rename to beacon_node/client/src/client_config.rs index c7fa57909..c1580aa9f 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/client/src/client_config.rs @@ -8,11 +8,9 @@ use std::net::IpAddr; use std::path::PathBuf; use types::ChainSpec; -/// Stores the core configuration for this Lighthouse instance. -/// This struct is general, other components may implement more -/// specialized configuration structs. +/// Stores the client configuration for this Lighthouse instance. #[derive(Debug, Clone)] -pub struct Config { +pub struct ClientConfig { pub data_dir: PathBuf, pub spec: ChainSpec, pub net_conf: network::NetworkConfiguration, @@ -23,7 +21,7 @@ pub struct Config { //pub ipc_conf: } -impl Default for Config { +impl Default for ClientConfig { /// Build a new lighthouse configuration from defaults. fn default() -> Self { let data_dir = { @@ -47,10 +45,10 @@ impl Default for Config { } } -impl Config { +impl ClientConfig { /// Parses the CLI arguments into a `Config` struct. 
pub fn parse_args(args: ArgMatches, log: &slog::Logger) -> Result { - let mut config = Config::default(); + let mut config = ClientConfig::default(); // Network related args diff --git a/beacon_node/client/src/client_types.rs b/beacon_node/client/src/client_types.rs new file mode 100644 index 000000000..38ae1c8c3 --- /dev/null +++ b/beacon_node/client/src/client_types.rs @@ -0,0 +1,25 @@ +use db::{ClientDB, DiskDB, MemoryDB}; +use fork_choice::{BitwiseLMDGhost, ForkChoice}; +use slot_clock::{SlotClock, SystemTimeSlotClock, TestingSlotClock}; + +pub trait ClientTypes { + type ForkChoice: ForkChoice; + type DB: ClientDB; + type SlotClock: SlotClock; +} + +pub struct StandardClientType {} + +impl ClientTypes for StandardClientType { + type DB = DiskDB; + type ForkChoice = BitwiseLMDGhost; + type SlotClock = SystemTimeSlotClock; +} + +pub struct TestingClientType {} + +impl ClientTypes for TestingClientType { + type DB = MemoryDB; + type SlotClock = TestingSlotClock; + type ForkChoice = BitwiseLMDGhost; +} diff --git a/beacon_node/src/error.rs b/beacon_node/client/src/error.rs similarity index 100% rename from beacon_node/src/error.rs rename to beacon_node/client/src/error.rs diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs new file mode 100644 index 000000000..3bfde0e9d --- /dev/null +++ b/beacon_node/client/src/lib.rs @@ -0,0 +1,50 @@ +extern crate slog; + +mod client_config; + +pub mod client_types; +pub mod error; +pub mod notifier; + +pub use client_config::ClientConfig; +pub use client_types::ClientTypes; + +//use beacon_chain::BeaconChain; +use exit_future::{Exit, Signal}; +use std::marker::PhantomData; +//use std::sync::Arc; +use tokio::runtime::TaskExecutor; + +//use network::NetworkService; + +pub struct Client { + config: ClientConfig, + // beacon_chain: Arc>, + // network: Option>, + exit: exit_future::Exit, + exit_signal: Option, + log: slog::Logger, + phantom: PhantomData, +} + +impl Client { + pub fn new( + config: ClientConfig, + log: slog::Logger, + executor: TaskExecutor, + ) -> error::Result { + let (exit_signal, exit) = exit_future::signal(); + + Ok(Client { + config, + exit, + exit_signal: Some(exit_signal), + log, + phantom: PhantomData, + }) + } + + pub fn logger(&self) -> slog::Logger { + self.log.clone() + } +} diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs new file mode 100755 index 000000000..3edf93bf6 --- /dev/null +++ b/beacon_node/client/src/notifier.rs @@ -0,0 +1,35 @@ +use crate::Client; +use crate::ClientTypes; +use db::ClientDB; +use exit_future::Exit; +use fork_choice::ForkChoice; +use futures::{Future, Stream}; +use slog::{debug, info}; +use slot_clock::SlotClock; +use std::sync::Arc; +use std::time::{Duration, Instant}; +use tokio::runtime::TaskExecutor; +use tokio::timer::Interval; + +/// Thread that monitors the client and reports useful statistics to the user. 
+ +pub fn run(client: &Client, executor: TaskExecutor, exit: Exit) { + // notification heartbeat + let interval = Interval::new(Instant::now(), Duration::from_secs(5)); + + let log = client.logger(); + + // build heartbeat logic here + let heartbeat = move |_| { + info!(log, "Temp heartbeat output"); + Ok(()) + }; + + // map error and spawn + let log = client.logger(); + let heartbeat_interval = interval + .map_err(move |e| debug!(log, "Timer error {}", e)) + .for_each(heartbeat); + + executor.spawn(exit.until(heartbeat_interval).map(|_| ())); +} diff --git a/beacon_node/rpc/Cargo.toml b/beacon_node/rpc/Cargo.toml new file mode 100644 index 000000000..4c3333ee1 --- /dev/null +++ b/beacon_node/rpc/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "rpc" +version = "0.1.0" +authors = ["Age Manning "] +edition = "2018" + +[dependencies] +bls = { path = "../../eth2/utils/bls" } +beacon_chain = { path = "../beacon_chain" } + +protos = { path = "../../protos" } +grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] } +protobuf = "2.0.2" +clap = "2.32.0" +db = { path = "../db" } +dirs = "1.0.3" +futures = "0.1.23" +slog = "^2.2.3" +slot_clock = { path = "../../eth2/utils/slot_clock" } +slog-term = "^2.4.0" +slog-async = "^2.3.0" +types = { path = "../../eth2/types" } +ssz = { path = "../../eth2/utils/ssz" } diff --git a/beacon_node/src/rpc/beacon_block.rs b/beacon_node/rpc/src/beacon_block.rs similarity index 100% rename from beacon_node/src/rpc/beacon_block.rs rename to beacon_node/rpc/src/beacon_block.rs diff --git a/beacon_node/src/rpc/mod.rs b/beacon_node/rpc/src/lib.rs similarity index 100% rename from beacon_node/src/rpc/mod.rs rename to beacon_node/rpc/src/lib.rs diff --git a/beacon_node/src/rpc/validator.rs b/beacon_node/rpc/src/validator.rs similarity index 100% rename from beacon_node/src/rpc/validator.rs rename to beacon_node/rpc/src/validator.rs diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index ed26a55fe..09cac99b4 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -1,12 +1,9 @@ extern crate slog; -mod config; -mod error; -mod rpc; mod run; use clap::{App, Arg}; -use config::Config; +use client::ClientConfig; use slog::{o, Drain}; fn main() { @@ -43,7 +40,7 @@ fn main() { .get_matches(); // invalid arguments, panic - let config = Config::parse_args(matches, &logger).unwrap(); + let config = ClientConfig::parse_args(matches, &logger).unwrap(); - run::run_beacon_node(config, &logger); + run::run_beacon_node(config, logger); } diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index 18c4c3fe0..3207ce43d 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -1,23 +1,13 @@ -use crate::config::Config; -use crate::error; -use crate::rpc::start_server; -use beacon_chain::BeaconChain; -use bls::create_proof_of_possession; -use db::{ - stores::{BeaconBlockStore, BeaconStateStore}, - ClientDB, DBType, DiskDB, MemoryDB, -}; -use fork_choice::{BitwiseLMDGhost, ForkChoiceAlgorithm}; +use client::client_types::{StandardClientType, TestingClientType}; +use client::error; +use client::{notifier, Client, ClientConfig}; use futures::sync::oneshot; -use network::NetworkConfiguration; -use slog::{error, info}; -use slot_clock::SystemTimeSlotClock; +use futures::Future; +use slog::info; use std::cell::RefCell; -use std::sync::Arc; -use tokio::runtime::{Builder, Runtime, TaskExecutor}; -use types::{ChainSpec, Deposit, DepositData, DepositInput, Eth1Data, Hash256, Keypair}; +use tokio::runtime::Builder; -pub fn 
run_beacon_node(config: Config, log: &slog::Logger) -> error::Result<()> { +pub fn run_beacon_node(config: ClientConfig, log: slog::Logger) -> error::Result<()> { let mut runtime = Builder::new() .name_prefix("main-") .build() @@ -33,22 +23,23 @@ pub fn run_beacon_node(config: Config, log: &slog::Logger) -> error::Result<()> let ctrlc_send_c = RefCell::new(Some(ctrlc_send)); ctrlc::set_handler(move || { if let Some(ctrlc_send) = ctrlc_send_c.try_borrow_mut().unwrap().take() { - ctrlc_send - .send(()) - .expect("Error sending termination message"); + ctrlc_send.send(()).expect("Error sending ctrl-c message"); } }); + let (exit_signal, exit) = exit_future::signal(); + let executor = runtime.executor(); - start(config, log, executor); + // currently testing - using TestingNode type + let client: Client = Client::new(config, log.clone(), executor.clone())?; + notifier::run(&client, executor, exit); runtime.block_on(ctrlc); - info!(log, "Shutting down."); - //TODO: handle shutdown of processes gracefully - + info!(log, "Shutting down.."); + exit_signal.fire(); + drop(client); + runtime.shutdown_on_idle().wait().unwrap(); Ok(()) } - -fn start(config: Config, log: &slog::Logger, executor: TaskExecutor) {} From 3b8f29a9141e337738937c2efe23c496621ed90b Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 4 Mar 2019 16:39:37 +1100 Subject: [PATCH 013/154] [Temp Commit] Implements more basic skeleton code. --- beacon_node/beacon_chain/Cargo.toml | 2 +- beacon_node/beacon_chain/src/initialize.rs | 56 +++++++++++++++ beacon_node/client/src/client_config.rs | 13 ++-- beacon_node/client/src/lib.rs | 21 ++++-- beacon_node/client/src/notifier.rs | 2 +- beacon_node/libp2p/Cargo.toml | 7 ++ beacon_node/libp2p/src/lib.rs | 11 +++ beacon_node/libp2p/src/service.rs | 0 beacon_node/network/Cargo.toml | 4 +- beacon_node/network/src/error.rs | 8 +++ beacon_node/network/src/lib.rs | 6 +- beacon_node/network/src/message_handler.rs | 18 +++++ beacon_node/network/src/messages.rs | 27 +++++++ ...ork_configuration.rs => network_config.rs} | 15 ++-- beacon_node/network/src/service.rs | 0 beacon_node/src/run.rs | 1 + beacon_node/sync/Cargo.toml | 3 +- beacon_node/sync/src/lib.rs | 72 ++----------------- beacon_node/sync/src/simple_syncer.rs | 22 ++++++ 19 files changed, 195 insertions(+), 93 deletions(-) create mode 100644 beacon_node/beacon_chain/src/initialize.rs mode change 100755 => 100644 beacon_node/client/src/notifier.rs create mode 100644 beacon_node/libp2p/Cargo.toml create mode 100644 beacon_node/libp2p/src/lib.rs create mode 100644 beacon_node/libp2p/src/service.rs create mode 100644 beacon_node/network/src/error.rs create mode 100644 beacon_node/network/src/message_handler.rs create mode 100644 beacon_node/network/src/messages.rs rename beacon_node/network/src/{network_configuration.rs => network_config.rs} (75%) create mode 100644 beacon_node/network/src/service.rs create mode 100644 beacon_node/sync/src/simple_syncer.rs diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 4ce894477..b5471be5f 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "beacon_chain" version = "0.1.0" -authors = ["Paul Hauner "] +authors = ["Paul Hauner ", "Age Manning "] edition = "2018" [dependencies] diff --git a/beacon_node/beacon_chain/src/initialize.rs b/beacon_node/beacon_chain/src/initialize.rs new file mode 100644 index 000000000..14d0f81a6 --- /dev/null +++ b/beacon_node/beacon_chain/src/initialize.rs @@ -0,0 
+1,56 @@ +// Initialisation functions to generate a new BeaconChain. + +pub fn initialise_test_chain( + config: &ClientConfig, +) -> Arc> { + let spec = config.spec; + // Slot clock + let genesis_time = 1_549_935_547; // 12th Feb 2018 (arbitrary value in the past). + let slot_clock = SystemTimeSlotClock::new(genesis_time, spec.slot_duration) + .expect("Unable to load SystemTimeSlotClock"); + // Choose the fork choice + let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone()); + + /* + * Generate some random data to start a chain with. + * + * This is will need to be replace for production usage. + */ + let latest_eth1_data = Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + }; + let keypairs: Vec = (0..10) + .collect::>() + .iter() + .map(|_| Keypair::random()) + .collect(); + let initial_validator_deposits = keypairs + .iter() + .map(|keypair| Deposit { + branch: vec![], // branch verification is not specified. + index: 0, // index verification is not specified. + deposit_data: DepositData { + amount: 32_000_000_000, // 32 ETH (in Gwei) + timestamp: genesis_time - 1, + deposit_input: DepositInput { + pubkey: keypair.pk.clone(), + withdrawal_credentials: Hash256::zero(), // Withdrawal not possible. + proof_of_possession: create_proof_of_possession(&keypair), + }, + }, + }) + .collect(); + + // Genesis chain + Arc::new(BeaconChain::genesis( + state_store.clone(), + block_store.clone(), + slot_clock, + genesis_time, + latest_eth1_data, + initial_validator_deposits, + spec, + fork_choice, + )); +} diff --git a/beacon_node/client/src/client_config.rs b/beacon_node/client/src/client_config.rs index c1580aa9f..8943e783d 100644 --- a/beacon_node/client/src/client_config.rs +++ b/beacon_node/client/src/client_config.rs @@ -1,7 +1,7 @@ use clap::ArgMatches; use db::DBType; use fork_choice::ForkChoiceAlgorithm; -use network::NetworkConfiguration; +use network::NetworkConfig; use slog::error; use std::fs; use std::net::IpAddr; @@ -13,7 +13,7 @@ use types::ChainSpec; pub struct ClientConfig { pub data_dir: PathBuf, pub spec: ChainSpec, - pub net_conf: network::NetworkConfiguration, + pub net_conf: network::NetworkConfig, pub fork_choice: ForkChoiceAlgorithm, pub db_type: DBType, pub db_name: PathBuf, @@ -34,7 +34,7 @@ impl Default for ClientConfig { data_dir: data_dir.clone(), // default to foundation for chain specs spec: ChainSpec::foundation(), - net_conf: NetworkConfiguration::default(), + net_conf: NetworkConfig::default(), // default to bitwise LMD Ghost fork_choice: ForkChoiceAlgorithm::BitwiseLMDGhost, // default to memory db for now @@ -53,12 +53,13 @@ impl ClientConfig { // Network related args // Custom listening address ipv4/ipv6 + // TODO: Handle list of addresses if let Some(listen_address_str) = args.value_of("listen_address") { if let Ok(listen_address) = listen_address_str.parse::() { - config.net_conf.listen_address = Some(listen_address); + config.net_conf.listen_address = Some(Vec::new(listen_address)); } else { - error!(log, "Invalid Ip Address"; "Address" => listen_address_str); - return Err("Invalid Ip Address"); + error!(log, "Invalid IP Address"; "Address" => listen_address_str); + return Err("Invalid IP Address"); } } // Custom p2p listen port diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 3bfde0e9d..5ea6ba4a6 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -13,14 +13,15 @@ pub use client_types::ClientTypes; use exit_future::{Exit, Signal}; use 
std::marker::PhantomData; //use std::sync::Arc; +use network::NetworkService; use tokio::runtime::TaskExecutor; -//use network::NetworkService; - +/// Main beacon node client service. This provides the connection and initialisation of the clients +/// sub-services in multiple threads. pub struct Client { config: ClientConfig, // beacon_chain: Arc>, - // network: Option>, + network: Option>, exit: exit_future::Exit, exit_signal: Option, log: slog::Logger, @@ -28,6 +29,7 @@ pub struct Client { } impl Client { + /// Generate an instance of the client. Spawn and link all internal subprocesses. pub fn new( config: ClientConfig, log: slog::Logger, @@ -35,16 +37,21 @@ impl Client { ) -> error::Result { let (exit_signal, exit) = exit_future::signal(); + // TODO: generate a beacon_chain service. + + // start the network service, libp2p and syncing threads + // TODO: Add beacon_chain reference to network parameters + let network_config = config.net_config; + let network_logger = client.log.new(o!("Service" => "Network")); + let (network, network_send) = NetworkService::new(network_config, network_logger); + Ok(Client { config, exit, exit_signal: Some(exit_signal), log, + network: Some(network), phantom: PhantomData, }) } - - pub fn logger(&self) -> slog::Logger { - self.log.clone() - } } diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs old mode 100755 new mode 100644 index 3edf93bf6..68d93e735 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -17,7 +17,7 @@ pub fn run(client: &Client, executor: TaskExecutor, exit: Exi // notification heartbeat let interval = Interval::new(Instant::now(), Duration::from_secs(5)); - let log = client.logger(); + let log = client.log.new(o!("Service" => "Notifier")); // build heartbeat logic here let heartbeat = move |_| { diff --git a/beacon_node/libp2p/Cargo.toml b/beacon_node/libp2p/Cargo.toml new file mode 100644 index 000000000..f35ae4d43 --- /dev/null +++ b/beacon_node/libp2p/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "libp2p" +version = "0.1.0" +authors = ["Age Manning "] +edition = "2018" + +[dependencies] diff --git a/beacon_node/libp2p/src/lib.rs b/beacon_node/libp2p/src/lib.rs new file mode 100644 index 000000000..e20eb055f --- /dev/null +++ b/beacon_node/libp2p/src/lib.rs @@ -0,0 +1,11 @@ +/// This crate contains the main link for lighthouse to rust-libp2p. It therefore re-exports +/// all required libp2p functionality. +/// +/// This crate builds and manages the libp2p services required by the beacon node. 
+extern crate libp2p; + +mod libp2p_service; + +pub use libp2p::{GossipsubConfig, PeerId}; + +pub use libp2p_service::LibP2PService; diff --git a/beacon_node/libp2p/src/service.rs b/beacon_node/libp2p/src/service.rs new file mode 100644 index 000000000..e69de29bb diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 57f75e273..31fc9eab6 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -5,6 +5,6 @@ authors = ["Age Manning "] edition = "2018" [dependencies] -# SigP repository until PR is merged -libp2p = { git = "https://github.com/SigP/rust-libp2p", branch = "gossipsub" } +libp2p = { git = "../libp2p" } version = { path = "../version" } +types = { path = "../../eth2/types" } diff --git a/beacon_node/network/src/error.rs b/beacon_node/network/src/error.rs new file mode 100644 index 000000000..163fe575d --- /dev/null +++ b/beacon_node/network/src/error.rs @@ -0,0 +1,8 @@ +// generates error types + +use error_chain::{ + error_chain, error_chain_processing, impl_error_chain_kind, impl_error_chain_processed, + impl_extract_backtrace, +}; + +error_chain! {} diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index 1dc56ec4f..3bc555dd6 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -1,4 +1,6 @@ /// This crate provides the network server for Lighthouse. -mod network_configuration; +mod network_config; +mod service; -pub use network_configuration::NetworkConfiguration; +pub use network_config::NetworkConfig; +pub use service::NetworkService; diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs new file mode 100644 index 000000000..66a7b5815 --- /dev/null +++ b/beacon_node/network/src/message_handler.rs @@ -0,0 +1,18 @@ +use crate::node_message::NodeMessage; + +/// Handles messages received from the network and client and organises syncing. +pub struct MessageHandler { + sync: Syncer, + //TODO: Implement beacon chain + //chain: BeaconChain +} + +/// Types of messages the handler can receive. +pub enum HandlerMessage { + /// Peer has connected. + PeerConnected(PeerId), + /// Peer has disconnected, + PeerDisconnected(PeerId), + /// A Node message has been received. + Message(Peer, NodeMessage), +} diff --git a/beacon_node/network/src/messages.rs b/beacon_node/network/src/messages.rs new file mode 100644 index 000000000..5f9a666e0 --- /dev/null +++ b/beacon_node/network/src/messages.rs @@ -0,0 +1,27 @@ +use types::{H256,Slot} + +/// Messages between nodes across the network. +pub enum NodeMessage { + + Status(Status), + BlockRequest, +} + +pub struct Status { + /// Current node version. + version: u8 + /// Genesis Hash. + genesis_hash: H256 + /// Best known slot number. + best_slot: Slot + /// Best known slot hash. + best_slot_hash: H256 +} + +/// Types of messages that the network service can receive. +pub enum NetworkMessage { + /// Send a message to libp2p service. + //TODO: Define typing for messages accross the wire + Send(Node, Message), +} + diff --git a/beacon_node/network/src/network_configuration.rs b/beacon_node/network/src/network_config.rs similarity index 75% rename from beacon_node/network/src/network_configuration.rs rename to beacon_node/network/src/network_config.rs index 64d763287..98347e122 100644 --- a/beacon_node/network/src/network_configuration.rs +++ b/beacon_node/network/src/network_config.rs @@ -4,10 +4,10 @@ use version; #[derive(Debug, Clone)] /// Network configuration for lighthouse. 
-pub struct NetworkConfiguration { +pub struct NetworkConfig { //TODO: stubbing networking initial params, change in the future /// IP address to listen on. - pub listen_address: Option, + pub listen_addresses: Option>, /// Listen port UDP/TCP. pub listen_port: Option, /// Gossipsub configuration parameters. @@ -16,14 +16,13 @@ pub struct NetworkConfiguration { pub boot_nodes: Vec, /// Client version pub client_version: String, - //TODO: more to be added } -impl Default for NetworkConfiguration { +impl Default for NetworkConfig { /// Generate a default network configuration. fn default() -> Self { - NetworkConfiguration { - listen_address: None, + NetworkConfig { + listen_addresses: None, listen_port: None, gs_config: GossipsubConfigBuilder::new().build(), boot_nodes: Vec::new(), @@ -32,8 +31,8 @@ impl Default for NetworkConfiguration { } } -impl NetworkConfiguration { +impl NetworkConfig { pub fn new() -> Self { - NetworkConfiguration::default() + NetworkConfig::default() } } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs new file mode 100644 index 000000000..e69de29bb diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index 3207ce43d..f2a703cbc 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -37,6 +37,7 @@ pub fn run_beacon_node(config: ClientConfig, log: slog::Logger) -> error::Result runtime.block_on(ctrlc); + // perform global shutdown operations. info!(log, "Shutting down.."); exit_signal.fire(); drop(client); diff --git a/beacon_node/sync/Cargo.toml b/beacon_node/sync/Cargo.toml index 347506bf0..4997cd094 100644 --- a/beacon_node/sync/Cargo.toml +++ b/beacon_node/sync/Cargo.toml @@ -5,4 +5,5 @@ authors = ["Age Manning "] edition = "2018" [dependencies] - +types = { path = "../../eth2/types" } +libp2p = { git = "../libp2p/" } diff --git a/beacon_node/sync/src/lib.rs b/beacon_node/sync/src/lib.rs index f520e9e09..a901344f5 100644 --- a/beacon_node/sync/src/lib.rs +++ b/beacon_node/sync/src/lib.rs @@ -1,68 +1,10 @@ -// /// Syncing for lighthouse. +/// Syncing for lighthouse. +/// +/// Stores the various syncing methods for the beacon chain. +mod simple_sync; -/* -// for initial testing and setup, to be replaced. -pub fn sync_server(config: Config) { - // Set up database - let db = match config.db_type { - _ => Arc::new(MemoryDB::open()), - //TODO: Box db - //DBType::Memory => Arc::new(Box::new(MemoryDB::open())), - //DBType::RocksDB => Arc::new(Box::new(DiskDB::open(&config.db_name, None))), - }; +pub use crate::SimpleSync; - // build block - let block_store = Arc::new(BeaconBlockStore::new(db.clone())); - let state_store = Arc::new(BeaconStateStore::new(db.clone())); - - // Slot clock - let genesis_time = 1_549_935_547; // 12th Feb 2018 (arbitrary value in the past). - let slot_clock = SystemTimeSlotClock::new(genesis_time, spec.slot_duration) - .expect("Unable to load SystemTimeSlotClock"); - // Choose the fork choice - let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone()); - - /* - * Generate some random data to start a chain with. - * - * This is will need to be replace for production usage. - */ -let latest_eth1_data = Eth1Data { -deposit_root: Hash256::zero(), -block_hash: Hash256::zero(), -}; -let keypairs: Vec = (0..10) -.collect::>() -.iter() -.map(|_| Keypair::random()) -.collect(); -let initial_validator_deposits = keypairs -.iter() -.map(|keypair| Deposit { -branch: vec![], // branch verification is not specified. -index: 0, // index verification is not specified. 
-deposit_data: DepositData { -amount: 32_000_000_000, // 32 ETH (in Gwei) -timestamp: genesis_time - 1, -deposit_input: DepositInput { -pubkey: keypair.pk.clone(), -withdrawal_credentials: Hash256::zero(), // Withdrawal not possible. -proof_of_possession: create_proof_of_possession(&keypair), -}, -}, -}) -.collect(); - -// Genesis chain -let _chain_result = BeaconChain::genesis( -state_store.clone(), -block_store.clone(), -slot_clock, -genesis_time, -latest_eth1_data, -initial_validator_deposits, -spec, -fork_choice, -); +pub enum SyncMethod { + SimpleSync, } -*/ diff --git a/beacon_node/sync/src/simple_syncer.rs b/beacon_node/sync/src/simple_syncer.rs new file mode 100644 index 000000000..f1d0a5219 --- /dev/null +++ b/beacon_node/sync/src/simple_syncer.rs @@ -0,0 +1,22 @@ +use std::collections::HashMap; +use types::{Slot, H256}; + +/// Keeps track of syncing information for known connected peers. +pub struct PeerSyncInfo { + best_slot: Slot, + best_slot_hash: H256, +} + +/// The current syncing state. +pub enum SyncState { + Idle, + Downloading, + Stopped, +} + +/// Simple Syncing protocol. +pub struct SimpleSync { + genesis_hash: H256, + known_peers: HashMap, + state: SyncState, +} From b68adc1ae3edfb537e49e866dd9a31fc4f329e97 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 4 Mar 2019 18:31:01 +1100 Subject: [PATCH 014/154] Implement skeleton network/sync framework. --- beacon_node/client/src/client_config.rs | 2 +- beacon_node/client/src/error.rs | 8 +++- beacon_node/client/src/lib.rs | 13 +++--- beacon_node/client/src/notifier.rs | 4 +- beacon_node/libp2p/Cargo.toml | 3 ++ beacon_node/libp2p/src/lib.rs | 11 ++--- beacon_node/libp2p/src/service.rs | 11 +++++ beacon_node/network/Cargo.toml | 7 +++- beacon_node/network/src/lib.rs | 5 ++- beacon_node/network/src/message_handler.rs | 32 +++++++++++++-- beacon_node/network/src/messages.rs | 25 ++++++----- beacon_node/network/src/network_config.rs | 2 +- beacon_node/network/src/service.rs | 41 +++++++++++++++++++ beacon_node/sync/Cargo.toml | 2 +- beacon_node/sync/src/lib.rs | 3 +- .../src/{simple_syncer.rs => simple_sync.rs} | 17 ++++++-- 16 files changed, 147 insertions(+), 39 deletions(-) rename beacon_node/sync/src/{simple_syncer.rs => simple_sync.rs} (54%) diff --git a/beacon_node/client/src/client_config.rs b/beacon_node/client/src/client_config.rs index 8943e783d..ef7443839 100644 --- a/beacon_node/client/src/client_config.rs +++ b/beacon_node/client/src/client_config.rs @@ -56,7 +56,7 @@ impl ClientConfig { // TODO: Handle list of addresses if let Some(listen_address_str) = args.value_of("listen_address") { if let Ok(listen_address) = listen_address_str.parse::() { - config.net_conf.listen_address = Some(Vec::new(listen_address)); + config.net_conf.listen_addresses = Some(vec![listen_address]); } else { error!(log, "Invalid IP Address"; "Address" => listen_address_str); return Err("Invalid IP Address"); diff --git a/beacon_node/client/src/error.rs b/beacon_node/client/src/error.rs index 163fe575d..618813826 100644 --- a/beacon_node/client/src/error.rs +++ b/beacon_node/client/src/error.rs @@ -1,8 +1,14 @@ // generates error types +use network; use error_chain::{ error_chain, error_chain_processing, impl_error_chain_kind, impl_error_chain_processed, impl_extract_backtrace, }; -error_chain! {} +error_chain! 
{ + links { + Network(network::error::Error, network::error::ErrorKind); + } + +} diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 5ea6ba4a6..d0b096416 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -11,9 +11,10 @@ pub use client_types::ClientTypes; //use beacon_chain::BeaconChain; use exit_future::{Exit, Signal}; +use network::Service as NetworkService; +use slog::o; use std::marker::PhantomData; -//use std::sync::Arc; -use network::NetworkService; +use std::sync::Arc; use tokio::runtime::TaskExecutor; /// Main beacon node client service. This provides the connection and initialisation of the clients @@ -39,11 +40,11 @@ impl Client { // TODO: generate a beacon_chain service. - // start the network service, libp2p and syncing threads + // Start the network service, libp2p and syncing threads // TODO: Add beacon_chain reference to network parameters - let network_config = config.net_config; - let network_logger = client.log.new(o!("Service" => "Network")); - let (network, network_send) = NetworkService::new(network_config, network_logger); + let network_config = config.net_conf.clone(); + let network_logger = log.new(o!("Service" => "Network")); + let (network, network_send) = NetworkService::new(network_config, network_logger)?; Ok(Client { config, diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 68d93e735..dd38701c9 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -4,7 +4,7 @@ use db::ClientDB; use exit_future::Exit; use fork_choice::ForkChoice; use futures::{Future, Stream}; -use slog::{debug, info}; +use slog::{debug, info, o}; use slot_clock::SlotClock; use std::sync::Arc; use std::time::{Duration, Instant}; @@ -26,7 +26,7 @@ pub fn run(client: &Client, executor: TaskExecutor, exit: Exi }; // map error and spawn - let log = client.logger(); + let log = client.log.clone(); let heartbeat_interval = interval .map_err(move |e| debug!(log, "Timer error {}", e)) .for_each(heartbeat); diff --git a/beacon_node/libp2p/Cargo.toml b/beacon_node/libp2p/Cargo.toml index f35ae4d43..69f76369f 100644 --- a/beacon_node/libp2p/Cargo.toml +++ b/beacon_node/libp2p/Cargo.toml @@ -5,3 +5,6 @@ authors = ["Age Manning "] edition = "2018" [dependencies] +# SigP repository until PR is merged +libp2p = { git = "https://github.com/SigP/rust-libp2p", branch = "gossipsub" } +slog = "2.4.1" diff --git a/beacon_node/libp2p/src/lib.rs b/beacon_node/libp2p/src/lib.rs index e20eb055f..6f07b760f 100644 --- a/beacon_node/libp2p/src/lib.rs +++ b/beacon_node/libp2p/src/lib.rs @@ -2,10 +2,11 @@ /// all required libp2p functionality. /// /// This crate builds and manages the libp2p services required by the beacon node. -extern crate libp2p; +mod service; -mod libp2p_service; +pub use libp2p::{ + gossipsub::{GossipsubConfig, GossipsubConfigBuilder}, + PeerId, +}; -pub use libp2p::{GossipsubConfig, PeerId}; - -pub use libp2p_service::LibP2PService; +pub use service::Service; diff --git a/beacon_node/libp2p/src/service.rs b/beacon_node/libp2p/src/service.rs index e69de29bb..118f0d528 100644 --- a/beacon_node/libp2p/src/service.rs +++ b/beacon_node/libp2p/src/service.rs @@ -0,0 +1,11 @@ +use slog::debug; + +/// The configuration and state of the libp2p components for the beacon node. 
+pub struct Service {} + +impl Service { + pub fn new(log: slog::Logger) -> Self { + debug!(log, "Service starting"); + Service {} + } +} diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 31fc9eab6..f32ee1f90 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -5,6 +5,11 @@ authors = ["Age Manning "] edition = "2018" [dependencies] -libp2p = { git = "../libp2p" } +libp2p = { path = "../libp2p" } version = { path = "../version" } types = { path = "../../eth2/types" } +sync = { path = "../sync" } +slog = "2.4.1" +futures = "0.1.25" +error-chain = "0.12.0" +crossbeam-channel = "0.3.8" diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index 3bc555dd6..8ffb90b93 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -1,6 +1,9 @@ /// This crate provides the network server for Lighthouse. +pub mod error; +mod message_handler; +mod messages; mod network_config; mod service; pub use network_config::NetworkConfig; -pub use service::NetworkService; +pub use service::Service; diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 66a7b5815..87935e899 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -1,8 +1,14 @@ -use crate::node_message::NodeMessage; +use crate::error; +use crate::messages::NodeMessage; +use crossbeam_channel::{unbounded as channel, Sender}; +use libp2p::PeerId; +use slog::debug; +use sync::SimpleSync; +use types::Hash256; /// Handles messages received from the network and client and organises syncing. pub struct MessageHandler { - sync: Syncer, + sync: SimpleSync, //TODO: Implement beacon chain //chain: BeaconChain } @@ -14,5 +20,25 @@ pub enum HandlerMessage { /// Peer has disconnected, PeerDisconnected(PeerId), /// A Node message has been received. - Message(Peer, NodeMessage), + Message(PeerId, NodeMessage), +} + +impl MessageHandler { + /// Initializes and runs the MessageHandler. + pub fn new(log: slog::Logger) -> error::Result> { + debug!(log, "Service starting"); + + let (handler_send, handler_recv) = channel(); + + // Initialise sync and begin processing in thread + //TODO: Load genesis from BeaconChain + let temp_genesis = Hash256::zero(); + let sync = SimpleSync::new(temp_genesis); + + let handler = MessageHandler { sync }; + + // spawn handler thread + + Ok(handler_send) + } } diff --git a/beacon_node/network/src/messages.rs b/beacon_node/network/src/messages.rs index 5f9a666e0..05b899269 100644 --- a/beacon_node/network/src/messages.rs +++ b/beacon_node/network/src/messages.rs @@ -1,27 +1,26 @@ -use types::{H256,Slot} +use libp2p::PeerId; +use types::{Hash256, Slot}; /// Messages between nodes across the network. pub enum NodeMessage { - Status(Status), BlockRequest, } pub struct Status { - /// Current node version. - version: u8 - /// Genesis Hash. - genesis_hash: H256 - /// Best known slot number. - best_slot: Slot - /// Best known slot hash. - best_slot_hash: H256 + /// Current node version. + version: u8, + /// Genesis Hash. + genesis_hash: Hash256, + /// Best known slot number. + best_slot: Slot, + /// Best known slot hash. + best_slot_hash: Hash256, } /// Types of messages that the network service can receive. pub enum NetworkMessage { /// Send a message to libp2p service. 
- //TODO: Define typing for messages accross the wire - Send(Node, Message), + //TODO: Define typing for messages across the wire + Send(PeerId, NodeMessage), } - diff --git a/beacon_node/network/src/network_config.rs b/beacon_node/network/src/network_config.rs index 98347e122..5c513fcc6 100644 --- a/beacon_node/network/src/network_config.rs +++ b/beacon_node/network/src/network_config.rs @@ -1,4 +1,4 @@ -use libp2p::gossipsub::{GossipsubConfig, GossipsubConfigBuilder}; +use libp2p::{GossipsubConfig, GossipsubConfigBuilder}; use std::net::IpAddr; use version; diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index e69de29bb..9170628ac 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -0,0 +1,41 @@ +use crate::error; +use crate::message_handler::{HandlerMessage, MessageHandler}; +use crate::messages::{NetworkMessage, NodeMessage}; +use crate::NetworkConfig; +use crossbeam_channel::{unbounded as channel, Sender}; +use futures::sync::oneshot; +use libp2p::Service as LibP2PService; +use slog::{debug, info, o, trace, warn, Logger}; +use std::sync::{Arc, Mutex}; + +/// Service that handles communication between internal services and the libp2p network service. +pub struct Service { + //libp2p_service: Arc>, +//libp2p_thread: oneshot::Sender<()>, +//message_handler: MessageHandler, +//message_handler_send: Sender, +} + +impl Service { + pub fn new( + config: NetworkConfig, + log: slog::Logger, + ) -> error::Result<(Arc, Sender)> { + debug!(log, "Service starting"); + let (network_send, network_recv) = channel::(); + + // launch message handler thread + let message_handler_log = log.new(o!("Service" => "MessageHandler")); + let message_handler_send = MessageHandler::new(message_handler_log); + + // launch libp2p service + let libp2p_log = log.new(o!("Service" => "Libp2p")); + let libp2p_service = LibP2PService::new(libp2p_log); + + // TODO: Spawn thread to handle libp2p messages and pass to message handler thread. + + let network = Service {}; + + Ok((Arc::new(network), network_send)) + } +} diff --git a/beacon_node/sync/Cargo.toml b/beacon_node/sync/Cargo.toml index 4997cd094..a4ebe3eed 100644 --- a/beacon_node/sync/Cargo.toml +++ b/beacon_node/sync/Cargo.toml @@ -6,4 +6,4 @@ edition = "2018" [dependencies] types = { path = "../../eth2/types" } -libp2p = { git = "../libp2p/" } +libp2p = { path = "../libp2p" } diff --git a/beacon_node/sync/src/lib.rs b/beacon_node/sync/src/lib.rs index a901344f5..8f5216b85 100644 --- a/beacon_node/sync/src/lib.rs +++ b/beacon_node/sync/src/lib.rs @@ -3,8 +3,9 @@ /// Stores the various syncing methods for the beacon chain. mod simple_sync; -pub use crate::SimpleSync; +pub use simple_sync::SimpleSync; +/// Currently implemented sync methods. pub enum SyncMethod { SimpleSync, } diff --git a/beacon_node/sync/src/simple_syncer.rs b/beacon_node/sync/src/simple_sync.rs similarity index 54% rename from beacon_node/sync/src/simple_syncer.rs rename to beacon_node/sync/src/simple_sync.rs index f1d0a5219..01a6a1adf 100644 --- a/beacon_node/sync/src/simple_syncer.rs +++ b/beacon_node/sync/src/simple_sync.rs @@ -1,10 +1,11 @@ +use libp2p::PeerId; use std::collections::HashMap; -use types::{Slot, H256}; +use types::{Hash256, Slot}; /// Keeps track of syncing information for known connected peers. pub struct PeerSyncInfo { best_slot: Slot, - best_slot_hash: H256, + best_slot_hash: Hash256, } /// The current syncing state. @@ -16,7 +17,17 @@ pub enum SyncState { /// Simple Syncing protocol. 
pub struct SimpleSync { - genesis_hash: H256, + genesis_hash: Hash256, known_peers: HashMap, state: SyncState, } + +impl SimpleSync { + pub fn new(genesis_hash: Hash256) -> Self { + SimpleSync { + genesis_hash, + known_peers: HashMap::new(), + state: SyncState::Idle, + } + } +} From ac639c64274188eb9e2065af33d423cd69b47908 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 6 Mar 2019 23:31:08 +1100 Subject: [PATCH 015/154] Add libp2p transport - tcp/ws/secio and multiplexing. --- beacon_node/client/src/client_config.rs | 4 +- beacon_node/libp2p/Cargo.toml | 1 + beacon_node/libp2p/src/lib.rs | 3 +- beacon_node/libp2p/src/network_config.rs | 58 +++++++++++++++++++++++ beacon_node/libp2p/src/service.rs | 47 ++++++++++++++++-- beacon_node/network/src/lib.rs | 3 +- beacon_node/network/src/network_config.rs | 38 --------------- beacon_node/network/src/service.rs | 3 +- 8 files changed, 108 insertions(+), 49 deletions(-) create mode 100644 beacon_node/libp2p/src/network_config.rs delete mode 100644 beacon_node/network/src/network_config.rs diff --git a/beacon_node/client/src/client_config.rs b/beacon_node/client/src/client_config.rs index ef7443839..3f5fbab2f 100644 --- a/beacon_node/client/src/client_config.rs +++ b/beacon_node/client/src/client_config.rs @@ -56,7 +56,7 @@ impl ClientConfig { // TODO: Handle list of addresses if let Some(listen_address_str) = args.value_of("listen_address") { if let Ok(listen_address) = listen_address_str.parse::() { - config.net_conf.listen_addresses = Some(vec![listen_address]); + config.net_conf.listen_addresses = vec![listen_address]; } else { error!(log, "Invalid IP Address"; "Address" => listen_address_str); return Err("Invalid IP Address"); @@ -65,7 +65,7 @@ impl ClientConfig { // Custom p2p listen port if let Some(port_str) = args.value_of("port") { if let Ok(port) = port_str.parse::() { - config.net_conf.listen_port = Some(port); + config.net_conf.listen_port = port; } else { error!(log, "Invalid port"; "port" => port_str); return Err("Invalid port"); diff --git a/beacon_node/libp2p/Cargo.toml b/beacon_node/libp2p/Cargo.toml index 69f76369f..b32eed1a6 100644 --- a/beacon_node/libp2p/Cargo.toml +++ b/beacon_node/libp2p/Cargo.toml @@ -8,3 +8,4 @@ edition = "2018" # SigP repository until PR is merged libp2p = { git = "https://github.com/SigP/rust-libp2p", branch = "gossipsub" } slog = "2.4.1" +version = { path = "../version" } diff --git a/beacon_node/libp2p/src/lib.rs b/beacon_node/libp2p/src/lib.rs index 6f07b760f..7b4514337 100644 --- a/beacon_node/libp2p/src/lib.rs +++ b/beacon_node/libp2p/src/lib.rs @@ -2,11 +2,12 @@ /// all required libp2p functionality. /// /// This crate builds and manages the libp2p services required by the beacon node. +mod network_config; mod service; pub use libp2p::{ gossipsub::{GossipsubConfig, GossipsubConfigBuilder}, PeerId, }; - +pub use network_config::NetworkConfig; pub use service::Service; diff --git a/beacon_node/libp2p/src/network_config.rs b/beacon_node/libp2p/src/network_config.rs new file mode 100644 index 000000000..7cb1cf6bb --- /dev/null +++ b/beacon_node/libp2p/src/network_config.rs @@ -0,0 +1,58 @@ +use libp2p::gossipsub::{GossipsubConfig, GossipsubConfigBuilder}; +use libp2p::secio; +use std::fmt; +use std::net::IpAddr; + +#[derive(Clone)] +/// Network configuration for lighthouse. +pub struct NetworkConfig { + //TODO: stubbing networking initial params, change in the future + /// IP address to listen on. + pub listen_addresses: Vec, + /// Listen port UDP/TCP. 
+ pub listen_port: u16, + /// Gossipsub configuration parameters. + pub gs_config: GossipsubConfig, + /// List of nodes to initially connect to. + pub boot_nodes: Vec, + /// Peer key related to this nodes PeerId. + pub local_private_key: secio::SecioKeyPair, + /// Client version + pub client_version: String, +} + +impl Default for NetworkConfig { + /// Generate a default network configuration. + fn default() -> Self { + // hard-coded defaults + let bootnodes = ["127.0.0.1"]; + let default_port = 9000; + + // TODO: Currently using ed25519 key pairs. Wire protocol specifies RSA. Waiting for this + // PR to be merged to generate RSA keys: https://github.com/briansmith/ring/pull/733 + + NetworkConfig { + listen_addresses: vec!["127.0.0.1".parse().expect("correct IP address")], + listen_port: default_port, + gs_config: GossipsubConfigBuilder::new().build(), + boot_nodes: bootnodes + .iter() + .map(|s| s.parse().expect("Bootnodes must be IP addresses")) + .collect(), + local_private_key: secio::SecioKeyPair::ed25519_generated().unwrap(), + client_version: version::version(), + } + } +} + +impl NetworkConfig { + pub fn new() -> Self { + NetworkConfig::default() + } +} + +impl fmt::Debug for NetworkConfig { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "NetworkConfig: listen_addresses: {:?}, listen_port: {:?}, gs_config: {:?}, boot_nodes: {:?}, local_private_key: , client_version: {:?}", self.listen_addresses, self.listen_port, self.gs_config, self.boot_nodes, self.local_private_key.to_public_key(), self.client_version) + } +} diff --git a/beacon_node/libp2p/src/service.rs b/beacon_node/libp2p/src/service.rs index 118f0d528..53c566187 100644 --- a/beacon_node/libp2p/src/service.rs +++ b/beacon_node/libp2p/src/service.rs @@ -1,11 +1,50 @@ +use crate::NetworkConfig; +use libp2p::gossipsub::GossipsubEvent; +use libp2p::PeerId; +use libp2p::{build_tcp_ws_secio_mplex_yamux, core, secio, Transport}; use slog::debug; +use std::error; /// The configuration and state of the libp2p components for the beacon node. -pub struct Service {} +pub struct Service { + /// This node's PeerId. + peer_id: PeerId, +} impl Service { - pub fn new(log: slog::Logger) -> Self { - debug!(log, "Service starting"); - Service {} + pub fn new(config: NetworkConfig, log: slog::Logger) -> Self { + debug!(log, "Libp2p Service starting"); + + let local_private_key = config.local_private_key; + let peer_id = local_private_key.to_peer_id(); + debug!(log, "Local peer id: {:?}", peer_id); + + // Set up the transport + let transport = build_transport(local_private_key); + + //let transport = libp2p:: + + Service { peer_id } } } + +/// The implementation supports TCP/IP, WebSockets over TCP/IP, secio as the encryption layer, and +/// mplex or yamux as the multiplexing layer. +fn build_transport( + local_private_key: secio::SecioKeyPair, +) -> impl Transport< + Output = ( + PeerId, + impl core::muxing::StreamMuxer + + Send + + Sync, + ), + Error = impl error::Error + Send, + Listener = impl Send, + Dial = impl Send, + ListenerUpgrade = impl Send, +> + Clone { + // TODO: The Wire protocol currently doesn't specify encryption and this will need to be customised + // in the future. 
+ build_tcp_ws_secio_mplex_yamux(local_private_key) +} diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index 8ffb90b93..ae03d8367 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -2,8 +2,7 @@ pub mod error; mod message_handler; mod messages; -mod network_config; mod service; -pub use network_config::NetworkConfig; +pub use libp2p::NetworkConfig; pub use service::Service; diff --git a/beacon_node/network/src/network_config.rs b/beacon_node/network/src/network_config.rs deleted file mode 100644 index 5c513fcc6..000000000 --- a/beacon_node/network/src/network_config.rs +++ /dev/null @@ -1,38 +0,0 @@ -use libp2p::{GossipsubConfig, GossipsubConfigBuilder}; -use std::net::IpAddr; -use version; - -#[derive(Debug, Clone)] -/// Network configuration for lighthouse. -pub struct NetworkConfig { - //TODO: stubbing networking initial params, change in the future - /// IP address to listen on. - pub listen_addresses: Option>, - /// Listen port UDP/TCP. - pub listen_port: Option, - /// Gossipsub configuration parameters. - pub gs_config: GossipsubConfig, - /// List of nodes to initially connect to. - pub boot_nodes: Vec, - /// Client version - pub client_version: String, -} - -impl Default for NetworkConfig { - /// Generate a default network configuration. - fn default() -> Self { - NetworkConfig { - listen_addresses: None, - listen_port: None, - gs_config: GossipsubConfigBuilder::new().build(), - boot_nodes: Vec::new(), - client_version: version::version(), - } - } -} - -impl NetworkConfig { - pub fn new() -> Self { - NetworkConfig::default() - } -} diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 9170628ac..ac8d9b442 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -30,10 +30,9 @@ impl Service { // launch libp2p service let libp2p_log = log.new(o!("Service" => "Libp2p")); - let libp2p_service = LibP2PService::new(libp2p_log); + let libp2p_service = LibP2PService::new(config, libp2p_log); // TODO: Spawn thread to handle libp2p messages and pass to message handler thread. - let network = Service {}; Ok((Arc::new(network), network_send)) From e8e4c4ab9baaa1334c662bc9ef2a5a3b04e65095 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 7 Mar 2019 11:43:55 +1100 Subject: [PATCH 016/154] Adds basic structure for swarm behaviour and topology. 
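In outline, Service::new now assembles a libp2p Swarm from three parts: a transport, a network behaviour, and a topology. A minimal sketch of that assembly, assuming the build_transport and build_behaviour helpers added below; the `.clone()` calls on `local_peer_id` are included because the id is used in more than one place (the follow-up patch adds them).

    // Sketch of the swarm assembly in Service::new().
    let local_peer_id = local_private_key.to_peer_id();

    // TCP/WS transport with secio encryption and mplex/yamux multiplexing.
    let transport = build_transport(local_private_key);
    // Gossipsub message routing.
    let behaviour = build_behaviour(local_peer_id.clone(), config.gs_config);
    // Trivial topology for now: just this node's PeerId.
    let topology = local_peer_id.clone();

    // The Swarm drives the behaviour over the transport for the given topology.
    let swarm = Swarm::new(transport, behaviour, topology);
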
--- beacon_node/libp2p/Cargo.toml | 1 + beacon_node/libp2p/src/service.rs | 33 ++++++++++++++++++++++++------- 2 files changed, 27 insertions(+), 7 deletions(-) diff --git a/beacon_node/libp2p/Cargo.toml b/beacon_node/libp2p/Cargo.toml index b32eed1a6..fff7dc82d 100644 --- a/beacon_node/libp2p/Cargo.toml +++ b/beacon_node/libp2p/Cargo.toml @@ -9,3 +9,4 @@ edition = "2018" libp2p = { git = "https://github.com/SigP/rust-libp2p", branch = "gossipsub" } slog = "2.4.1" version = { path = "../version" } +tokio = "0.1.16" diff --git a/beacon_node/libp2p/src/service.rs b/beacon_node/libp2p/src/service.rs index 53c566187..528d24ce8 100644 --- a/beacon_node/libp2p/src/service.rs +++ b/beacon_node/libp2p/src/service.rs @@ -1,14 +1,17 @@ use crate::NetworkConfig; -use libp2p::gossipsub::GossipsubEvent; -use libp2p::PeerId; +use libp2p::core::{muxing::StreamMuxer, nodes::Substream}; +use libp2p::gossipsub::{Gossipsub, GossipsubConfig, GossipsubEvent}; use libp2p::{build_tcp_ws_secio_mplex_yamux, core, secio, Transport}; +use libp2p::{core::swarm::NetworkBehaviour, PeerId, Swarm}; use slog::debug; use std::error; /// The configuration and state of the libp2p components for the beacon node. pub struct Service { + /// The libp2p Swarm handler. + swarm: String, /// This node's PeerId. - peer_id: PeerId, + local_peer_id: PeerId, } impl Service { @@ -16,15 +19,22 @@ impl Service { debug!(log, "Libp2p Service starting"); let local_private_key = config.local_private_key; - let peer_id = local_private_key.to_peer_id(); - debug!(log, "Local peer id: {:?}", peer_id); + let local_peer_id = local_private_key.to_peer_id(); + debug!(log, "Local peer id: {:?}", local_peer_id); // Set up the transport let transport = build_transport(local_private_key); + // Set up gossipsub routing + let behaviour = build_behaviour(local_peer_id, config.gs_config); + // Set up Topology + let topology = local_peer_id; - //let transport = libp2p:: + let swarm = Swarm::new(transport, behaviour, topology); - Service { peer_id } + Service { + local_peer_id, + swarm, + } } } @@ -48,3 +58,12 @@ fn build_transport( // in the future. build_tcp_ws_secio_mplex_yamux(local_private_key) } + +/// Builds the network behaviour for the libp2p Swarm. +fn build_behaviour( + local_peer_id: PeerId, + config: GossipsubConfig, +) -> impl NetworkBehaviour { + // TODO: Add Kademlia/Peer discovery + Gossipsub::new(local_peer_id, config) +} From 9f13731d6d02feb3045a27471ae8061e0c5a77cc Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 7 Mar 2019 16:17:06 +1100 Subject: [PATCH 017/154] Implements a basic libp2p tcp,secio,mplex,gossipsub swarm. 
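The central pattern here is the derived behaviour: gossipsub events are buffered in a `Vec` and handed back to the swarm one at a time when the behaviour is polled. A condensed sketch of that flow follows; the generic parameters on the poll return type are reconstructed from the imports in behaviour.rs and may not match the exact signature.

    // Events arriving from gossipsub are buffered by the behaviour...
    fn inject_event(&mut self, event: GossipsubEvent) {
        self.events.push(event);
    }

    // ...and re-emitted to the swarm when it polls, oldest first.
    fn poll(&mut self) -> Async<NetworkBehaviourAction<GossipsubRpc, GossipsubEvent>> {
        if !self.events.is_empty() {
            return Async::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0)));
        }
        Async::NotReady
    }
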
--- beacon_node/libp2p/Cargo.toml | 1 + beacon_node/libp2p/src/behaviour.rs | 46 +++++++++++++++++++ beacon_node/libp2p/src/lib.rs | 1 + beacon_node/libp2p/src/service.rs | 70 ++++++++++++++++++----------- 4 files changed, 91 insertions(+), 27 deletions(-) create mode 100644 beacon_node/libp2p/src/behaviour.rs diff --git a/beacon_node/libp2p/Cargo.toml b/beacon_node/libp2p/Cargo.toml index fff7dc82d..9c4c6e7a5 100644 --- a/beacon_node/libp2p/Cargo.toml +++ b/beacon_node/libp2p/Cargo.toml @@ -10,3 +10,4 @@ libp2p = { git = "https://github.com/SigP/rust-libp2p", branch = "gossipsub" } slog = "2.4.1" version = { path = "../version" } tokio = "0.1.16" +futures = "0.1.25" diff --git a/beacon_node/libp2p/src/behaviour.rs b/beacon_node/libp2p/src/behaviour.rs new file mode 100644 index 000000000..0c9aae16e --- /dev/null +++ b/beacon_node/libp2p/src/behaviour.rs @@ -0,0 +1,46 @@ +use futures::prelude::*; +use libp2p::{ + core::swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess}, + gossipsub::{Gossipsub, GossipsubConfig, GossipsubEvent, GossipsubRpc}, + tokio_io::{AsyncRead, AsyncWrite}, + NetworkBehaviour, PeerId, +}; + +/// Builds the network behaviour for the libp2p Swarm. +/// Implements gossipsub message routing. +#[derive(NetworkBehaviour)] +pub struct Behaviour { + gossipsub: Gossipsub, + // TODO: Add Kademlia for peer discovery + /// The events generated by this behaviour to be consumed in the swarm poll. + // We use gossipsub events for now, generalise later. + #[behaviour(ignore)] + events: Vec, +} + +// Implement the NetworkBehaviourEventProcess trait so that we can derive NetworkBehaviour for Behaviour +impl NetworkBehaviourEventProcess + for Behaviour +{ + fn inject_event(&mut self, event: GossipsubEvent) { + self.events.push(event); + } +} + +impl Behaviour { + pub fn new(local_peer_id: PeerId, gs_config: GossipsubConfig) -> Self { + Behaviour { + gossipsub: Gossipsub::new(local_peer_id, gs_config), + events: Vec::new(), + } + } + + /// Consume the events list when polled. + fn poll(&mut self) -> Async> { + if !self.events.is_empty() { + return Async::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0))); + } + + Async::NotReady + } +} diff --git a/beacon_node/libp2p/src/lib.rs b/beacon_node/libp2p/src/lib.rs index 7b4514337..01dc42073 100644 --- a/beacon_node/libp2p/src/lib.rs +++ b/beacon_node/libp2p/src/lib.rs @@ -2,6 +2,7 @@ /// all required libp2p functionality. /// /// This crate builds and manages the libp2p services required by the beacon node. +mod behaviour; mod network_config; mod service; diff --git a/beacon_node/libp2p/src/service.rs b/beacon_node/libp2p/src/service.rs index 528d24ce8..7ed715bd6 100644 --- a/beacon_node/libp2p/src/service.rs +++ b/beacon_node/libp2p/src/service.rs @@ -1,18 +1,29 @@ +use crate::behaviour::Behaviour; use crate::NetworkConfig; -use libp2p::core::{muxing::StreamMuxer, nodes::Substream}; -use libp2p::gossipsub::{Gossipsub, GossipsubConfig, GossipsubEvent}; +use futures::prelude::*; +use libp2p::core::{ + muxing::StreamMuxerBox, + nodes::Substream, + transport::boxed::Boxed, + upgrade::{InboundUpgrade, InboundUpgradeExt, OutboundUpgrade, OutboundUpgradeExt}, +}; use libp2p::{build_tcp_ws_secio_mplex_yamux, core, secio, Transport}; -use libp2p::{core::swarm::NetworkBehaviour, PeerId, Swarm}; +use libp2p::{PeerId, Swarm}; use slog::debug; use std::error; +use std::io::{Error, ErrorKind}; +use std::time::Duration; /// The configuration and state of the libp2p components for the beacon node. 
pub struct Service { /// The libp2p Swarm handler. - swarm: String, + swarm: Swarm, Behaviour>>, /// This node's PeerId. local_peer_id: PeerId, } +//Swarm>>> + +//swarm: Swarm, Behaviour>>, impl Service { pub fn new(config: NetworkConfig, log: slog::Logger) -> Self { @@ -25,9 +36,9 @@ impl Service { // Set up the transport let transport = build_transport(local_private_key); // Set up gossipsub routing - let behaviour = build_behaviour(local_peer_id, config.gs_config); + let behaviour = Behaviour::new(local_peer_id.clone(), config.gs_config); // Set up Topology - let topology = local_peer_id; + let topology = local_peer_id.clone(); let swarm = Swarm::new(transport, behaviour, topology); @@ -42,28 +53,33 @@ impl Service { /// mplex or yamux as the multiplexing layer. fn build_transport( local_private_key: secio::SecioKeyPair, -) -> impl Transport< - Output = ( - PeerId, - impl core::muxing::StreamMuxer - + Send - + Sync, - ), - Error = impl error::Error + Send, - Listener = impl Send, - Dial = impl Send, - ListenerUpgrade = impl Send, -> + Clone { +) -> Boxed<(PeerId, StreamMuxerBox), Error> { // TODO: The Wire protocol currently doesn't specify encryption and this will need to be customised // in the future. - build_tcp_ws_secio_mplex_yamux(local_private_key) -} + let transport = libp2p::tcp::TcpConfig::new(); + let transport = libp2p::dns::DnsConfig::new(transport); + #[cfg(feature = "libp2p-websocket")] + let transport = { + let trans_clone = transport.clone(); + transport.or_transport(websocket::WsConfig::new(trans_clone)) + }; + transport + .with_upgrade(secio::SecioConfig::new(local_private_key)) + .and_then(move |out, endpoint| { + let peer_id = out.remote_key.into_peer_id(); + let peer_id2 = peer_id.clone(); + let upgrade = core::upgrade::SelectUpgrade::new( + libp2p::yamux::Config::default(), + libp2p::mplex::MplexConfig::new(), + ) + // TODO: use a single `.map` instead of two maps + .map_inbound(move |muxer| (peer_id, muxer)) + .map_outbound(move |muxer| (peer_id2, muxer)); -/// Builds the network behaviour for the libp2p Swarm. 
-fn build_behaviour( - local_peer_id: PeerId, - config: GossipsubConfig, -) -> impl NetworkBehaviour { - // TODO: Add Kademlia/Peer discovery - Gossipsub::new(local_peer_id, config) + core::upgrade::apply(out.stream, upgrade, endpoint) + .map(|(id, muxer)| (id, core::muxing::StreamMuxerBox::new(muxer))) + }) + .with_timeout(Duration::from_secs(20)) + .map_err(|err| Error::new(ErrorKind::Other, err)) + .boxed() } From 08b803b6e7d24ddb85eb4e6678647509eb562af0 Mon Sep 17 00:00:00 2001 From: Kirk Baird Date: Thu, 7 Mar 2019 17:50:00 +1100 Subject: [PATCH 018/154] Modifications to fuzz tests --- eth2/utils/ssz/fuzz/Cargo.toml | 12 ++++++++++++ .../fuzz_targets/fuzz_target_vec_address_decode.rs | 12 ++++++++++++ .../fuzz/fuzz_targets/fuzz_target_vec_bool_decode.rs | 10 ++++++++++ .../ssz/fuzz/fuzz_targets/fuzz_target_vec_decode.rs | 9 --------- .../ssz/fuzz/fuzz_targets/fuzz_target_vec_encode.rs | 2 +- .../fuzz/fuzz_targets/fuzz_target_vec_u64_decode.rs | 10 ++++++++++ 6 files changed, 45 insertions(+), 10 deletions(-) create mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_address_decode.rs create mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_bool_decode.rs create mode 100644 eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_u64_decode.rs diff --git a/eth2/utils/ssz/fuzz/Cargo.toml b/eth2/utils/ssz/fuzz/Cargo.toml index c76cbbbde..055d031a0 100644 --- a/eth2/utils/ssz/fuzz/Cargo.toml +++ b/eth2/utils/ssz/fuzz/Cargo.toml @@ -88,6 +88,18 @@ path = "fuzz_targets/fuzz_target_address_encode.rs" name = "fuzz_target_vec_decode" path = "fuzz_targets/fuzz_target_vec_decode.rs" +[[bin]] +name = "fuzz_target_vec_address_decode" +path = "fuzz_targets/fuzz_target_vec_address_decode.rs" + +[[bin]] +name = "fuzz_target_vec_u64_decode" +path = "fuzz_targets/fuzz_target_vec_u64_decode.rs" + +[[bin]] +name = "fuzz_target_vec_bool_decode" +path = "fuzz_targets/fuzz_target_vec_bool_decode.rs" + [[bin]] name = "fuzz_target_vec_encode" path = "fuzz_targets/fuzz_target_vec_encode.rs" diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_address_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_address_decode.rs new file mode 100644 index 000000000..6c686df1a --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_address_decode.rs @@ -0,0 +1,12 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ethereum_types; +extern crate ssz; + +use ethereum_types::{Address}; +use ssz::{DecodeError, Decodable}; + +// Fuzz ssz_decode() +fuzz_target!(|data: &[u8]| { + let _result: Result<(Vec
, usize), DecodeError> = Decodable::ssz_decode(data, 0); +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_bool_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_bool_decode.rs new file mode 100644 index 000000000..25017ef25 --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_bool_decode.rs @@ -0,0 +1,10 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::{DecodeError, Decodable}; + +// Fuzz ssz_decode() +fuzz_target!(|data: &[u8]| { + let _result: Result<(Vec, usize), DecodeError> = Decodable::ssz_decode(data, 0); +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_decode.rs index 048d19ee5..cc1dc09f5 100644 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_decode.rs +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_decode.rs @@ -9,13 +9,4 @@ use ssz::{DecodeError, Decodable}; // Fuzz ssz_decode() fuzz_target!(|data: &[u8]| { let _result: Result<(Vec, usize), DecodeError> = Decodable::ssz_decode(data, 0); - /* - let _result: Result<(Vec, usize), DecodeError> = Decodable::ssz_decode(data, 0); - let _result: Result<(Vec, usize), DecodeError> = Decodable::ssz_decode(data, 0); - let _result: Result<(Vec, usize), DecodeError> = Decodable::ssz_decode(data, 0); - let _result: Result<(Vec, usize), DecodeError> = Decodable::ssz_decode(data, 0); - let _result: Result<(Vec
, usize), DecodeError> = Decodable::ssz_decode(data, 0); - let _result: Result<(Vec, usize), DecodeError> = Decodable::ssz_decode(data, 0); - let _result: Result<(Vec, usize), DecodeError> = Decodable::ssz_decode(data, 0); - */ }); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_encode.rs index 6980e5d20..39500b782 100644 --- a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_encode.rs +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_encode.rs @@ -6,7 +6,7 @@ extern crate ssz; use ethereum_types::{Address, H256}; use ssz::SszStream; -// Fuzz ssz_decode() +// Fuzz ssz_encode() fuzz_target!(|data: &[u8]| { let mut ssz = SszStream::new(); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_u64_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_u64_decode.rs new file mode 100644 index 000000000..ee25a6378 --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_u64_decode.rs @@ -0,0 +1,10 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::{DecodeError, Decodable}; + +// Fuzz ssz_decode() +fuzz_target!(|data: &[u8]| { + let _result: Result<(Vec, usize), DecodeError> = Decodable::ssz_decode(data, 0); +}); From 3c517694282614dc46e510380c79d2e57164eab4 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 8 Mar 2019 11:07:30 +1100 Subject: [PATCH 019/154] Node listens on default port and connects to bootnodes. --- beacon_node/client/Cargo.toml | 1 + beacon_node/client/src/client_config.rs | 25 +++++++----- beacon_node/libp2p/Cargo.toml | 1 + beacon_node/libp2p/src/lib.rs | 2 + beacon_node/libp2p/src/network_config.rs | 22 ++++------- beacon_node/libp2p/src/service.rs | 48 +++++++++++++++++------- 6 files changed, 61 insertions(+), 38 deletions(-) diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 04f80e76b..46394ac91 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -7,6 +7,7 @@ edition = "2018" [dependencies] beacon_chain = { path = "../beacon_chain" } network = { path = "../network" } +libp2p = { path = "../libp2p" } sync = { path = "../sync" } db = { path = "../db" } fork_choice = { path = "../../eth2/fork_choice" } diff --git a/beacon_node/client/src/client_config.rs b/beacon_node/client/src/client_config.rs index 3f5fbab2f..18b39277a 100644 --- a/beacon_node/client/src/client_config.rs +++ b/beacon_node/client/src/client_config.rs @@ -1,10 +1,12 @@ use clap::ArgMatches; use db::DBType; use fork_choice::ForkChoiceAlgorithm; +use libp2p::multiaddr::ToMultiaddr; use network::NetworkConfig; use slog::error; use std::fs; use std::net::IpAddr; +use std::net::SocketAddr; use std::path::PathBuf; use types::ChainSpec; @@ -52,16 +54,6 @@ impl ClientConfig { // Network related args - // Custom listening address ipv4/ipv6 - // TODO: Handle list of addresses - if let Some(listen_address_str) = args.value_of("listen_address") { - if let Ok(listen_address) = listen_address_str.parse::() { - config.net_conf.listen_addresses = vec![listen_address]; - } else { - error!(log, "Invalid IP Address"; "Address" => listen_address_str); - return Err("Invalid IP Address"); - } - } // Custom p2p listen port if let Some(port_str) = args.value_of("port") { if let Ok(port) = port_str.parse::() { @@ -71,6 +63,19 @@ impl ClientConfig { return Err("Invalid port"); } } + // Custom listening address ipv4/ipv6 + // TODO: Handle list of addresses + if let Some(listen_address_str) = args.value_of("listen_address") { + if 
let Ok(listen_address) = listen_address_str.parse::() { + let multiaddr = SocketAddr::new(listen_address, config.net_conf.listen_port) + .to_multiaddr() + .expect("Invalid listen address format"); + config.net_conf.listen_addresses = vec![multiaddr]; + } else { + error!(log, "Invalid IP Address"; "Address" => listen_address_str); + return Err("Invalid IP Address"); + } + } // filesystem args diff --git a/beacon_node/libp2p/Cargo.toml b/beacon_node/libp2p/Cargo.toml index 9c4c6e7a5..d78ddd882 100644 --- a/beacon_node/libp2p/Cargo.toml +++ b/beacon_node/libp2p/Cargo.toml @@ -11,3 +11,4 @@ slog = "2.4.1" version = { path = "../version" } tokio = "0.1.16" futures = "0.1.25" +parity-multiaddr = "0.2.0" diff --git a/beacon_node/libp2p/src/lib.rs b/beacon_node/libp2p/src/lib.rs index 01dc42073..1a24165ba 100644 --- a/beacon_node/libp2p/src/lib.rs +++ b/beacon_node/libp2p/src/lib.rs @@ -6,6 +6,8 @@ mod behaviour; mod network_config; mod service; +pub use libp2p::multiaddr; +pub use libp2p::Multiaddr; pub use libp2p::{ gossipsub::{GossipsubConfig, GossipsubConfigBuilder}, PeerId, diff --git a/beacon_node/libp2p/src/network_config.rs b/beacon_node/libp2p/src/network_config.rs index 7cb1cf6bb..8eeb33861 100644 --- a/beacon_node/libp2p/src/network_config.rs +++ b/beacon_node/libp2p/src/network_config.rs @@ -1,20 +1,20 @@ use libp2p::gossipsub::{GossipsubConfig, GossipsubConfigBuilder}; use libp2p::secio; +use libp2p::Multiaddr; use std::fmt; -use std::net::IpAddr; #[derive(Clone)] /// Network configuration for lighthouse. pub struct NetworkConfig { //TODO: stubbing networking initial params, change in the future /// IP address to listen on. - pub listen_addresses: Vec, + pub listen_addresses: Vec, /// Listen port UDP/TCP. pub listen_port: u16, /// Gossipsub configuration parameters. pub gs_config: GossipsubConfig, /// List of nodes to initially connect to. - pub boot_nodes: Vec, + pub boot_nodes: Vec, /// Peer key related to this nodes PeerId. pub local_private_key: secio::SecioKeyPair, /// Client version @@ -24,21 +24,15 @@ pub struct NetworkConfig { impl Default for NetworkConfig { /// Generate a default network configuration. fn default() -> Self { - // hard-coded defaults - let bootnodes = ["127.0.0.1"]; - let default_port = 9000; - // TODO: Currently using ed25519 key pairs. Wire protocol specifies RSA. 
Waiting for this // PR to be merged to generate RSA keys: https://github.com/briansmith/ring/pull/733 - NetworkConfig { - listen_addresses: vec!["127.0.0.1".parse().expect("correct IP address")], - listen_port: default_port, + listen_addresses: vec!["/ip4/127.0.0.1/tcp/9000" + .parse() + .expect("is a correct multi-address")], + listen_port: 9000, gs_config: GossipsubConfigBuilder::new().build(), - boot_nodes: bootnodes - .iter() - .map(|s| s.parse().expect("Bootnodes must be IP addresses")) - .collect(), + boot_nodes: Vec::new(), local_private_key: secio::SecioKeyPair::ed25519_generated().unwrap(), client_version: version::version(), } diff --git a/beacon_node/libp2p/src/service.rs b/beacon_node/libp2p/src/service.rs index 7ed715bd6..8e1670701 100644 --- a/beacon_node/libp2p/src/service.rs +++ b/beacon_node/libp2p/src/service.rs @@ -5,12 +5,12 @@ use libp2p::core::{ muxing::StreamMuxerBox, nodes::Substream, transport::boxed::Boxed, - upgrade::{InboundUpgrade, InboundUpgradeExt, OutboundUpgrade, OutboundUpgradeExt}, + upgrade::{InboundUpgradeExt, OutboundUpgradeExt}, }; -use libp2p::{build_tcp_ws_secio_mplex_yamux, core, secio, Transport}; +use libp2p::multiaddr::Protocol; +use libp2p::{core, secio, Transport}; use libp2p::{PeerId, Swarm}; -use slog::debug; -use std::error; +use slog::{debug, info, warn}; use std::io::{Error, ErrorKind}; use std::time::Duration; @@ -21,9 +21,6 @@ pub struct Service { /// This node's PeerId. local_peer_id: PeerId, } -//Swarm>>> - -//swarm: Swarm, Behaviour>>, impl Service { pub fn new(config: NetworkConfig, log: slog::Logger) -> Self { @@ -33,14 +30,37 @@ impl Service { let local_peer_id = local_private_key.to_peer_id(); debug!(log, "Local peer id: {:?}", local_peer_id); - // Set up the transport - let transport = build_transport(local_private_key); - // Set up gossipsub routing - let behaviour = Behaviour::new(local_peer_id.clone(), config.gs_config); - // Set up Topology - let topology = local_peer_id.clone(); + let mut swarm = { + // Set up the transport + let transport = build_transport(local_private_key); + // Set up gossipsub routing + let behaviour = Behaviour::new(local_peer_id.clone(), config.gs_config); + // Set up Topology + let topology = local_peer_id.clone(); + Swarm::new(transport, behaviour, topology) + }; - let swarm = Swarm::new(transport, behaviour, topology); + // listen on all addresses + for address in &config.listen_addresses { + match Swarm::listen_on(&mut swarm, address.clone()) { + Ok(mut listen_addr) => { + listen_addr.append(Protocol::P2p(local_peer_id.clone().into())); + info!(log, "Listening on: {}", listen_addr); + } + Err(err) => warn!(log, "Cannot listen on: {} : {:?}", address, err), + }; + } + // connect to boot nodes - these are currently stored as multiadders + // Once we have discovery, can set to peerId + for bootnode in config.boot_nodes { + match Swarm::dial_addr(&mut swarm, bootnode.clone()) { + Ok(()) => debug!(log, "Dialing bootnode: {}", bootnode), + Err(err) => debug!( + log, + "Could not connect to bootnode: {} error: {:?}", bootnode, err + ), + }; + } Service { local_peer_id, From 21032334ac272afb4b34d89cb35d0d077bd0c5e9 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 8 Mar 2019 12:15:57 +1100 Subject: [PATCH 020/154] Adds bootnodes to chainspec. 
Handles type correctly --- beacon_node/client/Cargo.toml | 2 -- beacon_node/client/src/client_config.rs | 10 +++++--- beacon_node/libp2p/Cargo.toml | 2 +- beacon_node/libp2p/src/lib.rs | 4 ++-- beacon_node/libp2p/src/network_config.rs | 9 ++++--- beacon_node/libp2p/src/service.rs | 2 +- eth2/types/Cargo.toml | 1 + eth2/types/src/chain_spec.rs | 30 +++++++++++++++++++++++- eth2/types/src/lib.rs | 2 ++ 9 files changed, 49 insertions(+), 13 deletions(-) diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 46394ac91..8914a9e7e 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -7,13 +7,11 @@ edition = "2018" [dependencies] beacon_chain = { path = "../beacon_chain" } network = { path = "../network" } -libp2p = { path = "../libp2p" } sync = { path = "../sync" } db = { path = "../db" } fork_choice = { path = "../../eth2/fork_choice" } types = { path = "../../eth2/types" } slot_clock = { path = "../../eth2/utils/slot_clock" } - error-chain = "0.12.0" slog = "^2.2.3" tokio = "0.1.15" diff --git a/beacon_node/client/src/client_config.rs b/beacon_node/client/src/client_config.rs index 18b39277a..bc3a78279 100644 --- a/beacon_node/client/src/client_config.rs +++ b/beacon_node/client/src/client_config.rs @@ -1,13 +1,13 @@ use clap::ArgMatches; use db::DBType; use fork_choice::ForkChoiceAlgorithm; -use libp2p::multiaddr::ToMultiaddr; use network::NetworkConfig; use slog::error; use std::fs; use std::net::IpAddr; use std::net::SocketAddr; use std::path::PathBuf; +use types::multiaddr::ToMultiaddr; use types::ChainSpec; /// Stores the client configuration for this Lighthouse instance. @@ -32,11 +32,15 @@ impl Default for ClientConfig { }; fs::create_dir_all(&data_dir) .unwrap_or_else(|_| panic!("Unable to create {:?}", &data_dir)); + + let default_spec = ChainSpec::lighthouse_testnet(); + let default_net_conf = NetworkConfig::new(default_spec.boot_nodes.clone()); + Self { data_dir: data_dir.clone(), // default to foundation for chain specs - spec: ChainSpec::foundation(), - net_conf: NetworkConfig::default(), + spec: default_spec, + net_conf: default_net_conf, // default to bitwise LMD Ghost fork_choice: ForkChoiceAlgorithm::BitwiseLMDGhost, // default to memory db for now diff --git a/beacon_node/libp2p/Cargo.toml b/beacon_node/libp2p/Cargo.toml index d78ddd882..496d30268 100644 --- a/beacon_node/libp2p/Cargo.toml +++ b/beacon_node/libp2p/Cargo.toml @@ -7,8 +7,8 @@ edition = "2018" [dependencies] # SigP repository until PR is merged libp2p = { git = "https://github.com/SigP/rust-libp2p", branch = "gossipsub" } +types = { path = "../../eth2/types" } slog = "2.4.1" version = { path = "../version" } tokio = "0.1.16" futures = "0.1.25" -parity-multiaddr = "0.2.0" diff --git a/beacon_node/libp2p/src/lib.rs b/beacon_node/libp2p/src/lib.rs index 1a24165ba..f72725e49 100644 --- a/beacon_node/libp2p/src/lib.rs +++ b/beacon_node/libp2p/src/lib.rs @@ -6,11 +6,11 @@ mod behaviour; mod network_config; mod service; -pub use libp2p::multiaddr; -pub use libp2p::Multiaddr; pub use libp2p::{ gossipsub::{GossipsubConfig, GossipsubConfigBuilder}, PeerId, }; pub use network_config::NetworkConfig; pub use service::Service; +pub use types::multiaddr; +pub use types::Multiaddr; diff --git a/beacon_node/libp2p/src/network_config.rs b/beacon_node/libp2p/src/network_config.rs index 8eeb33861..7bab57dde 100644 --- a/beacon_node/libp2p/src/network_config.rs +++ b/beacon_node/libp2p/src/network_config.rs @@ -1,6 +1,6 @@ +use crate::Multiaddr; use 
libp2p::gossipsub::{GossipsubConfig, GossipsubConfigBuilder}; use libp2p::secio; -use libp2p::Multiaddr; use std::fmt; #[derive(Clone)] @@ -40,8 +40,11 @@ impl Default for NetworkConfig { } impl NetworkConfig { - pub fn new() -> Self { - NetworkConfig::default() + pub fn new(boot_nodes: Vec) -> Self { + let mut conf = NetworkConfig::default(); + conf.boot_nodes = boot_nodes; + + conf } } diff --git a/beacon_node/libp2p/src/service.rs b/beacon_node/libp2p/src/service.rs index 8e1670701..ee36cefd5 100644 --- a/beacon_node/libp2p/src/service.rs +++ b/beacon_node/libp2p/src/service.rs @@ -1,4 +1,5 @@ use crate::behaviour::Behaviour; +use crate::multiaddr::Protocol; use crate::NetworkConfig; use futures::prelude::*; use libp2p::core::{ @@ -7,7 +8,6 @@ use libp2p::core::{ transport::boxed::Boxed, upgrade::{InboundUpgradeExt, OutboundUpgradeExt}, }; -use libp2p::multiaddr::Protocol; use libp2p::{core, secio, Transport}; use libp2p::{PeerId, Swarm}; use slog::{debug, info, warn}; diff --git a/eth2/types/Cargo.toml b/eth2/types/Cargo.toml index ea1343dba..f8120d520 100644 --- a/eth2/types/Cargo.toml +++ b/eth2/types/Cargo.toml @@ -22,6 +22,7 @@ ssz = { path = "../utils/ssz" } ssz_derive = { path = "../utils/ssz_derive" } swap_or_not_shuffle = { path = "../utils/swap_or_not_shuffle" } test_random_derive = { path = "../utils/test_random_derive" } +libp2p = { git = "https://github.com/SigP/rust-libp2p", branch = "gossipsub" } [dev-dependencies] env_logger = "0.6.0" diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index 789bb6c0c..ef2c94d65 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -1,4 +1,4 @@ -use crate::{Address, Epoch, Fork, Hash256, Slot}; +use crate::{Address, Epoch, Fork, Hash256, Multiaddr, Slot}; use bls::Signature; const GWEI: u64 = 1_000_000_000; @@ -106,6 +106,12 @@ pub struct ChainSpec { domain_exit: u64, domain_randao: u64, domain_transfer: u64, + + /* + * Network specific parameters + * + */ + pub boot_nodes: Vec, } impl ChainSpec { @@ -232,9 +238,31 @@ impl ChainSpec { domain_exit: 3, domain_randao: 4, domain_transfer: 5, + + /* + * Boot nodes + */ + boot_nodes: vec![], } } + /// Returns a `ChainSpec` compatible with the Lighthouse testnet specification. + /// + /// Spec v0.4.0 + pub fn lighthouse_testnet() -> Self { + /* + * Lighthouse testnet bootnodes + */ + let boot_nodes = vec!["/ip4/127.0.0.1/tcp/9000" + .parse() + .expect("correct multiaddr")]; + + let mut standard_spec = ChainSpec::foundation(); + standard_spec.boot_nodes = boot_nodes; + + standard_spec + } + /// Returns a `ChainSpec` compatible with the specification suitable for 8 validators. 
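
Illustrative sketch only, not part of the patch: how the boot nodes added to the spec above are expected to reach the networking layer, mirroring the ClientConfig change in this commit. The crate and item paths are taken from the diffs; everything else is assumed.

    use network::NetworkConfig;
    use types::ChainSpec;

    fn main() {
        // The testnet spec now carries the default boot node multiaddrs.
        let spec = ChainSpec::lighthouse_testnet();
        // The network config is seeded from the spec instead of hard-coded IPs.
        let net_conf = NetworkConfig::new(spec.boot_nodes.clone());
        println!("boot nodes: {:?}", net_conf.boot_nodes);
    }
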
/// /// Spec v0.4.0 diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index 9bf60f2c9..555560090 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -73,3 +73,5 @@ pub type AttesterMap = HashMap<(u64, u64), Vec>; pub type ProposerMap = HashMap; pub use bls::{AggregatePublicKey, AggregateSignature, Keypair, PublicKey, Signature}; +pub use libp2p::multiaddr; +pub use libp2p::Multiaddr; From d4f3bab68dad9f3192d99d395ca4f65481d2c62a Mon Sep 17 00:00:00 2001 From: mjkeating Date: Fri, 8 Mar 2019 15:24:07 -0800 Subject: [PATCH 021/154] Updated TreeHash logic as per revised spec --- eth2/types/src/attestation.rs | 4 +- eth2/types/src/attestation_data.rs | 4 +- .../src/attestation_data_and_custody_bit.rs | 4 +- eth2/types/src/attester_slashing.rs | 4 +- eth2/types/src/beacon_block.rs | 4 +- eth2/types/src/beacon_block_body.rs | 4 +- eth2/types/src/beacon_state.rs | 54 +++++++------- eth2/types/src/beacon_state/tests.rs | 4 +- eth2/types/src/crosslink.rs | 4 +- eth2/types/src/deposit.rs | 4 +- eth2/types/src/deposit_data.rs | 4 +- eth2/types/src/deposit_input.rs | 4 +- eth2/types/src/eth1_data.rs | 4 +- eth2/types/src/eth1_data_vote.rs | 4 +- eth2/types/src/fork.rs | 4 +- eth2/types/src/pending_attestation.rs | 4 +- eth2/types/src/proposal.rs | 4 +- eth2/types/src/proposer_slashing.rs | 4 +- eth2/types/src/shard_reassignment_record.rs | 4 +- eth2/types/src/slashable_attestation.rs | 4 +- eth2/types/src/slot_epoch_macros.rs | 8 +-- eth2/types/src/test_utils/macros.rs | 4 +- eth2/types/src/transfer.rs | 4 +- eth2/types/src/validator.rs | 4 +- eth2/types/src/voluntary_exit.rs | 4 +- eth2/utils/bls/src/aggregate_signature.rs | 2 +- eth2/utils/bls/src/public_key.rs | 2 +- eth2/utils/bls/src/secret_key.rs | 2 +- eth2/utils/bls/src/signature.rs | 2 +- eth2/utils/boolean-bitfield/src/lib.rs | 4 +- eth2/utils/ssz/src/impl_tree_hash.rs | 26 +++---- eth2/utils/ssz/src/tree_hash.rs | 71 +++++++------------ eth2/utils/ssz_derive/src/lib.rs | 6 +- 33 files changed, 126 insertions(+), 143 deletions(-) diff --git a/eth2/types/src/attestation.rs b/eth2/types/src/attestation.rs index 03ef8ce48..67bff3d20 100644 --- a/eth2/types/src/attestation.rs +++ b/eth2/types/src/attestation.rs @@ -35,11 +35,11 @@ mod tests { } #[test] - pub fn test_hash_tree_root_internal() { + pub fn test_hash_tree_root() { let mut rng = XorShiftRng::from_seed([42; 16]); let original = Attestation::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/attestation_data.rs b/eth2/types/src/attestation_data.rs index 1dfadfb1d..5899ab52d 100644 --- a/eth2/types/src/attestation_data.rs +++ b/eth2/types/src/attestation_data.rs @@ -53,11 +53,11 @@ mod tests { } #[test] - pub fn test_hash_tree_root_internal() { + pub fn test_hash_tree_root() { let mut rng = XorShiftRng::from_seed([42; 16]); let original = AttestationData::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/attestation_data_and_custody_bit.rs b/eth2/types/src/attestation_data_and_custody_bit.rs index 83018c194..e9cf4bb67 100644 --- a/eth2/types/src/attestation_data_and_custody_bit.rs +++ b/eth2/types/src/attestation_data_and_custody_bit.rs @@ -42,11 +42,11 @@ mod test { } #[test] - pub fn test_hash_tree_root_internal() { + pub fn test_hash_tree_root() { let mut rng 
= XorShiftRng::from_seed([42; 16]); let original = AttestationDataAndCustodyBit::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/attester_slashing.rs b/eth2/types/src/attester_slashing.rs index 1cb671960..7b25e94c4 100644 --- a/eth2/types/src/attester_slashing.rs +++ b/eth2/types/src/attester_slashing.rs @@ -35,11 +35,11 @@ mod tests { } #[test] - pub fn test_hash_tree_root_internal() { + pub fn test_hash_tree_root() { let mut rng = XorShiftRng::from_seed([42; 16]); let original = AttesterSlashing::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/beacon_block.rs b/eth2/types/src/beacon_block.rs index 2e1e24ef7..6c2b15888 100644 --- a/eth2/types/src/beacon_block.rs +++ b/eth2/types/src/beacon_block.rs @@ -69,11 +69,11 @@ mod tests { } #[test] - pub fn test_hash_tree_root_internal() { + pub fn test_hash_tree_root() { let mut rng = XorShiftRng::from_seed([42; 16]); let original = BeaconBlock::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/beacon_block_body.rs b/eth2/types/src/beacon_block_body.rs index e7dec2e4b..2a43f289c 100644 --- a/eth2/types/src/beacon_block_body.rs +++ b/eth2/types/src/beacon_block_body.rs @@ -36,11 +36,11 @@ mod tests { } #[test] - pub fn test_hash_tree_root_internal() { + pub fn test_hash_tree_root() { let mut rng = XorShiftRng::from_seed([42; 16]); let original = BeaconBlockBody::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 809408b32..bbba5d70b 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -1262,42 +1262,42 @@ impl Decodable for BeaconState { } impl TreeHash for BeaconState { - fn hash_tree_root_internal(&self) -> Vec { + fn hash_tree_root(&self) -> Vec { let mut result: Vec = vec![]; - result.append(&mut self.slot.hash_tree_root_internal()); - result.append(&mut self.genesis_time.hash_tree_root_internal()); - result.append(&mut self.fork.hash_tree_root_internal()); - result.append(&mut self.validator_registry.hash_tree_root_internal()); - result.append(&mut self.validator_balances.hash_tree_root_internal()); + result.append(&mut self.slot.hash_tree_root()); + result.append(&mut self.genesis_time.hash_tree_root()); + result.append(&mut self.fork.hash_tree_root()); + result.append(&mut self.validator_registry.hash_tree_root()); + result.append(&mut self.validator_balances.hash_tree_root()); result.append( &mut self .validator_registry_update_epoch - .hash_tree_root_internal(), + .hash_tree_root(), ); - result.append(&mut self.latest_randao_mixes.hash_tree_root_internal()); + result.append(&mut self.latest_randao_mixes.hash_tree_root()); result.append( &mut self .previous_shuffling_start_shard - .hash_tree_root_internal(), + .hash_tree_root(), ); - result.append(&mut self.current_shuffling_start_shard.hash_tree_root_internal()); - result.append(&mut self.previous_shuffling_epoch.hash_tree_root_internal()); - result.append(&mut 
self.current_shuffling_epoch.hash_tree_root_internal()); - result.append(&mut self.previous_shuffling_seed.hash_tree_root_internal()); - result.append(&mut self.current_shuffling_seed.hash_tree_root_internal()); - result.append(&mut self.previous_justified_epoch.hash_tree_root_internal()); - result.append(&mut self.justified_epoch.hash_tree_root_internal()); - result.append(&mut self.justification_bitfield.hash_tree_root_internal()); - result.append(&mut self.finalized_epoch.hash_tree_root_internal()); - result.append(&mut self.latest_crosslinks.hash_tree_root_internal()); - result.append(&mut self.latest_block_roots.hash_tree_root_internal()); - result.append(&mut self.latest_active_index_roots.hash_tree_root_internal()); - result.append(&mut self.latest_slashed_balances.hash_tree_root_internal()); - result.append(&mut self.latest_attestations.hash_tree_root_internal()); - result.append(&mut self.batched_block_roots.hash_tree_root_internal()); - result.append(&mut self.latest_eth1_data.hash_tree_root_internal()); - result.append(&mut self.eth1_data_votes.hash_tree_root_internal()); - result.append(&mut self.deposit_index.hash_tree_root_internal()); + result.append(&mut self.current_shuffling_start_shard.hash_tree_root()); + result.append(&mut self.previous_shuffling_epoch.hash_tree_root()); + result.append(&mut self.current_shuffling_epoch.hash_tree_root()); + result.append(&mut self.previous_shuffling_seed.hash_tree_root()); + result.append(&mut self.current_shuffling_seed.hash_tree_root()); + result.append(&mut self.previous_justified_epoch.hash_tree_root()); + result.append(&mut self.justified_epoch.hash_tree_root()); + result.append(&mut self.justification_bitfield.hash_tree_root()); + result.append(&mut self.finalized_epoch.hash_tree_root()); + result.append(&mut self.latest_crosslinks.hash_tree_root()); + result.append(&mut self.latest_block_roots.hash_tree_root()); + result.append(&mut self.latest_active_index_roots.hash_tree_root()); + result.append(&mut self.latest_slashed_balances.hash_tree_root()); + result.append(&mut self.latest_attestations.hash_tree_root()); + result.append(&mut self.batched_block_roots.hash_tree_root()); + result.append(&mut self.latest_eth1_data.hash_tree_root()); + result.append(&mut self.eth1_data_votes.hash_tree_root()); + result.append(&mut self.deposit_index.hash_tree_root()); hash(&result) } } diff --git a/eth2/types/src/beacon_state/tests.rs b/eth2/types/src/beacon_state/tests.rs index 40bfd146c..eca54bf36 100644 --- a/eth2/types/src/beacon_state/tests.rs +++ b/eth2/types/src/beacon_state/tests.rs @@ -72,11 +72,11 @@ pub fn test_ssz_round_trip() { } #[test] -pub fn test_hash_tree_root_internal() { +pub fn test_hash_tree_root() { let mut rng = XorShiftRng::from_seed([42; 16]); let original = BeaconState::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/crosslink.rs b/eth2/types/src/crosslink.rs index f49195a75..ed31a80d5 100644 --- a/eth2/types/src/crosslink.rs +++ b/eth2/types/src/crosslink.rs @@ -34,11 +34,11 @@ mod tests { } #[test] - pub fn test_hash_tree_root_internal() { + pub fn test_hash_tree_root() { let mut rng = XorShiftRng::from_seed([42; 16]); let original = Crosslink::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/deposit.rs 
b/eth2/types/src/deposit.rs index 2e69ea599..dcd82e550 100644 --- a/eth2/types/src/deposit.rs +++ b/eth2/types/src/deposit.rs @@ -33,11 +33,11 @@ mod tests { } #[test] - pub fn test_hash_tree_root_internal() { + pub fn test_hash_tree_root() { let mut rng = XorShiftRng::from_seed([42; 16]); let original = Deposit::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/deposit_data.rs b/eth2/types/src/deposit_data.rs index 1eb2722a9..d90b07b09 100644 --- a/eth2/types/src/deposit_data.rs +++ b/eth2/types/src/deposit_data.rs @@ -33,11 +33,11 @@ mod tests { } #[test] - pub fn test_hash_tree_root_internal() { + pub fn test_hash_tree_root() { let mut rng = XorShiftRng::from_seed([42; 16]); let original = DepositData::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/deposit_input.rs b/eth2/types/src/deposit_input.rs index c4c79c3d1..1dda64ce2 100644 --- a/eth2/types/src/deposit_input.rs +++ b/eth2/types/src/deposit_input.rs @@ -34,11 +34,11 @@ mod tests { } #[test] - pub fn test_hash_tree_root_internal() { + pub fn test_hash_tree_root() { let mut rng = XorShiftRng::from_seed([42; 16]); let original = DepositInput::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/eth1_data.rs b/eth2/types/src/eth1_data.rs index 2c817ca38..88f91e3a9 100644 --- a/eth2/types/src/eth1_data.rs +++ b/eth2/types/src/eth1_data.rs @@ -32,11 +32,11 @@ mod tests { } #[test] - pub fn test_hash_tree_root_internal() { + pub fn test_hash_tree_root() { let mut rng = XorShiftRng::from_seed([42; 16]); let original = Eth1Data::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/eth1_data_vote.rs b/eth2/types/src/eth1_data_vote.rs index 898145575..bd8266ce3 100644 --- a/eth2/types/src/eth1_data_vote.rs +++ b/eth2/types/src/eth1_data_vote.rs @@ -32,11 +32,11 @@ mod tests { } #[test] - pub fn test_hash_tree_root_internal() { + pub fn test_hash_tree_root() { let mut rng = XorShiftRng::from_seed([42; 16]); let original = Eth1DataVote::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/fork.rs b/eth2/types/src/fork.rs index 0acd6da90..9cf6ae396 100644 --- a/eth2/types/src/fork.rs +++ b/eth2/types/src/fork.rs @@ -44,11 +44,11 @@ mod tests { } #[test] - pub fn test_hash_tree_root_internal() { + pub fn test_hash_tree_root() { let mut rng = XorShiftRng::from_seed([42; 16]); let original = Fork::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/pending_attestation.rs b/eth2/types/src/pending_attestation.rs index 0430d18ba..c1293546e 100644 --- a/eth2/types/src/pending_attestation.rs +++ b/eth2/types/src/pending_attestation.rs @@ -34,11 +34,11 @@ mod tests { } #[test] - pub fn test_hash_tree_root_internal() { + pub fn test_hash_tree_root() 
{ let mut rng = XorShiftRng::from_seed([42; 16]); let original = PendingAttestation::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/proposal.rs b/eth2/types/src/proposal.rs index b1fd737a0..dda544a19 100644 --- a/eth2/types/src/proposal.rs +++ b/eth2/types/src/proposal.rs @@ -37,11 +37,11 @@ mod tests { } #[test] - pub fn test_hash_tree_root_internal() { + pub fn test_hash_tree_root() { let mut rng = XorShiftRng::from_seed([42; 16]); let original = Proposal::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/proposer_slashing.rs b/eth2/types/src/proposer_slashing.rs index f86e7f3a8..307a4b0a0 100644 --- a/eth2/types/src/proposer_slashing.rs +++ b/eth2/types/src/proposer_slashing.rs @@ -37,11 +37,11 @@ mod tests { } #[test] - pub fn test_hash_tree_root_internal() { + pub fn test_hash_tree_root() { let mut rng = XorShiftRng::from_seed([42; 16]); let original = ProposerSlashing::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/shard_reassignment_record.rs b/eth2/types/src/shard_reassignment_record.rs index f5dfa8676..d8595b69d 100644 --- a/eth2/types/src/shard_reassignment_record.rs +++ b/eth2/types/src/shard_reassignment_record.rs @@ -29,11 +29,11 @@ mod tests { } #[test] - pub fn test_hash_tree_root_internal() { + pub fn test_hash_tree_root() { let mut rng = XorShiftRng::from_seed([42; 16]); let original = ShardReassignmentRecord::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/slashable_attestation.rs b/eth2/types/src/slashable_attestation.rs index 20ba76cdb..2c4bde8db 100644 --- a/eth2/types/src/slashable_attestation.rs +++ b/eth2/types/src/slashable_attestation.rs @@ -132,11 +132,11 @@ mod tests { } #[test] - pub fn test_hash_tree_root_internal() { + pub fn test_hash_tree_root() { let mut rng = XorShiftRng::from_seed([42; 16]); let original = SlashableAttestation::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/slot_epoch_macros.rs b/eth2/types/src/slot_epoch_macros.rs index 2148b6cc2..4b2332baf 100644 --- a/eth2/types/src/slot_epoch_macros.rs +++ b/eth2/types/src/slot_epoch_macros.rs @@ -207,9 +207,9 @@ macro_rules! impl_ssz { } impl TreeHash for $type { - fn hash_tree_root_internal(&self) -> Vec { + fn hash_tree_root(&self) -> Vec { let mut result: Vec = vec![]; - result.append(&mut self.0.hash_tree_root_internal()); + result.append(&mut self.0.hash_tree_root()); hash(&result) } } @@ -543,11 +543,11 @@ macro_rules! 
ssz_tests { } #[test] - pub fn test_hash_tree_root_internal() { + pub fn test_hash_tree_root() { let mut rng = XorShiftRng::from_seed([42; 16]); let original = $type::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/test_utils/macros.rs b/eth2/types/src/test_utils/macros.rs index b7c0a6522..f5b2fd87c 100644 --- a/eth2/types/src/test_utils/macros.rs +++ b/eth2/types/src/test_utils/macros.rs @@ -17,14 +17,14 @@ macro_rules! ssz_tests { } #[test] - pub fn test_hash_tree_root_internal() { + pub fn test_hash_tree_root() { use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use ssz::TreeHash; let mut rng = XorShiftRng::from_seed([42; 16]); let original = $type::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/transfer.rs b/eth2/types/src/transfer.rs index 0382dee11..b3c283fa2 100644 --- a/eth2/types/src/transfer.rs +++ b/eth2/types/src/transfer.rs @@ -39,11 +39,11 @@ mod tests { } #[test] - pub fn test_hash_tree_root_internal() { + pub fn test_hash_tree_root() { let mut rng = XorShiftRng::from_seed([42; 16]); let original = Transfer::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/validator.rs b/eth2/types/src/validator.rs index 43701ca05..578c4db76 100644 --- a/eth2/types/src/validator.rs +++ b/eth2/types/src/validator.rs @@ -91,11 +91,11 @@ mod tests { } #[test] - pub fn test_hash_tree_root_internal() { + pub fn test_hash_tree_root() { let mut rng = XorShiftRng::from_seed([42; 16]); let original = Validator::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/voluntary_exit.rs b/eth2/types/src/voluntary_exit.rs index 58c3ae4c2..36b5597f0 100644 --- a/eth2/types/src/voluntary_exit.rs +++ b/eth2/types/src/voluntary_exit.rs @@ -34,11 +34,11 @@ mod tests { } #[test] - pub fn test_hash_tree_root_internal() { + pub fn test_hash_tree_root() { let mut rng = XorShiftRng::from_seed([42; 16]); let original = VoluntaryExit::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/utils/bls/src/aggregate_signature.rs b/eth2/utils/bls/src/aggregate_signature.rs index 2d8776353..3ebb6f15e 100644 --- a/eth2/utils/bls/src/aggregate_signature.rs +++ b/eth2/utils/bls/src/aggregate_signature.rs @@ -95,7 +95,7 @@ impl Serialize for AggregateSignature { } impl TreeHash for AggregateSignature { - fn hash_tree_root_internal(&self) -> Vec { + fn hash_tree_root(&self) -> Vec { hash(&self.0.as_bytes()) } } diff --git a/eth2/utils/bls/src/public_key.rs b/eth2/utils/bls/src/public_key.rs index c7fd526a0..0c2ad81bb 100644 --- a/eth2/utils/bls/src/public_key.rs +++ b/eth2/utils/bls/src/public_key.rs @@ -66,7 +66,7 @@ impl Serialize for PublicKey { } impl TreeHash for PublicKey { - fn hash_tree_root_internal(&self) -> Vec { + fn hash_tree_root(&self) -> Vec { hash(&self.0.as_bytes()) } } diff --git a/eth2/utils/bls/src/secret_key.rs b/eth2/utils/bls/src/secret_key.rs 
index f2d54f4ac..4ff9f8684 100644 --- a/eth2/utils/bls/src/secret_key.rs +++ b/eth2/utils/bls/src/secret_key.rs @@ -41,7 +41,7 @@ impl Decodable for SecretKey { } impl TreeHash for SecretKey { - fn hash_tree_root_internal(&self) -> Vec { + fn hash_tree_root(&self) -> Vec { self.0.as_bytes().clone() } } diff --git a/eth2/utils/bls/src/signature.rs b/eth2/utils/bls/src/signature.rs index c0c31ef27..23b0c0834 100644 --- a/eth2/utils/bls/src/signature.rs +++ b/eth2/utils/bls/src/signature.rs @@ -73,7 +73,7 @@ impl Decodable for Signature { } impl TreeHash for Signature { - fn hash_tree_root_internal(&self) -> Vec { + fn hash_tree_root(&self) -> Vec { hash(&self.0.as_bytes()) } } diff --git a/eth2/utils/boolean-bitfield/src/lib.rs b/eth2/utils/boolean-bitfield/src/lib.rs index fb3a78e7a..a0fce1f0a 100644 --- a/eth2/utils/boolean-bitfield/src/lib.rs +++ b/eth2/utils/boolean-bitfield/src/lib.rs @@ -187,8 +187,8 @@ impl Serialize for BooleanBitfield { } impl ssz::TreeHash for BooleanBitfield { - fn hash_tree_root_internal(&self) -> Vec { - self.to_bytes().hash_tree_root_internal() + fn hash_tree_root(&self) -> Vec { + self.to_bytes().hash_tree_root() } } diff --git a/eth2/utils/ssz/src/impl_tree_hash.rs b/eth2/utils/ssz/src/impl_tree_hash.rs index 54bd7c139..03976f637 100644 --- a/eth2/utils/ssz/src/impl_tree_hash.rs +++ b/eth2/utils/ssz/src/impl_tree_hash.rs @@ -3,55 +3,55 @@ use super::{merkle_hash, ssz_encode, TreeHash}; use hashing::hash; impl TreeHash for u8 { - fn hash_tree_root_internal(&self) -> Vec { + fn hash_tree_root(&self) -> Vec { ssz_encode(self) } } impl TreeHash for u16 { - fn hash_tree_root_internal(&self) -> Vec { + fn hash_tree_root(&self) -> Vec { ssz_encode(self) } } impl TreeHash for u32 { - fn hash_tree_root_internal(&self) -> Vec { + fn hash_tree_root(&self) -> Vec { ssz_encode(self) } } impl TreeHash for u64 { - fn hash_tree_root_internal(&self) -> Vec { + fn hash_tree_root(&self) -> Vec { ssz_encode(self) } } impl TreeHash for usize { - fn hash_tree_root_internal(&self) -> Vec { + fn hash_tree_root(&self) -> Vec { ssz_encode(self) } } impl TreeHash for bool { - fn hash_tree_root_internal(&self) -> Vec { + fn hash_tree_root(&self) -> Vec { ssz_encode(self) } } impl TreeHash for Address { - fn hash_tree_root_internal(&self) -> Vec { + fn hash_tree_root(&self) -> Vec { ssz_encode(self) } } impl TreeHash for H256 { - fn hash_tree_root_internal(&self) -> Vec { + fn hash_tree_root(&self) -> Vec { ssz_encode(self) } } impl TreeHash for [u8] { - fn hash_tree_root_internal(&self) -> Vec { + fn hash_tree_root(&self) -> Vec { if self.len() > 32 { return hash(&self); } @@ -63,12 +63,12 @@ impl TreeHash for Vec where T: TreeHash, { - /// Returns the merkle_hash of a list of hash_tree_root_internal values created + /// Returns the merkle_hash of a list of hash_tree_root values created /// from the given list. /// Note: A byte vector, Vec, must be converted to a slice (as_slice()) /// to be handled properly (i.e. hashed) as byte array. 
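
Illustrative sketch only, not part of the patch: what the doc comment above means in practice. Tree-hashing a list goes through merkle_hash, so the output is always a single 32-byte root regardless of element count; the element type here is an arbitrary choice.

    use ssz::TreeHash;

    fn main() {
        let values: Vec<u64> = vec![1, 2, 3, 4, 5, 6, 7];
        // Each element is tree-hashed, packed into 32-byte chunks and merklized.
        let root = values.hash_tree_root();
        assert_eq!(root.len(), 32);
        println!("list root: {:?}", root);
    }
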
- fn hash_tree_root_internal(&self) -> Vec { - let mut tree_hashes = self.iter().map(|x| x.hash_tree_root_internal()).collect(); + fn hash_tree_root(&self) -> Vec { + let mut tree_hashes = self.iter().map(|x| x.hash_tree_root()).collect(); merkle_hash(&mut tree_hashes) } } @@ -79,7 +79,7 @@ mod tests { #[test] fn test_impl_tree_hash_vec() { - let result = vec![1u32, 2, 3, 4, 5, 6, 7].hash_tree_root_internal(); + let result = vec![1u32, 2, 3, 4, 5, 6, 7].hash_tree_root(); assert_eq!(result.len(), 32); } } diff --git a/eth2/utils/ssz/src/tree_hash.rs b/eth2/utils/ssz/src/tree_hash.rs index 7c1ab35e9..85e56924c 100644 --- a/eth2/utils/ssz/src/tree_hash.rs +++ b/eth2/utils/ssz/src/tree_hash.rs @@ -1,44 +1,31 @@ use hashing::hash; -const SSZ_CHUNK_SIZE: usize = 128; +const BYTES_PER_CHUNK: usize = 32; const HASHSIZE: usize = 32; pub trait TreeHash { - fn hash_tree_root_internal(&self) -> Vec; - fn hash_tree_root(&self) -> Vec { - let mut result = self.hash_tree_root_internal(); - zpad(&mut result, HASHSIZE); - result - } + fn hash_tree_root(&self) -> Vec; } /// Returns a 32 byte hash of 'list' - a vector of byte vectors. /// Note that this will consume 'list'. pub fn merkle_hash(list: &mut Vec>) -> Vec { // flatten list - let (mut chunk_size, mut chunkz) = list_to_blob(list); + let mut chunkz = list_to_blob(list); // get data_len as bytes. It will hashed will the merkle root let mut datalen = list.len().to_le_bytes().to_vec(); zpad(&mut datalen, 32); - // Tree-hash + // merklelize while chunkz.len() > HASHSIZE { let mut new_chunkz: Vec = Vec::new(); - for two_chunks in chunkz.chunks(chunk_size * 2) { - if two_chunks.len() == chunk_size { - // Odd number of chunks - let mut c = two_chunks.to_vec(); - c.append(&mut vec![0; SSZ_CHUNK_SIZE]); - new_chunkz.append(&mut hash(&c)); - } else { - // Hash two chuncks together - new_chunkz.append(&mut hash(two_chunks)); - } + for two_chunks in chunkz.chunks(BYTES_PER_CHUNK * 2) { + // Hash two chuncks together + new_chunkz.append(&mut hash(two_chunks)); } - chunk_size = HASHSIZE; chunkz = new_chunkz; } @@ -46,17 +33,13 @@ pub fn merkle_hash(list: &mut Vec>) -> Vec { hash(&chunkz) } -fn list_to_blob(list: &mut Vec>) -> (usize, Vec) { - let chunk_size = if list.is_empty() || list[0].len() < SSZ_CHUNK_SIZE { - SSZ_CHUNK_SIZE - } else { - list[0].len() - }; - +fn list_to_blob(list: &mut Vec>) -> Vec { + // pack - fit as many many items per chunk as we can and then + // right pad to BYTES_PER_CHUNCK let (items_per_chunk, chunk_count) = if list.is_empty() { (1, 1) } else { - let items_per_chunk = SSZ_CHUNK_SIZE / list[0].len(); + let items_per_chunk = BYTES_PER_CHUNK / list[0].len(); let chunk_count = list.len() / items_per_chunk; (items_per_chunk, chunk_count) }; @@ -64,20 +47,20 @@ fn list_to_blob(list: &mut Vec>) -> (usize, Vec) { let mut chunkz = Vec::new(); if list.is_empty() { // handle and empty list - chunkz.append(&mut vec![0; SSZ_CHUNK_SIZE]); - } else if list[0].len() <= SSZ_CHUNK_SIZE { + chunkz.append(&mut vec![0; BYTES_PER_CHUNK * 2]); + } else if list[0].len() <= BYTES_PER_CHUNK { // just create a blob here; we'll divide into // chunked slices when we merklize - let mut chunk = Vec::with_capacity(chunk_size); + let mut chunk = Vec::with_capacity(BYTES_PER_CHUNK); let mut item_count_in_chunk = 0; - chunkz.reserve(chunk_count * chunk_size); + chunkz.reserve(chunk_count * BYTES_PER_CHUNK); for item in list.iter_mut() { item_count_in_chunk += 1; chunk.append(item); // completed chunk? 
if item_count_in_chunk == items_per_chunk { - zpad(&mut chunk, chunk_size); + zpad(&mut chunk, BYTES_PER_CHUNK); chunkz.append(&mut chunk); item_count_in_chunk = 0; } @@ -85,18 +68,18 @@ fn list_to_blob(list: &mut Vec>) -> (usize, Vec) { // left-over uncompleted chunk? if item_count_in_chunk != 0 { - zpad(&mut chunk, chunk_size); + zpad(&mut chunk, BYTES_PER_CHUNK); chunkz.append(&mut chunk); } - } else { - // chunks larger than SSZ_CHUNK_SIZE - chunkz.reserve(chunk_count * chunk_size); - for item in list.iter_mut() { - chunkz.append(item); - } } - (chunk_size, chunkz) + // extend the number of chunks to a power of two if necessary + if !chunk_count.is_power_of_two() { + let zero_chunks_count = chunk_count.next_power_of_two() - chunk_count; + chunkz.append(&mut vec![0; zero_chunks_count * BYTES_PER_CHUNK]); + } + + chunkz } /// right pads with zeros making 'bytes' 'size' in length @@ -112,9 +95,9 @@ mod tests { #[test] fn test_merkle_hash() { - let data1 = vec![1; 100]; - let data2 = vec![2; 100]; - let data3 = vec![3; 100]; + let data1 = vec![1; 32]; + let data2 = vec![2; 32]; + let data3 = vec![3; 32]; let mut list = vec![data1, data2, data3]; let result = merkle_hash(&mut list); diff --git a/eth2/utils/ssz_derive/src/lib.rs b/eth2/utils/ssz_derive/src/lib.rs index 0d2e17f76..a7802a274 100644 --- a/eth2/utils/ssz_derive/src/lib.rs +++ b/eth2/utils/ssz_derive/src/lib.rs @@ -146,10 +146,10 @@ pub fn ssz_tree_hash_derive(input: TokenStream) -> TokenStream { let output = quote! { impl ssz::TreeHash for #name { - fn hash_tree_root_internal(&self) -> Vec { + fn hash_tree_root(&self) -> Vec { let mut list: Vec> = Vec::new(); #( - list.push(self.#field_idents.hash_tree_root_internal()); + list.push(self.#field_idents.hash_tree_root()); )* ssz::merkle_hash(&mut list) @@ -224,7 +224,7 @@ pub fn ssz_signed_root_derive(input: TokenStream) -> TokenStream { fn signed_root(&self) -> Vec { let mut list: Vec> = Vec::new(); #( - list.push(self.#field_idents.hash_tree_root_internal()); + list.push(self.#field_idents.hash_tree_root()); )* ssz::merkle_hash(&mut list) From 7db2b5187689f7b6cd1d80994f2a62ba304dd1fe Mon Sep 17 00:00:00 2001 From: mjkeating Date: Fri, 8 Mar 2019 16:37:01 -0800 Subject: [PATCH 022/154] ran cargo fmt --- eth2/types/src/beacon_state.rs | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index bbba5d70b..c68aa53c3 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -1269,17 +1269,9 @@ impl TreeHash for BeaconState { result.append(&mut self.fork.hash_tree_root()); result.append(&mut self.validator_registry.hash_tree_root()); result.append(&mut self.validator_balances.hash_tree_root()); - result.append( - &mut self - .validator_registry_update_epoch - .hash_tree_root(), - ); + result.append(&mut self.validator_registry_update_epoch.hash_tree_root()); result.append(&mut self.latest_randao_mixes.hash_tree_root()); - result.append( - &mut self - .previous_shuffling_start_shard - .hash_tree_root(), - ); + result.append(&mut self.previous_shuffling_start_shard.hash_tree_root()); result.append(&mut self.current_shuffling_start_shard.hash_tree_root()); result.append(&mut self.previous_shuffling_epoch.hash_tree_root()); result.append(&mut self.current_shuffling_epoch.hash_tree_root()); From 5f51c6d60a4c0495e7b99024f00ecbfebbe73c5b Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 11 Mar 2019 15:09:57 +1100 Subject: [PATCH 023/154] Add editorconfig template --- 
.editorconfig | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 .editorconfig diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 000000000..a14dd7a51 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,9 @@ +root = true +[*] +indent_style=space +indent_size=4 +end_of_line=lf +charset=utf-8 +trim_trailing_whitespace=true +max_line_length=100 +insert_final_newline=false From ae983a9347d9a275ee90aeb044d0a6f2d4545ffa Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 12 Mar 2019 17:28:11 +1100 Subject: [PATCH 024/154] Basic networking service with channel --- beacon_node/client/src/lib.rs | 13 ++-- beacon_node/client/src/notifier.rs | 15 +++- beacon_node/libp2p/Cargo.toml | 1 + beacon_node/libp2p/src/behaviour.rs | 34 ++++++-- beacon_node/libp2p/src/error.rs | 8 ++ beacon_node/libp2p/src/lib.rs | 4 +- beacon_node/libp2p/src/service.rs | 46 +++++++++-- beacon_node/network/Cargo.toml | 1 + beacon_node/network/src/error.rs | 7 +- beacon_node/network/src/lib.rs | 1 + beacon_node/network/src/messages.rs | 5 ++ beacon_node/network/src/service.rs | 116 +++++++++++++++++++++++++--- beacon_node/src/run.rs | 2 + 13 files changed, 224 insertions(+), 29 deletions(-) create mode 100644 beacon_node/libp2p/src/error.rs diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index d0b096416..6600c9e39 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -22,9 +22,9 @@ use tokio::runtime::TaskExecutor; pub struct Client { config: ClientConfig, // beacon_chain: Arc>, - network: Option>, - exit: exit_future::Exit, - exit_signal: Option, + pub network: Arc, + pub exit: exit_future::Exit, + pub exit_signal: Signal, log: slog::Logger, phantom: PhantomData, } @@ -44,14 +44,15 @@ impl Client { // TODO: Add beacon_chain reference to network parameters let network_config = config.net_conf.clone(); let network_logger = log.new(o!("Service" => "Network")); - let (network, network_send) = NetworkService::new(network_config, network_logger)?; + let (network, network_send) = + NetworkService::new(network_config, executor, network_logger)?; Ok(Client { config, exit, - exit_signal: Some(exit_signal), + exit_signal: exit_signal, log, - network: Some(network), + network: network, phantom: PhantomData, }) } diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index dd38701c9..6b52e670a 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -4,9 +4,10 @@ use db::ClientDB; use exit_future::Exit; use fork_choice::ForkChoice; use futures::{Future, Stream}; +use network::NodeMessage; use slog::{debug, info, o}; use slot_clock::SlotClock; -use std::sync::Arc; +use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; use tokio::runtime::TaskExecutor; use tokio::timer::Interval; @@ -19,9 +20,21 @@ pub fn run(client: &Client, executor: TaskExecutor, exit: Exi let log = client.log.new(o!("Service" => "Notifier")); + // TODO: Debugging only + let counter = Arc::new(Mutex::new(0)); + let network = client.network.clone(); + // build heartbeat logic here let heartbeat = move |_| { info!(log, "Temp heartbeat output"); + let mut count = counter.lock().unwrap(); + *count += 1; + + if *count % 5 == 0 { + debug!(log, "Sending Message"); + network.send_message(String::from("Testing network channel")) + } + Ok(()) }; diff --git a/beacon_node/libp2p/Cargo.toml b/beacon_node/libp2p/Cargo.toml index 496d30268..ecd91e170 100644 --- a/beacon_node/libp2p/Cargo.toml +++ b/beacon_node/libp2p/Cargo.toml @@ 
-12,3 +12,4 @@ slog = "2.4.1" version = { path = "../version" } tokio = "0.1.16" futures = "0.1.25" +error-chain = "0.12.0" diff --git a/beacon_node/libp2p/src/behaviour.rs b/beacon_node/libp2p/src/behaviour.rs index 0c9aae16e..be12011dd 100644 --- a/beacon_node/libp2p/src/behaviour.rs +++ b/beacon_node/libp2p/src/behaviour.rs @@ -1,7 +1,7 @@ use futures::prelude::*; use libp2p::{ core::swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess}, - gossipsub::{Gossipsub, GossipsubConfig, GossipsubEvent, GossipsubRpc}, + gossipsub::{Gossipsub, GossipsubConfig, GossipsubEvent}, tokio_io::{AsyncRead, AsyncWrite}, NetworkBehaviour, PeerId, }; @@ -9,13 +9,14 @@ use libp2p::{ /// Builds the network behaviour for the libp2p Swarm. /// Implements gossipsub message routing. #[derive(NetworkBehaviour)] +#[behaviour(out_event = "BehaviourEvent", poll_method = "poll")] pub struct Behaviour { gossipsub: Gossipsub, // TODO: Add Kademlia for peer discovery /// The events generated by this behaviour to be consumed in the swarm poll. // We use gossipsub events for now, generalise later. #[behaviour(ignore)] - events: Vec, + events: Vec, } // Implement the NetworkBehaviourEventProcess trait so that we can derive NetworkBehaviour for Behaviour @@ -23,7 +24,15 @@ impl NetworkBehaviourEventProcess { fn inject_event(&mut self, event: GossipsubEvent) { - self.events.push(event); + match event { + GossipsubEvent::Message(message) => { + let gs_message = String::from_utf8_lossy(&message.data); + // TODO: Remove this type - debug only + self.events + .push(BehaviourEvent::Message(gs_message.to_string())) + } + _ => {} + } } } @@ -35,8 +44,10 @@ impl Behaviour { } } - /// Consume the events list when polled. - fn poll(&mut self) -> Async> { + /// Consumes the events list when polled. + fn poll( + &mut self, + ) -> Async> { if !self.events.is_empty() { return Async::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0))); } @@ -44,3 +55,16 @@ impl Behaviour { Async::NotReady } } + +impl Behaviour { + pub fn send_message(&self, message: String) { + // TODO: Encode and send via gossipsub + + } +} + +/// The types of events than can be obtained from polling the behaviour. +pub enum BehaviourEvent { + // TODO: This is a stub at the moment + Message(String), +} diff --git a/beacon_node/libp2p/src/error.rs b/beacon_node/libp2p/src/error.rs new file mode 100644 index 000000000..163fe575d --- /dev/null +++ b/beacon_node/libp2p/src/error.rs @@ -0,0 +1,8 @@ +// generates error types + +use error_chain::{ + error_chain, error_chain_processing, impl_error_chain_kind, impl_error_chain_processed, + impl_extract_backtrace, +}; + +error_chain! {} diff --git a/beacon_node/libp2p/src/lib.rs b/beacon_node/libp2p/src/lib.rs index f72725e49..a1bf4402c 100644 --- a/beacon_node/libp2p/src/lib.rs +++ b/beacon_node/libp2p/src/lib.rs @@ -2,7 +2,8 @@ /// all required libp2p functionality. /// /// This crate builds and manages the libp2p services required by the beacon node. 
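
Illustrative sketch only, not part of the patch: the behaviour above currently surfaces gossipsub payloads as UTF-8 text for debugging, roughly like this. The byte string is a made-up stand-in for the gossipsub message data.

    fn main() {
        // Stand-in payload; real data arrives from the swarm.
        let data: Vec<u8> = b"Testing network channel".to_vec();
        let gs_message = String::from_utf8_lossy(&data);
        println!("gossip payload as text: {}", gs_message);
    }
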
-mod behaviour; +pub mod behaviour; +pub mod error; mod network_config; mod service; @@ -11,6 +12,7 @@ pub use libp2p::{ PeerId, }; pub use network_config::NetworkConfig; +pub use service::Libp2pEvent; pub use service::Service; pub use types::multiaddr; pub use types::Multiaddr; diff --git a/beacon_node/libp2p/src/service.rs b/beacon_node/libp2p/src/service.rs index ee36cefd5..dceb62511 100644 --- a/beacon_node/libp2p/src/service.rs +++ b/beacon_node/libp2p/src/service.rs @@ -1,7 +1,9 @@ -use crate::behaviour::Behaviour; +use crate::behaviour::{Behaviour, BehaviourEvent}; +use crate::error; use crate::multiaddr::Protocol; use crate::NetworkConfig; use futures::prelude::*; +use futures::Stream; use libp2p::core::{ muxing::StreamMuxerBox, nodes::Substream, @@ -17,13 +19,16 @@ use std::time::Duration; /// The configuration and state of the libp2p components for the beacon node. pub struct Service { /// The libp2p Swarm handler. - swarm: Swarm, Behaviour>>, + //TODO: Make this private + pub swarm: Swarm, Behaviour>>, /// This node's PeerId. local_peer_id: PeerId, + /// The libp2p logger handle. + pub log: slog::Logger, } impl Service { - pub fn new(config: NetworkConfig, log: slog::Logger) -> Self { + pub fn new(config: NetworkConfig, log: slog::Logger) -> error::Result { debug!(log, "Libp2p Service starting"); let local_private_key = config.local_private_key; @@ -50,7 +55,7 @@ impl Service { Err(err) => warn!(log, "Cannot listen on: {} : {:?}", address, err), }; } - // connect to boot nodes - these are currently stored as multiadders + // connect to boot nodes - these are currently stored as multiaddrs // Once we have discovery, can set to peerId for bootnode in config.boot_nodes { match Swarm::dial_addr(&mut swarm, bootnode.clone()) { @@ -62,10 +67,36 @@ impl Service { }; } - Service { + Ok(Service { local_peer_id, swarm, + log, + }) + } +} + +impl Stream for Service { + type Item = Libp2pEvent; + type Error = crate::error::Error; + + fn poll(&mut self) -> Poll, Self::Error> { + loop { + // TODO: Currently only gossipsub events passed here. + // Build a type for more generic events + match self.swarm.poll() { + Ok(Async::Ready(Some(BehaviourEvent::Message(m)))) => { + // TODO: Stub here for debugging + debug!(self.log, "Message received: {}", m); + return Ok(Async::Ready(Some(Libp2pEvent::Message(m)))); + } + // TODO: Fill with all behaviour events + _ => break, + Ok(Async::Ready(None)) => unreachable!("Swarm stream shouldn't end"), + Ok(Async::NotReady) => break, + _ => break, + } } + Ok(Async::NotReady) } } @@ -103,3 +134,8 @@ fn build_transport( .map_err(|err| Error::new(ErrorKind::Other, err)) .boxed() } + +/// Events that can be obtained from polling the Libp2p Service. +pub enum Libp2pEvent { + Message(String), +} diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index f32ee1f90..19d3e82ad 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -13,3 +13,4 @@ slog = "2.4.1" futures = "0.1.25" error-chain = "0.12.0" crossbeam-channel = "0.3.8" +tokio = "0.1.16" diff --git a/beacon_node/network/src/error.rs b/beacon_node/network/src/error.rs index 163fe575d..2005f76ae 100644 --- a/beacon_node/network/src/error.rs +++ b/beacon_node/network/src/error.rs @@ -1,8 +1,13 @@ // generates error types +use libp2p; use error_chain::{ error_chain, error_chain_processing, impl_error_chain_kind, impl_error_chain_processed, impl_extract_backtrace, }; -error_chain! {} +error_chain! 
{ + links { + Libp2p(libp2p::error::Error, libp2p::error::ErrorKind); + } +} diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index ae03d8367..49b2abadd 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -5,4 +5,5 @@ mod messages; mod service; pub use libp2p::NetworkConfig; +pub use messages::NodeMessage; pub use service::Service; diff --git a/beacon_node/network/src/messages.rs b/beacon_node/network/src/messages.rs index 05b899269..d3a83fd5c 100644 --- a/beacon_node/network/src/messages.rs +++ b/beacon_node/network/src/messages.rs @@ -2,11 +2,15 @@ use libp2p::PeerId; use types::{Hash256, Slot}; /// Messages between nodes across the network. +#[derive(Debug, Clone)] pub enum NodeMessage { Status(Status), BlockRequest, + // TODO: only for testing - remove + Message(String), } +#[derive(Debug, Clone)] pub struct Status { /// Current node version. version: u8, @@ -19,6 +23,7 @@ pub struct Status { } /// Types of messages that the network service can receive. +#[derive(Debug, Clone)] pub enum NetworkMessage { /// Send a message to libp2p service. //TODO: Define typing for messages across the wire diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index ac8d9b442..e75b7e49a 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -2,39 +2,135 @@ use crate::error; use crate::message_handler::{HandlerMessage, MessageHandler}; use crate::messages::{NetworkMessage, NodeMessage}; use crate::NetworkConfig; -use crossbeam_channel::{unbounded as channel, Sender}; +use crossbeam_channel::{unbounded as channel, Sender, TryRecvError}; +use futures::future::lazy; +use futures::future::poll_fn; +use futures::prelude::*; use futures::sync::oneshot; +use futures::Stream; +use libp2p::behaviour::BehaviourEvent; +use libp2p::error::Error as libp2pError; use libp2p::Service as LibP2PService; +use libp2p::{Libp2pEvent, PeerId}; use slog::{debug, info, o, trace, warn, Logger}; use std::sync::{Arc, Mutex}; +use tokio::runtime::TaskExecutor; /// Service that handles communication between internal services and the libp2p network service. pub struct Service { //libp2p_service: Arc>, -//libp2p_thread: oneshot::Sender<()>, -//message_handler: MessageHandler, -//message_handler_send: Sender, + libp2p_exit: oneshot::Sender<()>, + network_send: crossbeam_channel::Sender, + //message_handler: MessageHandler, + //message_handler_send: Sender, } impl Service { pub fn new( config: NetworkConfig, + executor: TaskExecutor, log: slog::Logger, ) -> error::Result<(Arc, Sender)> { - debug!(log, "Service starting"); - let (network_send, network_recv) = channel::(); - // launch message handler thread let message_handler_log = log.new(o!("Service" => "MessageHandler")); - let message_handler_send = MessageHandler::new(message_handler_log); + let message_handler_send = MessageHandler::new(message_handler_log)?; // launch libp2p service let libp2p_log = log.new(o!("Service" => "Libp2p")); - let libp2p_service = LibP2PService::new(config, libp2p_log); + let libp2p_service = LibP2PService::new(config, libp2p_log)?; // TODO: Spawn thread to handle libp2p messages and pass to message handler thread. 
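
For context only, not part of the patch: a self-contained sketch of what the links section in the network crate's error_chain above buys. A linked chain's errors gain a generated From impl, so they can be bubbled up with the ? operator across the crate boundary. The module name below is a placeholder for the real libp2p crate.

    #[macro_use]
    extern crate error_chain;

    // Placeholder standing in for the libp2p crate's own empty error_chain.
    mod libp2p_error {
        error_chain! {}
    }

    // Mirrors the `links` block above; generates From<libp2p_error::Error>.
    error_chain! {
        links {
            Libp2p(libp2p_error::Error, libp2p_error::ErrorKind);
        }
    }

    fn start_network() -> Result<()> {
        let dialed: libp2p_error::Result<()> = Err("could not dial bootnode".into());
        dialed?; // converted into this chain's Error via the generated From impl
        Ok(())
    }

    fn main() {
        if let Err(e) = start_network() {
            println!("network error: {}", e);
        }
    }
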
- let network = Service {}; + let (network_send, libp2p_exit) = + spawn_service(libp2p_service, message_handler_send, executor, log)?; + let network = Service { + libp2p_exit, + network_send: network_send.clone(), + }; Ok((Arc::new(network), network_send)) } + + // TODO: Testing only + pub fn send_message(&self, message: String) { + let node_message = NodeMessage::Message(message); + self.network_send + .send(NetworkMessage::Send(PeerId::random(), node_message)); + } +} + +fn spawn_service( + libp2p_service: LibP2PService, + message_handler_send: crossbeam_channel::Sender, + executor: TaskExecutor, + log: slog::Logger, +) -> error::Result<( + crossbeam_channel::Sender, + oneshot::Sender<()>, +)> { + let (network_exit, exit_rx) = oneshot::channel(); + let (network_send, network_recv) = channel::(); + + // spawn on the current executor + executor.spawn( + network_service( + libp2p_service, + network_recv, + message_handler_send, + log.clone(), + ) + // allow for manual termination + .select(exit_rx.then(|_| Ok(()))) + .then(move |_| { + debug!(log.clone(), "Network service ended"); + Ok(()) + }), + ); + + Ok((network_send, network_exit)) +} + +fn network_service( + mut libp2p_service: LibP2PService, + network_recv: crossbeam_channel::Receiver, + message_handler_send: crossbeam_channel::Sender, + log: slog::Logger, +) -> impl futures::Future { + futures::future::poll_fn(move || -> Result<_, libp2p::error::Error> { + // poll the swarm + loop { + match libp2p_service.poll() { + Ok(Async::Ready(Some(Libp2pEvent::Message(m)))) => debug!( + libp2p_service.log, + "Network Service: Message received: {}", m + ), + _ => break, + } + } + // poll the network channel + // TODO: refactor - combine poll_fn's? + loop { + match network_recv.try_recv() { + // TODO: Testing message - remove + Ok(NetworkMessage::Send(_peer_id, node_message)) => { + match node_message { + NodeMessage::Message(m) => { + debug!(log, "Message received via network channel: {:?}", m); + //TODO: Make swarm private + //TODO: Implement correct peer id topic message handling + libp2p_service.swarm.send_message(m); + } + //TODO: Handle all NodeMessage types + _ => break, + }; + } + Err(TryRecvError::Empty) => break, + Err(TryRecvError::Disconnected) => { + return Err(libp2p::error::Error::from("Network channel disconnected")); + } + // TODO: Implement all NetworkMessage + _ => break, + } + } + Ok(Async::NotReady) + }) } diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index f2a703cbc..cfae001a0 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -40,6 +40,8 @@ pub fn run_beacon_node(config: ClientConfig, log: slog::Logger) -> error::Result // perform global shutdown operations. 
info!(log, "Shutting down.."); exit_signal.fire(); + // shutdown the client + // client.exit_signal.fire(); drop(client); runtime.shutdown_on_idle().wait().unwrap(); Ok(()) From 2b7aa269c301e964673328fad5b55497cc79988c Mon Sep 17 00:00:00 2001 From: pawanjay176 Date: Wed, 13 Mar 2019 00:22:15 +0530 Subject: [PATCH 025/154] Add OptimizedLMDGhost fork choice rule and tests --- eth2/fork_choice/src/lib.rs | 4 + eth2/fork_choice/src/optimized_lmd_ghost.rs | 452 ++++++++++++++++++++ eth2/fork_choice/tests/tests.rs | 22 +- 3 files changed, 475 insertions(+), 3 deletions(-) create mode 100644 eth2/fork_choice/src/optimized_lmd_ghost.rs diff --git a/eth2/fork_choice/src/lib.rs b/eth2/fork_choice/src/lib.rs index 6062c19b1..a947473f3 100644 --- a/eth2/fork_choice/src/lib.rs +++ b/eth2/fork_choice/src/lib.rs @@ -23,6 +23,7 @@ extern crate types; pub mod bitwise_lmd_ghost; pub mod longest_chain; pub mod slow_lmd_ghost; +pub mod optimized_lmd_ghost; use db::stores::BeaconBlockAtSlotError; use db::DBError; @@ -31,6 +32,7 @@ use types::{BeaconBlock, ChainSpec, Hash256}; pub use bitwise_lmd_ghost::BitwiseLMDGhost; pub use longest_chain::LongestChain; pub use slow_lmd_ghost::SlowLMDGhost; +pub use optimized_lmd_ghost::OptimizedLMDGhost; /// Defines the interface for Fork Choices. Each Fork choice will define their own data structures /// which can be built in block processing through the `add_block` and `add_attestation` functions. @@ -101,4 +103,6 @@ pub enum ForkChoiceAlgorithm { SlowLMDGhost, /// An optimised version of bitwise LMD-GHOST by Vitalik. BitwiseLMDGhost, + /// An optimised implementation of LMD ghost. + OptimizedLMDGhost } diff --git a/eth2/fork_choice/src/optimized_lmd_ghost.rs b/eth2/fork_choice/src/optimized_lmd_ghost.rs new file mode 100644 index 000000000..093120bb5 --- /dev/null +++ b/eth2/fork_choice/src/optimized_lmd_ghost.rs @@ -0,0 +1,452 @@ +//! The optimised bitwise LMD-GHOST fork choice rule. +extern crate bit_vec; + +use crate::{ForkChoice, ForkChoiceError}; +use db::{ + stores::{BeaconBlockStore, BeaconStateStore}, + ClientDB, +}; +use log::{debug, trace}; +use std::collections::HashMap; +use std::sync::Arc; +use types::{ + readers::BeaconBlockReader, validator_registry::get_active_validator_indices, BeaconBlock, + ChainSpec, Hash256, Slot, SlotHeight, +}; + +//TODO: Pruning - Children +//TODO: Handle Syncing + +// NOTE: This uses u32 to represent difference between block heights. Thus this is only +// applicable for block height differences in the range of a u32. +// This can potentially be parallelized in some parts. + +/// Compute the base-2 logarithm of an integer, floored (rounded down) +#[inline] +fn log2_int(x: u64) -> u32 { + if x == 0 { + return 0; + } + 63 - x.leading_zeros() +} + +fn power_of_2_below(x: u64) -> u64 { + 2u64.pow(log2_int(x)) +} + +/// Stores the necessary data structures to run the optimised lmd ghost algorithm. +pub struct OptimizedLMDGhost { + /// A cache of known ancestors at given heights for a specific block. + //TODO: Consider FnvHashMap + cache: HashMap, Hash256>, + /// Log lookup table for blocks to their ancestors. + //TODO: Verify we only want/need a size 16 log lookup + ancestors: Vec>, + /// Stores the children for any given parent. + children: HashMap>, + /// The latest attestation targets as a map of validator index to block hash. + //TODO: Could this be a fixed size vec + latest_attestation_targets: HashMap, + /// Block storage access. + block_store: Arc>, + /// State storage access. 
+ state_store: Arc>, + max_known_height: SlotHeight, +} + +impl OptimizedLMDGhost +where + T: ClientDB + Sized, +{ + pub fn new( + block_store: Arc>, + state_store: Arc>, + ) -> Self { + OptimizedLMDGhost { + cache: HashMap::new(), + ancestors: vec![HashMap::new(); 16], + latest_attestation_targets: HashMap::new(), + children: HashMap::new(), + max_known_height: SlotHeight::new(0), + block_store, + state_store, + } + } + + /// Finds the latest votes weighted by validator balance. Returns a hashmap of block_hash to + /// weighted votes. + pub fn get_latest_votes( + &self, + state_root: &Hash256, + block_slot: Slot, + spec: &ChainSpec, + ) -> Result, ForkChoiceError> { + // get latest votes + // Note: Votes are weighted by min(balance, MAX_DEPOSIT_AMOUNT) // + // FORK_CHOICE_BALANCE_INCREMENT + // build a hashmap of block_hash to weighted votes + let mut latest_votes: HashMap = HashMap::new(); + // gets the current weighted votes + let current_state = self + .state_store + .get_deserialized(&state_root)? + .ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?; + + let active_validator_indices = get_active_validator_indices( + ¤t_state.validator_registry[..], + block_slot.epoch(spec.slots_per_epoch), + ); + + for index in active_validator_indices { + let balance = std::cmp::min( + current_state.validator_balances[index], + spec.max_deposit_amount, + ) / spec.fork_choice_balance_increment; + if balance > 0 { + if let Some(target) = self.latest_attestation_targets.get(&(index as u64)) { + *latest_votes.entry(*target).or_insert_with(|| 0) += balance; + } + } + } + trace!("Latest votes: {:?}", latest_votes); + Ok(latest_votes) + } + + /// Gets the ancestor at a given height `at_height` of a block specified by `block_hash`. + fn get_ancestor( + &mut self, + block_hash: Hash256, + target_height: SlotHeight, + spec: &ChainSpec, + ) -> Option { + // return None if we can't get the block from the db. + let block_height = { + let block_slot = self + .block_store + .get_deserialized(&block_hash) + .ok()? 
+ .expect("Should have returned already if None") + .slot; + + block_slot.height(spec.genesis_slot) + }; + + // verify we haven't exceeded the block height + if target_height >= block_height { + if target_height > block_height { + return None; + } else { + return Some(block_hash); + } + } + // check if the result is stored in our cache + let cache_key = CacheKey::new(&block_hash, target_height.as_u64()); + if let Some(ancestor) = self.cache.get(&cache_key) { + return Some(*ancestor); + } + + // not in the cache recursively search for ancestors using a log-lookup + if let Some(ancestor) = { + let ancestor_lookup = self.ancestors + [log2_int((block_height - target_height - 1u64).as_u64()) as usize] + .get(&block_hash) + //TODO: Panic if we can't lookup and fork choice fails + .expect("All blocks should be added to the ancestor log lookup table"); + self.get_ancestor(*ancestor_lookup, target_height, &spec) + } { + // add the result to the cache + self.cache.insert(cache_key, ancestor); + return Some(ancestor); + } + + None + } + + // looks for an obvious block winner given the latest votes for a specific height + fn get_clear_winner( + &mut self, + latest_votes: &HashMap, + block_height: SlotHeight, + spec: &ChainSpec, + ) -> Option { + // map of vote counts for every hash at this height + let mut current_votes: HashMap = HashMap::new(); + let mut total_vote_count = 0; + + trace!("Clear winner at block height: {}", block_height); + // loop through the latest votes and count all votes + // these have already been weighted by balance + for (hash, votes) in latest_votes.iter() { + if let Some(ancestor) = self.get_ancestor(*hash, block_height, spec) { + let current_vote_value = current_votes.get(&ancestor).unwrap_or_else(|| &0); + current_votes.insert(ancestor, current_vote_value + *votes); + total_vote_count += votes; + } + } + // Check if there is a clear block winner at this height. If so return it. + for (hash, votes) in current_votes.iter() { + if *votes > total_vote_count / 2 { + // we have a clear winner, return it + return Some(*hash); + } + } + // didn't find a clear winner + None + } + + // Finds the best child (one with highest votes) + fn choose_best_child(&self, votes: &HashMap) -> Option { + if votes.is_empty() { + return None; + } + let mut best_child: Hash256 = Hash256::from(0); + let mut max_votes: u64 = 0; + for (&candidate, &votes) in votes.iter() { + // Choose the smaller hash to break ties deterministically + if votes == max_votes && candidate < best_child { + best_child = candidate; + } + if votes > max_votes { + max_votes = votes; + best_child = candidate; + } + } + Some(best_child) + } +} + +impl ForkChoice for OptimizedLMDGhost { + fn add_block( + &mut self, + block: &BeaconBlock, + block_hash: &Hash256, + spec: &ChainSpec, + ) -> Result<(), ForkChoiceError> { + // get the height of the parent + let parent_height = self + .block_store + .get_deserialized(&block.parent_root)? + .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(block.parent_root))? + .slot() + .height(spec.genesis_slot); + + let parent_hash = &block.parent_root; + + // add the new block to the children of parent + (*self + .children + .entry(block.parent_root) + .or_insert_with(|| vec![])) + .push(block_hash.clone()); + + // build the ancestor data structure + for index in 0..16 { + if parent_height % (1 << index) == 0 { + self.ancestors[index].insert(*block_hash, *parent_hash); + } else { + // TODO: This is unsafe. Will panic if parent_hash doesn't exist. 
Using it for debugging + let parent_ancestor = self.ancestors[index][parent_hash]; + self.ancestors[index].insert(*block_hash, parent_ancestor); + } + } + // update the max height + self.max_known_height = std::cmp::max(self.max_known_height, parent_height + 1); + Ok(()) + } + + fn add_attestation( + &mut self, + validator_index: u64, + target_block_root: &Hash256, + spec: &ChainSpec, + ) -> Result<(), ForkChoiceError> { + // simply add the attestation to the latest_attestation_target if the block_height is + // larger + trace!( + "Adding attestation of validator: {:?} for block: {}", + validator_index, + target_block_root + ); + let attestation_target = self + .latest_attestation_targets + .entry(validator_index) + .or_insert_with(|| *target_block_root); + // if we already have a value + if attestation_target != target_block_root { + trace!("Old attestation found: {:?}", attestation_target); + // get the height of the target block + let block_height = self + .block_store + .get_deserialized(&target_block_root)? + .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*target_block_root))? + .slot() + .height(spec.genesis_slot); + + // get the height of the past target block + let past_block_height = self + .block_store + .get_deserialized(&attestation_target)? + .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*attestation_target))? + .slot() + .height(spec.genesis_slot); + // update the attestation only if the new target is higher + if past_block_height < block_height { + trace!("Updating old attestation"); + *attestation_target = *target_block_root; + } + } + Ok(()) + } + + /// Perform lmd_ghost on the current chain to find the head. + fn find_head( + &mut self, + justified_block_start: &Hash256, + spec: &ChainSpec, + ) -> Result { + debug!( + "Starting optimised fork choice at block: {}", + justified_block_start + ); + let block = self + .block_store + .get_deserialized(&justified_block_start)? + .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*justified_block_start))?; + + let block_slot = block.slot(); + let state_root = block.state_root(); + let mut block_height = block_slot.height(spec.genesis_slot); + + let mut current_head = *justified_block_start; + + let mut latest_votes = self.get_latest_votes(&state_root, block_slot, spec)?; + + // remove any votes that don't relate to our current head. + latest_votes + .retain(|hash, _| self.get_ancestor(*hash, block_height, spec) == Some(current_head)); + + // begin searching for the head + loop { + debug!( + "Iteration for block: {} with vote length: {}", + current_head, + latest_votes.len() + ); + // if there are no children, we are done, return the current_head + let children = match self.children.get(¤t_head) { + Some(children) => children.clone(), + None => { + debug!("Head found: {}", current_head); + return Ok(current_head); + } + }; + + // logarithmic lookup blocks to see if there are obvious winners, if so, + // progress to the next iteration. 
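            // A worked example with illustrative values: if max_known_height
            // is 24 and block_height is 8, the gap is 16, so `step` starts at
            // power_of_2_below(16) / 2 = 8 and halves after each failed probe;
            // the heights queried for a clear winner are 16, 12, 10 and then 9.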
+ let mut step = + power_of_2_below(self.max_known_height.saturating_sub(block_height).as_u64()) / 2; + while step > 0 { + trace!("Current Step: {}", step); + if let Some(clear_winner) = self.get_clear_winner( + &latest_votes, + block_height - (block_height % step) + step, + spec, + ) { + current_head = clear_winner; + break; + } + step /= 2; + } + if step > 0 { + trace!("Found clear winner: {}", current_head); + } + // if our skip lookup failed and we only have one child, progress to that child + else if children.len() == 1 { + current_head = children[0]; + trace!( + "Lookup failed, only one child, proceeding to child: {}", + current_head + ); + } + // we need to find the best child path to progress down. + else { + trace!("Searching for best child"); + let mut child_votes = HashMap::new(); + for (voted_hash, vote) in latest_votes.iter() { + // if the latest votes correspond to a child + if let Some(child) = self.get_ancestor(*voted_hash, block_height + 1, spec) { + // add up the votes for each child + *child_votes.entry(child).or_insert_with(|| 0) += vote; + } + } + // given the votes on the children, find the best child + current_head = self + .choose_best_child(&child_votes) + .ok_or(ForkChoiceError::CannotFindBestChild)?; + trace!("Best child found: {}", current_head); + } + + // didn't find head yet, proceed to next iteration + // update block height + block_height = self + .block_store + .get_deserialized(¤t_head)? + .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(current_head))? + .slot() + .height(spec.genesis_slot); + // prune the latest votes for votes that are not part of current chosen chain + // more specifically, only keep votes that have head as an ancestor + for hash in latest_votes.keys() { + trace!( + "Ancestor for vote: {} at height: {} is: {:?}", + hash, + block_height, + self.get_ancestor(*hash, block_height, spec) + ); + } + latest_votes.retain(|hash, _| { + self.get_ancestor(*hash, block_height, spec) == Some(current_head) + }); + } + } +} + +/// Type for storing blocks in a memory cache. Key is comprised of block-hash plus the height. 
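/// Entries keyed this way record `get_ancestor` results, so repeated lookups
/// for the same (block hash, height) pair are answered from the cache instead
/// of walking the ancestor tables again.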
+#[derive(PartialEq, Eq, Hash)] +pub struct CacheKey { + block_hash: Hash256, + block_height: T, +} + +impl CacheKey { + pub fn new(block_hash: &Hash256, block_height: T) -> Self { + CacheKey { + block_hash: *block_hash, + block_height, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + pub fn test_power_of_2_below() { + assert_eq!(power_of_2_below(4), 4); + assert_eq!(power_of_2_below(5), 4); + assert_eq!(power_of_2_below(7), 4); + assert_eq!(power_of_2_below(24), 16); + assert_eq!(power_of_2_below(32), 32); + assert_eq!(power_of_2_below(33), 32); + assert_eq!(power_of_2_below(63), 32); + } + + #[test] + pub fn test_power_of_2_below_large() { + let pow: u64 = 1 << 24; + for x in (pow - 20)..(pow + 20) { + assert!(power_of_2_below(x) <= x, "{}", x); + } + } +} diff --git a/eth2/fork_choice/tests/tests.rs b/eth2/fork_choice/tests/tests.rs index 7228bca10..1c8d92e0a 100644 --- a/eth2/fork_choice/tests/tests.rs +++ b/eth2/fork_choice/tests/tests.rs @@ -3,7 +3,7 @@ extern crate beacon_chain; extern crate bls; extern crate db; -//extern crate env_logger; // for debugging +// extern crate env_logger; // for debugging extern crate fork_choice; extern crate hex; extern crate log; @@ -15,8 +15,8 @@ pub use beacon_chain::BeaconChain; use bls::Signature; use db::stores::{BeaconBlockStore, BeaconStateStore}; use db::MemoryDB; -//use env_logger::{Builder, Env}; -use fork_choice::{BitwiseLMDGhost, ForkChoice, ForkChoiceAlgorithm, LongestChain, SlowLMDGhost}; +// use env_logger::{Builder, Env}; +use fork_choice::{BitwiseLMDGhost, OptimizedLMDGhost, ForkChoice, ForkChoiceAlgorithm, LongestChain, SlowLMDGhost}; use ssz::ssz_encode; use std::collections::HashMap; use std::sync::Arc; @@ -27,6 +27,18 @@ use yaml_rust::yaml; // Note: We Assume the block Id's are hex-encoded. +#[test] +fn test_optimized_lmd_ghost() { + // set up logging + // Builder::from_env(Env::default().default_filter_or("trace")).init(); + + test_yaml_vectors( + ForkChoiceAlgorithm::OptimizedLMDGhost, + "tests/lmd_ghost_test_vectors.yaml", + 100, + ); +} + #[test] fn test_bitwise_lmd_ghost() { // set up logging @@ -212,6 +224,10 @@ fn setup_inital_state( // the fork choice instantiation let fork_choice: Box = match fork_choice_algo { + ForkChoiceAlgorithm::OptimizedLMDGhost => Box::new(OptimizedLMDGhost::new( + block_store.clone(), + state_store.clone(), + )), ForkChoiceAlgorithm::BitwiseLMDGhost => Box::new(BitwiseLMDGhost::new( block_store.clone(), state_store.clone(), From 6f5593ef2bf2de66078e365186d4098acb91a085 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 13 Mar 2019 10:24:02 +1100 Subject: [PATCH 026/154] Optimize TestingAttesterSlashingBuilder --- .../test_utils/testing_attester_slashing_builder.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/eth2/types/src/test_utils/testing_attester_slashing_builder.rs b/eth2/types/src/test_utils/testing_attester_slashing_builder.rs index 232de87ec..92c7fe814 100644 --- a/eth2/types/src/test_utils/testing_attester_slashing_builder.rs +++ b/eth2/types/src/test_utils/testing_attester_slashing_builder.rs @@ -67,13 +67,15 @@ impl TestingAttesterSlashingBuilder { }; let add_signatures = |attestation: &mut SlashableAttestation| { + // All validators sign with a `false` custody bit. 
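            // Because the custody bit is the same for every validator, the
            // signed message (the tree hash of `AttestationDataAndCustodyBit`)
            // is identical for all of them and is computed once here instead
            // of once per validator inside the loop below.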
+ let attestation_data_and_custody_bit = AttestationDataAndCustodyBit { + data: attestation.data.clone(), + custody_bit: false, + }; + let message = attestation_data_and_custody_bit.hash_tree_root(); + for (i, validator_index) in validator_indices.iter().enumerate() { attestation.custody_bitfield.set(i, false); - let attestation_data_and_custody_bit = AttestationDataAndCustodyBit { - data: attestation.data.clone(), - custody_bit: attestation.custody_bitfield.get(i).unwrap(), - }; - let message = attestation_data_and_custody_bit.hash_tree_root(); let signature = signer(*validator_index, &message[..], epoch, Domain::Attestation); attestation.aggregate_signature.add(&signature); } From 2d2ba6576b5b916449fb17f1e29d49764f690ac2 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 13 Mar 2019 11:24:46 +1100 Subject: [PATCH 027/154] Remove old, superseded benches --- .../beacon_chain/test_harness/Cargo.toml | 5 -- .../test_harness/benches/state_transition.rs | 69 ------------------- 2 files changed, 74 deletions(-) delete mode 100644 beacon_node/beacon_chain/test_harness/benches/state_transition.rs diff --git a/beacon_node/beacon_chain/test_harness/Cargo.toml b/beacon_node/beacon_chain/test_harness/Cargo.toml index 448934eb3..50d154732 100644 --- a/beacon_node/beacon_chain/test_harness/Cargo.toml +++ b/beacon_node/beacon_chain/test_harness/Cargo.toml @@ -12,12 +12,7 @@ path = "src/bin.rs" name = "test_harness" path = "src/lib.rs" -[[bench]] -name = "state_transition" -harness = false - [dev-dependencies] -criterion = "0.2" state_processing = { path = "../../../eth2/state_processing" } [dependencies] diff --git a/beacon_node/beacon_chain/test_harness/benches/state_transition.rs b/beacon_node/beacon_chain/test_harness/benches/state_transition.rs deleted file mode 100644 index 7d1c44653..000000000 --- a/beacon_node/beacon_chain/test_harness/benches/state_transition.rs +++ /dev/null @@ -1,69 +0,0 @@ -use criterion::Criterion; -use criterion::{black_box, criterion_group, criterion_main, Benchmark}; -// use env_logger::{Builder, Env}; -use state_processing::SlotProcessable; -use test_harness::BeaconChainHarness; -use types::{ChainSpec, Hash256}; - -fn mid_epoch_state_transition(c: &mut Criterion) { - // Builder::from_env(Env::default().default_filter_or("debug")).init(); - - let validator_count = 1000; - let mut rig = BeaconChainHarness::new(ChainSpec::foundation(), validator_count); - - let epoch_depth = (rig.spec.slots_per_epoch * 2) + (rig.spec.slots_per_epoch / 2); - - for _ in 0..epoch_depth { - rig.advance_chain_with_block(); - } - - let state = rig.beacon_chain.state.read().clone(); - - assert!((state.slot + 1) % rig.spec.slots_per_epoch != 0); - - c.bench_function("mid-epoch state transition 10k validators", move |b| { - let state = state.clone(); - b.iter(|| { - let mut state = state.clone(); - black_box(state.per_slot_processing(Hash256::zero(), &rig.spec)) - }) - }); -} - -fn epoch_boundary_state_transition(c: &mut Criterion) { - // Builder::from_env(Env::default().default_filter_or("debug")).init(); - - let validator_count = 10000; - let mut rig = BeaconChainHarness::new(ChainSpec::foundation(), validator_count); - - let epoch_depth = rig.spec.slots_per_epoch * 2; - - for _ in 0..(epoch_depth - 1) { - rig.advance_chain_with_block(); - } - - let state = rig.beacon_chain.state.read().clone(); - - assert_eq!((state.slot + 1) % rig.spec.slots_per_epoch, 0); - - c.bench( - "routines", - Benchmark::new("routine_1", move |b| { - let state = state.clone(); - b.iter(|| { - let mut state = 
state.clone(); - black_box(black_box( - state.per_slot_processing(Hash256::zero(), &rig.spec), - )) - }) - }) - .sample_size(5), // sample size is low because function is sloooow. - ); -} - -criterion_group!( - benches, - mid_epoch_state_transition, - epoch_boundary_state_transition -); -criterion_main!(benches); From 6c4e457c8adf69266fe9088d3ed470302d204705 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 13 Mar 2019 11:25:17 +1100 Subject: [PATCH 028/154] Fix test_harness tests They were broken by changes to TestingBeaconStateBuilder and where the keypairs file is stored. --- .../test_harness/src/beacon_chain_harness.rs | 90 ++----------------- .../beacon_chain/test_harness/src/run_test.rs | 7 +- .../test_harness/src/test_case.rs | 4 +- .../beacon_chain/test_harness/tests/chain.rs | 4 +- 4 files changed, 10 insertions(+), 95 deletions(-) diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs index c442c05db..28723a203 100644 --- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs @@ -1,7 +1,6 @@ use super::ValidatorHarness; use beacon_chain::{BeaconChain, BlockProcessingOutcome}; pub use beacon_chain::{BeaconChainError, CheckPoint}; -use bls::get_withdrawal_credentials; use db::{ stores::{BeaconBlockStore, BeaconStateStore}, MemoryDB, @@ -12,11 +11,10 @@ use rayon::prelude::*; use slot_clock::TestingSlotClock; use ssz::TreeHash; use std::collections::HashSet; -use std::fs::File; use std::iter::FromIterator; use std::path::Path; use std::sync::Arc; -use types::{beacon_state::BeaconStateBuilder, test_utils::generate_deterministic_keypairs, *}; +use types::{test_utils::TestingBeaconStateBuilder, *}; mod generate_deposits; @@ -42,95 +40,17 @@ impl BeaconChainHarness { /// /// - A keypair, `BlockProducer` and `Attester` for each validator. /// - A new BeaconChain struct where the given validators are in the genesis. - pub fn new( - spec: ChainSpec, - validator_count: usize, - validators_dir: Option<&Path>, - skip_deposit_verification: bool, - ) -> Self { + pub fn new(spec: ChainSpec, validator_count: usize) -> Self { let db = Arc::new(MemoryDB::open()); let block_store = Arc::new(BeaconBlockStore::new(db.clone())); let state_store = Arc::new(BeaconStateStore::new(db.clone())); - let genesis_time = 1_549_935_547; // 12th Feb 2018 (arbitrary value in the past). let slot_clock = TestingSlotClock::new(spec.genesis_slot.as_u64()); let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone()); - let latest_eth1_data = Eth1Data { - deposit_root: Hash256::zero(), - block_hash: Hash256::zero(), - }; - let mut state_builder = BeaconStateBuilder::new(genesis_time, latest_eth1_data, &spec); + let state_builder = + TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec); + let (genesis_state, keypairs) = state_builder.build(); - // If a `validators_dir` is specified, load the keypairs a YAML file. - // - // Otherwise, generate them deterministically where the first validator has a secret key of - // `1`, etc. 
- let keypairs = if let Some(path) = validators_dir { - debug!("Loading validator keypairs from file..."); - let keypairs_file = File::open(path.join("keypairs.yaml")).unwrap(); - let mut keypairs: Vec = serde_yaml::from_reader(&keypairs_file).unwrap(); - keypairs.truncate(validator_count); - keypairs - } else { - debug!("Generating validator keypairs..."); - generate_deterministic_keypairs(validator_count) - }; - - // Skipping deposit verification means directly generating `Validator` records, instead - // of generating `Deposit` objects, verifying them and converting them into `Validator` - // records. - // - // It is much faster to skip deposit verification, however it does not test the initial - // validator induction part of beacon chain genesis. - if skip_deposit_verification { - let validators = keypairs - .iter() - .map(|keypair| { - let withdrawal_credentials = Hash256::from_slice(&get_withdrawal_credentials( - &keypair.pk, - spec.bls_withdrawal_prefix_byte, - )); - - Validator { - pubkey: keypair.pk.clone(), - withdrawal_credentials, - activation_epoch: spec.far_future_epoch, - exit_epoch: spec.far_future_epoch, - withdrawable_epoch: spec.far_future_epoch, - initiated_exit: false, - slashed: false, - } - }) - .collect(); - - let balances = vec![32_000_000_000; validator_count]; - - state_builder.import_existing_validators( - validators, - balances, - validator_count as u64, - &spec, - ); - } else { - debug!("Generating initial validator deposits..."); - let deposits = generate_deposits_from_keypairs( - &keypairs, - genesis_time, - spec.get_domain( - spec.genesis_epoch, - Domain::Deposit, - &Fork { - previous_version: spec.genesis_fork_version, - current_version: spec.genesis_fork_version, - epoch: spec.genesis_epoch, - }, - ), - &spec, - ); - state_builder.process_initial_deposits(&deposits, &spec); - }; - - let genesis_state = state_builder.build(&spec).unwrap(); let state_root = Hash256::from_slice(&genesis_state.hash_tree_root()); let genesis_block = BeaconBlock::genesis(state_root, &spec); diff --git a/beacon_node/beacon_chain/test_harness/src/run_test.rs b/beacon_node/beacon_chain/test_harness/src/run_test.rs index d4e2e1cf2..4caa299d6 100644 --- a/beacon_node/beacon_chain/test_harness/src/run_test.rs +++ b/beacon_node/beacon_chain/test_harness/src/run_test.rs @@ -1,6 +1,5 @@ use crate::test_case::TestCase; use clap::ArgMatches; -use std::path::Path; use std::{fs::File, io::prelude::*}; use yaml_rust::YamlLoader; @@ -17,10 +16,6 @@ pub fn run_test(matches: &ArgMatches) { }; for doc in &docs { - let validators_dir = matches - .value_of("validators_dir") - .and_then(|dir_str| Some(Path::new(dir_str))); - // For each `test_cases` YAML in the document, build a `TestCase`, execute it and // assert that the execution result matches the test_case description. // @@ -35,7 +30,7 @@ pub fn run_test(matches: &ArgMatches) { // panics with a message. for test_case in doc["test_cases"].as_vec().unwrap() { let test_case = TestCase::from_yaml(test_case); - test_case.assert_result_valid(test_case.execute(validators_dir)) + test_case.assert_result_valid(test_case.execute()) } } } diff --git a/beacon_node/beacon_chain/test_harness/src/test_case.rs b/beacon_node/beacon_chain/test_harness/src/test_case.rs index b6b1ea5cc..cee78f6c4 100644 --- a/beacon_node/beacon_chain/test_harness/src/test_case.rs +++ b/beacon_node/beacon_chain/test_harness/src/test_case.rs @@ -69,7 +69,7 @@ impl TestCase { /// Executes the test case, returning an `ExecutionResult`. 
#[allow(clippy::cyclomatic_complexity)] - pub fn execute(&self, validators_dir: Option<&Path>) -> ExecutionResult { + pub fn execute(&self) -> ExecutionResult { let spec = self.spec(); let validator_count = self.config.deposits_for_chain_start; let slots = self.config.num_slots; @@ -79,7 +79,7 @@ impl TestCase { validator_count ); - let mut harness = BeaconChainHarness::new(spec, validator_count, validators_dir, true); + let mut harness = BeaconChainHarness::new(spec, validator_count); info!("Starting simulation across {} slots...", slots); diff --git a/beacon_node/beacon_chain/test_harness/tests/chain.rs b/beacon_node/beacon_chain/test_harness/tests/chain.rs index e5a52a314..e72c3a5aa 100644 --- a/beacon_node/beacon_chain/test_harness/tests/chain.rs +++ b/beacon_node/beacon_chain/test_harness/tests/chain.rs @@ -10,7 +10,7 @@ fn it_can_build_on_genesis_block() { let spec = ChainSpec::few_validators(); let validator_count = 8; - let mut harness = BeaconChainHarness::new(spec, validator_count as usize, None, true); + let mut harness = BeaconChainHarness::new(spec, validator_count as usize); harness.advance_chain_with_block(); } @@ -25,7 +25,7 @@ fn it_can_produce_past_first_epoch_boundary() { debug!("Starting harness build..."); - let mut harness = BeaconChainHarness::new(spec, validator_count, None, true); + let mut harness = BeaconChainHarness::new(spec, validator_count); debug!("Harness built, tests starting.."); From 6101036c8ef5f9d126b588b428679f16a1b7da89 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 5 Mar 2019 17:28:51 +1100 Subject: [PATCH 029/154] Re-apply removal of duplicated ssz_tests Also, re-apply: "use cfg(test) for test macros" --- eth2/types/src/attestation.rs | 25 +------- eth2/types/src/attestation_data.rs | 25 +------- .../src/attestation_data_and_custody_bit.rs | 27 +------- eth2/types/src/attester_slashing.rs | 25 +------- eth2/types/src/beacon_block.rs | 25 +------- eth2/types/src/beacon_block_body.rs | 25 +------- eth2/types/src/beacon_state/tests.rs | 24 +------ eth2/types/src/crosslink.rs | 25 +------- eth2/types/src/deposit.rs | 25 +------- eth2/types/src/deposit_data.rs | 25 +------- eth2/types/src/deposit_input.rs | 25 +------- eth2/types/src/eth1_data.rs | 25 +------- eth2/types/src/eth1_data_vote.rs | 25 +------- eth2/types/src/fork.rs | 25 +------- eth2/types/src/lib.rs | 1 + eth2/types/src/pending_attestation.rs | 25 +------- eth2/types/src/proposal.rs | 26 +------- eth2/types/src/proposer_slashing.rs | 25 +------- eth2/types/src/shard_reassignment_record.rs | 25 +------- eth2/types/src/slashable_attestation.rs | 24 +------ eth2/types/src/slot_epoch.rs | 4 -- eth2/types/src/slot_epoch_macros.rs | 38 ++--------- eth2/types/src/slot_height.rs | 3 - eth2/types/src/test_utils/mod.rs | 63 ++++++++++++++++--- eth2/types/src/transfer.rs | 25 +------- eth2/types/src/validator.rs | 24 +------ eth2/types/src/voluntary_exit.rs | 25 +------- 27 files changed, 83 insertions(+), 576 deletions(-) diff --git a/eth2/types/src/attestation.rs b/eth2/types/src/attestation.rs index 67bff3d20..dcc4c1fda 100644 --- a/eth2/types/src/attestation.rs +++ b/eth2/types/src/attestation.rs @@ -20,29 +20,6 @@ pub struct Attestation { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Attestation::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let 
(decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Attestation::random_for_test(&mut rng); - - let result = original.hash_tree_root(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(Attestation); } diff --git a/eth2/types/src/attestation_data.rs b/eth2/types/src/attestation_data.rs index 5899ab52d..6e3cb3891 100644 --- a/eth2/types/src/attestation_data.rs +++ b/eth2/types/src/attestation_data.rs @@ -38,29 +38,6 @@ impl Eq for AttestationData {} #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = AttestationData::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = AttestationData::random_for_test(&mut rng); - - let result = original.hash_tree_root(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(AttestationData); } diff --git a/eth2/types/src/attestation_data_and_custody_bit.rs b/eth2/types/src/attestation_data_and_custody_bit.rs index e9cf4bb67..020b07d28 100644 --- a/eth2/types/src/attestation_data_and_custody_bit.rs +++ b/eth2/types/src/attestation_data_and_custody_bit.rs @@ -25,31 +25,6 @@ impl TestRandom for AttestationDataAndCustodyBit { #[cfg(test)] mod test { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - - let original = AttestationDataAndCustodyBit::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = AttestationDataAndCustodyBit::random_for_test(&mut rng); - - let result = original.hash_tree_root(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(AttestationDataAndCustodyBit); } diff --git a/eth2/types/src/attester_slashing.rs b/eth2/types/src/attester_slashing.rs index 406e09f29..f437d41f2 100644 --- a/eth2/types/src/attester_slashing.rs +++ b/eth2/types/src/attester_slashing.rs @@ -16,29 +16,6 @@ pub struct AttesterSlashing { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = AttesterSlashing::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = AttesterSlashing::random_for_test(&mut rng); - - let result = original.hash_tree_root(); - - 
assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(AttesterSlashing); } diff --git a/eth2/types/src/beacon_block.rs b/eth2/types/src/beacon_block.rs index 0274539ba..615d9f928 100644 --- a/eth2/types/src/beacon_block.rs +++ b/eth2/types/src/beacon_block.rs @@ -70,29 +70,6 @@ impl BeaconBlock { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = BeaconBlock::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = BeaconBlock::random_for_test(&mut rng); - - let result = original.hash_tree_root(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(BeaconBlock); } diff --git a/eth2/types/src/beacon_block_body.rs b/eth2/types/src/beacon_block_body.rs index 2a43f289c..70ce24dbe 100644 --- a/eth2/types/src/beacon_block_body.rs +++ b/eth2/types/src/beacon_block_body.rs @@ -21,29 +21,6 @@ pub struct BeaconBlockBody { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = BeaconBlockBody::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = BeaconBlockBody::random_for_test(&mut rng); - - let result = original.hash_tree_root(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(BeaconBlockBody); } diff --git a/eth2/types/src/beacon_state/tests.rs b/eth2/types/src/beacon_state/tests.rs index 78f2f573e..1e1a555fd 100644 --- a/eth2/types/src/beacon_state/tests.rs +++ b/eth2/types/src/beacon_state/tests.rs @@ -4,7 +4,6 @@ use super::*; use crate::test_utils::TestingBeaconStateBuilder; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::{BeaconState, ChainSpec}; -use ssz::{ssz_encode, Decodable}; /// Tests that `get_attestation_participants` is consistent with the result of /// get_crosslink_committees_at_slot` with a full bitfield. 
@@ -51,25 +50,4 @@ pub fn get_attestation_participants_consistency() { } } -#[test] -pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = BeaconState::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); -} - -#[test] -pub fn test_hash_tree_root() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = BeaconState::random_for_test(&mut rng); - - let result = original.hash_tree_root(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 -} +ssz_tests!(BeaconState); diff --git a/eth2/types/src/crosslink.rs b/eth2/types/src/crosslink.rs index ed31a80d5..5db5e20a6 100644 --- a/eth2/types/src/crosslink.rs +++ b/eth2/types/src/crosslink.rs @@ -19,29 +19,6 @@ pub struct Crosslink { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Crosslink::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Crosslink::random_for_test(&mut rng); - - let result = original.hash_tree_root(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(Crosslink); } diff --git a/eth2/types/src/deposit.rs b/eth2/types/src/deposit.rs index 0b0d0cc64..14eb19ad6 100644 --- a/eth2/types/src/deposit.rs +++ b/eth2/types/src/deposit.rs @@ -18,29 +18,6 @@ pub struct Deposit { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Deposit::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Deposit::random_for_test(&mut rng); - - let result = original.hash_tree_root(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(Deposit); } diff --git a/eth2/types/src/deposit_data.rs b/eth2/types/src/deposit_data.rs index b2fe99fda..9d6c1bda7 100644 --- a/eth2/types/src/deposit_data.rs +++ b/eth2/types/src/deposit_data.rs @@ -18,29 +18,6 @@ pub struct DepositData { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = DepositData::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = DepositData::random_for_test(&mut rng); - - let result = original.hash_tree_root(); - - assert_eq!(result.len(), 32); - // 
TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(DepositData); } diff --git a/eth2/types/src/deposit_input.rs b/eth2/types/src/deposit_input.rs index 966c9fad0..9a9031901 100644 --- a/eth2/types/src/deposit_input.rs +++ b/eth2/types/src/deposit_input.rs @@ -66,29 +66,6 @@ impl DepositInput { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = DepositInput::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = DepositInput::random_for_test(&mut rng); - - let result = original.hash_tree_root(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(DepositInput); } diff --git a/eth2/types/src/eth1_data.rs b/eth2/types/src/eth1_data.rs index 88f91e3a9..c4b2b1894 100644 --- a/eth2/types/src/eth1_data.rs +++ b/eth2/types/src/eth1_data.rs @@ -17,29 +17,6 @@ pub struct Eth1Data { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Eth1Data::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Eth1Data::random_for_test(&mut rng); - - let result = original.hash_tree_root(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(Eth1Data); } diff --git a/eth2/types/src/eth1_data_vote.rs b/eth2/types/src/eth1_data_vote.rs index bd8266ce3..4788833bd 100644 --- a/eth2/types/src/eth1_data_vote.rs +++ b/eth2/types/src/eth1_data_vote.rs @@ -17,29 +17,6 @@ pub struct Eth1DataVote { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Eth1DataVote::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Eth1DataVote::random_for_test(&mut rng); - - let result = original.hash_tree_root(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(Eth1DataVote); } diff --git a/eth2/types/src/fork.rs b/eth2/types/src/fork.rs index 9cf6ae396..f3b62f5a8 100644 --- a/eth2/types/src/fork.rs +++ b/eth2/types/src/fork.rs @@ -29,29 +29,6 @@ impl Fork { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let 
original = Fork::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Fork::random_for_test(&mut rng); - - let result = original.hash_tree_root(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(Fork); } diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index 76fcb43ed..e7be732eb 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -1,3 +1,4 @@ +#[macro_use] pub mod test_utils; pub mod attestation; diff --git a/eth2/types/src/pending_attestation.rs b/eth2/types/src/pending_attestation.rs index c1293546e..68dd1c345 100644 --- a/eth2/types/src/pending_attestation.rs +++ b/eth2/types/src/pending_attestation.rs @@ -19,29 +19,6 @@ pub struct PendingAttestation { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = PendingAttestation::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = PendingAttestation::random_for_test(&mut rng); - - let result = original.hash_tree_root(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(PendingAttestation); } diff --git a/eth2/types/src/proposal.rs b/eth2/types/src/proposal.rs index dda544a19..59d6370e1 100644 --- a/eth2/types/src/proposal.rs +++ b/eth2/types/src/proposal.rs @@ -23,30 +23,7 @@ pub struct Proposal { mod tests { use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, SignedRoot, TreeHash}; - - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Proposal::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Proposal::random_for_test(&mut rng); - - let result = original.hash_tree_root(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + use ssz::{SignedRoot, TreeHash}; #[derive(TreeHash)] struct SignedProposal { @@ -75,4 +52,5 @@ mod tests { assert_eq!(original.signed_root(), other.hash_tree_root()); } + ssz_tests!(Proposal); } diff --git a/eth2/types/src/proposer_slashing.rs b/eth2/types/src/proposer_slashing.rs index ddd951ae1..26c3d67a7 100644 --- a/eth2/types/src/proposer_slashing.rs +++ b/eth2/types/src/proposer_slashing.rs @@ -18,29 +18,6 @@ pub struct ProposerSlashing { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = ProposerSlashing::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let 
(decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = ProposerSlashing::random_for_test(&mut rng); - - let result = original.hash_tree_root(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(ProposerSlashing); } diff --git a/eth2/types/src/shard_reassignment_record.rs b/eth2/types/src/shard_reassignment_record.rs index d8595b69d..9f1705f16 100644 --- a/eth2/types/src/shard_reassignment_record.rs +++ b/eth2/types/src/shard_reassignment_record.rs @@ -14,29 +14,6 @@ pub struct ShardReassignmentRecord { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = ShardReassignmentRecord::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = ShardReassignmentRecord::random_for_test(&mut rng); - - let result = original.hash_tree_root(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(ShardReassignmentRecord); } diff --git a/eth2/types/src/slashable_attestation.rs b/eth2/types/src/slashable_attestation.rs index 2c4bde8db..56c9dfc2f 100644 --- a/eth2/types/src/slashable_attestation.rs +++ b/eth2/types/src/slashable_attestation.rs @@ -46,7 +46,6 @@ mod tests { use crate::chain_spec::ChainSpec; use crate::slot_epoch::{Epoch, Slot}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; #[test] pub fn test_is_double_vote_true() { @@ -120,28 +119,7 @@ mod tests { ); } - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = SlashableAttestation::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = SlashableAttestation::random_for_test(&mut rng); - - let result = original.hash_tree_root(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(SlashableAttestation); fn create_slashable_attestation( slot_factor: u64, diff --git a/eth2/types/src/slot_epoch.rs b/eth2/types/src/slot_epoch.rs index 2af7f5196..c40b6badf 100644 --- a/eth2/types/src/slot_epoch.rs +++ b/eth2/types/src/slot_epoch.rs @@ -103,8 +103,6 @@ impl<'a> Iterator for SlotIter<'a> { #[cfg(test)] mod slot_tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::ssz_encode; all_tests!(Slot); } @@ -112,8 +110,6 @@ mod slot_tests { #[cfg(test)] mod epoch_tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::ssz_encode; all_tests!(Epoch); diff --git a/eth2/types/src/slot_epoch_macros.rs b/eth2/types/src/slot_epoch_macros.rs index 4b2332baf..300ad3f6f 100644 --- a/eth2/types/src/slot_epoch_macros.rs +++ b/eth2/types/src/slot_epoch_macros.rs @@ 
-248,7 +248,7 @@ macro_rules! impl_common { } // test macros -#[allow(unused_macros)] +#[cfg(test)] macro_rules! new_tests { ($type: ident) => { #[test] @@ -260,7 +260,7 @@ macro_rules! new_tests { }; } -#[allow(unused_macros)] +#[cfg(test)] macro_rules! from_into_tests { ($type: ident, $other: ident) => { #[test] @@ -286,7 +286,7 @@ macro_rules! from_into_tests { }; } -#[allow(unused_macros)] +#[cfg(test)] macro_rules! math_between_tests { ($type: ident, $other: ident) => { #[test] @@ -434,7 +434,7 @@ macro_rules! math_between_tests { }; } -#[allow(unused_macros)] +#[cfg(test)] macro_rules! math_tests { ($type: ident) => { #[test] @@ -528,35 +528,7 @@ macro_rules! math_tests { }; } -#[allow(unused_macros)] -macro_rules! ssz_tests { - ($type: ident) => { - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = $type::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = $type::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = $type::random_for_test(&mut rng); - - let result = original.hash_tree_root(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } - }; -} - -#[allow(unused_macros)] +#[cfg(test)] macro_rules! all_tests { ($type: ident) => { new_tests!($type); diff --git a/eth2/types/src/slot_height.rs b/eth2/types/src/slot_height.rs index 1739227a4..4a783d4a0 100644 --- a/eth2/types/src/slot_height.rs +++ b/eth2/types/src/slot_height.rs @@ -33,11 +33,8 @@ impl SlotHeight { } #[cfg(test)] - mod slot_height_tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::ssz_encode; all_tests!(SlotHeight); } diff --git a/eth2/types/src/test_utils/mod.rs b/eth2/types/src/test_utils/mod.rs index 6fdbe53ad..4b435805c 100644 --- a/eth2/types/src/test_utils/mod.rs +++ b/eth2/types/src/test_utils/mod.rs @@ -13,12 +13,57 @@ mod testing_voluntary_exit_builder; pub use generate_deterministic_keypairs::generate_deterministic_keypairs; pub use keypairs_file::KeypairsFile; pub use rand::{prng::XorShiftRng, SeedableRng}; -pub use test_random::TestRandom; -pub use testing_attestation_builder::TestingAttestationBuilder; -pub use testing_attester_slashing_builder::TestingAttesterSlashingBuilder; -pub use testing_beacon_block_builder::TestingBeaconBlockBuilder; -pub use testing_beacon_state_builder::{keypairs_path, TestingBeaconStateBuilder}; -pub use testing_deposit_builder::TestingDepositBuilder; -pub use testing_proposer_slashing_builder::TestingProposerSlashingBuilder; -pub use testing_transfer_builder::TestingTransferBuilder; -pub use testing_voluntary_exit_builder::TestingVoluntaryExitBuilder; + +pub mod address; +pub mod aggregate_signature; +pub mod bitfield; +pub mod hash256; +#[macro_use] +mod macros; +pub mod public_key; +pub mod secret_key; +pub mod signature; + +pub trait TestRandom +where + T: RngCore, +{ + fn random_for_test(rng: &mut T) -> Self; +} + +impl TestRandom for bool { + fn random_for_test(rng: &mut T) -> Self { + (rng.next_u32() % 2) == 1 + } +} + +impl TestRandom for u64 { + fn random_for_test(rng: &mut T) -> Self { + rng.next_u64() + } +} + +impl TestRandom for u32 { + fn random_for_test(rng: &mut T) -> Self { + rng.next_u32() + } +} + +impl TestRandom for usize { + fn random_for_test(rng: &mut T) -> Self { + rng.next_u32() as usize + } +} + +impl TestRandom for 
Vec +where + U: TestRandom, +{ + fn random_for_test(rng: &mut T) -> Self { + vec![ + ::random_for_test(rng), + ::random_for_test(rng), + ::random_for_test(rng), + ] + } +} diff --git a/eth2/types/src/transfer.rs b/eth2/types/src/transfer.rs index b3c283fa2..af3b18ef4 100644 --- a/eth2/types/src/transfer.rs +++ b/eth2/types/src/transfer.rs @@ -24,29 +24,6 @@ pub struct Transfer { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Transfer::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Transfer::random_for_test(&mut rng); - - let result = original.hash_tree_root(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(Transfer); } diff --git a/eth2/types/src/validator.rs b/eth2/types/src/validator.rs index 8901dcd81..6d1936bfd 100644 --- a/eth2/types/src/validator.rs +++ b/eth2/types/src/validator.rs @@ -54,18 +54,6 @@ impl Default for Validator { mod tests { use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Validator::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } #[test] fn test_validator_can_be_active() { @@ -90,15 +78,5 @@ mod tests { } } - #[test] - pub fn test_hash_tree_root() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Validator::random_for_test(&mut rng); - - let result = original.hash_tree_root(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(Validator); } diff --git a/eth2/types/src/voluntary_exit.rs b/eth2/types/src/voluntary_exit.rs index 36b5597f0..38630a057 100644 --- a/eth2/types/src/voluntary_exit.rs +++ b/eth2/types/src/voluntary_exit.rs @@ -19,29 +19,6 @@ pub struct VoluntaryExit { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = VoluntaryExit::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = VoluntaryExit::random_for_test(&mut rng); - - let result = original.hash_tree_root(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(VoluntaryExit); } From f68f52e206abaedb1819fbd54e3d4cb542924c59 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 13 Mar 2019 12:22:15 +1100 Subject: [PATCH 030/154] Fix issues with previous cherry pick --- eth2/types/src/test_utils/mod.rs | 65 ++++++-------------------------- 1 file changed, 11 insertions(+), 54 deletions(-) diff 
--git a/eth2/types/src/test_utils/mod.rs b/eth2/types/src/test_utils/mod.rs index 4b435805c..9d04d1ca7 100644 --- a/eth2/types/src/test_utils/mod.rs +++ b/eth2/types/src/test_utils/mod.rs @@ -1,3 +1,5 @@ +#[macro_use] +mod macros; mod generate_deterministic_keypairs; mod keypairs_file; mod test_random; @@ -13,57 +15,12 @@ mod testing_voluntary_exit_builder; pub use generate_deterministic_keypairs::generate_deterministic_keypairs; pub use keypairs_file::KeypairsFile; pub use rand::{prng::XorShiftRng, SeedableRng}; - -pub mod address; -pub mod aggregate_signature; -pub mod bitfield; -pub mod hash256; -#[macro_use] -mod macros; -pub mod public_key; -pub mod secret_key; -pub mod signature; - -pub trait TestRandom -where - T: RngCore, -{ - fn random_for_test(rng: &mut T) -> Self; -} - -impl TestRandom for bool { - fn random_for_test(rng: &mut T) -> Self { - (rng.next_u32() % 2) == 1 - } -} - -impl TestRandom for u64 { - fn random_for_test(rng: &mut T) -> Self { - rng.next_u64() - } -} - -impl TestRandom for u32 { - fn random_for_test(rng: &mut T) -> Self { - rng.next_u32() - } -} - -impl TestRandom for usize { - fn random_for_test(rng: &mut T) -> Self { - rng.next_u32() as usize - } -} - -impl TestRandom for Vec -where - U: TestRandom, -{ - fn random_for_test(rng: &mut T) -> Self { - vec![ - ::random_for_test(rng), - ::random_for_test(rng), - ::random_for_test(rng), - ] - } -} +pub use test_random::TestRandom; +pub use testing_attestation_builder::TestingAttestationBuilder; +pub use testing_attester_slashing_builder::TestingAttesterSlashingBuilder; +pub use testing_beacon_block_builder::TestingBeaconBlockBuilder; +pub use testing_beacon_state_builder::{keypairs_path, TestingBeaconStateBuilder}; +pub use testing_deposit_builder::TestingDepositBuilder; +pub use testing_proposer_slashing_builder::TestingProposerSlashingBuilder; +pub use testing_transfer_builder::TestingTransferBuilder; +pub use testing_voluntary_exit_builder::TestingVoluntaryExitBuilder; From bfa2e71b468e34eedcf847076aa29f3a53038bfd Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 13 Mar 2019 14:41:43 +1100 Subject: [PATCH 031/154] Move `PublicKey` to store uncompressed bytes. This is an optimisation that allows for faster hashing of a public key, however it adds a penalty to SSZ encoding because we need to go decompressed -> PublicKey -> compressed. The spec presently uses compressed bytes to store public keys, however I'm hoping it will change. --- eth2/utils/bls/src/aggregate_public_key.rs | 2 +- eth2/utils/bls/src/public_key.rs | 56 ++++++++++++++++------ eth2/utils/bls/src/signature.rs | 4 +- 3 files changed, 44 insertions(+), 18 deletions(-) diff --git a/eth2/utils/bls/src/aggregate_public_key.rs b/eth2/utils/bls/src/aggregate_public_key.rs index 2174a43cb..47c95d6c9 100644 --- a/eth2/utils/bls/src/aggregate_public_key.rs +++ b/eth2/utils/bls/src/aggregate_public_key.rs @@ -14,7 +14,7 @@ impl AggregatePublicKey { } pub fn add(&mut self, public_key: &PublicKey) { - self.0.add(public_key.as_raw()) + self.0.add(&public_key.as_raw()) } /// Returns the underlying signature. diff --git a/eth2/utils/bls/src/public_key.rs b/eth2/utils/bls/src/public_key.rs index c85760bbf..3e2bff19e 100644 --- a/eth2/utils/bls/src/public_key.rs +++ b/eth2/utils/bls/src/public_key.rs @@ -10,39 +10,58 @@ use ssz::{ use std::default; use std::hash::{Hash, Hasher}; -/// A single BLS signature. +/// A single BLS public key. +/// +/// This struct stores an uncompressed public key as a byte vec. 
The reason we store bytes instead +/// of the `RawPublicKey` struct is because it allows for building a hashmap of `PublicKey` much +/// faster. +/// +/// Storing as uncompressed bytes costs ~0.02% more time when adding a `PublicKey` to an +/// `AggregateKey`, however it saves ~0.5ms each time you need to add a pubkey to a hashmap. /// /// This struct is a wrapper upon a base type and provides helper functions (e.g., SSZ /// serialization). #[derive(Debug, Clone, Eq)] -pub struct PublicKey(RawPublicKey); +pub struct PublicKey { + bytes: Vec, +} impl PublicKey { pub fn from_secret_key(secret_key: &SecretKey) -> Self { - PublicKey(RawPublicKey::from_secret_key(secret_key.as_raw())) + let mut raw_key = RawPublicKey::from_secret_key(secret_key.as_raw()); + let uncompressed_bytes = raw_key.as_uncompressed_bytes(); + Self { + bytes: uncompressed_bytes, + } } /// Returns the underlying signature. - pub fn as_raw(&self) -> &RawPublicKey { - &self.0 + pub fn as_raw(&self) -> RawPublicKey { + RawPublicKey::from_uncompressed_bytes(&self.bytes).expect("PublicKey in invalid state") } /// Converts compressed bytes to PublicKey pub fn from_bytes(bytes: &[u8]) -> Result { - let pubkey = RawPublicKey::from_bytes(&bytes).map_err(|_| DecodeError::Invalid)?; - Ok(PublicKey(pubkey)) + let mut pubkey = RawPublicKey::from_bytes(&bytes).map_err(|_| DecodeError::Invalid)?; + Ok(Self { + bytes: pubkey.as_uncompressed_bytes(), + }) } /// Returns the PublicKey as (x, y) bytes pub fn as_uncompressed_bytes(&self) -> Vec { - RawPublicKey::as_uncompressed_bytes(&mut self.0.clone()) + self.bytes.clone() } /// Converts (x, y) bytes to PublicKey pub fn from_uncompressed_bytes(bytes: &[u8]) -> Result { - let pubkey = + // Do a conversion to check the bytes are valid. + let _pubkey = RawPublicKey::from_uncompressed_bytes(&bytes).map_err(|_| DecodeError::Invalid)?; - Ok(PublicKey(pubkey)) + + Ok(Self { + bytes: bytes.to_vec(), + }) } /// Returns the last 6 bytes of the SSZ encoding of the public key, as a hex string. @@ -64,15 +83,22 @@ impl default::Default for PublicKey { impl Encodable for PublicKey { fn ssz_append(&self, s: &mut SszStream) { - s.append_vec(&self.0.as_bytes()); + s.append_vec(&self.as_raw().as_bytes()); } } impl Decodable for PublicKey { fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { let (sig_bytes, i) = decode_ssz_list(bytes, i)?; - let raw_sig = RawPublicKey::from_bytes(&sig_bytes).map_err(|_| DecodeError::TooShort)?; - Ok((PublicKey(raw_sig), i)) + let mut raw_sig = + RawPublicKey::from_bytes(&sig_bytes).map_err(|_| DecodeError::TooShort)?; + + Ok(( + Self { + bytes: raw_sig.as_uncompressed_bytes(), + }, + i, + )) } } @@ -99,7 +125,7 @@ impl<'de> Deserialize<'de> for PublicKey { impl TreeHash for PublicKey { fn hash_tree_root(&self) -> Vec { - hash(&self.0.as_bytes()) + hash(&self.as_raw().as_bytes()) } } @@ -117,7 +143,7 @@ impl Hash for PublicKey { /// /// Use `ssz::Encode` to obtain the bytes required for consensus hashing. fn hash(&self, state: &mut H) { - self.as_uncompressed_bytes().hash(state) + self.bytes.hash(state) } } diff --git a/eth2/utils/bls/src/signature.rs b/eth2/utils/bls/src/signature.rs index 47598bc66..47e1dad2e 100644 --- a/eth2/utils/bls/src/signature.rs +++ b/eth2/utils/bls/src/signature.rs @@ -33,7 +33,7 @@ impl Signature { /// Verify the Signature against a PublicKey. 
pub fn verify(&self, msg: &[u8], domain: u64, pk: &PublicKey) -> bool { - self.0.verify(msg, domain, pk.as_raw()) + self.0.verify(msg, domain, &pk.as_raw()) } /// Verify the Signature against a PublicKey, where the message has already been hashed. @@ -44,7 +44,7 @@ impl Signature { pk: &PublicKey, ) -> bool { self.0 - .verify_hashed(x_real_hashed, x_imaginary_hashed, pk.as_raw()) + .verify_hashed(x_real_hashed, x_imaginary_hashed, &pk.as_raw()) } /// Returns the underlying signature. From 8ee3523abd47b294d2ee41167dcf8d2e27bb76b1 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 13 Mar 2019 14:43:24 +1100 Subject: [PATCH 032/154] Transition to secp256k1 default peer id --- beacon_node/libp2p/src/network_config.rs | 6 ++++-- eth2/types/src/lib.rs | 1 + 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/beacon_node/libp2p/src/network_config.rs b/beacon_node/libp2p/src/network_config.rs index 7bab57dde..ecc36da33 100644 --- a/beacon_node/libp2p/src/network_config.rs +++ b/beacon_node/libp2p/src/network_config.rs @@ -2,6 +2,7 @@ use crate::Multiaddr; use libp2p::gossipsub::{GossipsubConfig, GossipsubConfigBuilder}; use libp2p::secio; use std::fmt; +use types::Topic; #[derive(Clone)] /// Network configuration for lighthouse. @@ -24,8 +25,9 @@ pub struct NetworkConfig { impl Default for NetworkConfig { /// Generate a default network configuration. fn default() -> Self { - // TODO: Currently using ed25519 key pairs. Wire protocol specifies RSA. Waiting for this + // TODO: Currently using secp256k1 key pairs. Wire protocol specifies RSA. Waiting for this // PR to be merged to generate RSA keys: https://github.com/briansmith/ring/pull/733 + NetworkConfig { listen_addresses: vec!["/ip4/127.0.0.1/tcp/9000" .parse() @@ -33,7 +35,7 @@ impl Default for NetworkConfig { listen_port: 9000, gs_config: GossipsubConfigBuilder::new().build(), boot_nodes: Vec::new(), - local_private_key: secio::SecioKeyPair::ed25519_generated().unwrap(), + local_private_key: secio::SecioKeyPair::secp256k1_generated().unwrap(), client_version: version::version(), } } diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index 555560090..47235a15b 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -73,5 +73,6 @@ pub type AttesterMap = HashMap<(u64, u64), Vec>; pub type ProposerMap = HashMap; pub use bls::{AggregatePublicKey, AggregateSignature, Keypair, PublicKey, Signature}; +pub use libp2p::floodsub::Topic; pub use libp2p::multiaddr; pub use libp2p::Multiaddr; From 23a8fbfc74b64e6f698dfe66d2ffdafec2f2cacd Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 13 Mar 2019 15:37:44 +1100 Subject: [PATCH 033/154] Add default topics and initial topic subscription --- beacon_node/libp2p/src/behaviour.rs | 5 +++++ beacon_node/libp2p/src/network_config.rs | 4 +++- beacon_node/libp2p/src/service.rs | 19 +++++++++++++++++-- eth2/types/src/lib.rs | 2 +- 4 files changed, 26 insertions(+), 4 deletions(-) diff --git a/beacon_node/libp2p/src/behaviour.rs b/beacon_node/libp2p/src/behaviour.rs index be12011dd..be49abb94 100644 --- a/beacon_node/libp2p/src/behaviour.rs +++ b/beacon_node/libp2p/src/behaviour.rs @@ -5,6 +5,7 @@ use libp2p::{ tokio_io::{AsyncRead, AsyncWrite}, NetworkBehaviour, PeerId, }; +use types::Topic; /// Builds the network behaviour for the libp2p Swarm. /// Implements gossipsub message routing. 
@@ -57,6 +58,10 @@ impl Behaviour { } impl Behaviour { + pub fn subscribe(&mut self, topic: Topic) -> bool { + self.gossipsub.subscribe(topic) + } + pub fn send_message(&self, message: String) { // TODO: Encode and send via gossipsub diff --git a/beacon_node/libp2p/src/network_config.rs b/beacon_node/libp2p/src/network_config.rs index ecc36da33..176892bb0 100644 --- a/beacon_node/libp2p/src/network_config.rs +++ b/beacon_node/libp2p/src/network_config.rs @@ -2,7 +2,6 @@ use crate::Multiaddr; use libp2p::gossipsub::{GossipsubConfig, GossipsubConfigBuilder}; use libp2p::secio; use std::fmt; -use types::Topic; #[derive(Clone)] /// Network configuration for lighthouse. @@ -20,6 +19,8 @@ pub struct NetworkConfig { pub local_private_key: secio::SecioKeyPair, /// Client version pub client_version: String, + /// List of topics to subscribe to as strings + pub topics: Vec, } impl Default for NetworkConfig { @@ -37,6 +38,7 @@ impl Default for NetworkConfig { boot_nodes: Vec::new(), local_private_key: secio::SecioKeyPair::secp256k1_generated().unwrap(), client_version: version::version(), + topics: vec![String::from("beacon_chain")], } } } diff --git a/beacon_node/libp2p/src/service.rs b/beacon_node/libp2p/src/service.rs index dceb62511..26154beb6 100644 --- a/beacon_node/libp2p/src/service.rs +++ b/beacon_node/libp2p/src/service.rs @@ -12,9 +12,10 @@ use libp2p::core::{ }; use libp2p::{core, secio, Transport}; use libp2p::{PeerId, Swarm}; -use slog::{debug, info, warn}; +use slog::{debug, info, trace, warn}; use std::io::{Error, ErrorKind}; use std::time::Duration; +use types::{Topic, TopicBuilder}; /// The configuration and state of the libp2p components for the beacon node. pub struct Service { @@ -33,7 +34,7 @@ impl Service { let local_private_key = config.local_private_key; let local_peer_id = local_private_key.to_peer_id(); - debug!(log, "Local peer id: {:?}", local_peer_id); + info!(log, "Local peer id: {:?}", local_peer_id); let mut swarm = { // Set up the transport @@ -67,6 +68,20 @@ impl Service { }; } + // subscribe to default gossipsub topics + let mut subscribed_topics = vec![]; + for topic in config.topics { + let t = TopicBuilder::new(topic.to_string()).build(); + match swarm.subscribe(t) { + true => { + trace!(log, "Subscribed to topic: {:?}", topic); + subscribed_topics.push(topic); + } + false => warn!(log, "Could not subscribe to topic: {:?}", topic), + }; + } + info!(log, "Subscribed to topics: {:?}", subscribed_topics); + Ok(Service { local_peer_id, swarm, diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index 47235a15b..2cf61cfec 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -73,6 +73,6 @@ pub type AttesterMap = HashMap<(u64, u64), Vec>; pub type ProposerMap = HashMap; pub use bls::{AggregatePublicKey, AggregateSignature, Keypair, PublicKey, Signature}; -pub use libp2p::floodsub::Topic; +pub use libp2p::floodsub::{Topic, TopicBuilder}; pub use libp2p::multiaddr; pub use libp2p::Multiaddr; From 6cd3c4bd1a6531009a16a2a1894ff7c70636d5d8 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 13 Mar 2019 16:40:28 +1100 Subject: [PATCH 034/154] Add a cache for public keys to BeaconState This allows for a fast lookup of "is this public key already in the validator registry". 
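Roughly, the cache maps each validator's public key to its index in the
validator registry, so deposit processing can answer "have we seen this
pubkey before?" without a linear scan. A minimal sketch of the intended
lookup path follows; the wrapper function is illustrative only, and since
the generic parameters of `get_validator_index` are elided in this patch
text, the `Result<Option<usize>, BeaconStateError>` signature is my reading
of the diff below.

    // Illustrative only; not part of this patch. `update_pubkey_cache` and
    // `get_validator_index` are the methods added in the diff below.
    use types::{BeaconState, BeaconStateError, PublicKey};

    fn existing_validator_index(
        state: &mut BeaconState,
        pubkey: &PublicKey,
    ) -> Result<Option<usize>, BeaconStateError> {
        // Bring the cache up to date with `validator_registry`; keys that
        // are already cached are never re-inserted.
        state.update_pubkey_cache()?;
        // O(1) HashMap lookup instead of scanning the registry.
        state.get_validator_index(pubkey)
    }
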
--- eth2/state_processing/benches/benches.rs | 5 +- .../src/per_block_processing.rs | 9 ++-- .../src/per_block_processing/errors.rs | 5 +- .../per_block_processing/verify_deposit.rs | 7 +-- eth2/types/src/beacon_state.rs | 46 +++++++++++++++++++ eth2/types/src/beacon_state/pubkey_cache.rs | 45 ++++++++++++++++++ eth2/types/src/lib.rs | 3 +- .../testing_beacon_state_builder.rs | 2 + 8 files changed, 109 insertions(+), 13 deletions(-) create mode 100644 eth2/types/src/beacon_state/pubkey_cache.rs diff --git a/eth2/state_processing/benches/benches.rs b/eth2/state_processing/benches/benches.rs index ad8c4f714..4de97a298 100644 --- a/eth2/state_processing/benches/benches.rs +++ b/eth2/state_processing/benches/benches.rs @@ -1,14 +1,11 @@ -use criterion::Benchmark; use criterion::Criterion; use criterion::{criterion_group, criterion_main}; use env_logger::{Builder, Env}; -use types::test_utils::TestingBeaconStateBuilder; -use types::*; mod bench_block_processing; mod bench_epoch_processing; -pub const VALIDATOR_COUNT: usize = 300_032; +pub const VALIDATOR_COUNT: usize = 16_384; // `LOG_LEVEL == "debug"` gives logs, but they're very noisy and slow down benching. pub const LOG_LEVEL: &str = ""; diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index 7b5aafa7f..13a47836b 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -373,19 +373,20 @@ pub fn process_deposits( .map_err(|e| e.into_with_index(i)) })?; - let public_key_to_index_hashmap = build_public_key_hashmap(&state); - // Check `state.deposit_index` and update the state in series. for (i, deposit) in deposits.iter().enumerate() { verify_deposit_index(state, deposit).map_err(|e| e.into_with_index(i))?; + // Ensure the state's pubkey cache is fully up-to-date, it will be used to check to see if the + // depositing validator already exists in the registry. + state.update_pubkey_cache()?; + // Get an `Option` where `u64` is the validator index if this deposit public key // already exists in the beacon_state. // // This function also verifies the withdrawal credentials. let validator_index = - get_existing_validator_index(state, deposit, &public_key_to_index_hashmap) - .map_err(|e| e.into_with_index(i))?; + get_existing_validator_index(state, deposit).map_err(|e| e.into_with_index(i))?; let deposit_data = &deposit.deposit_data; let deposit_input = &deposit.deposit_data.deposit_input; diff --git a/eth2/state_processing/src/per_block_processing/errors.rs b/eth2/state_processing/src/per_block_processing/errors.rs index a3e3ebad1..8366a6584 100644 --- a/eth2/state_processing/src/per_block_processing/errors.rs +++ b/eth2/state_processing/src/per_block_processing/errors.rs @@ -294,6 +294,8 @@ impl_into_with_index_without_beacon_error!( pub enum DepositValidationError { /// Validation completed successfully and the object is invalid. Invalid(DepositInvalid), + /// Encountered a `BeaconStateError` whilst attempting to determine validity. + BeaconStateError(BeaconStateError), } /// Describes why an object is invalid. 
@@ -313,7 +315,8 @@ pub enum DepositInvalid { BadMerkleProof, } -impl_into_with_index_without_beacon_error!(DepositValidationError, DepositInvalid); +impl_from_beacon_state_error!(DepositValidationError); +impl_into_with_index_with_beacon_error!(DepositValidationError, DepositInvalid); /* * `Exit` Validation diff --git a/eth2/state_processing/src/per_block_processing/verify_deposit.rs b/eth2/state_processing/src/per_block_processing/verify_deposit.rs index 1aabbb973..aad38f616 100644 --- a/eth2/state_processing/src/per_block_processing/verify_deposit.rs +++ b/eth2/state_processing/src/per_block_processing/verify_deposit.rs @@ -72,11 +72,12 @@ pub fn build_public_key_hashmap(state: &BeaconState) -> PublicKeyValidatorIndexH pub fn get_existing_validator_index( state: &BeaconState, deposit: &Deposit, - pubkey_map: &HashMap, ) -> Result, Error> { let deposit_input = &deposit.deposit_data.deposit_input; - let validator_index = pubkey_map.get(&deposit_input.pubkey).and_then(|i| Some(*i)); + let validator_index = state + .get_validator_index(&deposit_input.pubkey)? + .and_then(|i| Some(i)); match validator_index { None => Ok(None), @@ -86,7 +87,7 @@ pub fn get_existing_validator_index( == state.validator_registry[index as usize].withdrawal_credentials, Invalid::BadWithdrawalCredentials ); - Ok(Some(index)) + Ok(Some(index as u64)) } } } diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 878d13b86..fc410fe64 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -5,6 +5,7 @@ use helpers::*; use honey_badger_split::SplitExt; use int_to_bytes::int_to_bytes32; use log::{debug, error, trace}; +use pubkey_cache::PubkeyCache; use rand::RngCore; use serde_derive::Serialize; use ssz::{hash, Decodable, DecodeError, Encodable, SignedRoot, SszStream, TreeHash}; @@ -16,6 +17,7 @@ pub use builder::BeaconStateBuilder; mod builder; mod epoch_cache; pub mod helpers; +mod pubkey_cache; mod tests; pub type Committee = Vec; @@ -52,6 +54,11 @@ pub enum Error { InsufficientAttestations, InsufficientCommittees, EpochCacheUninitialized(RelativeEpoch), + PubkeyCacheInconsistent, + PubkeyCacheIncomplete { + cache_len: usize, + registry_len: usize, + }, } macro_rules! safe_add_assign { @@ -108,6 +115,7 @@ pub struct BeaconState { // Caching (not in the spec) pub cache_index_offset: usize, pub caches: Vec, + pub pubkey_cache: PubkeyCache, } impl BeaconState { @@ -186,6 +194,7 @@ impl BeaconState { */ cache_index_offset: 0, caches: vec![EpochCache::empty(); CACHED_EPOCHS], + pubkey_cache: PubkeyCache::empty(), } } @@ -293,6 +302,41 @@ impl BeaconState { } } + /// Updates the pubkey cache, if required. + /// + /// Adds all `pubkeys` from the `validator_registry` which are not already in the cache. Will + /// never re-add a pubkey. + pub fn update_pubkey_cache(&mut self) -> Result<(), Error> { + for (i, validator) in self + .validator_registry + .iter() + .enumerate() + .skip(self.pubkey_cache.len()) + { + let success = self.pubkey_cache.insert(validator.pubkey.clone(), i); + if !success { + return Err(Error::PubkeyCacheInconsistent); + } + } + + Ok(()) + } + + /// If a validator pubkey exists in the validator registry, returns `Some(i)`, otherwise + /// returns `None`. + /// + /// Requires a fully up-to-date `pubkey_cache`, returns an error if this is not the case. 
+ pub fn get_validator_index(&self, pubkey: &PublicKey) -> Result, Error> { + if self.pubkey_cache.len() == self.validator_registry.len() { + Ok(self.pubkey_cache.get(pubkey)) + } else { + Err(Error::PubkeyCacheIncomplete { + cache_len: self.pubkey_cache.len(), + registry_len: self.validator_registry.len(), + }) + } + } + /// The epoch corresponding to `self.slot`. /// /// Spec v0.4.0 @@ -1188,6 +1232,7 @@ impl Decodable for BeaconState { deposit_index, cache_index_offset: 0, caches: vec![EpochCache::empty(); CACHED_EPOCHS], + pubkey_cache: PubkeyCache::empty(), }, i, )) @@ -1258,6 +1303,7 @@ impl TestRandom for BeaconState { deposit_index: <_>::random_for_test(rng), cache_index_offset: 0, caches: vec![EpochCache::empty(); CACHED_EPOCHS], + pubkey_cache: PubkeyCache::empty(), } } } diff --git a/eth2/types/src/beacon_state/pubkey_cache.rs b/eth2/types/src/beacon_state/pubkey_cache.rs new file mode 100644 index 000000000..c05147579 --- /dev/null +++ b/eth2/types/src/beacon_state/pubkey_cache.rs @@ -0,0 +1,45 @@ +use crate::*; +use serde_derive::Serialize; +use std::collections::HashMap; + +type ValidatorIndex = usize; + +#[derive(Debug, PartialEq, Clone, Default, Serialize)] +pub struct PubkeyCache { + map: HashMap, +} + +impl PubkeyCache { + /// Instantiates a new, empty cache. + pub fn empty() -> Self { + Self { + map: HashMap::new(), + } + } + + /// Returns the number of validator indices already in the map. + pub fn len(&self) -> ValidatorIndex { + self.map.len() + } + + /// Inserts a validator index into the map. + /// + /// The added index must equal the number of validators already added to the map. This ensures + /// that an index is never skipped. + pub fn insert(&mut self, pubkey: PublicKey, index: ValidatorIndex) -> bool { + if index == self.map.len() { + self.map.insert(pubkey, index); + true + } else { + false + } + } + + /// Inserts a validator index into the map. + /// + /// The added index must equal the number of validators already added to the map. This ensures + /// that an index is never skipped. + pub fn get(&self, pubkey: &PublicKey) -> Option { + self.map.get(pubkey).cloned() + } +} diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index 76fcb43ed..3da6a497f 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -1,4 +1,4 @@ -pub mod test_utils; +//! Ethereum 2.0 types pub mod attestation; pub mod attestation_data; @@ -22,6 +22,7 @@ pub mod proposer_slashing; pub mod readers; pub mod shard_reassignment_record; pub mod slashable_attestation; +pub mod test_utils; pub mod transfer; pub mod voluntary_exit; #[macro_use] diff --git a/eth2/types/src/test_utils/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/testing_beacon_state_builder.rs index b2cf28c8a..b9f3c63e0 100644 --- a/eth2/types/src/test_utils/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_state_builder.rs @@ -144,6 +144,8 @@ impl TestingBeaconStateBuilder { state.build_epoch_cache(RelativeEpoch::Current, &spec)?; state.build_epoch_cache(RelativeEpoch::Next, &spec)?; + state.update_pubkey_cache()?; + Ok(()) } From 587be831b57528eb016b1c66c0e3eb8679c74f04 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 13 Mar 2019 16:49:32 +1100 Subject: [PATCH 035/154] Add method for dropping pubkey cache. Add bench. 
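The new bench times a full rebuild of the pubkey cache. Stripped of the
criterion plumbing (the real bench wraps this in `iter_batched`), it amounts
to the sketch below; `initial_state` is the state already built by the
existing block-processing benches, and only `drop_pubkey_cache` and
`update_pubkey_cache` are introduced by this patch.

    // Illustrative bench body, assuming an `initial_state` as in the
    // surrounding benches.
    let mut state = initial_state.clone();
    state.drop_pubkey_cache();            // swap in an empty PubkeyCache
    state.update_pubkey_cache().unwrap(); // re-insert every registry pubkey
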
--- .../benches/bench_block_processing.rs | 17 +++++++++++++++++ eth2/types/src/beacon_state.rs | 5 +++++ 2 files changed, 22 insertions(+) diff --git a/eth2/state_processing/benches/bench_block_processing.rs b/eth2/state_processing/benches/bench_block_processing.rs index 031942473..128b1051b 100644 --- a/eth2/state_processing/benches/bench_block_processing.rs +++ b/eth2/state_processing/benches/bench_block_processing.rs @@ -426,6 +426,23 @@ fn bench_block_processing( .sample_size(10), ); + let mut state = initial_state.clone(); + state.drop_pubkey_cache(); + c.bench( + &format!("{}/block_processing", desc), + Benchmark::new("build_pubkey_cache", move |b| { + b.iter_batched( + || state.clone(), + |mut state| { + state.update_pubkey_cache().unwrap(); + state + }, + criterion::BatchSize::SmallInput, + ) + }) + .sample_size(10), + ); + let block = initial_block.clone(); c.bench( &format!("{}/block_processing", desc), diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index fc410fe64..a1dd8983c 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -322,6 +322,11 @@ impl BeaconState { Ok(()) } + /// Completely drops the `pubkey_cache`, replacing it with a new, empty cache. + pub fn drop_pubkey_cache(&mut self) { + self.pubkey_cache = PubkeyCache::empty() + } + /// If a validator pubkey exists in the validator registry, returns `Some(i)`, otherwise /// returns `None`. /// From b2fb2afb2813c4288a462c4a84bf6bf9ea7f7528 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 13 Mar 2019 16:51:37 +1100 Subject: [PATCH 036/154] Revert "Move `PublicKey` to store uncomp. bytes." This reverts commit bfa2e71b468e34eedcf847076aa29f3a53038bfd. --- eth2/utils/bls/src/aggregate_public_key.rs | 2 +- eth2/utils/bls/src/public_key.rs | 56 ++++++---------------- eth2/utils/bls/src/signature.rs | 4 +- 3 files changed, 18 insertions(+), 44 deletions(-) diff --git a/eth2/utils/bls/src/aggregate_public_key.rs b/eth2/utils/bls/src/aggregate_public_key.rs index 47c95d6c9..2174a43cb 100644 --- a/eth2/utils/bls/src/aggregate_public_key.rs +++ b/eth2/utils/bls/src/aggregate_public_key.rs @@ -14,7 +14,7 @@ impl AggregatePublicKey { } pub fn add(&mut self, public_key: &PublicKey) { - self.0.add(&public_key.as_raw()) + self.0.add(public_key.as_raw()) } /// Returns the underlying signature. diff --git a/eth2/utils/bls/src/public_key.rs b/eth2/utils/bls/src/public_key.rs index 3e2bff19e..c85760bbf 100644 --- a/eth2/utils/bls/src/public_key.rs +++ b/eth2/utils/bls/src/public_key.rs @@ -10,58 +10,39 @@ use ssz::{ use std::default; use std::hash::{Hash, Hasher}; -/// A single BLS public key. -/// -/// This struct stores an uncompressed public key as a byte vec. The reason we store bytes instead -/// of the `RawPublicKey` struct is because it allows for building a hashmap of `PublicKey` much -/// faster. -/// -/// Storing as uncompressed bytes costs ~0.02% more time when adding a `PublicKey` to an -/// `AggregateKey`, however it saves ~0.5ms each time you need to add a pubkey to a hashmap. +/// A single BLS signature. /// /// This struct is a wrapper upon a base type and provides helper functions (e.g., SSZ /// serialization). 
#[derive(Debug, Clone, Eq)] -pub struct PublicKey { - bytes: Vec, -} +pub struct PublicKey(RawPublicKey); impl PublicKey { pub fn from_secret_key(secret_key: &SecretKey) -> Self { - let mut raw_key = RawPublicKey::from_secret_key(secret_key.as_raw()); - let uncompressed_bytes = raw_key.as_uncompressed_bytes(); - Self { - bytes: uncompressed_bytes, - } + PublicKey(RawPublicKey::from_secret_key(secret_key.as_raw())) } /// Returns the underlying signature. - pub fn as_raw(&self) -> RawPublicKey { - RawPublicKey::from_uncompressed_bytes(&self.bytes).expect("PublicKey in invalid state") + pub fn as_raw(&self) -> &RawPublicKey { + &self.0 } /// Converts compressed bytes to PublicKey pub fn from_bytes(bytes: &[u8]) -> Result { - let mut pubkey = RawPublicKey::from_bytes(&bytes).map_err(|_| DecodeError::Invalid)?; - Ok(Self { - bytes: pubkey.as_uncompressed_bytes(), - }) + let pubkey = RawPublicKey::from_bytes(&bytes).map_err(|_| DecodeError::Invalid)?; + Ok(PublicKey(pubkey)) } /// Returns the PublicKey as (x, y) bytes pub fn as_uncompressed_bytes(&self) -> Vec { - self.bytes.clone() + RawPublicKey::as_uncompressed_bytes(&mut self.0.clone()) } /// Converts (x, y) bytes to PublicKey pub fn from_uncompressed_bytes(bytes: &[u8]) -> Result { - // Do a conversion to check the bytes are valid. - let _pubkey = + let pubkey = RawPublicKey::from_uncompressed_bytes(&bytes).map_err(|_| DecodeError::Invalid)?; - - Ok(Self { - bytes: bytes.to_vec(), - }) + Ok(PublicKey(pubkey)) } /// Returns the last 6 bytes of the SSZ encoding of the public key, as a hex string. @@ -83,22 +64,15 @@ impl default::Default for PublicKey { impl Encodable for PublicKey { fn ssz_append(&self, s: &mut SszStream) { - s.append_vec(&self.as_raw().as_bytes()); + s.append_vec(&self.0.as_bytes()); } } impl Decodable for PublicKey { fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { let (sig_bytes, i) = decode_ssz_list(bytes, i)?; - let mut raw_sig = - RawPublicKey::from_bytes(&sig_bytes).map_err(|_| DecodeError::TooShort)?; - - Ok(( - Self { - bytes: raw_sig.as_uncompressed_bytes(), - }, - i, - )) + let raw_sig = RawPublicKey::from_bytes(&sig_bytes).map_err(|_| DecodeError::TooShort)?; + Ok((PublicKey(raw_sig), i)) } } @@ -125,7 +99,7 @@ impl<'de> Deserialize<'de> for PublicKey { impl TreeHash for PublicKey { fn hash_tree_root(&self) -> Vec { - hash(&self.as_raw().as_bytes()) + hash(&self.0.as_bytes()) } } @@ -143,7 +117,7 @@ impl Hash for PublicKey { /// /// Use `ssz::Encode` to obtain the bytes required for consensus hashing. fn hash(&self, state: &mut H) { - self.bytes.hash(state) + self.as_uncompressed_bytes().hash(state) } } diff --git a/eth2/utils/bls/src/signature.rs b/eth2/utils/bls/src/signature.rs index 47e1dad2e..47598bc66 100644 --- a/eth2/utils/bls/src/signature.rs +++ b/eth2/utils/bls/src/signature.rs @@ -33,7 +33,7 @@ impl Signature { /// Verify the Signature against a PublicKey. pub fn verify(&self, msg: &[u8], domain: u64, pk: &PublicKey) -> bool { - self.0.verify(msg, domain, &pk.as_raw()) + self.0.verify(msg, domain, pk.as_raw()) } /// Verify the Signature against a PublicKey, where the message has already been hashed. @@ -44,7 +44,7 @@ impl Signature { pk: &PublicKey, ) -> bool { self.0 - .verify_hashed(x_real_hashed, x_imaginary_hashed, &pk.as_raw()) + .verify_hashed(x_real_hashed, x_imaginary_hashed, pk.as_raw()) } /// Returns the underlying signature. 
From 181aeb3d71d612fab940d7475dc59977b008a4c9 Mon Sep 17 00:00:00 2001 From: pawanjay176 Date: Wed, 13 Mar 2019 14:31:40 +0530 Subject: [PATCH 037/154] Refactored to use max_by --- eth2/fork_choice/src/optimized_lmd_ghost.rs | 25 +++++++++++---------- 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/eth2/fork_choice/src/optimized_lmd_ghost.rs b/eth2/fork_choice/src/optimized_lmd_ghost.rs index 093120bb5..e0074d4de 100644 --- a/eth2/fork_choice/src/optimized_lmd_ghost.rs +++ b/eth2/fork_choice/src/optimized_lmd_ghost.rs @@ -8,6 +8,7 @@ use db::{ }; use log::{debug, trace}; use std::collections::HashMap; +use std::cmp::Ordering; use std::sync::Arc; use types::{ readers::BeaconBlockReader, validator_registry::get_active_validator_indices, BeaconBlock, @@ -199,19 +200,19 @@ where if votes.is_empty() { return None; } - let mut best_child: Hash256 = Hash256::from(0); - let mut max_votes: u64 = 0; - for (&candidate, &votes) in votes.iter() { - // Choose the smaller hash to break ties deterministically - if votes == max_votes && candidate < best_child { - best_child = candidate; + + // Iterate through hashmap to get child with maximum votes + let best_child = votes.iter().max_by(|(child1,v1), (child2, v2)| { + let mut result = v1.cmp(v2); + // If votes are equal, choose smaller hash to break ties deterministically + if result == Ordering::Equal { + // Reverse so that max_by chooses smaller hash + result = child1.cmp(child2).reverse(); } - if votes > max_votes { - max_votes = votes; - best_child = candidate; - } - } - Some(best_child) + result + }); + + Some(*best_child.unwrap().0) } } From 4c45b90df5af507c668329092ffc1b39e6b3d0fb Mon Sep 17 00:00:00 2001 From: pawanjay176 Date: Wed, 13 Mar 2019 14:34:00 +0530 Subject: [PATCH 038/154] Formatted changes using rustfmt --- eth2/fork_choice/src/lib.rs | 6 +++--- eth2/fork_choice/src/optimized_lmd_ghost.rs | 10 +++++----- eth2/fork_choice/tests/tests.rs | 4 +++- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/eth2/fork_choice/src/lib.rs b/eth2/fork_choice/src/lib.rs index a947473f3..0d6969e89 100644 --- a/eth2/fork_choice/src/lib.rs +++ b/eth2/fork_choice/src/lib.rs @@ -22,8 +22,8 @@ extern crate types; pub mod bitwise_lmd_ghost; pub mod longest_chain; -pub mod slow_lmd_ghost; pub mod optimized_lmd_ghost; +pub mod slow_lmd_ghost; use db::stores::BeaconBlockAtSlotError; use db::DBError; @@ -31,8 +31,8 @@ use types::{BeaconBlock, ChainSpec, Hash256}; pub use bitwise_lmd_ghost::BitwiseLMDGhost; pub use longest_chain::LongestChain; -pub use slow_lmd_ghost::SlowLMDGhost; pub use optimized_lmd_ghost::OptimizedLMDGhost; +pub use slow_lmd_ghost::SlowLMDGhost; /// Defines the interface for Fork Choices. Each Fork choice will define their own data structures /// which can be built in block processing through the `add_block` and `add_attestation` functions. @@ -104,5 +104,5 @@ pub enum ForkChoiceAlgorithm { /// An optimised version of bitwise LMD-GHOST by Vitalik. BitwiseLMDGhost, /// An optimised implementation of LMD ghost. 
- OptimizedLMDGhost + OptimizedLMDGhost, } diff --git a/eth2/fork_choice/src/optimized_lmd_ghost.rs b/eth2/fork_choice/src/optimized_lmd_ghost.rs index e0074d4de..636ccdabc 100644 --- a/eth2/fork_choice/src/optimized_lmd_ghost.rs +++ b/eth2/fork_choice/src/optimized_lmd_ghost.rs @@ -7,8 +7,8 @@ use db::{ ClientDB, }; use log::{debug, trace}; -use std::collections::HashMap; use std::cmp::Ordering; +use std::collections::HashMap; use std::sync::Arc; use types::{ readers::BeaconBlockReader, validator_registry::get_active_validator_indices, BeaconBlock, @@ -202,12 +202,12 @@ where } // Iterate through hashmap to get child with maximum votes - let best_child = votes.iter().max_by(|(child1,v1), (child2, v2)| { - let mut result = v1.cmp(v2); + let best_child = votes.iter().max_by(|(child1, v1), (child2, v2)| { + let mut result = v1.cmp(v2); // If votes are equal, choose smaller hash to break ties deterministically if result == Ordering::Equal { - // Reverse so that max_by chooses smaller hash - result = child1.cmp(child2).reverse(); + // Reverse so that max_by chooses smaller hash + result = child1.cmp(child2).reverse(); } result }); diff --git a/eth2/fork_choice/tests/tests.rs b/eth2/fork_choice/tests/tests.rs index 1c8d92e0a..5fb963ea5 100644 --- a/eth2/fork_choice/tests/tests.rs +++ b/eth2/fork_choice/tests/tests.rs @@ -16,7 +16,9 @@ use bls::Signature; use db::stores::{BeaconBlockStore, BeaconStateStore}; use db::MemoryDB; // use env_logger::{Builder, Env}; -use fork_choice::{BitwiseLMDGhost, OptimizedLMDGhost, ForkChoice, ForkChoiceAlgorithm, LongestChain, SlowLMDGhost}; +use fork_choice::{ + BitwiseLMDGhost, ForkChoice, ForkChoiceAlgorithm, LongestChain, OptimizedLMDGhost, SlowLMDGhost, +}; use ssz::ssz_encode; use std::collections::HashMap; use std::sync::Arc; From 804da3c3ff0197381abb62b72f41555770bd672f Mon Sep 17 00:00:00 2001 From: pawanjay176 Date: Thu, 14 Mar 2019 00:58:15 +0530 Subject: [PATCH 039/154] Adds additional tests for lmd ghost fork choice rules --- .../tests/bitwise_lmd_ghost_test_vectors.yaml | 66 +++++++++++++++++++ .../tests/lmd_ghost_test_vectors.yaml | 16 +++++ 2 files changed, 82 insertions(+) diff --git a/eth2/fork_choice/tests/bitwise_lmd_ghost_test_vectors.yaml b/eth2/fork_choice/tests/bitwise_lmd_ghost_test_vectors.yaml index 3233137ab..931d8decf 100644 --- a/eth2/fork_choice/tests/bitwise_lmd_ghost_test_vectors.yaml +++ b/eth2/fork_choice/tests/bitwise_lmd_ghost_test_vectors.yaml @@ -63,3 +63,69 @@ test_cases: - b7: 2 heads: - id: 'b4' +- blocks: + - id: 'b0' + parent: 'b0' + - id: 'b1' + parent: 'b0' + - id: 'b2' + parent: 'b0' + - id: 'b3' + parent: 'b0' + - id: 'b4' + parent: 'b1' + - id: 'b5' + parent: 'b1' + - id: 'b6' + parent: 'b2' + - id: 'b7' + parent: 'b2' + - id: 'b8' + parent: 'b3' + - id: 'b9' + parent: 'b3' + weights: + - b1: 2 + - b2: 1 + - b3: 1 + - b4: 7 + - b5: 5 + - b6: 2 + - b7: 4 + - b8: 4 + - b9: 2 + heads: + - id: 'b4' +- blocks: + - id: 'b0' + parent: 'b0' + - id: 'b1' + parent: 'b0' + - id: 'b2' + parent: 'b0' + - id: 'b3' + parent: 'b0' + - id: 'b4' + parent: 'b1' + - id: 'b5' + parent: 'b1' + - id: 'b6' + parent: 'b2' + - id: 'b7' + parent: 'b2' + - id: 'b8' + parent: 'b3' + - id: 'b9' + parent: 'b3' + weights: + - b1: 1 + - b2: 1 + - b3: 1 + - b4: 7 + - b5: 5 + - b6: 2 + - b7: 4 + - b8: 4 + - b9: 2 + heads: + - id: 'b7' \ No newline at end of file diff --git a/eth2/fork_choice/tests/lmd_ghost_test_vectors.yaml b/eth2/fork_choice/tests/lmd_ghost_test_vectors.yaml index 4676d8201..dab998beb 100644 --- 
a/eth2/fork_choice/tests/lmd_ghost_test_vectors.yaml +++ b/eth2/fork_choice/tests/lmd_ghost_test_vectors.yaml @@ -35,3 +35,19 @@ test_cases: - b3: 3 heads: - id: 'b1' +# equal weights children. Should choose lower hash b2 +- blocks: + - id: 'b0' + parent: 'b0' + - id: 'b1' + parent: 'b0' + - id: 'b2' + parent: 'b0' + - id: 'b3' + parent: 'b0' + weights: + - b1: 5 + - b2: 6 + - b3: 6 + heads: + - id: 'b2' From 6f919e6f7dbb6879afcd2af51409184887748d49 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 14 Mar 2019 11:53:50 +1100 Subject: [PATCH 040/154] Add first iteration on faster rewards processing. --- .../benches/bench_epoch_processing.rs | 19 +- eth2/state_processing/benches/benches.rs | 2 +- .../src/per_epoch_processing.rs | 151 ++++++-------- .../src/per_epoch_processing/attesters.rs | 195 ++++++++++++++++++ 4 files changed, 265 insertions(+), 102 deletions(-) create mode 100644 eth2/state_processing/src/per_epoch_processing/attesters.rs diff --git a/eth2/state_processing/benches/bench_epoch_processing.rs b/eth2/state_processing/benches/bench_epoch_processing.rs index e4981b200..93c6c7ebd 100644 --- a/eth2/state_processing/benches/bench_epoch_processing.rs +++ b/eth2/state_processing/benches/bench_epoch_processing.rs @@ -150,13 +150,15 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp let state_clone = state.clone(); let spec_clone = spec.clone(); + let active_validator_indices = calculate_active_validator_indices(&state, &spec); c.bench( &format!("{}/epoch_processing", desc), Benchmark::new("calculate_attester_sets", move |b| { b.iter_batched( || state_clone.clone(), |mut state| { - calculate_attester_sets(&mut state, &spec_clone).unwrap(); + calculate_attester_sets(&mut state, &active_validator_indices, &spec_clone) + .unwrap(); state }, criterion::BatchSize::SmallInput, @@ -168,8 +170,8 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp let state_clone = state.clone(); let spec_clone = spec.clone(); let previous_epoch = state.previous_epoch(&spec); - let attesters = calculate_attester_sets(&state, &spec).unwrap(); let active_validator_indices = calculate_active_validator_indices(&state, &spec); + let attesters = calculate_attester_sets(&state, &active_validator_indices, &spec).unwrap(); let current_total_balance = state.get_total_balance(&active_validator_indices[..], &spec); let previous_total_balance = state.get_total_balance( &get_active_validator_indices(&state.validator_registry, previous_epoch)[..], @@ -185,8 +187,8 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp &mut state, current_total_balance, previous_total_balance, - attesters.previous_epoch_boundary.balance, - attesters.current_epoch_boundary.balance, + attesters.balances.previous_epoch_boundary, + attesters.balances.current_epoch_boundary, &spec_clone, ); state @@ -214,8 +216,8 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp let mut state_clone = state.clone(); let spec_clone = spec.clone(); let previous_epoch = state.previous_epoch(&spec); - let attesters = calculate_attester_sets(&state, &spec).unwrap(); let active_validator_indices = calculate_active_validator_indices(&state, &spec); + let attesters = calculate_attester_sets(&state, &active_validator_indices, &spec).unwrap(); let previous_total_balance = state.get_total_balance( &get_active_validator_indices(&state.validator_registry, previous_epoch)[..], &spec, @@ -229,7 +231,6 @@ fn bench_epoch_processing(c: &mut Criterion, 
state: &BeaconState, spec: &ChainSp |mut state| { process_rewards_and_penalities( &mut state, - &active_validator_indices, &attesters, previous_total_balance, &winning_root_for_shards, @@ -264,8 +265,8 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp let mut state_clone = state.clone(); let spec_clone = spec.clone(); let previous_epoch = state.previous_epoch(&spec); - let attesters = calculate_attester_sets(&state, &spec).unwrap(); let active_validator_indices = calculate_active_validator_indices(&state, &spec); + let attesters = calculate_attester_sets(&state, &active_validator_indices, &spec).unwrap(); let current_total_balance = state.get_total_balance(&active_validator_indices[..], spec); let previous_total_balance = state.get_total_balance( &get_active_validator_indices(&state.validator_registry, previous_epoch)[..], @@ -279,8 +280,8 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp &mut state_clone, current_total_balance, previous_total_balance, - attesters.previous_epoch_boundary.balance, - attesters.current_epoch_boundary.balance, + attesters.balances.previous_epoch_boundary, + attesters.balances.current_epoch_boundary, spec, ); assert!( diff --git a/eth2/state_processing/benches/benches.rs b/eth2/state_processing/benches/benches.rs index ad8c4f714..c619e1ef7 100644 --- a/eth2/state_processing/benches/benches.rs +++ b/eth2/state_processing/benches/benches.rs @@ -18,8 +18,8 @@ pub fn state_processing(c: &mut Criterion) { Builder::from_env(Env::default().default_filter_or(LOG_LEVEL)).init(); } - bench_block_processing::bench_block_processing_n_validators(c, VALIDATOR_COUNT); bench_epoch_processing::bench_epoch_processing_n_validators(c, VALIDATOR_COUNT); + bench_block_processing::bench_block_processing_n_validators(c, VALIDATOR_COUNT); } criterion_group!(benches, state_processing); diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index 4abbe012c..4fe53dd6b 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -1,6 +1,5 @@ -use attester_sets::AttesterSets; +use attesters::Attesters; use errors::EpochProcessingError as Error; -use fnv::FnvHashMap; use fnv::FnvHashSet; use integer_sqrt::IntegerSquareRoot; use rayon::prelude::*; @@ -11,6 +10,7 @@ use types::{validator_registry::get_active_validator_indices, *}; use winning_root::{winning_root, WinningRoot}; pub mod attester_sets; +pub mod attesters; pub mod errors; pub mod inclusion_distance; pub mod tests; @@ -35,8 +35,6 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result state.build_epoch_cache(RelativeEpoch::Current, spec)?; state.build_epoch_cache(RelativeEpoch::Next, spec)?; - let attesters = calculate_attester_sets(&state, spec)?; - let active_validator_indices = calculate_active_validator_indices(&state, spec); let current_total_balance = state.get_total_balance(&active_validator_indices[..], spec); @@ -46,14 +44,16 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result spec, ); + let attesters = calculate_attester_sets(&state, &active_validator_indices, spec)?; + process_eth1_data(state, spec); process_justification( state, current_total_balance, previous_total_balance, - attesters.previous_epoch_boundary.balance, - attesters.current_epoch_boundary.balance, + attesters.balances.previous_epoch_boundary, + attesters.balances.current_epoch_boundary, spec, ); @@ -63,7 +63,6 
@@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result // Rewards and Penalities process_rewards_and_penalities( state, - &active_validator_indices, &attesters, previous_total_balance, &winning_root_for_shards, @@ -107,9 +106,13 @@ pub fn calculate_active_validator_indices(state: &BeaconState, spec: &ChainSpec) /// Spec v0.4.0 pub fn calculate_attester_sets( state: &BeaconState, + active_validator_indices: &[usize], spec: &ChainSpec, -) -> Result { - AttesterSets::new(&state, spec) +) -> Result { + let mut attesters = Attesters::empty(state.validator_registry.len()); + attesters.process_active_validator_indices(&active_validator_indices); + attesters.process_attestations(&state, &state.latest_attestations, spec)?; + Ok(attesters) } /// Spec v0.4.0 @@ -283,22 +286,20 @@ pub fn process_crosslinks( /// Spec v0.4.0 pub fn process_rewards_and_penalities( state: &mut BeaconState, - active_validator_indices: &[usize], - attesters: &AttesterSets, + attesters: &Attesters, previous_total_balance: u64, winning_root_for_shards: &WinningRootHashSet, spec: &ChainSpec, ) -> Result<(), Error> { let next_epoch = state.next_epoch(spec); - let active_validator_indices: FnvHashSet = - FnvHashSet::from_iter(active_validator_indices.iter().cloned()); - + /* let previous_epoch_attestations: Vec<&PendingAttestation> = state .latest_attestations .par_iter() .filter(|a| a.data.slot.epoch(spec.slots_per_epoch) == state.previous_epoch(spec)) .collect(); + */ let base_reward_quotient = previous_total_balance.integer_sqrt() / spec.base_reward_quotient; @@ -309,6 +310,7 @@ pub fn process_rewards_and_penalities( return Err(Error::PreviousTotalBalanceIsZero); } + /* // Map is ValidatorIndex -> ProposerIndex let mut inclusion_slots: FnvHashMap = FnvHashMap::default(); for a in &previous_epoch_attestations { @@ -330,79 +332,55 @@ pub fn process_rewards_and_penalities( ); } } + */ // Justification and finalization let epochs_since_finality = next_epoch - state.finalized_epoch; - if epochs_since_finality <= 4 { - state.validator_balances = state - .validator_balances - .par_iter() - .enumerate() - .map(|(index, &balance)| { - let mut balance = balance; + state.validator_balances = state + .validator_balances + .par_iter() + .enumerate() + .map(|(index, &balance)| { + let mut balance = balance; + let status = &attesters.statuses[index]; + + if epochs_since_finality <= 4 { let base_reward = state.base_reward(index, base_reward_quotient, spec); // Expected FFG source - if attesters.previous_epoch.indices.contains(&index) { + if status.is_previous_epoch { safe_add_assign!( balance, - base_reward * attesters.previous_epoch.balance / previous_total_balance + base_reward * attesters.balances.previous_epoch / previous_total_balance ); - } else if active_validator_indices.contains(&index) { + } else if status.is_active { safe_sub_assign!(balance, base_reward); } // Expected FFG target - if attesters.previous_epoch_boundary.indices.contains(&index) { + if status.is_previous_epoch_boundary { safe_add_assign!( balance, - base_reward * attesters.previous_epoch_boundary.balance + base_reward * attesters.balances.previous_epoch_boundary / previous_total_balance ); - } else if active_validator_indices.contains(&index) { + } else if status.is_active { safe_sub_assign!(balance, base_reward); } // Expected beacon chain head - if attesters.previous_epoch_head.indices.contains(&index) { + if status.is_previous_epoch_head { safe_add_assign!( balance, - base_reward * attesters.previous_epoch_head.balance + base_reward * 
attesters.balances.previous_epoch_head / previous_total_balance ); - } else if active_validator_indices.contains(&index) { + } else if status.is_active { safe_sub_assign!(balance, base_reward); }; - - if attesters.previous_epoch.indices.contains(&index) { - let base_reward = state.base_reward(index, base_reward_quotient, spec); - - let (inclusion_distance, _) = inclusion_slots - .get(&index) - .expect("Inconsistent inclusion_slots."); - - if *inclusion_distance > 0 { - safe_add_assign!( - balance, - base_reward * spec.min_attestation_inclusion_delay - / inclusion_distance.as_u64() - ) - } - } - - balance - }) - .collect(); - } else { - state.validator_balances = state - .validator_balances - .par_iter() - .enumerate() - .map(|(index, &balance)| { - let mut balance = balance; - + } else { let inactivity_penalty = state.inactivity_penalty( index, epochs_since_finality, @@ -410,14 +388,14 @@ pub fn process_rewards_and_penalities( spec, ); - if active_validator_indices.contains(&index) { - if !attesters.previous_epoch.indices.contains(&index) { + if status.is_active { + if !status.is_previous_epoch { safe_sub_assign!(balance, inactivity_penalty); } - if !attesters.previous_epoch_boundary.indices.contains(&index) { + if !status.is_previous_epoch_boundary { safe_sub_assign!(balance, inactivity_penalty); } - if !attesters.previous_epoch_head.indices.contains(&index) { + if !status.is_previous_epoch_head { safe_sub_assign!(balance, inactivity_penalty); } @@ -426,42 +404,31 @@ pub fn process_rewards_and_penalities( safe_sub_assign!(balance, 2 * inactivity_penalty + base_reward); } } + } - if attesters.previous_epoch.indices.contains(&index) { - let base_reward = state.base_reward(index, base_reward_quotient, spec); - - let (inclusion_distance, _) = inclusion_slots - .get(&index) - .expect("Inconsistent inclusion_slots."); - - if *inclusion_distance > 0 { - safe_add_assign!( - balance, - base_reward * spec.min_attestation_inclusion_delay - / inclusion_distance.as_u64() - ) - } - } - - balance - }) - .collect(); - } + balance + }) + .collect(); // Attestation inclusion - // - for &index in &attesters.previous_epoch.indices { - let (_, proposer_index) = inclusion_slots - .get(&index) - .ok_or_else(|| Error::InclusionSlotsInconsistent(index))?; + for (index, _validator) in state.validator_registry.iter().enumerate() { + let status = &attesters.statuses[index]; - let base_reward = state.base_reward(*proposer_index, base_reward_quotient, spec); + if status.is_previous_epoch { + let proposer_index = status.inclusion_info.proposer_index; + let inclusion_distance = status.inclusion_info.distance; - safe_add_assign!( - state.validator_balances[*proposer_index], - base_reward / spec.attestation_inclusion_reward_quotient - ); + let base_reward = state.base_reward(proposer_index, base_reward_quotient, spec); + + if inclusion_distance > 0 && inclusion_distance < Slot::max_value() { + safe_add_assign!( + state.validator_balances[proposer_index], + base_reward * spec.min_attestation_inclusion_delay + / inclusion_distance.as_u64() + ) + } + } } //Crosslinks diff --git a/eth2/state_processing/src/per_epoch_processing/attesters.rs b/eth2/state_processing/src/per_epoch_processing/attesters.rs new file mode 100644 index 000000000..662ddceed --- /dev/null +++ b/eth2/state_processing/src/per_epoch_processing/attesters.rs @@ -0,0 +1,195 @@ +use types::*; + +macro_rules! 
set_self_if_other_is_true { + ($self_: ident, $other: ident, $var: ident) => { + $self_.$var = $other.$var & !$self_.$var; + }; +} + +#[derive(Clone)] +pub struct InclusionInfo { + pub slot: Slot, + pub distance: Slot, + pub proposer_index: usize, +} + +impl Default for InclusionInfo { + fn default() -> Self { + Self { + slot: Slot::max_value(), + distance: Slot::max_value(), + proposer_index: 0, + } + } +} + +impl InclusionInfo { + pub fn update(&mut self, other: &Self) { + if other.slot < self.slot { + self.slot = other.slot; + self.distance = other.distance; + self.proposer_index = other.proposer_index; + } + } +} + +#[derive(Default, Clone)] +pub struct AttesterStatus { + pub is_active: bool, + + pub is_current_epoch: bool, + pub is_current_epoch_boundary: bool, + pub is_previous_epoch: bool, + pub is_previous_epoch_boundary: bool, + pub is_previous_epoch_head: bool, + + pub inclusion_info: InclusionInfo, +} + +impl AttesterStatus { + pub fn update(&mut self, other: &Self) { + // Update all the bool fields, only updating `self` if `other` is true (never setting + // `self` to false). + set_self_if_other_is_true!(self, other, is_active); + set_self_if_other_is_true!(self, other, is_current_epoch); + set_self_if_other_is_true!(self, other, is_current_epoch_boundary); + set_self_if_other_is_true!(self, other, is_previous_epoch); + set_self_if_other_is_true!(self, other, is_previous_epoch_boundary); + set_self_if_other_is_true!(self, other, is_previous_epoch_head); + + self.inclusion_info.update(&other.inclusion_info); + } +} + +#[derive(Default, Clone)] +pub struct TotalBalances { + pub current_epoch: u64, + pub current_epoch_boundary: u64, + pub previous_epoch: u64, + pub previous_epoch_boundary: u64, + pub previous_epoch_head: u64, +} + +pub struct Attesters { + pub statuses: Vec, + pub balances: TotalBalances, +} + +impl Attesters { + pub fn empty(num_validators: usize) -> Self { + Self { + statuses: vec![AttesterStatus::default(); num_validators], + balances: TotalBalances::default(), + } + } + + pub fn process_active_validator_indices(&mut self, active_validator_indices: &[usize]) { + let status = AttesterStatus { + is_active: true, + ..AttesterStatus::default() + }; + + for &i in active_validator_indices { + self.statuses[i].update(&status); + } + } + + pub fn process_attestations( + &mut self, + state: &BeaconState, + attestations: &[PendingAttestation], + spec: &ChainSpec, + ) -> Result<(), BeaconStateError> { + for a in attestations { + let attesting_indices = + state.get_attestation_participants(&a.data, &a.aggregation_bitfield, spec)?; + let attesting_balance = state.get_total_balance(&attesting_indices, spec); + + let mut status = AttesterStatus::default(); + + // Profile this attestation, updating the total balances and generating an + // `AttesterStatus` object that applies to all participants in the attestation. + if is_from_epoch(a, state.current_epoch(spec), spec) { + self.balances.current_epoch += attesting_balance; + status.is_current_epoch = true; + + if has_common_epoch_boundary_root(a, state, state.current_epoch(spec), spec)? { + self.balances.current_epoch_boundary += attesting_balance; + status.is_current_epoch_boundary = true; + } + } else if is_from_epoch(a, state.previous_epoch(spec), spec) { + self.balances.previous_epoch += attesting_balance; + status.is_previous_epoch = true; + + // The inclusion slot and distance are only required for previous epoch attesters. 
+ status.inclusion_info = InclusionInfo { + slot: a.inclusion_slot, + distance: inclusion_distance(a), + proposer_index: state.get_beacon_proposer_index(a.inclusion_slot, spec)?, + }; + + if has_common_epoch_boundary_root(a, state, state.previous_epoch(spec), spec)? { + self.balances.previous_epoch_boundary += attesting_balance; + status.is_previous_epoch_boundary = true; + } + + if has_common_beacon_block_root(a, state, spec)? { + self.balances.previous_epoch_head += attesting_balance; + status.is_previous_epoch_head = true; + } + } + + // Loop through the participating validator indices and update the status vec. + for validator_index in attesting_indices { + self.statuses[validator_index].update(&status); + } + } + + Ok(()) + } +} + +fn inclusion_distance(a: &PendingAttestation) -> Slot { + a.inclusion_slot - a.data.slot +} + +/// Returns `true` if some `PendingAttestation` is from the supplied `epoch`. +/// +/// Spec v0.4.0 +fn is_from_epoch(a: &PendingAttestation, epoch: Epoch, spec: &ChainSpec) -> bool { + a.data.slot.epoch(spec.slots_per_epoch) == epoch +} + +/// Returns `true` if a `PendingAttestation` and `BeaconState` share the same beacon block hash for +/// the first slot of the given epoch. +/// +/// Spec v0.4.0 +fn has_common_epoch_boundary_root( + a: &PendingAttestation, + state: &BeaconState, + epoch: Epoch, + spec: &ChainSpec, +) -> Result { + let slot = epoch.start_slot(spec.slots_per_epoch); + let state_boundary_root = *state + .get_block_root(slot, spec) + .ok_or_else(|| BeaconStateError::InsufficientBlockRoots)?; + + Ok(a.data.epoch_boundary_root == state_boundary_root) +} + +/// Returns `true` if a `PendingAttestation` and `BeaconState` share the same beacon block hash for +/// the current slot of the `PendingAttestation`. +/// +/// Spec v0.4.0 +fn has_common_beacon_block_root( + a: &PendingAttestation, + state: &BeaconState, + spec: &ChainSpec, +) -> Result { + let state_block_root = *state + .get_block_root(a.data.slot, spec) + .ok_or_else(|| BeaconStateError::InsufficientBlockRoots)?; + + Ok(a.data.beacon_block_root == state_block_root) +} From a319144835f89ccac00756879e158711a260d1b7 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 14 Mar 2019 12:17:43 +1100 Subject: [PATCH 041/154] Improve crosslink reward processing --- .../benches/bench_epoch_processing.rs | 6 +- .../src/per_epoch_processing.rs | 105 +++--------------- .../src/per_epoch_processing/attesters.rs | 41 +++++++ 3 files changed, 60 insertions(+), 92 deletions(-) diff --git a/eth2/state_processing/benches/bench_epoch_processing.rs b/eth2/state_processing/benches/bench_epoch_processing.rs index 93c6c7ebd..ab4f61c00 100644 --- a/eth2/state_processing/benches/bench_epoch_processing.rs +++ b/eth2/state_processing/benches/bench_epoch_processing.rs @@ -227,11 +227,11 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp &format!("{}/epoch_processing", desc), Benchmark::new("process_rewards_and_penalties", move |b| { b.iter_batched( - || state_clone.clone(), - |mut state| { + || (state_clone.clone(), attesters.clone()), + |(mut state, mut attesters)| { process_rewards_and_penalities( &mut state, - &attesters, + &mut attesters, previous_total_balance, &winning_root_for_shards, &spec_clone, diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index 4fe53dd6b..2377d7ded 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -1,11 +1,9 @@ use 
attesters::Attesters; use errors::EpochProcessingError as Error; -use fnv::FnvHashSet; use integer_sqrt::IntegerSquareRoot; use rayon::prelude::*; use ssz::TreeHash; use std::collections::HashMap; -use std::iter::FromIterator; use types::{validator_registry::get_active_validator_indices, *}; use winning_root::{winning_root, WinningRoot}; @@ -44,7 +42,7 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result spec, ); - let attesters = calculate_attester_sets(&state, &active_validator_indices, spec)?; + let mut attesters = calculate_attester_sets(&state, &active_validator_indices, spec)?; process_eth1_data(state, spec); @@ -63,7 +61,7 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result // Rewards and Penalities process_rewards_and_penalities( state, - &attesters, + &mut attesters, previous_total_balance, &winning_root_for_shards, spec, @@ -286,21 +284,13 @@ pub fn process_crosslinks( /// Spec v0.4.0 pub fn process_rewards_and_penalities( state: &mut BeaconState, - attesters: &Attesters, + attesters: &mut Attesters, previous_total_balance: u64, winning_root_for_shards: &WinningRootHashSet, spec: &ChainSpec, ) -> Result<(), Error> { let next_epoch = state.next_epoch(spec); - /* - let previous_epoch_attestations: Vec<&PendingAttestation> = state - .latest_attestations - .par_iter() - .filter(|a| a.data.slot.epoch(spec.slots_per_epoch) == state.previous_epoch(spec)) - .collect(); - */ - let base_reward_quotient = previous_total_balance.integer_sqrt() / spec.base_reward_quotient; if base_reward_quotient == 0 { @@ -310,29 +300,7 @@ pub fn process_rewards_and_penalities( return Err(Error::PreviousTotalBalanceIsZero); } - /* - // Map is ValidatorIndex -> ProposerIndex - let mut inclusion_slots: FnvHashMap = FnvHashMap::default(); - for a in &previous_epoch_attestations { - let participants = - state.get_attestation_participants(&a.data, &a.aggregation_bitfield, spec)?; - let inclusion_distance = (a.inclusion_slot - a.data.slot).as_u64(); - for participant in participants { - if let Some((existing_distance, _)) = inclusion_slots.get(&participant) { - if *existing_distance <= inclusion_distance { - continue; - } - } - let proposer_index = state - .get_beacon_proposer_index(a.data.slot, spec) - .map_err(|_| Error::UnableToDetermineProducer)?; - inclusion_slots.insert( - participant, - (Slot::from(inclusion_distance), proposer_index), - ); - } - } - */ + attesters.process_winning_roots(state, winning_root_for_shards, spec)?; // Justification and finalization @@ -345,10 +313,9 @@ pub fn process_rewards_and_penalities( .map(|(index, &balance)| { let mut balance = balance; let status = &attesters.statuses[index]; + let base_reward = state.base_reward(index, base_reward_quotient, spec); if epochs_since_finality <= 4 { - let base_reward = state.base_reward(index, base_reward_quotient, spec); - // Expected FFG source if status.is_previous_epoch { safe_add_assign!( @@ -406,6 +373,17 @@ pub fn process_rewards_and_penalities( } } + // Crosslinks + + if let Some(ref info) = status.winning_root_info { + safe_add_assign!( + balance, + base_reward * info.total_attesting_balance / info.total_committee_balance + ); + } else { + safe_sub_assign!(balance, base_reward); + } + balance }) .collect(); @@ -431,57 +409,6 @@ pub fn process_rewards_and_penalities( } } - //Crosslinks - - for slot in state.previous_epoch(spec).slot_iter(spec.slots_per_epoch) { - // Clone removes the borrow which becomes an issue when mutating `state.balances`. 
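// A minimal numeric sketch (not part of this patch) of the crosslink reward rule
// this hunk reworks: committee members who attested to the winning root are
// rewarded pro-rata to the attesting balance, everyone else on the committee
// loses the full base reward. All figures are illustrative; the real code reads
// balances from the `BeaconState`.

fn crosslink_delta(
    base_reward: u64,
    attested_to_winning_root: bool,
    total_attesting_balance: u64,
    total_committee_balance: u64,
) -> i64 {
    if attested_to_winning_root {
        (base_reward * total_attesting_balance / total_committee_balance) as i64
    } else {
        -(base_reward as i64)
    }
}

fn main() {
    // Two thirds of the committee balance attested to the winning root.
    assert_eq!(crosslink_delta(60, true, 64, 96), 40);
    assert_eq!(crosslink_delta(60, false, 64, 96), -60);
}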
- let crosslink_committees_at_slot = - state.get_crosslink_committees_at_slot(slot, spec)?.clone(); - - for (crosslink_committee, shard) in crosslink_committees_at_slot { - let shard = shard as u64; - - // Note: I'm a little uncertain of the logic here -- I am waiting for spec v0.5.0 to - // clear it up. - // - // What happens here is: - // - // - If there was some crosslink root elected by the super-majority of this committee, - // then we reward all who voted for that root and penalize all that did not. - // - However, if there _was not_ some super-majority-voted crosslink root, then penalize - // all the validators. - // - // I'm not quite sure that the second case (no super-majority crosslink) is correct. - if let Some(winning_root) = winning_root_for_shards.get(&shard) { - // Hash set de-dedups and (hopefully) offers a speed improvement from faster - // lookups. - let attesting_validator_indices: FnvHashSet = - FnvHashSet::from_iter(winning_root.attesting_validator_indices.iter().cloned()); - - for &index in &crosslink_committee { - let base_reward = state.base_reward(index, base_reward_quotient, spec); - - let total_balance = state.get_total_balance(&crosslink_committee, spec); - - if attesting_validator_indices.contains(&index) { - safe_add_assign!( - state.validator_balances[index], - base_reward * winning_root.total_attesting_balance / total_balance - ); - } else { - safe_sub_assign!(state.validator_balances[index], base_reward); - } - } - } else { - for &index in &crosslink_committee { - let base_reward = state.base_reward(index, base_reward_quotient, spec); - - safe_sub_assign!(state.validator_balances[index], base_reward); - } - } - } - } - Ok(()) } diff --git a/eth2/state_processing/src/per_epoch_processing/attesters.rs b/eth2/state_processing/src/per_epoch_processing/attesters.rs index 662ddceed..ef26d338d 100644 --- a/eth2/state_processing/src/per_epoch_processing/attesters.rs +++ b/eth2/state_processing/src/per_epoch_processing/attesters.rs @@ -1,3 +1,4 @@ +use super::WinningRootHashSet; use types::*; macro_rules! set_self_if_other_is_true { @@ -6,6 +7,12 @@ macro_rules! set_self_if_other_is_true { }; } +#[derive(Default, Clone)] +pub struct WinningRootInfo { + pub total_committee_balance: u64, + pub total_attesting_balance: u64, +} + #[derive(Clone)] pub struct InclusionInfo { pub slot: Slot, @@ -44,6 +51,7 @@ pub struct AttesterStatus { pub is_previous_epoch_head: bool, pub inclusion_info: InclusionInfo, + pub winning_root_info: Option, } impl AttesterStatus { @@ -70,6 +78,7 @@ pub struct TotalBalances { pub previous_epoch_head: u64, } +#[derive(Clone)] pub struct Attesters { pub statuses: Vec, pub balances: TotalBalances, @@ -147,6 +156,38 @@ impl Attesters { Ok(()) } + + pub fn process_winning_roots( + &mut self, + state: &BeaconState, + winning_roots: &WinningRootHashSet, + spec: &ChainSpec, + ) -> Result<(), BeaconStateError> { + // Loop through each slot in the previous epoch. + for slot in state.previous_epoch(spec).slot_iter(spec.slots_per_epoch) { + let crosslink_committees_at_slot = + state.get_crosslink_committees_at_slot(slot, spec)?; + + // Loop through each committee in the slot. + for (crosslink_committee, shard) in crosslink_committees_at_slot { + // If there was some winning crosslink root for the committee's shard. 
+ if let Some(winning_root) = winning_roots.get(&shard) { + let total_committee_balance = + state.get_total_balance(&crosslink_committee, spec); + for &validator_index in &winning_root.attesting_validator_indices { + // Take note of the balance information for the winning root, it will be + // used later to calculate rewards for that validator. + self.statuses[validator_index].winning_root_info = Some(WinningRootInfo { + total_committee_balance, + total_attesting_balance: winning_root.total_attesting_balance, + }) + } + } + } + } + + Ok(()) + } } fn inclusion_distance(a: &PendingAttestation) -> Slot { From 95599ddc66649511f5e75deddd03035d17c451f8 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 14 Mar 2019 12:49:48 +1100 Subject: [PATCH 042/154] Update Attesters struct - Renames variables - Moves total balance calculation into struct --- .../benches/bench_epoch_processing.rs | 123 ++---------------- .../src/per_epoch_processing.rs | 67 ++++------ .../src/per_epoch_processing/attesters.rs | 90 +++++++------ 3 files changed, 88 insertions(+), 192 deletions(-) diff --git a/eth2/state_processing/benches/bench_epoch_processing.rs b/eth2/state_processing/benches/bench_epoch_processing.rs index ab4f61c00..d95f1c819 100644 --- a/eth2/state_processing/benches/bench_epoch_processing.rs +++ b/eth2/state_processing/benches/bench_epoch_processing.rs @@ -4,14 +4,13 @@ use ssz::TreeHash; use state_processing::{ per_epoch_processing, per_epoch_processing::{ - calculate_active_validator_indices, calculate_attester_sets, clean_attestations, - process_crosslinks, process_eth1_data, process_justification, - process_rewards_and_penalities, process_validator_registry, update_active_tree_index_roots, - update_latest_slashed_balances, + calculate_attester_sets, clean_attestations, process_crosslinks, process_eth1_data, + process_justification, process_rewards_and_penalities, process_validator_registry, + update_active_tree_index_roots, update_latest_slashed_balances, }, }; use types::test_utils::TestingBeaconStateBuilder; -use types::{validator_registry::get_active_validator_indices, *}; +use types::*; pub const BENCHING_SAMPLE_SIZE: usize = 10; pub const SMALL_BENCHING_SAMPLE_SIZE: usize = 10; @@ -73,64 +72,6 @@ pub fn bench_epoch_processing_n_validators(c: &mut Criterion, validator_count: u /// /// `desc` will be added to the title of each bench. 
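// A self-contained sketch (not part of this patch) of the criterion pattern used
// throughout this benchmark file: the setup closure hands each iteration a fresh
// clone so the routine can mutate state freely. It assumes the criterion
// 0.2-style API (`Benchmark`, `iter_batched`, `BatchSize`) already used by the
// surrounding benches; the routine and sizes below are illustrative.

use criterion::{criterion_group, criterion_main, Benchmark, Criterion};

fn expensive_routine(values: &mut Vec<u64>) {
    values.iter_mut().for_each(|v| *v += 1);
}

fn bench_example(c: &mut Criterion) {
    let baseline: Vec<u64> = (0..1024).collect();
    c.bench(
        "example/epoch_processing",
        Benchmark::new("expensive_routine", move |b| {
            b.iter_batched(
                || baseline.clone(), // setup: fresh copy per iteration
                |mut values| expensive_routine(&mut values),
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(10),
    );
}

criterion_group!(benches, bench_example);
criterion_main!(benches);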
fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSpec, desc: &str) { - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("{}/epoch_processing", desc), - Benchmark::new("calculate_active_validator_indices", move |b| { - b.iter_batched( - || state_clone.clone(), - |mut state| { - calculate_active_validator_indices(&mut state, &spec_clone); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(BENCHING_SAMPLE_SIZE), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - let active_validator_indices = calculate_active_validator_indices(&state, &spec); - c.bench( - &format!("{}/epoch_processing", desc), - Benchmark::new("calculate_current_total_balance", move |b| { - b.iter_batched( - || state_clone.clone(), - |state| { - state.get_total_balance(&active_validator_indices[..], &spec_clone); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(BENCHING_SAMPLE_SIZE), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("{}/epoch_processing", desc), - Benchmark::new("calculate_previous_total_balance", move |b| { - b.iter_batched( - || state_clone.clone(), - |state| { - state.get_total_balance( - &get_active_validator_indices( - &state.validator_registry, - state.previous_epoch(&spec_clone), - )[..], - &spec_clone, - ); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(BENCHING_SAMPLE_SIZE), - ); - let state_clone = state.clone(); let spec_clone = spec.clone(); c.bench( @@ -150,15 +91,13 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp let state_clone = state.clone(); let spec_clone = spec.clone(); - let active_validator_indices = calculate_active_validator_indices(&state, &spec); c.bench( &format!("{}/epoch_processing", desc), Benchmark::new("calculate_attester_sets", move |b| { b.iter_batched( || state_clone.clone(), |mut state| { - calculate_attester_sets(&mut state, &active_validator_indices, &spec_clone) - .unwrap(); + calculate_attester_sets(&mut state, &spec_clone).unwrap(); state }, criterion::BatchSize::SmallInput, @@ -169,14 +108,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp let state_clone = state.clone(); let spec_clone = spec.clone(); - let previous_epoch = state.previous_epoch(&spec); - let active_validator_indices = calculate_active_validator_indices(&state, &spec); - let attesters = calculate_attester_sets(&state, &active_validator_indices, &spec).unwrap(); - let current_total_balance = state.get_total_balance(&active_validator_indices[..], &spec); - let previous_total_balance = state.get_total_balance( - &get_active_validator_indices(&state.validator_registry, previous_epoch)[..], - &spec, - ); + let attesters = calculate_attester_sets(&state, &spec).unwrap(); c.bench( &format!("{}/epoch_processing", desc), Benchmark::new("process_justification", move |b| { @@ -185,10 +117,10 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp |mut state| { process_justification( &mut state, - current_total_balance, - previous_total_balance, - attesters.balances.previous_epoch_boundary, - attesters.balances.current_epoch_boundary, + attesters.balances.current_epoch_total, + attesters.balances.previous_epoch_total, + attesters.balances.previous_epoch_boundary_attesters, + attesters.balances.current_epoch_boundary_attesters, &spec_clone, ); state @@ -215,13 +147,7 @@ fn bench_epoch_processing(c: &mut 
Criterion, state: &BeaconState, spec: &ChainSp let mut state_clone = state.clone(); let spec_clone = spec.clone(); - let previous_epoch = state.previous_epoch(&spec); - let active_validator_indices = calculate_active_validator_indices(&state, &spec); - let attesters = calculate_attester_sets(&state, &active_validator_indices, &spec).unwrap(); - let previous_total_balance = state.get_total_balance( - &get_active_validator_indices(&state.validator_registry, previous_epoch)[..], - &spec, - ); + let attesters = calculate_attester_sets(&state, &spec).unwrap(); let winning_root_for_shards = process_crosslinks(&mut state_clone, &spec).unwrap(); c.bench( &format!("{}/epoch_processing", desc), @@ -232,7 +158,6 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp process_rewards_and_penalities( &mut state, &mut attesters, - previous_total_balance, &winning_root_for_shards, &spec_clone, ) @@ -262,32 +187,8 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp .sample_size(BENCHING_SAMPLE_SIZE), ); - let mut state_clone = state.clone(); + let state_clone = state.clone(); let spec_clone = spec.clone(); - let previous_epoch = state.previous_epoch(&spec); - let active_validator_indices = calculate_active_validator_indices(&state, &spec); - let attesters = calculate_attester_sets(&state, &active_validator_indices, &spec).unwrap(); - let current_total_balance = state.get_total_balance(&active_validator_indices[..], spec); - let previous_total_balance = state.get_total_balance( - &get_active_validator_indices(&state.validator_registry, previous_epoch)[..], - &spec, - ); - assert_eq!( - state_clone.finalized_epoch, state_clone.validator_registry_update_epoch, - "The last registry update should be at the last finalized epoch." - ); - process_justification( - &mut state_clone, - current_total_balance, - previous_total_balance, - attesters.balances.previous_epoch_boundary, - attesters.balances.current_epoch_boundary, - spec, - ); - assert!( - state_clone.finalized_epoch > state_clone.validator_registry_update_epoch, - "The state should have been finalized." - ); c.bench( &format!("{}/epoch_processing", desc), Benchmark::new("process_validator_registry", move |b| { diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index 2377d7ded..03135df66 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -26,32 +26,21 @@ pub type WinningRootHashSet = HashMap; /// /// Spec v0.4.0 pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { - let previous_epoch = state.previous_epoch(spec); - // Ensure all of the caches are built. 
state.build_epoch_cache(RelativeEpoch::Previous, spec)?; state.build_epoch_cache(RelativeEpoch::Current, spec)?; state.build_epoch_cache(RelativeEpoch::Next, spec)?; - let active_validator_indices = calculate_active_validator_indices(&state, spec); - - let current_total_balance = state.get_total_balance(&active_validator_indices[..], spec); - - let previous_total_balance = state.get_total_balance( - &get_active_validator_indices(&state.validator_registry, previous_epoch)[..], - spec, - ); - - let mut attesters = calculate_attester_sets(&state, &active_validator_indices, spec)?; + let mut attesters = calculate_attester_sets(&state, spec)?; process_eth1_data(state, spec); process_justification( state, - current_total_balance, - previous_total_balance, - attesters.balances.previous_epoch_boundary, - attesters.balances.current_epoch_boundary, + attesters.balances.current_epoch_total, + attesters.balances.previous_epoch_total, + attesters.balances.previous_epoch_boundary_attesters, + attesters.balances.current_epoch_boundary_attesters, spec, ); @@ -59,13 +48,7 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result let winning_root_for_shards = process_crosslinks(state, spec)?; // Rewards and Penalities - process_rewards_and_penalities( - state, - &mut attesters, - previous_total_balance, - &winning_root_for_shards, - spec, - )?; + process_rewards_and_penalities(state, &mut attesters, &winning_root_for_shards, spec)?; // Ejections state.process_ejections(spec); @@ -104,12 +87,12 @@ pub fn calculate_active_validator_indices(state: &BeaconState, spec: &ChainSpec) /// Spec v0.4.0 pub fn calculate_attester_sets( state: &BeaconState, - active_validator_indices: &[usize], spec: &ChainSpec, ) -> Result { - let mut attesters = Attesters::empty(state.validator_registry.len()); - attesters.process_active_validator_indices(&active_validator_indices); + let mut attesters = Attesters::new(state, spec); + attesters.process_attestations(&state, &state.latest_attestations, spec)?; + Ok(attesters) } @@ -285,12 +268,13 @@ pub fn process_crosslinks( pub fn process_rewards_and_penalities( state: &mut BeaconState, attesters: &mut Attesters, - previous_total_balance: u64, winning_root_for_shards: &WinningRootHashSet, spec: &ChainSpec, ) -> Result<(), Error> { let next_epoch = state.next_epoch(spec); + let previous_total_balance = attesters.balances.previous_epoch_total; + let base_reward_quotient = previous_total_balance.integer_sqrt() / spec.base_reward_quotient; if base_reward_quotient == 0 { @@ -317,34 +301,35 @@ pub fn process_rewards_and_penalities( if epochs_since_finality <= 4 { // Expected FFG source - if status.is_previous_epoch { + if status.is_previous_epoch_attester { safe_add_assign!( balance, - base_reward * attesters.balances.previous_epoch / previous_total_balance + base_reward * attesters.balances.previous_epoch_attesters + / previous_total_balance ); - } else if status.is_active { + } else if status.is_active_in_previous_epoch { safe_sub_assign!(balance, base_reward); } // Expected FFG target - if status.is_previous_epoch_boundary { + if status.is_previous_epoch_boundary_attester { safe_add_assign!( balance, - base_reward * attesters.balances.previous_epoch_boundary + base_reward * attesters.balances.previous_epoch_boundary_attesters / previous_total_balance ); - } else if status.is_active { + } else if status.is_active_in_previous_epoch { safe_sub_assign!(balance, base_reward); } // Expected beacon chain head - if status.is_previous_epoch_head { + if 
status.is_previous_epoch_head_attester { safe_add_assign!( balance, - base_reward * attesters.balances.previous_epoch_head + base_reward * attesters.balances.previous_epoch_head_attesters / previous_total_balance ); - } else if status.is_active { + } else if status.is_active_in_previous_epoch { safe_sub_assign!(balance, base_reward); }; } else { @@ -355,14 +340,14 @@ pub fn process_rewards_and_penalities( spec, ); - if status.is_active { - if !status.is_previous_epoch { + if status.is_active_in_previous_epoch { + if !status.is_previous_epoch_attester { safe_sub_assign!(balance, inactivity_penalty); } - if !status.is_previous_epoch_boundary { + if !status.is_previous_epoch_boundary_attester { safe_sub_assign!(balance, inactivity_penalty); } - if !status.is_previous_epoch_head { + if !status.is_previous_epoch_head_attester { safe_sub_assign!(balance, inactivity_penalty); } @@ -393,7 +378,7 @@ pub fn process_rewards_and_penalities( for (index, _validator) in state.validator_registry.iter().enumerate() { let status = &attesters.statuses[index]; - if status.is_previous_epoch { + if status.is_previous_epoch_attester { let proposer_index = status.inclusion_info.proposer_index; let inclusion_distance = status.inclusion_info.distance; diff --git a/eth2/state_processing/src/per_epoch_processing/attesters.rs b/eth2/state_processing/src/per_epoch_processing/attesters.rs index ef26d338d..1ffbdf652 100644 --- a/eth2/state_processing/src/per_epoch_processing/attesters.rs +++ b/eth2/state_processing/src/per_epoch_processing/attesters.rs @@ -42,28 +42,31 @@ impl InclusionInfo { #[derive(Default, Clone)] pub struct AttesterStatus { - pub is_active: bool, + pub is_active_in_current_epoch: bool, + pub is_active_in_previous_epoch: bool, - pub is_current_epoch: bool, - pub is_current_epoch_boundary: bool, - pub is_previous_epoch: bool, - pub is_previous_epoch_boundary: bool, - pub is_previous_epoch_head: bool, + pub is_current_epoch_attester: bool, + pub is_current_epoch_boundary_attester: bool, + pub is_previous_epoch_attester: bool, + pub is_previous_epoch_boundary_attester: bool, + pub is_previous_epoch_head_attester: bool, pub inclusion_info: InclusionInfo, pub winning_root_info: Option, } impl AttesterStatus { + /// Note: does not update the winning root info. pub fn update(&mut self, other: &Self) { // Update all the bool fields, only updating `self` if `other` is true (never setting // `self` to false). 
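// A standalone sketch (not part of this patch) of the merge semantics documented
// above: merging another status can only switch flags on, never off, so a later
// attestation cannot erase credit already recorded for a validator. The field
// set is trimmed down for illustration.

#[derive(Default, Clone, Copy)]
struct SketchStatus {
    is_previous_epoch_attester: bool,
    is_previous_epoch_head_attester: bool,
}

impl SketchStatus {
    fn update(&mut self, other: &Self) {
        self.is_previous_epoch_attester |= other.is_previous_epoch_attester;
        self.is_previous_epoch_head_attester |= other.is_previous_epoch_head_attester;
    }
}

fn main() {
    let mut merged = SketchStatus::default();
    merged.update(&SketchStatus {
        is_previous_epoch_attester: true,
        ..Default::default()
    });
    // A later all-false status cannot clear a flag that is already set.
    merged.update(&SketchStatus::default());
    assert!(merged.is_previous_epoch_attester);
    assert!(!merged.is_previous_epoch_head_attester);
}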
- set_self_if_other_is_true!(self, other, is_active); - set_self_if_other_is_true!(self, other, is_current_epoch); - set_self_if_other_is_true!(self, other, is_current_epoch_boundary); - set_self_if_other_is_true!(self, other, is_previous_epoch); - set_self_if_other_is_true!(self, other, is_previous_epoch_boundary); - set_self_if_other_is_true!(self, other, is_previous_epoch_head); + set_self_if_other_is_true!(self, other, is_active_in_current_epoch); + set_self_if_other_is_true!(self, other, is_active_in_previous_epoch); + set_self_if_other_is_true!(self, other, is_current_epoch_attester); + set_self_if_other_is_true!(self, other, is_current_epoch_boundary_attester); + set_self_if_other_is_true!(self, other, is_previous_epoch_attester); + set_self_if_other_is_true!(self, other, is_previous_epoch_boundary_attester); + set_self_if_other_is_true!(self, other, is_previous_epoch_head_attester); self.inclusion_info.update(&other.inclusion_info); } @@ -71,11 +74,13 @@ impl AttesterStatus { #[derive(Default, Clone)] pub struct TotalBalances { - pub current_epoch: u64, - pub current_epoch_boundary: u64, - pub previous_epoch: u64, - pub previous_epoch_boundary: u64, - pub previous_epoch_head: u64, + pub current_epoch_total: u64, + pub previous_epoch_total: u64, + pub current_epoch_attesters: u64, + pub current_epoch_boundary_attesters: u64, + pub previous_epoch_attesters: u64, + pub previous_epoch_boundary_attesters: u64, + pub previous_epoch_head_attesters: u64, } #[derive(Clone)] @@ -85,22 +90,27 @@ pub struct Attesters { } impl Attesters { - pub fn empty(num_validators: usize) -> Self { - Self { - statuses: vec![AttesterStatus::default(); num_validators], - balances: TotalBalances::default(), - } - } + pub fn new(state: &BeaconState, spec: &ChainSpec) -> Self { + let mut statuses = Vec::with_capacity(state.validator_registry.len()); + let mut balances = TotalBalances::default(); - pub fn process_active_validator_indices(&mut self, active_validator_indices: &[usize]) { - let status = AttesterStatus { - is_active: true, - ..AttesterStatus::default() - }; + for (i, validator) in state.validator_registry.iter().enumerate() { + let mut status = AttesterStatus::default(); - for &i in active_validator_indices { - self.statuses[i].update(&status); + if validator.is_active_at(state.current_epoch(spec)) { + status.is_active_in_current_epoch = true; + balances.current_epoch_total += state.get_effective_balance(i, spec); + } + + if validator.is_active_at(state.previous_epoch(spec)) { + status.is_active_in_previous_epoch = true; + balances.previous_epoch_total += state.get_effective_balance(i, spec); + } + + statuses.push(status); } + + Self { statuses, balances } } pub fn process_attestations( @@ -119,16 +129,16 @@ impl Attesters { // Profile this attestation, updating the total balances and generating an // `AttesterStatus` object that applies to all participants in the attestation. if is_from_epoch(a, state.current_epoch(spec), spec) { - self.balances.current_epoch += attesting_balance; - status.is_current_epoch = true; + self.balances.current_epoch_attesters += attesting_balance; + status.is_current_epoch_attester = true; if has_common_epoch_boundary_root(a, state, state.current_epoch(spec), spec)? 
{ - self.balances.current_epoch_boundary += attesting_balance; - status.is_current_epoch_boundary = true; + self.balances.current_epoch_boundary_attesters += attesting_balance; + status.is_current_epoch_boundary_attester = true; } } else if is_from_epoch(a, state.previous_epoch(spec), spec) { - self.balances.previous_epoch += attesting_balance; - status.is_previous_epoch = true; + self.balances.previous_epoch_attesters += attesting_balance; + status.is_previous_epoch_attester = true; // The inclusion slot and distance are only required for previous epoch attesters. status.inclusion_info = InclusionInfo { @@ -138,13 +148,13 @@ impl Attesters { }; if has_common_epoch_boundary_root(a, state, state.previous_epoch(spec), spec)? { - self.balances.previous_epoch_boundary += attesting_balance; - status.is_previous_epoch_boundary = true; + self.balances.previous_epoch_boundary_attesters += attesting_balance; + status.is_previous_epoch_boundary_attester = true; } if has_common_beacon_block_root(a, state, spec)? { - self.balances.previous_epoch_head += attesting_balance; - status.is_previous_epoch_head = true; + self.balances.previous_epoch_head_attesters += attesting_balance; + status.is_previous_epoch_head_attester = true; } } From 10aee6214c4658322353de9b92df4e8194503c76 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 14 Mar 2019 14:59:00 +1100 Subject: [PATCH 043/154] Tidy per_epoch_processing Mainly renaming variables and files for readability. --- .../benches/bench_epoch_processing.rs | 19 ++---- .../src/per_epoch_processing.rs | 66 ++++++++----------- .../{attesters.rs => validator_statuses.rs} | 37 ++++++----- 3 files changed, 57 insertions(+), 65 deletions(-) rename eth2/state_processing/src/per_epoch_processing/{attesters.rs => validator_statuses.rs} (89%) diff --git a/eth2/state_processing/benches/bench_epoch_processing.rs b/eth2/state_processing/benches/bench_epoch_processing.rs index d95f1c819..49b4f4371 100644 --- a/eth2/state_processing/benches/bench_epoch_processing.rs +++ b/eth2/state_processing/benches/bench_epoch_processing.rs @@ -4,7 +4,7 @@ use ssz::TreeHash; use state_processing::{ per_epoch_processing, per_epoch_processing::{ - calculate_attester_sets, clean_attestations, process_crosslinks, process_eth1_data, + clean_attestations, initialize_validator_statuses, process_crosslinks, process_eth1_data, process_justification, process_rewards_and_penalities, process_validator_registry, update_active_tree_index_roots, update_latest_slashed_balances, }, @@ -93,11 +93,11 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp let spec_clone = spec.clone(); c.bench( &format!("{}/epoch_processing", desc), - Benchmark::new("calculate_attester_sets", move |b| { + Benchmark::new("initialize_validator_statuses", move |b| { b.iter_batched( || state_clone.clone(), |mut state| { - calculate_attester_sets(&mut state, &spec_clone).unwrap(); + initialize_validator_statuses(&mut state, &spec_clone).unwrap(); state }, criterion::BatchSize::SmallInput, @@ -108,21 +108,14 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp let state_clone = state.clone(); let spec_clone = spec.clone(); - let attesters = calculate_attester_sets(&state, &spec).unwrap(); + let attesters = initialize_validator_statuses(&state, &spec).unwrap(); c.bench( &format!("{}/epoch_processing", desc), Benchmark::new("process_justification", move |b| { b.iter_batched( || state_clone.clone(), |mut state| { - process_justification( - &mut state, - 
attesters.balances.current_epoch_total, - attesters.balances.previous_epoch_total, - attesters.balances.previous_epoch_boundary_attesters, - attesters.balances.current_epoch_boundary_attesters, - &spec_clone, - ); + process_justification(&mut state, &attesters.total_balances, &spec_clone); state }, criterion::BatchSize::SmallInput, @@ -147,7 +140,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp let mut state_clone = state.clone(); let spec_clone = spec.clone(); - let attesters = calculate_attester_sets(&state, &spec).unwrap(); + let attesters = initialize_validator_statuses(&state, &spec).unwrap(); let winning_root_for_shards = process_crosslinks(&mut state_clone, &spec).unwrap(); c.bench( &format!("{}/epoch_processing", desc), diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index 03135df66..044d32eae 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -1,17 +1,17 @@ -use attesters::Attesters; use errors::EpochProcessingError as Error; use integer_sqrt::IntegerSquareRoot; use rayon::prelude::*; use ssz::TreeHash; use std::collections::HashMap; use types::{validator_registry::get_active_validator_indices, *}; +use validator_statuses::{TotalBalances, ValidatorStatuses}; use winning_root::{winning_root, WinningRoot}; pub mod attester_sets; -pub mod attesters; pub mod errors; pub mod inclusion_distance; pub mod tests; +pub mod validator_statuses; pub mod winning_root; /// Maps a shard to a winning root. @@ -31,24 +31,17 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result state.build_epoch_cache(RelativeEpoch::Current, spec)?; state.build_epoch_cache(RelativeEpoch::Next, spec)?; - let mut attesters = calculate_attester_sets(&state, spec)?; + let mut statuses = initialize_validator_statuses(&state, spec)?; process_eth1_data(state, spec); - process_justification( - state, - attesters.balances.current_epoch_total, - attesters.balances.previous_epoch_total, - attesters.balances.previous_epoch_boundary_attesters, - attesters.balances.current_epoch_boundary_attesters, - spec, - ); + process_justification(state, &statuses.total_balances, spec); // Crosslinks let winning_root_for_shards = process_crosslinks(state, spec)?; // Rewards and Penalities - process_rewards_and_penalities(state, &mut attesters, &winning_root_for_shards, spec)?; + process_rewards_and_penalities(state, &mut statuses, &winning_root_for_shards, spec)?; // Ejections state.process_ejections(spec); @@ -85,15 +78,15 @@ pub fn calculate_active_validator_indices(state: &BeaconState, spec: &ChainSpec) /// - etc. 
/// /// Spec v0.4.0 -pub fn calculate_attester_sets( +pub fn initialize_validator_statuses( state: &BeaconState, spec: &ChainSpec, -) -> Result { - let mut attesters = Attesters::new(state, spec); +) -> Result { + let mut statuses = ValidatorStatuses::new(state, spec); - attesters.process_attestations(&state, &state.latest_attestations, spec)?; + statuses.process_attestations(&state, &state.latest_attestations, spec)?; - Ok(attesters) + Ok(statuses) } /// Spec v0.4.0 @@ -121,10 +114,7 @@ pub fn process_eth1_data(state: &mut BeaconState, spec: &ChainSpec) { /// Spec v0.4.0 pub fn process_justification( state: &mut BeaconState, - current_total_balance: u64, - previous_total_balance: u64, - previous_epoch_boundary_attesting_balance: u64, - current_epoch_boundary_attesting_balance: u64, + total_balances: &TotalBalances, spec: &ChainSpec, ) { let previous_epoch = state.previous_epoch(spec); @@ -137,7 +127,8 @@ pub fn process_justification( // // - Set the 2nd bit of the bitfield. // - Set the previous epoch to be justified. - if (3 * previous_epoch_boundary_attesting_balance) >= (2 * previous_total_balance) { + if (3 * total_balances.previous_epoch_boundary_attesters) >= (2 * total_balances.previous_epoch) + { state.justification_bitfield |= 2; new_justified_epoch = previous_epoch; } @@ -145,7 +136,7 @@ pub fn process_justification( // // - Set the 1st bit of the bitfield. // - Set the current epoch to be justified. - if (3 * current_epoch_boundary_attesting_balance) >= (2 * current_total_balance) { + if (3 * total_balances.current_epoch_boundary_attesters) >= (2 * total_balances.current_epoch) { state.justification_bitfield |= 1; new_justified_epoch = current_epoch; } @@ -267,25 +258,26 @@ pub fn process_crosslinks( /// Spec v0.4.0 pub fn process_rewards_and_penalities( state: &mut BeaconState, - attesters: &mut Attesters, + statuses: &mut ValidatorStatuses, winning_root_for_shards: &WinningRootHashSet, spec: &ChainSpec, ) -> Result<(), Error> { let next_epoch = state.next_epoch(spec); - let previous_total_balance = attesters.balances.previous_epoch_total; + statuses.process_winning_roots(state, winning_root_for_shards, spec)?; - let base_reward_quotient = previous_total_balance.integer_sqrt() / spec.base_reward_quotient; + let total_balances = &statuses.total_balances; + + let base_reward_quotient = + total_balances.previous_epoch.integer_sqrt() / spec.base_reward_quotient; if base_reward_quotient == 0 { return Err(Error::BaseRewardQuotientIsZero); } - if previous_total_balance == 0 { + if total_balances.previous_epoch == 0 { return Err(Error::PreviousTotalBalanceIsZero); } - attesters.process_winning_roots(state, winning_root_for_shards, spec)?; - // Justification and finalization let epochs_since_finality = next_epoch - state.finalized_epoch; @@ -296,7 +288,7 @@ pub fn process_rewards_and_penalities( .enumerate() .map(|(index, &balance)| { let mut balance = balance; - let status = &attesters.statuses[index]; + let status = &statuses.get(index); let base_reward = state.base_reward(index, base_reward_quotient, spec); if epochs_since_finality <= 4 { @@ -304,8 +296,8 @@ pub fn process_rewards_and_penalities( if status.is_previous_epoch_attester { safe_add_assign!( balance, - base_reward * attesters.balances.previous_epoch_attesters - / previous_total_balance + base_reward * total_balances.previous_epoch_attesters + / total_balances.previous_epoch ); } else if status.is_active_in_previous_epoch { safe_sub_assign!(balance, base_reward); @@ -315,8 +307,8 @@ pub fn process_rewards_and_penalities( 
if status.is_previous_epoch_boundary_attester { safe_add_assign!( balance, - base_reward * attesters.balances.previous_epoch_boundary_attesters - / previous_total_balance + base_reward * total_balances.previous_epoch_boundary_attesters + / total_balances.previous_epoch ); } else if status.is_active_in_previous_epoch { safe_sub_assign!(balance, base_reward); @@ -326,8 +318,8 @@ pub fn process_rewards_and_penalities( if status.is_previous_epoch_head_attester { safe_add_assign!( balance, - base_reward * attesters.balances.previous_epoch_head_attesters - / previous_total_balance + base_reward * total_balances.previous_epoch_head_attesters + / total_balances.previous_epoch ); } else if status.is_active_in_previous_epoch { safe_sub_assign!(balance, base_reward); @@ -376,7 +368,7 @@ pub fn process_rewards_and_penalities( // Attestation inclusion for (index, _validator) in state.validator_registry.iter().enumerate() { - let status = &attesters.statuses[index]; + let status = &statuses.get(index); if status.is_previous_epoch_attester { let proposer_index = status.inclusion_info.proposer_index; diff --git a/eth2/state_processing/src/per_epoch_processing/attesters.rs b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs similarity index 89% rename from eth2/state_processing/src/per_epoch_processing/attesters.rs rename to eth2/state_processing/src/per_epoch_processing/validator_statuses.rs index 1ffbdf652..70eeaf82a 100644 --- a/eth2/state_processing/src/per_epoch_processing/attesters.rs +++ b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs @@ -74,8 +74,8 @@ impl AttesterStatus { #[derive(Default, Clone)] pub struct TotalBalances { - pub current_epoch_total: u64, - pub previous_epoch_total: u64, + pub current_epoch: u64, + pub previous_epoch: u64, pub current_epoch_attesters: u64, pub current_epoch_boundary_attesters: u64, pub previous_epoch_attesters: u64, @@ -84,33 +84,40 @@ pub struct TotalBalances { } #[derive(Clone)] -pub struct Attesters { - pub statuses: Vec, - pub balances: TotalBalances, +pub struct ValidatorStatuses { + statuses: Vec, + pub total_balances: TotalBalances, } -impl Attesters { +impl ValidatorStatuses { pub fn new(state: &BeaconState, spec: &ChainSpec) -> Self { let mut statuses = Vec::with_capacity(state.validator_registry.len()); - let mut balances = TotalBalances::default(); + let mut total_balances = TotalBalances::default(); for (i, validator) in state.validator_registry.iter().enumerate() { let mut status = AttesterStatus::default(); if validator.is_active_at(state.current_epoch(spec)) { status.is_active_in_current_epoch = true; - balances.current_epoch_total += state.get_effective_balance(i, spec); + total_balances.current_epoch += state.get_effective_balance(i, spec); } if validator.is_active_at(state.previous_epoch(spec)) { status.is_active_in_previous_epoch = true; - balances.previous_epoch_total += state.get_effective_balance(i, spec); + total_balances.previous_epoch += state.get_effective_balance(i, spec); } statuses.push(status); } - Self { statuses, balances } + Self { + statuses, + total_balances, + } + } + + pub fn get(&self, i: usize) -> &AttesterStatus { + &self.statuses[i] } pub fn process_attestations( @@ -129,15 +136,15 @@ impl Attesters { // Profile this attestation, updating the total balances and generating an // `AttesterStatus` object that applies to all participants in the attestation. 
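// A standalone sketch (not part of this patch) of the slot-to-epoch bucketing
// behind `is_from_epoch`: an attestation belongs to the epoch containing its
// slot, so integer division by the slots-per-epoch constant decides which
// epoch's counters it feeds. The value 64 below is illustrative; the real code
// reads `slots_per_epoch` from `ChainSpec` and uses the `Slot`/`Epoch` newtypes.

fn slot_to_epoch(slot: u64, slots_per_epoch: u64) -> u64 {
    slot / slots_per_epoch
}

fn is_from_epoch_sketch(attestation_slot: u64, epoch: u64, slots_per_epoch: u64) -> bool {
    slot_to_epoch(attestation_slot, slots_per_epoch) == epoch
}

fn main() {
    let slots_per_epoch = 64;
    assert!(is_from_epoch_sketch(130, 2, slots_per_epoch)); // 130 / 64 == 2
    assert!(!is_from_epoch_sketch(130, 1, slots_per_epoch));
}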
if is_from_epoch(a, state.current_epoch(spec), spec) { - self.balances.current_epoch_attesters += attesting_balance; + self.total_balances.current_epoch_attesters += attesting_balance; status.is_current_epoch_attester = true; if has_common_epoch_boundary_root(a, state, state.current_epoch(spec), spec)? { - self.balances.current_epoch_boundary_attesters += attesting_balance; + self.total_balances.current_epoch_boundary_attesters += attesting_balance; status.is_current_epoch_boundary_attester = true; } } else if is_from_epoch(a, state.previous_epoch(spec), spec) { - self.balances.previous_epoch_attesters += attesting_balance; + self.total_balances.previous_epoch_attesters += attesting_balance; status.is_previous_epoch_attester = true; // The inclusion slot and distance are only required for previous epoch attesters. @@ -148,12 +155,12 @@ impl Attesters { }; if has_common_epoch_boundary_root(a, state, state.previous_epoch(spec), spec)? { - self.balances.previous_epoch_boundary_attesters += attesting_balance; + self.total_balances.previous_epoch_boundary_attesters += attesting_balance; status.is_previous_epoch_boundary_attester = true; } if has_common_beacon_block_root(a, state, spec)? { - self.balances.previous_epoch_head_attesters += attesting_balance; + self.total_balances.previous_epoch_head_attesters += attesting_balance; status.is_previous_epoch_head_attester = true; } } From f4959fc03c283a0c55193908e48b85e694f37974 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 14 Mar 2019 15:10:20 +1100 Subject: [PATCH 044/154] Add TestingBeaconStateBuilder fn for cloned kps Allows for faster test setups. Implemented method for fork choice tests. --- eth2/fork_choice/tests/tests.rs | 6 +++--- .../test_utils/testing_beacon_state_builder.rs | 16 ++++++++++++++++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/eth2/fork_choice/tests/tests.rs b/eth2/fork_choice/tests/tests.rs index 5fb963ea5..cd5ff360f 100644 --- a/eth2/fork_choice/tests/tests.rs +++ b/eth2/fork_choice/tests/tests.rs @@ -24,7 +24,7 @@ use std::collections::HashMap; use std::sync::Arc; use std::{fs::File, io::prelude::*, path::PathBuf}; use types::test_utils::TestingBeaconStateBuilder; -use types::{BeaconBlock, BeaconBlockBody, ChainSpec, Eth1Data, Hash256, Slot}; +use types::{BeaconBlock, BeaconBlockBody, ChainSpec, Eth1Data, Hash256, Keypair, Slot}; use yaml_rust::yaml; // Note: We Assume the block Id's are hex-encoded. @@ -218,7 +218,7 @@ fn load_test_cases_from_yaml(file_path: &str) -> Vec { // initialise a single validator and state. All blocks will reference this state root. 
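// A standalone sketch (not part of this patch) of the single-keypair trick this
// test setup switches to further down: clone one keypair for every validator
// instead of generating N keys, which keeps state construction cheap in tests.
// `Keypair` is stubbed with a trivial struct purely for illustration.

#[derive(Clone, Debug, PartialEq)]
struct SketchKeypair {
    secret: u64,
}

fn cloned_keypairs(validator_count: usize, keypair: &SketchKeypair) -> Vec<SketchKeypair> {
    let mut keypairs = Vec::with_capacity(validator_count);
    for _ in 0..validator_count {
        keypairs.push(keypair.clone());
    }
    keypairs
}

fn main() {
    let keypair = SketchKeypair { secret: 42 };
    let keypairs = cloned_keypairs(4, &keypair);
    assert_eq!(keypairs.len(), 4);
    assert!(keypairs.iter().all(|k| *k == keypair));
}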
fn setup_inital_state( fork_choice_algo: &ForkChoiceAlgorithm, - no_validators: usize, + num_validators: usize, ) -> (Box, Arc>, Hash256) { let db = Arc::new(MemoryDB::open()); let block_store = Arc::new(BeaconBlockStore::new(db.clone())); @@ -243,7 +243,7 @@ fn setup_inital_state( let spec = ChainSpec::foundation(); let state_builder = - TestingBeaconStateBuilder::from_deterministic_keypairs(no_validators, &spec); + TestingBeaconStateBuilder::from_single_keypair(num_validators, &Keypair::random(), &spec); let (state, _keypairs) = state_builder.build(); let state_root = state.canonical_root(); diff --git a/eth2/types/src/test_utils/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/testing_beacon_state_builder.rs index b2cf28c8a..c116cd1b7 100644 --- a/eth2/types/src/test_utils/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_state_builder.rs @@ -74,6 +74,22 @@ impl TestingBeaconStateBuilder { TestingBeaconStateBuilder::from_keypairs(keypairs, spec) } + /// Uses the given keypair for all validators. + pub fn from_single_keypair( + validator_count: usize, + keypair: &Keypair, + spec: &ChainSpec, + ) -> Self { + debug!("Generating {} cloned keypairs...", validator_count); + + let mut keypairs = Vec::with_capacity(validator_count); + for _ in 0..validator_count { + keypairs.push(keypair.clone()) + } + + TestingBeaconStateBuilder::from_keypairs(keypairs, spec) + } + /// Creates the builder from an existing set of keypairs. pub fn from_keypairs(keypairs: Vec, spec: &ChainSpec) -> Self { let validator_count = keypairs.len(); From 12214e7eed21e2eb80914369371f9939a4ce6b57 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 14 Mar 2019 15:11:03 +1100 Subject: [PATCH 045/154] Ignore long running DB test --- beacon_node/db/src/stores/beacon_block_store.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/beacon_node/db/src/stores/beacon_block_store.rs b/beacon_node/db/src/stores/beacon_block_store.rs index bd5149cfd..92d296c37 100644 --- a/beacon_node/db/src/stores/beacon_block_store.rs +++ b/beacon_node/db/src/stores/beacon_block_store.rs @@ -198,6 +198,7 @@ mod tests { } #[test] + #[ignore] fn test_block_at_slot() { let db = Arc::new(MemoryDB::open()); let bs = Arc::new(BeaconBlockStore::new(db.clone())); From 086e9574d2bce180ae8a60ef0f2845d029f80d06 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 14 Mar 2019 15:22:45 +1100 Subject: [PATCH 046/154] Correct all fork choice rules for children with no votes. 
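The sketch below is illustrative only and not part of the patch: it is a condensed model of the behaviour added here, where the child with the most votes wins, the all-zero-votes case falls back to the lexicographically smallest hash, and (as in the slow variant) equal vote counts also resolve to the smaller hash. `Hash256` is modelled as `[u8; 32]` to keep the example self-contained.

use std::collections::HashMap;

type Hash256 = [u8; 32];

/// Pick the best child: most votes wins; equal votes (including all-zero votes)
/// resolve to the smallest hash.
fn best_child(children: &[Hash256], votes: &HashMap<Hash256, u64>) -> Option<Hash256> {
    children.iter().copied().max_by(|a, b| {
        let votes_a = votes.get(a).copied().unwrap_or(0);
        let votes_b = votes.get(b).copied().unwrap_or(0);
        // Higher vote count wins; on a tie, the smaller hash is treated as greater.
        votes_a.cmp(&votes_b).then_with(|| b.cmp(a))
    })
}

fn main() {
    let small = [1u8; 32];
    let large = [2u8; 32];
    // No votes at all: the smaller hash is chosen, matching the new test vectors.
    assert_eq!(best_child(&[large, small], &HashMap::new()), Some(small));
}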
--- eth2/fork_choice/src/bitwise_lmd_ghost.rs | 22 ++++++++++++++----- eth2/fork_choice/src/optimized_lmd_ghost.rs | 22 ++++++++++++++----- eth2/fork_choice/src/protolambda_lmd_ghost.rs | 1 - eth2/fork_choice/src/slow_lmd_ghost.rs | 7 ++++++ .../tests/bitwise_lmd_ghost_test_vectors.yaml | 15 ++++++++++++- .../tests/lmd_ghost_test_vectors.yaml | 12 ++++++++++ 6 files changed, 67 insertions(+), 12 deletions(-) delete mode 100644 eth2/fork_choice/src/protolambda_lmd_ghost.rs diff --git a/eth2/fork_choice/src/bitwise_lmd_ghost.rs b/eth2/fork_choice/src/bitwise_lmd_ghost.rs index fd1c3dea4..d7b10015b 100644 --- a/eth2/fork_choice/src/bitwise_lmd_ghost.rs +++ b/eth2/fork_choice/src/bitwise_lmd_ghost.rs @@ -409,11 +409,23 @@ impl ForkChoice for BitwiseLMDGhost { *child_votes.entry(child).or_insert_with(|| 0) += vote; } } - // given the votes on the children, find the best child - current_head = self - .choose_best_child(&child_votes) - .ok_or(ForkChoiceError::CannotFindBestChild)?; - trace!("Best child found: {}", current_head); + // check if we have votes of children, if not select the smallest hash child + if child_votes.is_empty() { + current_head = *children + .iter() + .min_by(|child1, child2| child1.cmp(child2)) + .expect("Must be children here"); + trace!( + "Children have no votes - smallest hash chosen: {}", + current_head + ); + } else { + // given the votes on the children, find the best child + current_head = self + .choose_best_child(&child_votes) + .ok_or(ForkChoiceError::CannotFindBestChild)?; + trace!("Best child found: {}", current_head); + } } // didn't find head yet, proceed to next iteration diff --git a/eth2/fork_choice/src/optimized_lmd_ghost.rs b/eth2/fork_choice/src/optimized_lmd_ghost.rs index 636ccdabc..30c84e9e1 100644 --- a/eth2/fork_choice/src/optimized_lmd_ghost.rs +++ b/eth2/fork_choice/src/optimized_lmd_ghost.rs @@ -380,11 +380,23 @@ impl ForkChoice for OptimizedLMDGhost { *child_votes.entry(child).or_insert_with(|| 0) += vote; } } - // given the votes on the children, find the best child - current_head = self - .choose_best_child(&child_votes) - .ok_or(ForkChoiceError::CannotFindBestChild)?; - trace!("Best child found: {}", current_head); + // check if we have votes of children, if not select the smallest hash child + if child_votes.is_empty() { + current_head = *children + .iter() + .min_by(|child1, child2| child1.cmp(child2)) + .expect("Must be children here"); + trace!( + "Children have no votes - smallest hash chosen: {}", + current_head + ); + } else { + // given the votes on the children, find the best child + current_head = self + .choose_best_child(&child_votes) + .ok_or(ForkChoiceError::CannotFindBestChild)?; + trace!("Best child found: {}", current_head); + } } // didn't find head yet, proceed to next iteration diff --git a/eth2/fork_choice/src/protolambda_lmd_ghost.rs b/eth2/fork_choice/src/protolambda_lmd_ghost.rs deleted file mode 100644 index 8b1378917..000000000 --- a/eth2/fork_choice/src/protolambda_lmd_ghost.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/eth2/fork_choice/src/slow_lmd_ghost.rs b/eth2/fork_choice/src/slow_lmd_ghost.rs index ab4cd2ada..abf13f21b 100644 --- a/eth2/fork_choice/src/slow_lmd_ghost.rs +++ b/eth2/fork_choice/src/slow_lmd_ghost.rs @@ -210,6 +210,7 @@ impl ForkChoice for SlowLMDGhost { trace!("Children found: {:?}", children); let mut head_vote_count = 0; + head_hash = children[0]; for child_hash in children { let vote_count = self.get_vote_count(&latest_votes, &child_hash)?; trace!("Vote count for child: {} is: {}", 
child_hash, vote_count); @@ -218,6 +219,12 @@ impl ForkChoice for SlowLMDGhost { head_hash = *child_hash; head_vote_count = vote_count; } + // resolve ties - choose smaller hash + else if vote_count == head_vote_count { + if *child_hash < head_hash { + head_hash = *child_hash; + } + } } } Ok(head_hash) diff --git a/eth2/fork_choice/tests/bitwise_lmd_ghost_test_vectors.yaml b/eth2/fork_choice/tests/bitwise_lmd_ghost_test_vectors.yaml index 931d8decf..61b0b05c4 100644 --- a/eth2/fork_choice/tests/bitwise_lmd_ghost_test_vectors.yaml +++ b/eth2/fork_choice/tests/bitwise_lmd_ghost_test_vectors.yaml @@ -128,4 +128,17 @@ test_cases: - b8: 4 - b9: 2 heads: - - id: 'b7' \ No newline at end of file + - id: 'b7' +- blocks: + - id: 'b0' + parent: 'b0' + - id: 'b1' + parent: 'b0' + - id: 'b2' + parent: 'b0' + weights: + - b1: 0 + - b2: 0 + heads: + - id: 'b1' + diff --git a/eth2/fork_choice/tests/lmd_ghost_test_vectors.yaml b/eth2/fork_choice/tests/lmd_ghost_test_vectors.yaml index dab998beb..e7847de11 100644 --- a/eth2/fork_choice/tests/lmd_ghost_test_vectors.yaml +++ b/eth2/fork_choice/tests/lmd_ghost_test_vectors.yaml @@ -51,3 +51,15 @@ test_cases: - b3: 6 heads: - id: 'b2' +- blocks: + - id: 'b0' + parent: 'b0' + - id: 'b1' + parent: 'b0' + - id: 'b2' + parent: 'b0' + weights: + - b1: 0 + - b2: 0 + heads: + - id: 'b1' From 1c1c15a122e1c589649a06cb27b227b4d5321230 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 14 Mar 2019 16:00:22 +1100 Subject: [PATCH 047/154] Tidy per epoch processing - Add comments to ValidatorStatuses - Add some checks to guard against a bad statuses list - Remove unused attester_sets.rs file. --- .../src/per_epoch_processing.rs | 16 ++- .../src/per_epoch_processing/attester_sets.rs | 133 ------------------ .../src/per_epoch_processing/errors.rs | 1 + .../validator_statuses.rs | 80 ++++++++++- 4 files changed, 87 insertions(+), 143 deletions(-) delete mode 100644 eth2/state_processing/src/per_epoch_processing/attester_sets.rs diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index 044d32eae..8c4b8e88b 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -7,7 +7,6 @@ use types::{validator_registry::get_active_validator_indices, *}; use validator_statuses::{TotalBalances, ValidatorStatuses}; use winning_root::{winning_root, WinningRoot}; -pub mod attester_sets; pub mod errors; pub mod inclusion_distance; pub mod tests; @@ -271,12 +270,18 @@ pub fn process_rewards_and_penalities( let base_reward_quotient = total_balances.previous_epoch.integer_sqrt() / spec.base_reward_quotient; + // Guard against a divide-by-zero during the validator balance update. if base_reward_quotient == 0 { return Err(Error::BaseRewardQuotientIsZero); } + // Guard against a divide-by-zero during the validator balance update. if total_balances.previous_epoch == 0 { return Err(Error::PreviousTotalBalanceIsZero); } + // Guard against an out-of-bounds during the validator balance update. 
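// A standalone sketch (not part of this patch) of why the divide-by-zero guards
// above exist: the base reward quotient is an integer square root divided by a
// spec constant, so a small previous-epoch balance can round it down to zero and
// poison every later division. The square root helper and the constant 32 are
// illustrative stand-ins; the real code uses the `integer_sqrt` crate and `ChainSpec`.

fn integer_sqrt(n: u64) -> u64 {
    // Simple monotone search, fine for an illustration.
    let mut x = 0u64;
    while (x + 1).checked_mul(x + 1).map_or(false, |square| square <= n) {
        x += 1;
    }
    x
}

fn base_reward_quotient(previous_total_balance: u64, spec_quotient: u64) -> Result<u64, &'static str> {
    let quotient = integer_sqrt(previous_total_balance) / spec_quotient;
    if quotient == 0 {
        // Mirrors Error::BaseRewardQuotientIsZero above.
        return Err("base reward quotient is zero");
    }
    Ok(quotient)
}

fn main() {
    assert_eq!(base_reward_quotient(1_000_000, 32), Ok(31)); // sqrt = 1000, 1000 / 32 = 31
    assert!(base_reward_quotient(100, 32).is_err()); // sqrt = 10, 10 / 32 rounds to 0
}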
+ if statuses.statuses.len() != state.validator_balances.len() { + return Err(Error::ValidatorStatusesInconsistent); + } // Justification and finalization @@ -288,7 +293,7 @@ pub fn process_rewards_and_penalities( .enumerate() .map(|(index, &balance)| { let mut balance = balance; - let status = &statuses.get(index); + let status = &statuses.statuses[index]; let base_reward = state.base_reward(index, base_reward_quotient, spec); if epochs_since_finality <= 4 { @@ -367,8 +372,13 @@ pub fn process_rewards_and_penalities( // Attestation inclusion + // Guard against an out-of-bounds during the attester inclusion balance update. + if statuses.statuses.len() != state.validator_registry.len() { + return Err(Error::ValidatorStatusesInconsistent); + } + for (index, _validator) in state.validator_registry.iter().enumerate() { - let status = &statuses.get(index); + let status = &statuses.statuses[index]; if status.is_previous_epoch_attester { let proposer_index = status.inclusion_info.proposer_index; diff --git a/eth2/state_processing/src/per_epoch_processing/attester_sets.rs b/eth2/state_processing/src/per_epoch_processing/attester_sets.rs deleted file mode 100644 index 03f49c1d3..000000000 --- a/eth2/state_processing/src/per_epoch_processing/attester_sets.rs +++ /dev/null @@ -1,133 +0,0 @@ -use fnv::FnvHashSet; -use types::*; - -/// A set of validator indices, along with the total balance of all those attesters. -#[derive(Default)] -pub struct Attesters { - /// A set of validator indices. - pub indices: FnvHashSet, - /// The total balance of all validators in `self.indices`. - pub balance: u64, -} - -impl Attesters { - /// Add the given indices to the set, incrementing the sets balance by the provided balance. - fn add(&mut self, additional_indices: &[usize], additional_balance: u64) { - self.indices.reserve(additional_indices.len()); - for i in additional_indices { - self.indices.insert(*i); - } - self.balance = self.balance.saturating_add(additional_balance); - } -} - -/// A collection of `Attester` objects, representing set of attesters that are rewarded/penalized -/// during an epoch transition. -pub struct AttesterSets { - /// All validators who attested during the state's current epoch. - pub current_epoch: Attesters, - /// All validators who attested that the beacon block root of the first slot of the state's - /// current epoch is the same as the one stored in this state. - /// - /// In short validators who agreed with the state about the first slot of the current epoch. - pub current_epoch_boundary: Attesters, - /// All validators who attested during the state's previous epoch. - pub previous_epoch: Attesters, - /// All validators who attested that the beacon block root of the first slot of the state's - /// previous epoch is the same as the one stored in this state. - /// - /// In short, validators who agreed with the state about the first slot of the previous epoch. - pub previous_epoch_boundary: Attesters, - /// All validators who attested that the beacon block root at the pending attestation's slot is - /// the same as the one stored in this state. - /// - /// In short, validators who agreed with the state about the current beacon block root when - /// they attested. - pub previous_epoch_head: Attesters, -} - -impl AttesterSets { - /// Loop through all attestations in the state and instantiate a complete `AttesterSets` struct. 
- /// - /// Spec v0.4.0 - pub fn new(state: &BeaconState, spec: &ChainSpec) -> Result { - let mut current_epoch = Attesters::default(); - let mut current_epoch_boundary = Attesters::default(); - let mut previous_epoch = Attesters::default(); - let mut previous_epoch_boundary = Attesters::default(); - let mut previous_epoch_head = Attesters::default(); - - for a in &state.latest_attestations { - let attesting_indices = - state.get_attestation_participants(&a.data, &a.aggregation_bitfield, spec)?; - let attesting_balance = state.get_total_balance(&attesting_indices, spec); - - if is_from_epoch(a, state.current_epoch(spec), spec) { - current_epoch.add(&attesting_indices, attesting_balance); - - if has_common_epoch_boundary_root(a, state, state.current_epoch(spec), spec)? { - current_epoch_boundary.add(&attesting_indices, attesting_balance); - } - } else if is_from_epoch(a, state.previous_epoch(spec), spec) { - previous_epoch.add(&attesting_indices, attesting_balance); - - if has_common_epoch_boundary_root(a, state, state.previous_epoch(spec), spec)? { - previous_epoch_boundary.add(&attesting_indices, attesting_balance); - } - - if has_common_beacon_block_root(a, state, spec)? { - previous_epoch_head.add(&attesting_indices, attesting_balance); - } - } - } - - Ok(Self { - current_epoch, - current_epoch_boundary, - previous_epoch, - previous_epoch_boundary, - previous_epoch_head, - }) - } -} - -/// Returns `true` if some `PendingAttestation` is from the supplied `epoch`. -/// -/// Spec v0.4.0 -fn is_from_epoch(a: &PendingAttestation, epoch: Epoch, spec: &ChainSpec) -> bool { - a.data.slot.epoch(spec.slots_per_epoch) == epoch -} - -/// Returns `true` if a `PendingAttestation` and `BeaconState` share the same beacon block hash for -/// the first slot of the given epoch. -/// -/// Spec v0.4.0 -fn has_common_epoch_boundary_root( - a: &PendingAttestation, - state: &BeaconState, - epoch: Epoch, - spec: &ChainSpec, -) -> Result { - let slot = epoch.start_slot(spec.slots_per_epoch); - let state_boundary_root = *state - .get_block_root(slot, spec) - .ok_or_else(|| BeaconStateError::InsufficientBlockRoots)?; - - Ok(a.data.epoch_boundary_root == state_boundary_root) -} - -/// Returns `true` if a `PendingAttestation` and `BeaconState` share the same beacon block hash for -/// the current slot of the `PendingAttestation`. -/// -/// Spec v0.4.0 -fn has_common_beacon_block_root( - a: &PendingAttestation, - state: &BeaconState, - spec: &ChainSpec, -) -> Result { - let state_block_root = *state - .get_block_root(a.data.slot, spec) - .ok_or_else(|| BeaconStateError::InsufficientBlockRoots)?; - - Ok(a.data.beacon_block_root == state_block_root) -} diff --git a/eth2/state_processing/src/per_epoch_processing/errors.rs b/eth2/state_processing/src/per_epoch_processing/errors.rs index c60e00cae..94fc0cca5 100644 --- a/eth2/state_processing/src/per_epoch_processing/errors.rs +++ b/eth2/state_processing/src/per_epoch_processing/errors.rs @@ -8,6 +8,7 @@ pub enum EpochProcessingError { NoRandaoSeed, PreviousTotalBalanceIsZero, InclusionDistanceZero, + ValidatorStatusesInconsistent, /// Unable to get the inclusion distance for a validator that should have an inclusion /// distance. This indicates an internal inconsistency. 
/// diff --git a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs index 70eeaf82a..f76900f3b 100644 --- a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs +++ b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs @@ -1,26 +1,40 @@ use super::WinningRootHashSet; use types::*; +/// Sets the boolean `var` on `self` to be true if it is true on `other`. Otherwise leaves `self` +/// as is. macro_rules! set_self_if_other_is_true { ($self_: ident, $other: ident, $var: ident) => { - $self_.$var = $other.$var & !$self_.$var; + if $other.$var { + $self_.$var = true; + } }; } +/// The information required to reward some validator for their participation in a "winning" +/// crosslink root. #[derive(Default, Clone)] pub struct WinningRootInfo { + /// The total balance of the crosslink committee. pub total_committee_balance: u64, + /// The total balance of the crosslink committee that attested for the "winning" root. pub total_attesting_balance: u64, } +/// The information required to reward a block producer for including an attestation in a block. #[derive(Clone)] pub struct InclusionInfo { + /// The earliest slot a validator had an attestation included in the previous epoch. pub slot: Slot, + /// The distance between the attestation slot and the slot that attestation was included in a + /// block. pub distance: Slot, + /// The index of the proposer at the slot where the attestation was included. pub proposer_index: usize, } impl Default for InclusionInfo { + /// Defaults to `slot` and `distance` at their maximum values and `proposer_index` at zero. fn default() -> Self { Self { slot: Slot::max_value(), @@ -31,6 +45,8 @@ impl Default for InclusionInfo { } impl InclusionInfo { + /// Tests if some `other` `InclusionInfo` has a lower inclusion slot than `self`. If so, + /// replaces `self` with `other`. pub fn update(&mut self, other: &Self) { if other.slot < self.slot { self.slot = other.slot; @@ -40,23 +56,43 @@ impl InclusionInfo { } } +/// Information required to reward some validator during the current and previous epoch. #[derive(Default, Clone)] pub struct AttesterStatus { + /// True if the validator was active in the state's _current_ epoch. pub is_active_in_current_epoch: bool, + /// True if the validator was active in the state's _previous_ epoch. pub is_active_in_previous_epoch: bool, + /// True if the validator had an attestation included in the _current_ epoch. pub is_current_epoch_attester: bool, + /// True if the validator's beacon block root attestation for the first slot of the _current_ + /// epoch matches the block root known to the state. pub is_current_epoch_boundary_attester: bool, + /// True if the validator had an attestation included in the _previous_ epoch. pub is_previous_epoch_attester: bool, + /// True if the validator's beacon block root attestation for the first slot of the _previous_ + /// epoch matches the block root known to the state. pub is_previous_epoch_boundary_attester: bool, + /// True if the validator's beacon block root attestation in the _previous_ epoch at the + /// attestation's slot (`attestation_data.slot`) matches the block root known to the state. pub is_previous_epoch_head_attester: bool, + /// Information used to reward the block producer of this validators earliest-included + /// attestation. 
pub inclusion_info: InclusionInfo, + /// Information used to reward/penalize the validator if they voted in the super-majority for + /// some shard block. pub winning_root_info: Option, } impl AttesterStatus { - /// Note: does not update the winning root info. + /// Accepts some `other` `AttesterStatus` and updates `self` if required. + /// + /// Will never set one of the `bool` fields to `false`, it will only set it to `true` if other + /// contains a `true` field. + /// + /// Note: does not update the winning root info, this is done manually. pub fn update(&mut self, other: &Self) { // Update all the bool fields, only updating `self` if `other` is true (never setting // `self` to false). @@ -72,24 +108,46 @@ impl AttesterStatus { } } +/// The total effective balances for different sets of validators during the previous and current +/// epochs. #[derive(Default, Clone)] pub struct TotalBalances { + /// The total effective balance of all active validators during the _current_ epoch. pub current_epoch: u64, + /// The total effective balance of all active validators during the _previous_ epoch. pub previous_epoch: u64, + /// The total effective balance of all validators who attested during the _current_ epoch. pub current_epoch_attesters: u64, + /// The total effective balance of all validators who attested during the _current_ epoch and + /// agreed with the state about the beacon block at the first slot of the _current_ epoch. pub current_epoch_boundary_attesters: u64, + /// The total effective balance of all validators who attested during the _previous_ epoch. pub previous_epoch_attesters: u64, + /// The total effective balance of all validators who attested during the _previous_ epoch and + /// agreed with the state about the beacon block at the first slot of the _previous_ epoch. pub previous_epoch_boundary_attesters: u64, + /// The total effective balance of all validators who attested during the _previous_ epoch and + /// agreed with the state about the beacon block at the time of attestation. pub previous_epoch_head_attesters: u64, } +/// Summarised information about validator participation in the _previous and _current_ epochs of +/// some `BeaconState`. #[derive(Clone)] pub struct ValidatorStatuses { - statuses: Vec, + /// Information about each individual validator from the state's validator registy. + pub statuses: Vec, + /// Summed balances for various sets of validators. pub total_balances: TotalBalances, } impl ValidatorStatuses { + /// Initializes a new instance, determining: + /// + /// - Active validators + /// - Total balances for the current and previous epochs. + /// + /// Spec v0.4.0 pub fn new(state: &BeaconState, spec: &ChainSpec) -> Self { let mut statuses = Vec::with_capacity(state.validator_registry.len()); let mut total_balances = TotalBalances::default(); @@ -116,10 +174,10 @@ impl ValidatorStatuses { } } - pub fn get(&self, i: usize) -> &AttesterStatus { - &self.statuses[i] - } - + /// Process some attestations from the given `state` updating the `statuses` and + /// `total_balances` fields. + /// + /// Spec v0.4.0 pub fn process_attestations( &mut self, state: &BeaconState, @@ -174,6 +232,10 @@ impl ValidatorStatuses { Ok(()) } + /// Update the `statuses` for each validator based upon whether or not they attested to the + /// "winning" shard block root for the previous epoch. 
+ /// + /// Spec v0.4.0 pub fn process_winning_roots( &mut self, state: &BeaconState, @@ -207,6 +269,10 @@ impl ValidatorStatuses { } } +/// Returns the distance between when the attestation was created and when it was included in a +/// block. +/// +/// Spec v0.4.0 fn inclusion_distance(a: &PendingAttestation) -> Slot { a.inclusion_slot - a.data.slot } From fc0cdb8226f66fa5056b9677252d5c66bb4303b5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 14 Mar 2019 18:10:36 +1300 Subject: [PATCH 048/154] Start building non-worst case benches --- .../benches/bench_block_processing.rs | 231 +++++++++++------- .../testing_beacon_block_builder.rs | 15 +- 2 files changed, 152 insertions(+), 94 deletions(-) diff --git a/eth2/state_processing/benches/bench_block_processing.rs b/eth2/state_processing/benches/bench_block_processing.rs index 128b1051b..a315717b2 100644 --- a/eth2/state_processing/benches/bench_block_processing.rs +++ b/eth2/state_processing/benches/bench_block_processing.rs @@ -17,6 +17,8 @@ use types::*; pub fn bench_block_processing_n_validators(c: &mut Criterion, validator_count: usize) { let spec = ChainSpec::foundation(); + let bench_builder = BlockBenchingBuilder::new(validator_count, &spec); + let (mut state, keypairs) = build_state(validator_count, &spec); let block = build_block(&mut state, &keypairs, &spec); @@ -94,105 +96,160 @@ fn build_state(validator_count: usize, spec: &ChainSpec) -> (BeaconState, Vec BeaconBlock { - let mut builder = TestingBeaconBlockBuilder::new(spec); +pub struct BlockBenchingBuilder { + pub state_builder: TestingBeaconStateBuilder, + pub block_builder: TestingBeaconBlockBuilder, - builder.set_slot(state.slot); + pub num_validators: usize, + pub num_proposer_slashings: usize, + pub num_attester_slashings: usize, + pub num_indices_per_slashable_vote: usize, + pub num_attestations: usize, + pub num_deposits: usize, + pub num_exits: usize, + pub num_transfers: usize, +} - let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap(); - let keypair = &keypairs[proposer_index]; +impl BlockBenchingBuilder { + pub fn new(num_validators: usize, spec: &ChainSpec) -> Self { + let mut state_builder = + TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(num_validators, &spec); + let mut block_builder = TestingBeaconBlockBuilder::new(spec); - builder.set_randao_reveal(&keypair.sk, &state.fork, spec); - - // Used as a stream of validator indices for use in slashings, exits, etc. - let mut validators_iter = (0..keypairs.len() as u64).into_iter(); - - // Insert the maximum possible number of `ProposerSlashing` objects. 
- debug!( - "Inserting {} proposer slashings...", - spec.max_proposer_slashings - ); - for _ in 0..spec.max_proposer_slashings { - let validator_index = validators_iter.next().expect("Insufficient validators."); - - builder.insert_proposer_slashing( - validator_index, - &keypairs[validator_index as usize].sk, - &state.fork, - spec, - ); + Self { + state_builder, + block_builder, + num_validators: 0, + num_proposer_slashings: 0, + num_attester_slashings: 0, + num_indices_per_slashable_vote: spec.max_indices_per_slashable_vote as usize, + num_attestations: 0, + num_deposits: 0, + num_exits: 0, + num_transfers: 0 + } } - // Insert the maximum possible number of `AttesterSlashing` objects - debug!( - "Inserting {} attester slashings...", - spec.max_attester_slashings - ); - for _ in 0..spec.max_attester_slashings { - let mut attesters: Vec = vec![]; - let mut secret_keys: Vec<&SecretKey> = vec![]; + pub fn maximize_block_operations(&mut self, spec: &ChainSpec) { + self.num_proposer_slashings = spec.max_proposer_slashings as usize; + self.num_attester_slashings = spec.max_attester_slashings as usize; + self.num_indices_per_slashable_vote = spec.max_indices_per_slashable_vote as usize; + self.num_attestations = spec.max_attestations as usize; + self.num_deposits = spec.max_deposits as usize; + self.num_exits = spec.max_voluntary_exits as usize; + self.num_transfers = spec.max_transfers as usize; + } - for _ in 0..spec.max_indices_per_slashable_vote { + pub fn set_slot(&mut self, slot: Slot, spec: &ChainSpec) { + self.state_builder.teleport_to_slot(slot, &spec); + } + + pub fn build_caches(&mut self, spec: &ChainSpec) { + // Builds all caches; benches will not contain shuffling/committee building times. + self.state_builder.build_caches(&spec).unwrap(); + } + + pub fn build(self, spec: &ChainSpec) -> (BeaconBlock, BeaconState) { + let (mut state, keypairs) = self.state_builder.build(); + let builder = &mut self.block_builder; + + builder.set_slot(state.slot); + + let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap(); + let keypair = &keypairs[proposer_index]; + + builder.set_randao_reveal(&keypair.sk, &state.fork, spec); + + // Used as a stream of validator indices for use in slashings, exits, etc. + let mut validators_iter = (0..keypairs.len() as u64).into_iter(); + + // Insert `ProposerSlashing` objects. + debug!( + "Inserting {} proposer slashings...", + self.num_proposer_slashings + ); + for _ in 0..self.num_proposer_slashings { let validator_index = validators_iter.next().expect("Insufficient validators."); - attesters.push(validator_index); - secret_keys.push(&keypairs[validator_index as usize].sk); + builder.insert_proposer_slashing( + validator_index, + &keypairs[validator_index as usize].sk, + &state.fork, + spec, + ); } - builder.insert_attester_slashing(&attesters, &secret_keys, &state.fork, spec); - } - - // Insert the maximum possible number of `Attestation` objects. - debug!("Inserting {} attestations...", spec.max_attestations); - let all_secret_keys: Vec<&SecretKey> = keypairs.iter().map(|keypair| &keypair.sk).collect(); - builder - .fill_with_attestations(state, &all_secret_keys, spec) - .unwrap(); - - // Insert the maximum possible number of `Deposit` objects. - debug!("Inserting {} deposits...", spec.max_deposits); - for i in 0..spec.max_deposits { - builder.insert_deposit(32_000_000_000, state.deposit_index + i, state, spec); - } - - // Insert the maximum possible number of `Exit` objects. 
- debug!("Inserting {} exits...", spec.max_voluntary_exits); - for _ in 0..spec.max_voluntary_exits { - let validator_index = validators_iter.next().expect("Insufficient validators."); - - builder.insert_exit( - state, - validator_index, - &keypairs[validator_index as usize].sk, - spec, + // Insert `AttesterSlashing` objects + debug!( + "Inserting {} attester slashings...", + self.num_attester_slashings ); + for _ in 0..self.num_attester_slashings { + let mut attesters: Vec = vec![]; + let mut secret_keys: Vec<&SecretKey> = vec![]; + + for _ in 0..self.num_indices_per_slashable_vote { + let validator_index = validators_iter.next().expect("Insufficient validators."); + + attesters.push(validator_index); + secret_keys.push(&keypairs[validator_index as usize].sk); + } + + builder.insert_attester_slashing(&attesters, &secret_keys, &state.fork, spec); + } + + // Insert `Attestation` objects. + debug!("Inserting {} attestations...", self.num_attestations); + let all_secret_keys: Vec<&SecretKey> = keypairs.iter().map(|keypair| &keypair.sk).collect(); + builder + .insert_attestations(&state, &all_secret_keys, self.num_attestations as usize, spec) + .unwrap(); + + // Insert `Deposit` objects. + debug!("Inserting {} deposits...", self.num_deposits); + for i in 0..self.num_deposits { + builder.insert_deposit(32_000_000_000, state.deposit_index + (i as u64), &state, spec); + } + + // Insert the maximum possible number of `Exit` objects. + debug!("Inserting {} exits...", self.num_exits); + for _ in 0..self.num_exits { + let validator_index = validators_iter.next().expect("Insufficient validators."); + + builder.insert_exit( + &state, + validator_index, + &keypairs[validator_index as usize].sk, + spec, + ); + } + + // Insert the maximum possible number of `Transfer` objects. + debug!("Inserting {} transfers...", self.num_transfers); + for _ in 0..self.num_transfers { + let validator_index = validators_iter.next().expect("Insufficient validators."); + + // Manually set the validator to be withdrawn. + state.validator_registry[validator_index as usize].withdrawable_epoch = + state.previous_epoch(spec); + + builder.insert_transfer( + &state, + validator_index, + validator_index, + 1, + keypairs[validator_index as usize].clone(), + spec, + ); + } + + let mut block = builder.build(&keypair.sk, &state.fork, spec); + + // Set the eth1 data to be different from the state. + block.eth1_data.block_hash = Hash256::from_slice(&vec![42; 32]); + + (block, state) } - - // Insert the maximum possible number of `Transfer` objects. - debug!("Inserting {} transfers...", spec.max_transfers); - for _ in 0..spec.max_transfers { - let validator_index = validators_iter.next().expect("Insufficient validators."); - - // Manually set the validator to be withdrawn. - state.validator_registry[validator_index as usize].withdrawable_epoch = - state.previous_epoch(spec); - - builder.insert_transfer( - state, - validator_index, - validator_index, - 1, - keypairs[validator_index as usize].clone(), - spec, - ); - } - - let mut block = builder.build(&keypair.sk, &state.fork, spec); - - // Set the eth1 data to be different from the state. - block.eth1_data.block_hash = Hash256::from_slice(&vec![42; 32]); - - block } /// Run the detailed benchmarking suite on the given `BeaconState`. 
diff --git a/eth2/types/src/test_utils/testing_beacon_block_builder.rs b/eth2/types/src/test_utils/testing_beacon_block_builder.rs index 97e395e1f..ecb42e27b 100644 --- a/eth2/types/src/test_utils/testing_beacon_block_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_block_builder.rs @@ -74,19 +74,20 @@ impl TestingBeaconBlockBuilder { self.block.body.attester_slashings.push(attester_slashing); } - /// Fills the block with `MAX_ATTESTATIONS` attestations. + /// Fills the block with `num_attestations` attestations. /// /// It will first go and get each committee that is able to include an attestation in this - /// block. If there are enough committees, it will produce an attestation for each. If there - /// are _not_ enough committees, it will start splitting the committees in half until it + /// block. If there _are_ enough committees, it will produce an attestation for each. If there + /// _are not_ enough committees, it will start splitting the committees in half until it /// achieves the target. It will then produce separate attestations for each split committee. /// /// Note: the signed messages of the split committees will be identical -- it would be possible /// to aggregate these split attestations. - pub fn fill_with_attestations( + pub fn insert_attestations( &mut self, state: &BeaconState, secret_keys: &[&SecretKey], + num_attestations: usize, spec: &ChainSpec, ) -> Result<(), BeaconStateError> { let mut slot = self.block.slot - spec.min_attestation_inclusion_delay; @@ -110,7 +111,7 @@ impl TestingBeaconBlockBuilder { } for (committee, shard) in state.get_crosslink_committees_at_slot(slot, spec)? { - if attestations_added >= spec.max_attestations { + if attestations_added >= num_attestations { break; } @@ -125,12 +126,12 @@ impl TestingBeaconBlockBuilder { // Loop through all the committees, splitting each one in half until we have // `MAX_ATTESTATIONS` committees. loop { - if committees.len() >= spec.max_attestations as usize { + if committees.len() >= num_attestations as usize { break; } for index in 0..committees.len() { - if committees.len() >= spec.max_attestations as usize { + if committees.len() >= num_attestations as usize { break; } From 2535f47f132006480b0f5dc85d2a92f6e6ddd066 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 14 Mar 2019 16:44:56 +1100 Subject: [PATCH 049/154] Add builder for benches Also adds a "sane" case for block processing --- .../benches/bench_block_processing.rs | 243 +----------------- eth2/state_processing/benches/benches.rs | 94 ++++++- .../benches/block_benching_builder.rs | 175 +++++++++++++ .../testing_beacon_block_builder.rs | 2 +- 4 files changed, 266 insertions(+), 248 deletions(-) create mode 100644 eth2/state_processing/benches/block_benching_builder.rs diff --git a/eth2/state_processing/benches/bench_block_processing.rs b/eth2/state_processing/benches/bench_block_processing.rs index a315717b2..2ee08c96a 100644 --- a/eth2/state_processing/benches/bench_block_processing.rs +++ b/eth2/state_processing/benches/bench_block_processing.rs @@ -1,6 +1,5 @@ use criterion::Criterion; use criterion::{black_box, Benchmark}; -use log::debug; use ssz::TreeHash; use state_processing::{ per_block_processing, @@ -10,252 +9,12 @@ use state_processing::{ verify_block_signature, }, }; -use types::test_utils::{TestingBeaconBlockBuilder, TestingBeaconStateBuilder}; use types::*; -/// Run the benchmarking suite on a foundation spec with 16,384 validators. 
-pub fn bench_block_processing_n_validators(c: &mut Criterion, validator_count: usize) { - let spec = ChainSpec::foundation(); - - let bench_builder = BlockBenchingBuilder::new(validator_count, &spec); - - let (mut state, keypairs) = build_state(validator_count, &spec); - let block = build_block(&mut state, &keypairs, &spec); - - assert_eq!( - block.body.proposer_slashings.len(), - spec.max_proposer_slashings as usize, - "The block should have the maximum possible proposer slashings" - ); - - assert_eq!( - block.body.attester_slashings.len(), - spec.max_attester_slashings as usize, - "The block should have the maximum possible attester slashings" - ); - - for attester_slashing in &block.body.attester_slashings { - let len_1 = attester_slashing - .slashable_attestation_1 - .validator_indices - .len(); - let len_2 = attester_slashing - .slashable_attestation_1 - .validator_indices - .len(); - assert!( - (len_1 == len_2) && (len_2 == spec.max_indices_per_slashable_vote as usize), - "Each attester slashing should have the maximum possible validator indices" - ); - } - - assert_eq!( - block.body.attestations.len(), - spec.max_attestations as usize, - "The block should have the maximum possible attestations." - ); - - assert_eq!( - block.body.deposits.len(), - spec.max_deposits as usize, - "The block should have the maximum possible deposits." - ); - - assert_eq!( - block.body.voluntary_exits.len(), - spec.max_voluntary_exits as usize, - "The block should have the maximum possible voluntary exits." - ); - - assert_eq!( - block.body.transfers.len(), - spec.max_transfers as usize, - "The block should have the maximum possible transfers." - ); - - bench_block_processing( - c, - &block, - &state, - &spec, - &format!("{}_validators", validator_count), - ); -} - -fn build_state(validator_count: usize, spec: &ChainSpec) -> (BeaconState, Vec) { - let mut builder = - TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec); - - // Set the state to be just before an epoch transition. - let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); - builder.teleport_to_slot(target_slot, &spec); - - // Builds all caches; benches will not contain shuffling/committee building times. 
- builder.build_caches(&spec).unwrap(); - - builder.build() -} - -pub struct BlockBenchingBuilder { - pub state_builder: TestingBeaconStateBuilder, - pub block_builder: TestingBeaconBlockBuilder, - - pub num_validators: usize, - pub num_proposer_slashings: usize, - pub num_attester_slashings: usize, - pub num_indices_per_slashable_vote: usize, - pub num_attestations: usize, - pub num_deposits: usize, - pub num_exits: usize, - pub num_transfers: usize, -} - -impl BlockBenchingBuilder { - pub fn new(num_validators: usize, spec: &ChainSpec) -> Self { - let mut state_builder = - TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(num_validators, &spec); - let mut block_builder = TestingBeaconBlockBuilder::new(spec); - - Self { - state_builder, - block_builder, - num_validators: 0, - num_proposer_slashings: 0, - num_attester_slashings: 0, - num_indices_per_slashable_vote: spec.max_indices_per_slashable_vote as usize, - num_attestations: 0, - num_deposits: 0, - num_exits: 0, - num_transfers: 0 - } - } - - pub fn maximize_block_operations(&mut self, spec: &ChainSpec) { - self.num_proposer_slashings = spec.max_proposer_slashings as usize; - self.num_attester_slashings = spec.max_attester_slashings as usize; - self.num_indices_per_slashable_vote = spec.max_indices_per_slashable_vote as usize; - self.num_attestations = spec.max_attestations as usize; - self.num_deposits = spec.max_deposits as usize; - self.num_exits = spec.max_voluntary_exits as usize; - self.num_transfers = spec.max_transfers as usize; - } - - pub fn set_slot(&mut self, slot: Slot, spec: &ChainSpec) { - self.state_builder.teleport_to_slot(slot, &spec); - } - - pub fn build_caches(&mut self, spec: &ChainSpec) { - // Builds all caches; benches will not contain shuffling/committee building times. - self.state_builder.build_caches(&spec).unwrap(); - } - - pub fn build(self, spec: &ChainSpec) -> (BeaconBlock, BeaconState) { - let (mut state, keypairs) = self.state_builder.build(); - let builder = &mut self.block_builder; - - builder.set_slot(state.slot); - - let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap(); - let keypair = &keypairs[proposer_index]; - - builder.set_randao_reveal(&keypair.sk, &state.fork, spec); - - // Used as a stream of validator indices for use in slashings, exits, etc. - let mut validators_iter = (0..keypairs.len() as u64).into_iter(); - - // Insert `ProposerSlashing` objects. - debug!( - "Inserting {} proposer slashings...", - self.num_proposer_slashings - ); - for _ in 0..self.num_proposer_slashings { - let validator_index = validators_iter.next().expect("Insufficient validators."); - - builder.insert_proposer_slashing( - validator_index, - &keypairs[validator_index as usize].sk, - &state.fork, - spec, - ); - } - - // Insert `AttesterSlashing` objects - debug!( - "Inserting {} attester slashings...", - self.num_attester_slashings - ); - for _ in 0..self.num_attester_slashings { - let mut attesters: Vec = vec![]; - let mut secret_keys: Vec<&SecretKey> = vec![]; - - for _ in 0..self.num_indices_per_slashable_vote { - let validator_index = validators_iter.next().expect("Insufficient validators."); - - attesters.push(validator_index); - secret_keys.push(&keypairs[validator_index as usize].sk); - } - - builder.insert_attester_slashing(&attesters, &secret_keys, &state.fork, spec); - } - - // Insert `Attestation` objects. 
- debug!("Inserting {} attestations...", self.num_attestations); - let all_secret_keys: Vec<&SecretKey> = keypairs.iter().map(|keypair| &keypair.sk).collect(); - builder - .insert_attestations(&state, &all_secret_keys, self.num_attestations as usize, spec) - .unwrap(); - - // Insert `Deposit` objects. - debug!("Inserting {} deposits...", self.num_deposits); - for i in 0..self.num_deposits { - builder.insert_deposit(32_000_000_000, state.deposit_index + (i as u64), &state, spec); - } - - // Insert the maximum possible number of `Exit` objects. - debug!("Inserting {} exits...", self.num_exits); - for _ in 0..self.num_exits { - let validator_index = validators_iter.next().expect("Insufficient validators."); - - builder.insert_exit( - &state, - validator_index, - &keypairs[validator_index as usize].sk, - spec, - ); - } - - // Insert the maximum possible number of `Transfer` objects. - debug!("Inserting {} transfers...", self.num_transfers); - for _ in 0..self.num_transfers { - let validator_index = validators_iter.next().expect("Insufficient validators."); - - // Manually set the validator to be withdrawn. - state.validator_registry[validator_index as usize].withdrawable_epoch = - state.previous_epoch(spec); - - builder.insert_transfer( - &state, - validator_index, - validator_index, - 1, - keypairs[validator_index as usize].clone(), - spec, - ); - } - - let mut block = builder.build(&keypair.sk, &state.fork, spec); - - // Set the eth1 data to be different from the state. - block.eth1_data.block_hash = Hash256::from_slice(&vec![42; 32]); - - (block, state) - } -} - /// Run the detailed benchmarking suite on the given `BeaconState`. /// /// `desc` will be added to the title of each bench. -fn bench_block_processing( +pub fn bench_block_processing( c: &mut Criterion, initial_block: &BeaconBlock, initial_state: &BeaconState, diff --git a/eth2/state_processing/benches/benches.rs b/eth2/state_processing/benches/benches.rs index af384b00a..685858c78 100644 --- a/eth2/state_processing/benches/benches.rs +++ b/eth2/state_processing/benches/benches.rs @@ -1,23 +1,107 @@ +use block_benching_builder::BlockBenchingBuilder; use criterion::Criterion; use criterion::{criterion_group, criterion_main}; use env_logger::{Builder, Env}; +use log::info; +use types::*; mod bench_block_processing; mod bench_epoch_processing; +mod block_benching_builder; pub const VALIDATOR_COUNT: usize = 16_384; // `LOG_LEVEL == "debug"` gives logs, but they're very noisy and slow down benching. -pub const LOG_LEVEL: &str = ""; +pub const LOG_LEVEL: &str = "info"; -pub fn state_processing(c: &mut Criterion) { +/// Build a worst-case block and benchmark processing it. +pub fn block_processing_worst_case(c: &mut Criterion) { if LOG_LEVEL != "" { Builder::from_env(Env::default().default_filter_or(LOG_LEVEL)).init(); } + info!( + "Building worst case block bench with {} validators", + VALIDATOR_COUNT + ); - bench_epoch_processing::bench_epoch_processing_n_validators(c, VALIDATOR_COUNT); - bench_block_processing::bench_block_processing_n_validators(c, VALIDATOR_COUNT); + // Use the specifications from the Eth2.0 spec. + let spec = ChainSpec::foundation(); + + // Create a builder for configuring the block and state for benching. + let mut bench_builder = BlockBenchingBuilder::new(VALIDATOR_COUNT, &spec); + + // Set the number of included operations to be maximum (e.g., `MAX_ATTESTATIONS`, etc.) + bench_builder.maximize_block_operations(&spec); + + // Set the state and block to be in the last slot of the 4th epoch. 
+ let last_slot_of_epoch = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); + bench_builder.set_slot(last_slot_of_epoch, &spec); + + // Build all the state caches so the build times aren't included in the benches. + bench_builder.build_caches(&spec); + + // Generate the block and state. + let (block, state) = bench_builder.build(&spec); + + // Run the benches. + bench_block_processing::bench_block_processing( + c, + &block, + &state, + &spec, + &format!("{}_validators/worst_case", VALIDATOR_COUNT), + ); } -criterion_group!(benches, state_processing); +/// Build a reasonable-case block and benchmark processing it. +pub fn block_processing_reasonable_case(c: &mut Criterion) { + info!( + "Building reasonable case block bench with {} validators", + VALIDATOR_COUNT + ); + + // Use the specifications from the Eth2.0 spec. + let spec = ChainSpec::foundation(); + + // Create a builder for configuring the block and state for benching. + let mut bench_builder = BlockBenchingBuilder::new(VALIDATOR_COUNT, &spec); + + // Set the number of included operations to what we might expect normally. + bench_builder.num_proposer_slashings = 0; + bench_builder.num_attester_slashings = 0; + bench_builder.num_attestations = (spec.shard_count / spec.slots_per_epoch) as usize; + bench_builder.num_deposits = 2; + bench_builder.num_exits = 2; + bench_builder.num_transfers = 2; + + // Set the state and block to be in the last slot of the 4th epoch. + let last_slot_of_epoch = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); + bench_builder.set_slot(last_slot_of_epoch, &spec); + + // Build all the state caches so the build times aren't included in the benches. + bench_builder.build_caches(&spec); + + // Generate the block and state. + let (block, state) = bench_builder.build(&spec); + + // Run the benches. 
+ bench_block_processing::bench_block_processing( + c, + &block, + &state, + &spec, + &format!("{}_validators/reasonable_case", VALIDATOR_COUNT), + ); +} + +pub fn state_processing(c: &mut Criterion) { + bench_epoch_processing::bench_epoch_processing_n_validators(c, VALIDATOR_COUNT); +} + +criterion_group!( + benches, + block_processing_reasonable_case, + block_processing_worst_case, + state_processing +); criterion_main!(benches); diff --git a/eth2/state_processing/benches/block_benching_builder.rs b/eth2/state_processing/benches/block_benching_builder.rs new file mode 100644 index 000000000..b993851d7 --- /dev/null +++ b/eth2/state_processing/benches/block_benching_builder.rs @@ -0,0 +1,175 @@ +use log::info; +use types::test_utils::{TestingBeaconBlockBuilder, TestingBeaconStateBuilder}; +use types::*; + +pub struct BlockBenchingBuilder { + pub state_builder: TestingBeaconStateBuilder, + pub block_builder: TestingBeaconBlockBuilder, + + pub num_validators: usize, + pub num_proposer_slashings: usize, + pub num_attester_slashings: usize, + pub num_indices_per_slashable_vote: usize, + pub num_attestations: usize, + pub num_deposits: usize, + pub num_exits: usize, + pub num_transfers: usize, +} + +impl BlockBenchingBuilder { + pub fn new(num_validators: usize, spec: &ChainSpec) -> Self { + let state_builder = + TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(num_validators, &spec); + let block_builder = TestingBeaconBlockBuilder::new(spec); + + Self { + state_builder, + block_builder, + num_validators: 0, + num_proposer_slashings: 0, + num_attester_slashings: 0, + num_indices_per_slashable_vote: spec.max_indices_per_slashable_vote as usize, + num_attestations: 0, + num_deposits: 0, + num_exits: 0, + num_transfers: 0, + } + } + + pub fn maximize_block_operations(&mut self, spec: &ChainSpec) { + self.num_proposer_slashings = spec.max_proposer_slashings as usize; + self.num_attester_slashings = spec.max_attester_slashings as usize; + self.num_indices_per_slashable_vote = spec.max_indices_per_slashable_vote as usize; + self.num_attestations = spec.max_attestations as usize; + self.num_deposits = spec.max_deposits as usize; + self.num_exits = spec.max_voluntary_exits as usize; + self.num_transfers = spec.max_transfers as usize; + } + + pub fn set_slot(&mut self, slot: Slot, spec: &ChainSpec) { + self.state_builder.teleport_to_slot(slot, &spec); + } + + pub fn build_caches(&mut self, spec: &ChainSpec) { + // Builds all caches; benches will not contain shuffling/committee building times. + self.state_builder.build_caches(&spec).unwrap(); + } + + pub fn build(mut self, spec: &ChainSpec) -> (BeaconBlock, BeaconState) { + let (mut state, keypairs) = self.state_builder.build(); + let builder = &mut self.block_builder; + + builder.set_slot(state.slot); + + let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap(); + let keypair = &keypairs[proposer_index]; + + builder.set_randao_reveal(&keypair.sk, &state.fork, spec); + + // Used as a stream of validator indices for use in slashings, exits, etc. + let mut validators_iter = (0..keypairs.len() as u64).into_iter(); + + // Insert `ProposerSlashing` objects. 
+ for _ in 0..self.num_proposer_slashings { + let validator_index = validators_iter.next().expect("Insufficient validators."); + + builder.insert_proposer_slashing( + validator_index, + &keypairs[validator_index as usize].sk, + &state.fork, + spec, + ); + } + info!( + "Inserted {} proposer slashings.", + builder.block.body.proposer_slashings.len() + ); + + // Insert `AttesterSlashing` objects + for _ in 0..self.num_attester_slashings { + let mut attesters: Vec = vec![]; + let mut secret_keys: Vec<&SecretKey> = vec![]; + + for _ in 0..self.num_indices_per_slashable_vote { + let validator_index = validators_iter.next().expect("Insufficient validators."); + + attesters.push(validator_index); + secret_keys.push(&keypairs[validator_index as usize].sk); + } + + builder.insert_attester_slashing(&attesters, &secret_keys, &state.fork, spec); + } + info!( + "Inserted {} attester slashings.", + builder.block.body.attester_slashings.len() + ); + + // Insert `Attestation` objects. + let all_secret_keys: Vec<&SecretKey> = keypairs.iter().map(|keypair| &keypair.sk).collect(); + builder + .insert_attestations( + &state, + &all_secret_keys, + self.num_attestations as usize, + spec, + ) + .unwrap(); + info!( + "Inserted {} attestations.", + builder.block.body.attestations.len() + ); + + // Insert `Deposit` objects. + for i in 0..self.num_deposits { + builder.insert_deposit( + 32_000_000_000, + state.deposit_index + (i as u64), + &state, + spec, + ); + } + info!("Inserted {} deposits.", builder.block.body.deposits.len()); + + // Insert the maximum possible number of `Exit` objects. + for _ in 0..self.num_exits { + let validator_index = validators_iter.next().expect("Insufficient validators."); + + builder.insert_exit( + &state, + validator_index, + &keypairs[validator_index as usize].sk, + spec, + ); + } + info!( + "Inserted {} exits.", + builder.block.body.voluntary_exits.len() + ); + + // Insert the maximum possible number of `Transfer` objects. + for _ in 0..self.num_transfers { + let validator_index = validators_iter.next().expect("Insufficient validators."); + + // Manually set the validator to be withdrawn. + state.validator_registry[validator_index as usize].withdrawable_epoch = + state.previous_epoch(spec); + + builder.insert_transfer( + &state, + validator_index, + validator_index, + 1, + keypairs[validator_index as usize].clone(), + spec, + ); + } + info!("Inserted {} transfers.", builder.block.body.transfers.len()); + + let mut block = self.block_builder.build(&keypair.sk, &state.fork, spec); + + // Set the eth1 data to be different from the state. 
+        block.eth1_data.block_hash = Hash256::from_slice(&vec![42; 32]);
+
+        (block, state)
+    }
+}
diff --git a/eth2/types/src/test_utils/testing_beacon_block_builder.rs b/eth2/types/src/test_utils/testing_beacon_block_builder.rs
index ecb42e27b..58633b5ce 100644
--- a/eth2/types/src/test_utils/testing_beacon_block_builder.rs
+++ b/eth2/types/src/test_utils/testing_beacon_block_builder.rs
@@ -12,7 +12,7 @@ use ssz::{SignedRoot, TreeHash};
 ///
 /// This struct should **never be used for production purposes.**
 pub struct TestingBeaconBlockBuilder {
-    block: BeaconBlock,
+    pub block: BeaconBlock,
 }
 
 impl TestingBeaconBlockBuilder {

From 4fd8551e8b6a541720bb93cce85dcca302fa021b Mon Sep 17 00:00:00 2001
From: Paul Hauner
Date: Thu, 14 Mar 2019 17:49:39 +1100
Subject: [PATCH 050/154] Update loglevel, comments in benches

---
 eth2/state_processing/benches/benches.rs | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/eth2/state_processing/benches/benches.rs b/eth2/state_processing/benches/benches.rs
index 685858c78..0cf797147 100644
--- a/eth2/state_processing/benches/benches.rs
+++ b/eth2/state_processing/benches/benches.rs
@@ -11,7 +11,7 @@ mod block_benching_builder;
 
 pub const VALIDATOR_COUNT: usize = 16_384;
 
-// `LOG_LEVEL == "debug"` gives logs, but they're very noisy and slow down benching.
+// `LOG_LEVEL == "info"` gives handy messages.
 pub const LOG_LEVEL: &str = "info";
 
 /// Build a worst-case block and benchmark processing it.
@@ -40,10 +40,8 @@ pub fn block_processing_worst_case(c: &mut Criterion) {
     // Build all the state caches so the build times aren't included in the benches.
     bench_builder.build_caches(&spec);
 
-    // Generate the block and state.
+    // Generate the block and state then run benches.
     let (block, state) = bench_builder.build(&spec);
-
-    // Run the benches.
     bench_block_processing::bench_block_processing(
         c,
         &block,
@@ -81,10 +79,8 @@ pub fn block_processing_reasonable_case(c: &mut Criterion) {
     // Build all the state caches so the build times aren't included in the benches.
     bench_builder.build_caches(&spec);
 
-    // Generate the block and state.
+    // Generate the block and state then run benches.
    let (block, state) = bench_builder.build(&spec);
-
-    // Run the benches.
     bench_block_processing::bench_block_processing(
         c,
         &block,

From 3dfdfc95ac84aac141ee24ddc89c8f35b53c87e7 Mon Sep 17 00:00:00 2001
From: Paul Hauner
Date: Thu, 14 Mar 2019 17:53:13 +1100
Subject: [PATCH 051/154] Fix test_utils macro definition

It needed to be defined before it was used in a module.
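For reference, a minimal standalone sketch (illustrative names only, not code from this repository) of the textual scoping rule behind the fix: macros exported from a `#[macro_use]` module are only visible to items declared after that module, so the module providing the macros has to come first.

    // Illustrative only: `macros`, `double` and `user` are made-up names.
    #[macro_use]
    mod macros {
        macro_rules! double {
            ($x:expr) => {
                $x * 2
            };
        }
    }

    // Declared after `macros`, so `double!` is in scope here.
    mod user {
        pub fn four() -> i32 {
            double!(2)
        }
    }

    fn main() {
        assert_eq!(user::four(), 4);
    }

Keeping only the `#[macro_use] pub mod test_utils;` declaration at the top of `lib.rs` (and dropping the duplicate plain `pub mod test_utils;` further down) gives the other modules that ordering.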
--- eth2/types/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index 4d13fd16c..7b1d84837 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -2,6 +2,7 @@ #[macro_use] pub mod test_utils; + pub mod attestation; pub mod attestation_data; pub mod attestation_data_and_custody_bit; @@ -24,7 +25,6 @@ pub mod proposer_slashing; pub mod readers; pub mod shard_reassignment_record; pub mod slashable_attestation; -pub mod test_utils; pub mod transfer; pub mod voluntary_exit; #[macro_use] From 2bfc8ed4dadc86848a6a98c5427131d2db6dee9f Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 14 Mar 2019 18:08:09 +1100 Subject: [PATCH 052/154] Fix failing doc test --- beacon_node/beacon_chain/test_harness/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/test_harness/src/lib.rs b/beacon_node/beacon_chain/test_harness/src/lib.rs index f58c1b598..0703fd4a5 100644 --- a/beacon_node/beacon_chain/test_harness/src/lib.rs +++ b/beacon_node/beacon_chain/test_harness/src/lib.rs @@ -15,7 +15,7 @@ //! let validator_count = 8; //! let spec = ChainSpec::few_validators(); //! -//! let mut harness = BeaconChainHarness::new(spec, validator_count, None, true); +//! let mut harness = BeaconChainHarness::new(spec, validator_count); //! //! harness.advance_chain_with_block(); //! From 8cc89b98206f0cf3abe9707e2dd2641303e9bb8b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 14 Mar 2019 18:08:21 +1100 Subject: [PATCH 053/154] Fix clippy warnings --- .../test_harness/src/beacon_chain_harness.rs | 5 -- .../beacon_chain_harness/generate_deposits.rs | 46 ------------------- .../test_harness/src/test_case.rs | 1 - 3 files changed, 52 deletions(-) delete mode 100644 beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs index 28723a203..d74464ad4 100644 --- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs @@ -12,14 +12,9 @@ use slot_clock::TestingSlotClock; use ssz::TreeHash; use std::collections::HashSet; use std::iter::FromIterator; -use std::path::Path; use std::sync::Arc; use types::{test_utils::TestingBeaconStateBuilder, *}; -mod generate_deposits; - -pub use generate_deposits::generate_deposits_from_keypairs; - /// The beacon chain harness simulates a single beacon node with `validator_count` validators connected /// to it. Each validator is provided a borrow to the beacon chain, where it may read /// information and submit blocks/attestations for processing. 
diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs deleted file mode 100644 index bba3aec1c..000000000 --- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs +++ /dev/null @@ -1,46 +0,0 @@ -use bls::get_withdrawal_credentials; -use log::debug; -use rayon::prelude::*; -use types::*; - -/// Generates a `Deposit` for each keypairs -pub fn generate_deposits_from_keypairs( - keypairs: &[Keypair], - genesis_time: u64, - domain: u64, - spec: &ChainSpec, -) -> Vec { - debug!( - "Generating {} validator deposits from random keypairs...", - keypairs.len() - ); - - let initial_validator_deposits = keypairs - .par_iter() - .map(|keypair| { - let withdrawal_credentials = Hash256::from_slice( - &get_withdrawal_credentials(&keypair.pk, spec.bls_withdrawal_prefix_byte)[..], - ); - Deposit { - branch: vec![], // branch verification is not specified. - index: 0, // index verification is not specified. - deposit_data: DepositData { - amount: 32_000_000_000, // 32 ETH (in Gwei) - timestamp: genesis_time - 1, - deposit_input: DepositInput { - pubkey: keypair.pk.clone(), - // Validator can withdraw using their main keypair. - withdrawal_credentials: withdrawal_credentials.clone(), - proof_of_possession: DepositInput::create_proof_of_possession( - &keypair, - &withdrawal_credentials, - domain, - ), - }, - }, - } - }) - .collect(); - - initial_validator_deposits -} diff --git a/beacon_node/beacon_chain/test_harness/src/test_case.rs b/beacon_node/beacon_chain/test_harness/src/test_case.rs index cee78f6c4..0a6206972 100644 --- a/beacon_node/beacon_chain/test_harness/src/test_case.rs +++ b/beacon_node/beacon_chain/test_harness/src/test_case.rs @@ -6,7 +6,6 @@ use beacon_chain::CheckPoint; use bls::get_withdrawal_credentials; use log::{info, warn}; use ssz::SignedRoot; -use std::path::Path; use types::*; use types::test_utils::{TestingAttesterSlashingBuilder, TestingProposerSlashingBuilder}; From 65e3b388a0c7ffe0e578d9bcc6746170c5227b49 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 14 Mar 2019 18:17:32 +1100 Subject: [PATCH 054/154] Update signature-scheme to v0.6.1 --- eth2/utils/bls/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth2/utils/bls/Cargo.toml b/eth2/utils/bls/Cargo.toml index 468ed8050..2466605b0 100644 --- a/eth2/utils/bls/Cargo.toml +++ b/eth2/utils/bls/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Paul Hauner "] edition = "2018" [dependencies] -bls-aggregates = { git = "https://github.com/sigp/signature-schemes", tag = "0.6.0" } +bls-aggregates = { git = "https://github.com/sigp/signature-schemes", tag = "0.6.1" } hashing = { path = "../hashing" } hex = "0.3" serde = "1.0" From 96d96ba9ba55c927a1e3228aa8af89b9aac6b097 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 14 Mar 2019 20:54:30 +1100 Subject: [PATCH 055/154] Remove assertion in benches --- .../state_processing/benches/bench_epoch_processing.rs | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/eth2/state_processing/benches/bench_epoch_processing.rs b/eth2/state_processing/benches/bench_epoch_processing.rs index 49b4f4371..cc7701296 100644 --- a/eth2/state_processing/benches/bench_epoch_processing.rs +++ b/eth2/state_processing/benches/bench_epoch_processing.rs @@ -48,16 +48,6 @@ pub fn bench_epoch_processing_n_validators(c: &mut Criterion, validator_count: u "The state should have an attestation for each committee." 
); - // Assert that each attestation in the state has full participation. - let committee_size = validator_count / committees_per_epoch as usize; - for a in &state.latest_attestations { - assert_eq!( - a.aggregation_bitfield.num_set_bits(), - committee_size, - "Each attestation in the state should have full participation" - ); - } - // Assert that we will run the first arm of process_rewards_and_penalities let epochs_since_finality = state.next_epoch(&spec) - state.finalized_epoch; assert_eq!( From 81543971142c32c90cd9ecfc7085004aa22ec17b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 14 Mar 2019 20:54:44 +1100 Subject: [PATCH 056/154] Set map initial cacacity in epoch cache builder This should help reduce reallocations --- eth2/types/src/beacon_state/epoch_cache.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/eth2/types/src/beacon_state/epoch_cache.rs b/eth2/types/src/beacon_state/epoch_cache.rs index bbc991646..e6bacd351 100644 --- a/eth2/types/src/beacon_state/epoch_cache.rs +++ b/eth2/types/src/beacon_state/epoch_cache.rs @@ -33,12 +33,13 @@ impl EpochCache { ) -> Result { let mut epoch_committees: Vec = Vec::with_capacity(spec.slots_per_epoch as usize); - let mut attestation_duty_map: AttestationDutyMap = HashMap::new(); let mut shard_committee_index_map: ShardCommitteeIndexMap = HashMap::new(); let shuffling = state.get_shuffling_for_slot(epoch.start_slot(spec.slots_per_epoch), false, spec)?; + let mut attestation_duty_map: AttestationDutyMap = HashMap::with_capacity(shuffling.len()); + for (epoch_committeess_index, slot) in epoch.slot_iter(spec.slots_per_epoch).enumerate() { let slot_committees = state.calculate_crosslink_committees_at_slot( slot, From 902b80a579315499cd61925076d19e454483ce3d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 14 Mar 2019 22:16:21 +1100 Subject: [PATCH 057/154] Optimise epoch building --- eth2/types/src/beacon_state.rs | 35 +++++-------- eth2/types/src/beacon_state/epoch_cache.rs | 60 ++++++++++------------ eth2/types/src/validator_registry.rs | 22 ++++---- 3 files changed, 51 insertions(+), 66 deletions(-) diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index a1dd8983c..2463f4701 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -537,8 +537,8 @@ impl BeaconState { let cache = self.cache(relative_epoch)?; let (committee_slot_index, committee_index) = cache - .shard_committee_index_map - .get(&attestation_data.shard) + .shard_committee_indices + .get(attestation_data.shard as usize) .ok_or_else(|| Error::ShardOutOfBounds)?; let (committee, shard) = &cache.committees[*committee_slot_index][*committee_index]; @@ -787,7 +787,6 @@ impl BeaconState { &self, slot: Slot, registry_change: bool, - spec: &ChainSpec, ) -> Result>, Error> { let (_committees_per_epoch, seed, shuffling_epoch, _shuffling_start_shard) = @@ -810,7 +809,6 @@ impl BeaconState { ) -> Result>, Error> { let active_validator_indices = get_active_validator_indices(&self.validator_registry, epoch); - if active_validator_indices.is_empty() { error!("get_shuffling: no validators."); return Err(Error::InsufficientValidators); @@ -912,22 +910,18 @@ impl BeaconState { } } - /// Return the list of ``(committee, shard)`` tuples for the ``slot``. + /// Return the ordered list of shards tuples for the `slot`. 
/// /// Note: There are two possible shufflings for crosslink committees for a /// `slot` in the next epoch: with and without a `registry_change` /// - /// Note: does not utilize the cache, `get_crosslink_committees_at_slot` is an equivalent - /// function which uses the cache. - /// /// Spec v0.4.0 - pub(crate) fn calculate_crosslink_committees_at_slot( + pub(crate) fn get_shards_for_slot( &self, slot: Slot, registry_change: bool, - shuffling: Vec>, spec: &ChainSpec, - ) -> Result, u64)>, Error> { + ) -> Result, Error> { let (committees_per_epoch, _seed, _shuffling_epoch, shuffling_start_shard) = self.get_committee_params_at_slot(slot, registry_change, spec)?; @@ -936,15 +930,12 @@ impl BeaconState { let slot_start_shard = (shuffling_start_shard + committees_per_slot * offset) % spec.shard_count; - let mut crosslinks_at_slot = vec![]; + let mut shards_at_slot = vec![]; for i in 0..committees_per_slot { - let tuple = ( - shuffling[(committees_per_slot * offset + i) as usize].clone(), - (slot_start_shard + i) % spec.shard_count, - ); - crosslinks_at_slot.push(tuple) + shards_at_slot.push((slot_start_shard + i) % spec.shard_count) } - Ok(crosslinks_at_slot) + + Ok(shards_at_slot) } /// Returns the `slot`, `shard` and `committee_index` for which a validator must produce an @@ -962,10 +953,10 @@ impl BeaconState { ) -> Result, Error> { let cache = self.cache(RelativeEpoch::Current)?; - Ok(cache - .attestation_duty_map - .get(&(validator_index as u64)) - .and_then(|tuple| Some(*tuple))) + Ok(*cache + .attestation_duties + .get(validator_index) + .ok_or_else(|| Error::UnknownValidator)?) } /// Process the slashings. diff --git a/eth2/types/src/beacon_state/epoch_cache.rs b/eth2/types/src/beacon_state/epoch_cache.rs index e6bacd351..f9bc0d2e7 100644 --- a/eth2/types/src/beacon_state/epoch_cache.rs +++ b/eth2/types/src/beacon_state/epoch_cache.rs @@ -1,8 +1,6 @@ -use super::{AttestationDutyMap, BeaconState, CrosslinkCommittees, Error, ShardCommitteeIndexMap}; +use super::{AttestationDuty, BeaconState, CrosslinkCommittees, Error}; use crate::{ChainSpec, Epoch}; -use log::trace; use serde_derive::Serialize; -use std::collections::HashMap; #[derive(Debug, PartialEq, Clone, Serialize)] pub struct EpochCache { @@ -11,21 +9,23 @@ pub struct EpochCache { /// The crosslink committees for an epoch. pub committees: Vec, /// Maps validator index to a slot, shard and committee index for attestation. - pub attestation_duty_map: AttestationDutyMap, + pub attestation_duties: Vec>, /// Maps a shard to an index of `self.committees`. - pub shard_committee_index_map: ShardCommitteeIndexMap, + pub shard_committee_indices: Vec<(usize, usize)>, } impl EpochCache { + /// Return a new, completely empty cache. pub fn empty() -> EpochCache { EpochCache { initialized: false, committees: vec![], - attestation_duty_map: AttestationDutyMap::new(), - shard_committee_index_map: ShardCommitteeIndexMap::new(), + attestation_duties: vec![], + shard_committee_indices: vec![], } } + /// Return a new, fully initialized cache. 
pub fn initialized( state: &BeaconState, epoch: Epoch, @@ -33,42 +33,36 @@ impl EpochCache { ) -> Result { let mut epoch_committees: Vec = Vec::with_capacity(spec.slots_per_epoch as usize); - let mut shard_committee_index_map: ShardCommitteeIndexMap = HashMap::new(); - let shuffling = + let mut attestation_duties = vec![None; state.validator_registry.len()]; + + let mut shard_committee_indices = vec![(0, 0); spec.shard_count as usize]; + + let mut shuffling = state.get_shuffling_for_slot(epoch.start_slot(spec.slots_per_epoch), false, spec)?; - let mut attestation_duty_map: AttestationDutyMap = HashMap::with_capacity(shuffling.len()); + for (epoch_committees_index, slot) in epoch.slot_iter(spec.slots_per_epoch).enumerate() { + let mut slot_committees: Vec<(Vec, u64)> = vec![]; - for (epoch_committeess_index, slot) in epoch.slot_iter(spec.slots_per_epoch).enumerate() { - let slot_committees = state.calculate_crosslink_committees_at_slot( - slot, - false, - shuffling.clone(), - spec, - )?; + let shards = state.get_shards_for_slot(slot, false, spec)?; + for shard in shards { + let committee = shuffling.remove(0); + slot_committees.push((committee, shard)); + } for (slot_committees_index, (committee, shard)) in slot_committees.iter().enumerate() { - // Empty committees are not permitted. if committee.is_empty() { return Err(Error::InsufficientValidators); } - trace!( - "shard: {}, epoch_i: {}, slot_i: {}", - shard, - epoch_committeess_index, - slot_committees_index - ); - - shard_committee_index_map - .insert(*shard, (epoch_committeess_index, slot_committees_index)); + // Store the slot and committee index for this shard. + shard_committee_indices[*shard as usize] = + (epoch_committees_index, slot_committees_index); + // For each validator, store their attestation duties. for (committee_index, validator_index) in committee.iter().enumerate() { - attestation_duty_map.insert( - *validator_index as u64, - (slot, *shard, committee_index as u64), - ); + attestation_duties[*validator_index] = + Some((slot, *shard, committee_index as u64)) } } @@ -78,8 +72,8 @@ impl EpochCache { Ok(EpochCache { initialized: true, committees: epoch_committees, - attestation_duty_map, - shard_committee_index_map, + attestation_duties, + shard_committee_indices, }) } } diff --git a/eth2/types/src/validator_registry.rs b/eth2/types/src/validator_registry.rs index 7b55e78cb..db35ae993 100644 --- a/eth2/types/src/validator_registry.rs +++ b/eth2/types/src/validator_registry.rs @@ -7,17 +7,17 @@ use crate::Epoch; /// /// Spec v0.4.0 pub fn get_active_validator_indices(validators: &[Validator], epoch: Epoch) -> Vec { - validators - .iter() - .enumerate() - .filter_map(|(index, validator)| { - if validator.is_active_at(epoch) { - Some(index) - } else { - None - } - }) - .collect::>() + let mut active = Vec::with_capacity(validators.len()); + + for (index, validator) in validators.iter().enumerate() { + if validator.is_active_at(epoch) { + active.push(index) + } + } + + active.shrink_to_fit(); + + active } #[cfg(test)] From c06e8ffa5b497e238470caf985bf3fabca156fa0 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 15 Mar 2019 01:50:59 +1100 Subject: [PATCH 058/154] Initial Libp2p RPC implementation. 
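Each message on the `/eth/serenity/rpc/1.0.0` protocol is SSZ encoded with a small header (a request/response flag, a request id and a method id) followed by the method-specific body; method id 0 is the Hello handshake. Below is a minimal standalone sketch (illustrative names only, not the crate's actual types) of the method-id dispatch used when decoding:

    // Illustrative only: mirrors the numeric-id -> method mapping, with
    // unrecognised ids mapped to an explicit Unknown variant.
    #[derive(Debug, PartialEq)]
    enum Method {
        Hello,
        Unknown,
    }

    impl From<u16> for Method {
        fn from(id: u16) -> Self {
            match id {
                0 => Method::Hello,
                _ => Method::Unknown,
            }
        }
    }

    fn main() {
        assert_eq!(Method::from(0), Method::Hello);
        assert_eq!(Method::from(7), Method::Unknown);
    }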
--- beacon_node/Cargo.toml | 1 - beacon_node/libp2p/Cargo.toml | 2 + beacon_node/libp2p/src/lib.rs | 1 + beacon_node/libp2p/src/rpc/handler.rs | 0 beacon_node/libp2p/src/rpc/methods.rs | 38 ++++++ beacon_node/libp2p/src/rpc/mod.rs | 120 +++++++++++++++++ beacon_node/libp2p/src/rpc/protocol.rs | 179 +++++++++++++++++++++++++ beacon_node/libp2p/src/service.rs | 2 +- 8 files changed, 341 insertions(+), 2 deletions(-) create mode 100644 beacon_node/libp2p/src/rpc/handler.rs create mode 100644 beacon_node/libp2p/src/rpc/methods.rs create mode 100644 beacon_node/libp2p/src/rpc/mod.rs create mode 100644 beacon_node/libp2p/src/rpc/protocol.rs diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 8b2641786..56f5c654e 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -7,7 +7,6 @@ edition = "2018" [dependencies] client = { path = "client" } version = { path = "version" } - clap = "2.32.0" slog = "^2.2.3" slog-term = "^2.4.0" diff --git a/beacon_node/libp2p/Cargo.toml b/beacon_node/libp2p/Cargo.toml index ecd91e170..dcbc04d0b 100644 --- a/beacon_node/libp2p/Cargo.toml +++ b/beacon_node/libp2p/Cargo.toml @@ -8,6 +8,8 @@ edition = "2018" # SigP repository until PR is merged libp2p = { git = "https://github.com/SigP/rust-libp2p", branch = "gossipsub" } types = { path = "../../eth2/types" } +ssz = { path = "../../eth2/utils/ssz" } +ssz_derive = { path = "../../eth2/utils/ssz_derive" } slog = "2.4.1" version = { path = "../version" } tokio = "0.1.16" diff --git a/beacon_node/libp2p/src/lib.rs b/beacon_node/libp2p/src/lib.rs index a1bf4402c..718b7fc22 100644 --- a/beacon_node/libp2p/src/lib.rs +++ b/beacon_node/libp2p/src/lib.rs @@ -5,6 +5,7 @@ pub mod behaviour; pub mod error; mod network_config; +mod rpc; mod service; pub use libp2p::{ diff --git a/beacon_node/libp2p/src/rpc/handler.rs b/beacon_node/libp2p/src/rpc/handler.rs new file mode 100644 index 000000000..e69de29bb diff --git a/beacon_node/libp2p/src/rpc/methods.rs b/beacon_node/libp2p/src/rpc/methods.rs new file mode 100644 index 000000000..d299e9bb7 --- /dev/null +++ b/beacon_node/libp2p/src/rpc/methods.rs @@ -0,0 +1,38 @@ +/// Available RPC methods types and ids. +use ssz_derive::{Decode, Encode}; +use types::{Epoch, Hash256, Slot}; + +#[derive(Debug)] +pub enum RPCMethod { + Hello, + Unknown, +} + +impl From for RPCMethod { + fn from(method_id: u16) -> Self { + match method_id { + 0 => RPCMethod::Hello, + _ => RPCMethod::Unknown, + } + } +} + +#[derive(Debug, Clone)] +pub enum RPCRequest { + HelloRequest, +} + +#[derive(Debug, Clone)] +pub enum RPCResponse { + HelloResponse(HelloResponse), +} + +// request/response structs for RPC methods +#[derive(Encode, Decode, Clone, Debug)] +pub struct HelloResponse { + pub network_id: u8, + pub latest_finalized_root: Hash256, + pub latest_finalized_epoch: Epoch, + pub best_root: Hash256, + pub best_slot: Slot, +} diff --git a/beacon_node/libp2p/src/rpc/mod.rs b/beacon_node/libp2p/src/rpc/mod.rs new file mode 100644 index 000000000..004f17d9e --- /dev/null +++ b/beacon_node/libp2p/src/rpc/mod.rs @@ -0,0 +1,120 @@ +mod handler; +mod methods; +/// RPC Protocol over libp2p. 
+/// +/// This is purpose built for Ethereum 2.0 serenity and the protocol listens on +/// `/eth/serenity/rpc/1.0.0` +mod protocol; + +use futures::prelude::*; +use libp2p::core::protocols_handler::{OneShotHandler, ProtocolsHandler}; +use libp2p::core::swarm::{ + ConnectedPoint, NetworkBehaviour, NetworkBehaviourAction, PollParameters, +}; +use libp2p::{Multiaddr, PeerId}; +use methods::RPCRequest; +use protocol::{RPCProtocol, RpcEvent}; +use std::marker::PhantomData; +use tokio::io::{AsyncRead, AsyncWrite}; + +/// The network behaviour handles RPC requests/responses as specified in the Eth 2.0 phase 0 +/// specification. + +pub struct Rpc { + /// Queue of events to processed. + events: Vec, + /// Pins the generic substream. + marker: PhantomData, +} + +impl Rpc { + pub fn new() -> Self { + Rpc { + events: Vec::new(), + marker: PhantomData, + } + } + + /// Submits and RPC request. + pub fn send_request(&mut self, id: u64, method_id: u16, body: RPCRequest) { + let request = RpcEvent::Request { + id, + method_id, + body, + }; + self.events.push(request); + } +} + +impl NetworkBehaviour for Rpc +where + TSubstream: AsyncRead + AsyncWrite, +{ + type ProtocolsHandler = OneShotHandler; + type OutEvent = RpcEvent; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + Default::default() + } + + fn addresses_of_peer(&mut self, _peer_id: &PeerId) -> Vec { + Vec::new() + } + + fn inject_connected(&mut self, _: PeerId, _: ConnectedPoint) {} + + fn inject_disconnected(&mut self, _: &PeerId, _: ConnectedPoint) {} + + fn inject_node_event( + &mut self, + source: PeerId, + event: ::OutEvent, + ) { + // ignore successful sends event + let event = match event { + OneShotEvent::Rx(event) => event, + OneShotEvent::Sent => return, + }; + + // send the event to the user + self.events.push(event); + } + + fn poll( + &mut self, + _: &mut PollParameters<'_>, + ) -> Async< + NetworkBehaviourAction< + ::InEvent, + Self::OutEvent, + >, + > { + if !self.events.is_empty() { + return Async::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0))); + } + Async::NotReady + } +} + +/// Transmission between the `OneShotHandler` and the `RpcEvent`. +#[derive(Debug)] +pub enum OneShotEvent { + /// We received an RPC from a remote. + Rx(RpcEvent), + /// We successfully sent an RPC request. + Sent, +} + +impl From for OneShotEvent { + #[inline] + fn from(rpc: RpcEvent) -> OneShotEvent { + OneShotEvent::Rx(rpc) + } +} + +impl From<()> for OneShotEvent { + #[inline] + fn from(_: ()) -> OneShotEvent { + OneShotEvent::Sent + } +} diff --git a/beacon_node/libp2p/src/rpc/protocol.rs b/beacon_node/libp2p/src/rpc/protocol.rs new file mode 100644 index 000000000..e65927b03 --- /dev/null +++ b/beacon_node/libp2p/src/rpc/protocol.rs @@ -0,0 +1,179 @@ +use super::methods::HelloResponse; +use super::methods::{RPCMethod, RPCRequest, RPCResponse}; +//use crate::rpc_proto; +//use byteorder::{BigEndian, ByteOrder}; +//use bytes::BytesMut; +use futures::{future, stream, Future, Stream}; +use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, PeerId, UpgradeInfo}; +//use std::{io, iter}; +use ssz::{ssz_encode, Decodable, Encodable, SszStream}; +use std::io; +use std::iter; +use tokio::io::{AsyncRead, AsyncWrite}; + +/// The maximum bytes that can be sent across the RPC. +const MAX_READ_SIZE: usize = 2048; + +/// Implementation of the `ConnectionUpgrade` for the rpc protocol. 
+ +#[derive(Debug, Clone)] +pub struct RPCProtocol; + +impl UpgradeInfo for RPCProtocol { + type Info = &'static [u8]; + type InfoIter = iter::Once; + + #[inline] + fn protocol_info(&self) -> Self::InfoIter { + iter::once(b"/eth/serenity/rpc/1.0.0") + } +} + +impl Default for RPCProtocol { + fn default() -> Self { + RPCProtocol + } +} + +/// The RPC types which are sent/received in this protocol. +#[derive(Debug, Clone)] +pub enum RpcEvent { + Request { + id: u64, + method_id: u16, + body: RPCRequest, + }, + Response { + id: u64, + method_id: u16, //TODO: Remove and process decoding upstream + result: RPCResponse, + }, +} + +impl UpgradeInfo for RpcEvent { + type Info = &'static [u8]; + type InfoIter = iter::Once; + + #[inline] + fn protocol_info(&self) -> Self::InfoIter { + iter::once(b"/eth/serenity/rpc/1.0.0") + } +} + +impl InboundUpgrade for RPCProtocol +where + TSocket: AsyncRead + AsyncWrite, +{ + type Output = RpcEvent; + type Error = DecodeError; + type Future = + upgrade::ReadOneThen, ()) -> Result>; + + fn upgrade_inbound(self, socket: TSocket, _: Self::Info) -> Self::Future { + upgrade::read_one_then(socket, MAX_READ_SIZE, (), |packet, ()| Ok(decode(packet)?)) + } +} + +fn decode(packet: Vec) -> Result { + // decode the header of the rpc + // request/response + let (request, index) = bool::ssz_decode(&packet, 0)?; + let (id, index) = u64::ssz_decode(&packet, index)?; + let (method_id, index) = u16::ssz_decode(&packet, index)?; + + if request { + let body = match RPCMethod::from(method_id) { + RPCMethod::Hello => RPCRequest::HelloRequest, + RPCMethod::Unknown => return Err(DecodeError::UnknownRPCMethod), + }; + + return Ok(RpcEvent::Request { + id, + method_id, + body, + }); + } + // we have received a response + else { + let result = match RPCMethod::from(method_id) { + RPCMethod::Hello => { + let (hello_response, _index) = HelloResponse::ssz_decode(&packet, index)?; + RPCResponse::HelloResponse(hello_response) + } + RPCMethod::Unknown => return Err(DecodeError::UnknownRPCMethod), + }; + return Ok(RpcEvent::Response { + id, + method_id, + result, + }); + } +} + +impl OutboundUpgrade for RpcEvent +where + TSocket: AsyncWrite, +{ + type Output = (); + type Error = io::Error; + type Future = upgrade::WriteOne; + + #[inline] + fn upgrade_outbound(self, socket: TSocket, _: Self::Info) -> Self::Future { + let bytes = ssz_encode(&self); + upgrade::write_one(socket, bytes) + } +} + +impl Encodable for RpcEvent { + fn ssz_append(&self, s: &mut SszStream) { + match self { + RpcEvent::Request { + id, + method_id, + body, + } => { + s.append(&true); + s.append(id); + s.append(method_id); + match body { + RPCRequest::HelloRequest => {} + } + } + RpcEvent::Response { + id, + method_id, + result, + } => { + s.append(&false); + s.append(id); + s.append(method_id); + match result { + RPCResponse::HelloResponse(response) => { + s.append(response); + } + } + } + } + } +} + +pub enum DecodeError { + ReadError(upgrade::ReadOneError), + SSZDecodeError(ssz::DecodeError), + UnknownRPCMethod, +} + +impl From for DecodeError { + #[inline] + fn from(err: upgrade::ReadOneError) -> Self { + DecodeError::ReadError(err) + } +} + +impl From for DecodeError { + #[inline] + fn from(err: ssz::DecodeError) -> Self { + DecodeError::SSZDecodeError(err) + } +} diff --git a/beacon_node/libp2p/src/service.rs b/beacon_node/libp2p/src/service.rs index 26154beb6..00c11101c 100644 --- a/beacon_node/libp2p/src/service.rs +++ b/beacon_node/libp2p/src/service.rs @@ -15,7 +15,7 @@ use libp2p::{PeerId, Swarm}; use 
slog::{debug, info, trace, warn}; use std::io::{Error, ErrorKind}; use std::time::Duration; -use types::{Topic, TopicBuilder}; +use types::TopicBuilder; /// The configuration and state of the libp2p components for the beacon node. pub struct Service { From 24c7f180e2fa409c156260a2c911499a27d136cb Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 15 Mar 2019 02:13:16 +1100 Subject: [PATCH 059/154] Update rpc event handling. --- beacon_node/libp2p/src/rpc/mod.rs | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/beacon_node/libp2p/src/rpc/mod.rs b/beacon_node/libp2p/src/rpc/mod.rs index 004f17d9e..d5f700058 100644 --- a/beacon_node/libp2p/src/rpc/mod.rs +++ b/beacon_node/libp2p/src/rpc/mod.rs @@ -22,7 +22,7 @@ use tokio::io::{AsyncRead, AsyncWrite}; pub struct Rpc { /// Queue of events to processed. - events: Vec, + events: Vec>, /// Pins the generic substream. marker: PhantomData, } @@ -36,13 +36,16 @@ impl Rpc { } /// Submits and RPC request. - pub fn send_request(&mut self, id: u64, method_id: u16, body: RPCRequest) { + pub fn send_request(&mut self, peer_id: PeerId, id: u64, method_id: u16, body: RPCRequest) { let request = RpcEvent::Request { id, method_id, body, }; - self.events.push(request); + self.events.push(NetworkBehaviourAction::SendEvent { + peer_id, + event: request, + }); } } @@ -67,17 +70,18 @@ where fn inject_node_event( &mut self, - source: PeerId, + _source: PeerId, event: ::OutEvent, ) { - // ignore successful sends event + // ignore successful send events let event = match event { OneShotEvent::Rx(event) => event, OneShotEvent::Sent => return, }; // send the event to the user - self.events.push(event); + self.events + .push(NetworkBehaviourAction::GenerateEvent(event)); } fn poll( @@ -90,7 +94,7 @@ where >, > { if !self.events.is_empty() { - return Async::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0))); + return Async::Ready(self.events.remove(0)); } Async::NotReady } From 7b6a653d05c43de741ee85865d12a5593dd9f0ed Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 15 Mar 2019 02:48:09 +1100 Subject: [PATCH 060/154] Add RPC protocol to lh network behaviour. --- beacon_node/libp2p/src/behaviour.rs | 36 +++++++++++++++++++++++++- beacon_node/libp2p/src/rpc/mod.rs | 8 +++--- beacon_node/libp2p/src/rpc/protocol.rs | 1 + 3 files changed, 40 insertions(+), 5 deletions(-) diff --git a/beacon_node/libp2p/src/behaviour.rs b/beacon_node/libp2p/src/behaviour.rs index be49abb94..2c0371095 100644 --- a/beacon_node/libp2p/src/behaviour.rs +++ b/beacon_node/libp2p/src/behaviour.rs @@ -1,3 +1,4 @@ +use crate::rpc::{RPCMethod, RPCRequest, RPCResponse, Rpc, RpcEvent}; use futures::prelude::*; use libp2p::{ core::swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess}, @@ -15,7 +16,7 @@ pub struct Behaviour { gossipsub: Gossipsub, // TODO: Add Kademlia for peer discovery /// The events generated by this behaviour to be consumed in the swarm poll. - // We use gossipsub events for now, generalise later. 
+ serenity_rpc: Rpc, #[behaviour(ignore)] events: Vec, } @@ -37,10 +38,34 @@ impl NetworkBehaviourEventProcess NetworkBehaviourEventProcess + for Behaviour +{ + fn inject_event(&mut self, event: RpcEvent) { + match event { + RpcEvent::Request { + id, + method_id, + body, + } => self.events.push(BehaviourEvent::RPCRequest { + id, + method: RPCMethod::from(method_id), + body, + }), + RpcEvent::Response { + id, + method_id, + result, + } => self.events.push(BehaviourEvent::RPCResponse { id, result }), + } + } +} + impl Behaviour { pub fn new(local_peer_id: PeerId, gs_config: GossipsubConfig) -> Self { Behaviour { gossipsub: Gossipsub::new(local_peer_id, gs_config), + serenity_rpc: Rpc::new(), events: Vec::new(), } } @@ -70,6 +95,15 @@ impl Behaviour { /// The types of events than can be obtained from polling the behaviour. pub enum BehaviourEvent { + RPCRequest { + id: u64, + method: RPCMethod, + body: RPCRequest, + }, + RPCResponse { + id: u64, + result: RPCResponse, + }, // TODO: This is a stub at the moment Message(String), } diff --git a/beacon_node/libp2p/src/rpc/mod.rs b/beacon_node/libp2p/src/rpc/mod.rs index d5f700058..f66f531eb 100644 --- a/beacon_node/libp2p/src/rpc/mod.rs +++ b/beacon_node/libp2p/src/rpc/mod.rs @@ -1,9 +1,9 @@ -mod handler; -mod methods; /// RPC Protocol over libp2p. /// /// This is purpose built for Ethereum 2.0 serenity and the protocol listens on /// `/eth/serenity/rpc/1.0.0` +mod handler; +mod methods; mod protocol; use futures::prelude::*; @@ -12,8 +12,8 @@ use libp2p::core::swarm::{ ConnectedPoint, NetworkBehaviour, NetworkBehaviourAction, PollParameters, }; use libp2p::{Multiaddr, PeerId}; -use methods::RPCRequest; -use protocol::{RPCProtocol, RpcEvent}; +pub use methods::{RPCMethod, RPCRequest, RPCResponse}; +pub use protocol::{RPCProtocol, RpcEvent}; use std::marker::PhantomData; use tokio::io::{AsyncRead, AsyncWrite}; diff --git a/beacon_node/libp2p/src/rpc/protocol.rs b/beacon_node/libp2p/src/rpc/protocol.rs index e65927b03..2c6b3caa0 100644 --- a/beacon_node/libp2p/src/rpc/protocol.rs +++ b/beacon_node/libp2p/src/rpc/protocol.rs @@ -158,6 +158,7 @@ impl Encodable for RpcEvent { } } +#[derive(Debug)] pub enum DecodeError { ReadError(upgrade::ReadOneError), SSZDecodeError(ssz::DecodeError), From 236b97476ae110c608678f34de16a85d990186b6 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 15 Mar 2019 13:31:30 +1100 Subject: [PATCH 061/154] Marge fixes to test_harness, add serdehex crate --- Cargo.toml | 1 + .../src/test_utils/epoch_map.rs | 4 +- eth2/state_processing/Cargo.toml | 3 + eth2/types/src/attestation.rs | 15 ++++- eth2/types/src/attestation_data.rs | 3 +- eth2/types/src/attester_slashing.rs | 4 +- eth2/types/src/beacon_block.rs | 15 ++++- eth2/types/src/beacon_block_body.rs | 6 +- eth2/types/src/beacon_state.rs | 28 ++++----- eth2/types/src/beacon_state/epoch_cache.rs | 14 +---- eth2/types/src/beacon_state/pubkey_cache.rs | 11 +--- eth2/types/src/chain_spec.rs | 32 ++++++---- eth2/types/src/crosslink.rs | 14 ++++- eth2/types/src/eth1_data.rs | 6 +- eth2/types/src/eth1_data_vote.rs | 6 +- eth2/types/src/fork.rs | 33 ++++++++--- eth2/types/src/pending_attestation.rs | 4 +- eth2/types/src/proposal.rs | 15 ++++- eth2/types/src/proposer_slashing.rs | 4 +- eth2/types/src/slashable_attestation.rs | 15 ++++- eth2/types/src/test_utils/test_random.rs | 14 +++++ eth2/types/src/transfer.rs | 15 ++++- eth2/types/src/voluntary_exit.rs | 15 ++++- eth2/utils/bls/Cargo.toml | 1 + eth2/utils/bls/src/aggregate_signature.rs | 16 ++++- 
eth2/utils/bls/src/public_key.rs | 13 ++-- eth2/utils/boolean-bitfield/Cargo.toml | 1 + eth2/utils/boolean-bitfield/src/lib.rs | 19 +++++- eth2/utils/serde_hex/Cargo.toml | 9 +++ eth2/utils/serde_hex/src/lib.rs | 59 +++++++++++++++++++ eth2/utils/ssz/src/impl_decode.rs | 27 +++++++++ eth2/utils/ssz/src/impl_encode.rs | 25 ++++++++ 32 files changed, 355 insertions(+), 92 deletions(-) create mode 100644 eth2/utils/serde_hex/Cargo.toml create mode 100644 eth2/utils/serde_hex/src/lib.rs diff --git a/Cargo.toml b/Cargo.toml index c5aae7f43..d149030b6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,6 +11,7 @@ members = [ "eth2/utils/honey-badger-split", "eth2/utils/merkle_proof", "eth2/utils/int_to_bytes", + "eth2/utils/serde_hex", "eth2/utils/slot_clock", "eth2/utils/ssz", "eth2/utils/ssz_derive", diff --git a/eth2/block_proposer/src/test_utils/epoch_map.rs b/eth2/block_proposer/src/test_utils/epoch_map.rs index 6658c7526..c06c376c6 100644 --- a/eth2/block_proposer/src/test_utils/epoch_map.rs +++ b/eth2/block_proposer/src/test_utils/epoch_map.rs @@ -28,8 +28,8 @@ impl DutiesReader for EpochMap { fn fork(&self) -> Result { Ok(Fork { - previous_version: 0, - current_version: 0, + previous_version: [0; 4], + current_version: [0; 4], epoch: Epoch::new(0), }) } diff --git a/eth2/state_processing/Cargo.toml b/eth2/state_processing/Cargo.toml index f6692b259..4e37fce0c 100644 --- a/eth2/state_processing/Cargo.toml +++ b/eth2/state_processing/Cargo.toml @@ -11,6 +11,9 @@ harness = false [dev-dependencies] criterion = "0.2" env_logger = "0.6.0" +serde = "1.0" +serde_derive = "1.0" +serde_yaml = "0.8" [dependencies] bls = { path = "../utils/bls" } diff --git a/eth2/types/src/attestation.rs b/eth2/types/src/attestation.rs index dcc4c1fda..4b3c2e89c 100644 --- a/eth2/types/src/attestation.rs +++ b/eth2/types/src/attestation.rs @@ -1,7 +1,7 @@ use super::{AggregateSignature, AttestationData, Bitfield}; use crate::test_utils::TestRandom; use rand::RngCore; -use serde_derive::Serialize; +use serde_derive::{Deserialize, Serialize}; use ssz::TreeHash; use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; use test_random_derive::TestRandom; @@ -9,7 +9,18 @@ use test_random_derive::TestRandom; /// Details an attestation that can be slashable. 
/// /// Spec v0.4.0 -#[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + SignedRoot, +)] pub struct Attestation { pub aggregation_bitfield: Bitfield, pub data: AttestationData, diff --git a/eth2/types/src/attestation_data.rs b/eth2/types/src/attestation_data.rs index 6e3cb3891..791ba00d2 100644 --- a/eth2/types/src/attestation_data.rs +++ b/eth2/types/src/attestation_data.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::{Crosslink, Epoch, Hash256, Slot}; use rand::RngCore; -use serde_derive::Serialize; +use serde_derive::{Deserialize, Serialize}; use ssz::TreeHash; use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; use test_random_derive::TestRandom; @@ -15,6 +15,7 @@ use test_random_derive::TestRandom; PartialEq, Default, Serialize, + Deserialize, Hash, Encode, Decode, diff --git a/eth2/types/src/attester_slashing.rs b/eth2/types/src/attester_slashing.rs index f437d41f2..195c0fdcc 100644 --- a/eth2/types/src/attester_slashing.rs +++ b/eth2/types/src/attester_slashing.rs @@ -1,13 +1,13 @@ use crate::{test_utils::TestRandom, SlashableAttestation}; use rand::RngCore; -use serde_derive::Serialize; +use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode, TreeHash}; use test_random_derive::TestRandom; /// Two conflicting attestations. /// /// Spec v0.4.0 -#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct AttesterSlashing { pub slashable_attestation_1: SlashableAttestation, pub slashable_attestation_2: SlashableAttestation, diff --git a/eth2/types/src/beacon_block.rs b/eth2/types/src/beacon_block.rs index 615d9f928..56f77c8d2 100644 --- a/eth2/types/src/beacon_block.rs +++ b/eth2/types/src/beacon_block.rs @@ -2,7 +2,7 @@ use crate::test_utils::TestRandom; use crate::{BeaconBlockBody, ChainSpec, Eth1Data, Hash256, Proposal, Slot}; use bls::Signature; use rand::RngCore; -use serde_derive::Serialize; +use serde_derive::{Deserialize, Serialize}; use ssz::{SignedRoot, TreeHash}; use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; use test_random_derive::TestRandom; @@ -10,7 +10,18 @@ use test_random_derive::TestRandom; /// A block of the `BeaconChain`. /// /// Spec v0.4.0 -#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)] +#[derive( + Debug, + PartialEq, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + SignedRoot, +)] pub struct BeaconBlock { pub slot: Slot, pub parent_root: Hash256, diff --git a/eth2/types/src/beacon_block_body.rs b/eth2/types/src/beacon_block_body.rs index 70ce24dbe..ce8020fec 100644 --- a/eth2/types/src/beacon_block_body.rs +++ b/eth2/types/src/beacon_block_body.rs @@ -1,14 +1,16 @@ use super::{Attestation, AttesterSlashing, Deposit, ProposerSlashing, Transfer, VoluntaryExit}; use crate::test_utils::TestRandom; use rand::RngCore; -use serde_derive::Serialize; +use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode, TreeHash}; use test_random_derive::TestRandom; /// The body of a `BeaconChain` block, containing operations. 
/// /// Spec v0.4.0 -#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, PartialEq, Clone, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, +)] pub struct BeaconBlockBody { pub proposer_slashings: Vec, pub attester_slashings: Vec, diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 2463f4701..2644b3e73 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -7,7 +7,7 @@ use int_to_bytes::int_to_bytes32; use log::{debug, error, trace}; use pubkey_cache::PubkeyCache; use rand::RngCore; -use serde_derive::Serialize; +use serde_derive::{Deserialize, Serialize}; use ssz::{hash, Decodable, DecodeError, Encodable, SignedRoot, SszStream, TreeHash}; use std::collections::HashMap; use swap_or_not_shuffle::shuffle_list; @@ -72,7 +72,7 @@ macro_rules! safe_sub_assign { }; } -#[derive(Debug, PartialEq, Clone, Default, Serialize)] +#[derive(Debug, PartialEq, Clone, Default, Serialize, Deserialize)] pub struct BeaconState { // Misc pub slot: Slot, @@ -114,7 +114,9 @@ pub struct BeaconState { // Caching (not in the spec) pub cache_index_offset: usize, + #[serde(default)] pub caches: Vec, + #[serde(default)] pub pubkey_cache: PubkeyCache, } @@ -137,11 +139,7 @@ impl BeaconState { */ slot: spec.genesis_slot, genesis_time, - fork: Fork { - previous_version: spec.genesis_fork_version, - current_version: spec.genesis_fork_version, - epoch: spec.genesis_epoch, - }, + fork: Fork::genesis(spec), /* * Validator registry @@ -193,8 +191,8 @@ impl BeaconState { * Caching (not in spec) */ cache_index_offset: 0, - caches: vec![EpochCache::empty(); CACHED_EPOCHS], - pubkey_cache: PubkeyCache::empty(), + caches: vec![EpochCache::default(); CACHED_EPOCHS], + pubkey_cache: PubkeyCache::default(), } } @@ -276,7 +274,7 @@ impl BeaconState { /// Removes the specified cache and sets it to uninitialized. pub fn drop_cache(&mut self, relative_epoch: RelativeEpoch) { let previous_cache_index = self.cache_index(relative_epoch); - self.caches[previous_cache_index] = EpochCache::empty(); + self.caches[previous_cache_index] = EpochCache::default(); } /// Returns the index of `self.caches` for some `RelativeEpoch`. @@ -324,7 +322,7 @@ impl BeaconState { /// Completely drops the `pubkey_cache`, replacing it with a new, empty cache. 
pub fn drop_pubkey_cache(&mut self) { - self.pubkey_cache = PubkeyCache::empty() + self.pubkey_cache = PubkeyCache::default() } /// If a validator pubkey exists in the validator registry, returns `Some(i)`, otherwise @@ -1227,8 +1225,8 @@ impl Decodable for BeaconState { eth1_data_votes, deposit_index, cache_index_offset: 0, - caches: vec![EpochCache::empty(); CACHED_EPOCHS], - pubkey_cache: PubkeyCache::empty(), + caches: vec![EpochCache::default(); CACHED_EPOCHS], + pubkey_cache: PubkeyCache::default(), }, i, )) @@ -1298,8 +1296,8 @@ impl TestRandom for BeaconState { eth1_data_votes: <_>::random_for_test(rng), deposit_index: <_>::random_for_test(rng), cache_index_offset: 0, - caches: vec![EpochCache::empty(); CACHED_EPOCHS], - pubkey_cache: PubkeyCache::empty(), + caches: vec![EpochCache::default(); CACHED_EPOCHS], + pubkey_cache: PubkeyCache::default(), } } } diff --git a/eth2/types/src/beacon_state/epoch_cache.rs b/eth2/types/src/beacon_state/epoch_cache.rs index f9bc0d2e7..ddcca0a9a 100644 --- a/eth2/types/src/beacon_state/epoch_cache.rs +++ b/eth2/types/src/beacon_state/epoch_cache.rs @@ -1,8 +1,8 @@ use super::{AttestationDuty, BeaconState, CrosslinkCommittees, Error}; use crate::{ChainSpec, Epoch}; -use serde_derive::Serialize; +use serde_derive::{Deserialize, Serialize}; -#[derive(Debug, PartialEq, Clone, Serialize)] +#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] pub struct EpochCache { /// True if this cache has been initialized. pub initialized: bool, @@ -15,16 +15,6 @@ pub struct EpochCache { } impl EpochCache { - /// Return a new, completely empty cache. - pub fn empty() -> EpochCache { - EpochCache { - initialized: false, - committees: vec![], - attestation_duties: vec![], - shard_committee_indices: vec![], - } - } - /// Return a new, fully initialized cache. pub fn initialized( state: &BeaconState, diff --git a/eth2/types/src/beacon_state/pubkey_cache.rs b/eth2/types/src/beacon_state/pubkey_cache.rs index c05147579..340bdb311 100644 --- a/eth2/types/src/beacon_state/pubkey_cache.rs +++ b/eth2/types/src/beacon_state/pubkey_cache.rs @@ -1,22 +1,15 @@ use crate::*; -use serde_derive::Serialize; +use serde_derive::{Deserialize, Serialize}; use std::collections::HashMap; type ValidatorIndex = usize; -#[derive(Debug, PartialEq, Clone, Default, Serialize)] +#[derive(Debug, PartialEq, Clone, Default, Serialize, Deserialize)] pub struct PubkeyCache { map: HashMap, } impl PubkeyCache { - /// Instantiates a new, empty cache. - pub fn empty() -> Self { - Self { - map: HashMap::new(), - } - } - /// Returns the number of validator indices already in the map. pub fn len(&self) -> ValidatorIndex { self.map.len() diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index 789bb6c0c..108516695 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -1,5 +1,7 @@ use crate::{Address, Epoch, Fork, Hash256, Slot}; use bls::Signature; +use int_to_bytes::int_to_bytes4; +use serde_derive::Deserialize; const GWEI: u64 = 1_000_000_000; @@ -15,7 +17,8 @@ pub enum Domain { /// Holds all the "constants" for a BeaconChain. 
/// /// Spec v0.4.0 -#[derive(PartialEq, Debug, Clone)] +#[derive(PartialEq, Debug, Clone, Deserialize)] +#[serde(default)] pub struct ChainSpec { /* * Misc @@ -45,7 +48,7 @@ pub struct ChainSpec { /* * Initial Values */ - pub genesis_fork_version: u64, + pub genesis_fork_version: u32, pub genesis_slot: Slot, pub genesis_epoch: Epoch, pub genesis_start_shard: u64, @@ -100,12 +103,12 @@ pub struct ChainSpec { * * Use `ChainSpec::get_domain(..)` to access these values. */ - domain_deposit: u64, - domain_attestation: u64, - domain_proposal: u64, - domain_exit: u64, - domain_randao: u64, - domain_transfer: u64, + domain_deposit: u32, + domain_attestation: u32, + domain_proposal: u32, + domain_exit: u32, + domain_randao: u32, + domain_transfer: u32, } impl ChainSpec { @@ -135,8 +138,11 @@ impl ChainSpec { Domain::Transfer => self.domain_transfer, }; - let fork_version = fork.get_fork_version(epoch); - fork_version * u64::pow(2, 32) + domain_constant + let mut fork_and_domain = [0; 8]; + fork_and_domain.copy_from_slice(&fork.get_fork_version(epoch)); + fork_and_domain.copy_from_slice(&int_to_bytes4(domain_constant)); + + u64::from_le_bytes(fork_and_domain) } /// Returns a `ChainSpec` compatible with the Ethereum Foundation specification. @@ -254,6 +260,12 @@ impl ChainSpec { } } +impl Default for ChainSpec { + fn default() -> Self { + Self::foundation() + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/eth2/types/src/crosslink.rs b/eth2/types/src/crosslink.rs index 5db5e20a6..dfa0311ef 100644 --- a/eth2/types/src/crosslink.rs +++ b/eth2/types/src/crosslink.rs @@ -1,7 +1,7 @@ use crate::test_utils::TestRandom; use crate::{Epoch, Hash256}; use rand::RngCore; -use serde_derive::Serialize; +use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode, TreeHash}; use test_random_derive::TestRandom; @@ -9,7 +9,17 @@ use test_random_derive::TestRandom; /// /// Spec v0.4.0 #[derive( - Debug, Clone, PartialEq, Default, Serialize, Hash, Encode, Decode, TreeHash, TestRandom, + Debug, + Clone, + PartialEq, + Default, + Serialize, + Deserialize, + Hash, + Encode, + Decode, + TreeHash, + TestRandom, )] pub struct Crosslink { pub epoch: Epoch, diff --git a/eth2/types/src/eth1_data.rs b/eth2/types/src/eth1_data.rs index c4b2b1894..0f1dbfec5 100644 --- a/eth2/types/src/eth1_data.rs +++ b/eth2/types/src/eth1_data.rs @@ -1,14 +1,16 @@ use super::Hash256; use crate::test_utils::TestRandom; use rand::RngCore; -use serde_derive::Serialize; +use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode, TreeHash}; use test_random_derive::TestRandom; /// Contains data obtained from the Eth1 chain. /// /// Spec v0.4.0 -#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, PartialEq, Clone, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, +)] pub struct Eth1Data { pub deposit_root: Hash256, pub block_hash: Hash256, diff --git a/eth2/types/src/eth1_data_vote.rs b/eth2/types/src/eth1_data_vote.rs index 4788833bd..d709608d5 100644 --- a/eth2/types/src/eth1_data_vote.rs +++ b/eth2/types/src/eth1_data_vote.rs @@ -1,14 +1,16 @@ use super::Eth1Data; use crate::test_utils::TestRandom; use rand::RngCore; -use serde_derive::Serialize; +use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode, TreeHash}; use test_random_derive::TestRandom; /// A summation of votes for some `Eth1Data`. 
/// /// Spec v0.4.0 -#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode, TreeHash, TestRandom)] +#[derive( + Debug, PartialEq, Clone, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, +)] pub struct Eth1DataVote { pub eth1_data: Eth1Data, pub vote_count: u64, diff --git a/eth2/types/src/fork.rs b/eth2/types/src/fork.rs index f3b62f5a8..f0e3d1046 100644 --- a/eth2/types/src/fork.rs +++ b/eth2/types/src/fork.rs @@ -1,24 +1,41 @@ -use crate::{test_utils::TestRandom, Epoch}; +use crate::{test_utils::TestRandom, ChainSpec, Epoch}; +use int_to_bytes::int_to_bytes4; use rand::RngCore; -use serde_derive::Serialize; +use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode, TreeHash}; use test_random_derive::TestRandom; /// Specifies a fork of the `BeaconChain`, to prevent replay attacks. /// -/// Spec v0.4.0 -#[derive(Debug, Clone, PartialEq, Default, Serialize, Encode, Decode, TreeHash, TestRandom)] +/// Spec v0.5.0 +#[derive( + Debug, Clone, PartialEq, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, +)] pub struct Fork { - pub previous_version: u64, - pub current_version: u64, + pub previous_version: [u8; 4], + pub current_version: [u8; 4], pub epoch: Epoch, } impl Fork { + /// Initialize the `Fork` from the genesis parameters in the `spec`. + /// + /// Spec v0.5.0 + pub fn genesis(spec: &ChainSpec) -> Self { + let mut current_version: [u8; 4] = [0; 4]; + current_version.copy_from_slice(&int_to_bytes4(spec.genesis_fork_version)); + + Self { + previous_version: current_version, + current_version, + epoch: spec.genesis_epoch, + } + } + /// Return the fork version of the given ``epoch``. /// - /// Spec v0.4.0 - pub fn get_fork_version(&self, epoch: Epoch) -> u64 { + /// Spec v0.5.0 + pub fn get_fork_version(&self, epoch: Epoch) -> [u8; 4] { if epoch < self.epoch { return self.previous_version; } diff --git a/eth2/types/src/pending_attestation.rs b/eth2/types/src/pending_attestation.rs index 68dd1c345..70907c29d 100644 --- a/eth2/types/src/pending_attestation.rs +++ b/eth2/types/src/pending_attestation.rs @@ -1,14 +1,14 @@ use crate::test_utils::TestRandom; use crate::{AttestationData, Bitfield, Slot}; use rand::RngCore; -use serde_derive::Serialize; +use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode, TreeHash}; use test_random_derive::TestRandom; /// An attestation that has been included in the state but not yet fully processed. /// /// Spec v0.4.0 -#[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode, TreeHash, TestRandom)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct PendingAttestation { pub aggregation_bitfield: Bitfield, pub data: AttestationData, diff --git a/eth2/types/src/proposal.rs b/eth2/types/src/proposal.rs index 59d6370e1..36fba5603 100644 --- a/eth2/types/src/proposal.rs +++ b/eth2/types/src/proposal.rs @@ -2,7 +2,7 @@ use crate::test_utils::TestRandom; use crate::{Hash256, Slot}; use bls::Signature; use rand::RngCore; -use serde_derive::Serialize; +use serde_derive::{Deserialize, Serialize}; use ssz::TreeHash; use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; use test_random_derive::TestRandom; @@ -10,7 +10,18 @@ use test_random_derive::TestRandom; /// A proposal for some shard or beacon block. 
/// /// Spec v0.4.0 -#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)] +#[derive( + Debug, + PartialEq, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + SignedRoot, +)] pub struct Proposal { pub slot: Slot, /// Shard number (spec.beacon_chain_shard_number for beacon chain) diff --git a/eth2/types/src/proposer_slashing.rs b/eth2/types/src/proposer_slashing.rs index 26c3d67a7..bc5b8665e 100644 --- a/eth2/types/src/proposer_slashing.rs +++ b/eth2/types/src/proposer_slashing.rs @@ -1,14 +1,14 @@ use super::Proposal; use crate::test_utils::TestRandom; use rand::RngCore; -use serde_derive::Serialize; +use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode, TreeHash}; use test_random_derive::TestRandom; /// Two conflicting proposals from the same proposer (validator). /// /// Spec v0.4.0 -#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct ProposerSlashing { pub proposer_index: u64, pub proposal_1: Proposal, diff --git a/eth2/types/src/slashable_attestation.rs b/eth2/types/src/slashable_attestation.rs index 56c9dfc2f..bc9b2769a 100644 --- a/eth2/types/src/slashable_attestation.rs +++ b/eth2/types/src/slashable_attestation.rs @@ -1,6 +1,6 @@ use crate::{test_utils::TestRandom, AggregateSignature, AttestationData, Bitfield, ChainSpec}; use rand::RngCore; -use serde_derive::Serialize; +use serde_derive::{Deserialize, Serialize}; use ssz::TreeHash; use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; use test_random_derive::TestRandom; @@ -10,7 +10,18 @@ use test_random_derive::TestRandom; /// To be included in an `AttesterSlashing`. /// /// Spec v0.4.0 -#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)] +#[derive( + Debug, + PartialEq, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + SignedRoot, +)] pub struct SlashableAttestation { /// Lists validator registry indices, not committee indices. pub validator_indices: Vec, diff --git a/eth2/types/src/test_utils/test_random.rs b/eth2/types/src/test_utils/test_random.rs index 3b172463e..cb7abe3a4 100644 --- a/eth2/types/src/test_utils/test_random.rs +++ b/eth2/types/src/test_utils/test_random.rs @@ -51,3 +51,17 @@ where ] } } + +macro_rules! impl_test_random_for_u8_array { + ($len: expr) => { + impl TestRandom for [u8; $len] { + fn random_for_test(rng: &mut T) -> Self { + let mut bytes = [0; $len]; + rng.fill_bytes(&mut bytes); + bytes + } + } + }; +} + +impl_test_random_for_u8_array!(4); diff --git a/eth2/types/src/transfer.rs b/eth2/types/src/transfer.rs index af3b18ef4..a46e24e24 100644 --- a/eth2/types/src/transfer.rs +++ b/eth2/types/src/transfer.rs @@ -2,7 +2,7 @@ use super::Slot; use crate::test_utils::TestRandom; use bls::{PublicKey, Signature}; use rand::RngCore; -use serde_derive::Serialize; +use serde_derive::{Deserialize, Serialize}; use ssz::TreeHash; use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; use test_random_derive::TestRandom; @@ -10,7 +10,18 @@ use test_random_derive::TestRandom; /// The data submitted to the deposit contract. 
/// /// Spec v0.4.0 -#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)] +#[derive( + Debug, + PartialEq, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + SignedRoot, +)] pub struct Transfer { pub from: u64, pub to: u64, diff --git a/eth2/types/src/voluntary_exit.rs b/eth2/types/src/voluntary_exit.rs index 38630a057..5fdfcdd82 100644 --- a/eth2/types/src/voluntary_exit.rs +++ b/eth2/types/src/voluntary_exit.rs @@ -1,7 +1,7 @@ use crate::{test_utils::TestRandom, Epoch}; use bls::Signature; use rand::RngCore; -use serde_derive::Serialize; +use serde_derive::{Deserialize, Serialize}; use ssz::TreeHash; use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; use test_random_derive::TestRandom; @@ -9,7 +9,18 @@ use test_random_derive::TestRandom; /// An exit voluntarily submitted a validator who wishes to withdraw. /// /// Spec v0.4.0 -#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)] +#[derive( + Debug, + PartialEq, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + SignedRoot, +)] pub struct VoluntaryExit { pub epoch: Epoch, pub validator_index: u64, diff --git a/eth2/utils/bls/Cargo.toml b/eth2/utils/bls/Cargo.toml index 2466605b0..4230a06ea 100644 --- a/eth2/utils/bls/Cargo.toml +++ b/eth2/utils/bls/Cargo.toml @@ -10,4 +10,5 @@ hashing = { path = "../hashing" } hex = "0.3" serde = "1.0" serde_derive = "1.0" +serde_hex = { path = "../serde_hex" } ssz = { path = "../ssz" } diff --git a/eth2/utils/bls/src/aggregate_signature.rs b/eth2/utils/bls/src/aggregate_signature.rs index 9c5ed0375..7b80d3bbf 100644 --- a/eth2/utils/bls/src/aggregate_signature.rs +++ b/eth2/utils/bls/src/aggregate_signature.rs @@ -2,7 +2,9 @@ use super::{AggregatePublicKey, Signature}; use bls_aggregates::{ AggregatePublicKey as RawAggregatePublicKey, AggregateSignature as RawAggregateSignature, }; +use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; +use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; use ssz::{ decode_ssz_list, hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash, }; @@ -82,7 +84,19 @@ impl Serialize for AggregateSignature { where S: Serializer, { - serializer.serialize_bytes(&ssz_encode(self)) + serializer.serialize_str(&hex_encode(ssz_encode(self))) + } +} + +impl<'de> Deserialize<'de> for AggregateSignature { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; + let (obj, _) = <_>::ssz_decode(&bytes[..], 0) + .map_err(|e| serde::de::Error::custom(format!("invalid ssz ({:?})", e)))?; + Ok(obj) } } diff --git a/eth2/utils/bls/src/public_key.rs b/eth2/utils/bls/src/public_key.rs index c85760bbf..5a348f530 100644 --- a/eth2/utils/bls/src/public_key.rs +++ b/eth2/utils/bls/src/public_key.rs @@ -1,9 +1,8 @@ -use super::serde_vistors::HexVisitor; use super::SecretKey; use bls_aggregates::PublicKey as RawPublicKey; -use hex::encode as hex_encode; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; +use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; use ssz::{ decode_ssz_list, hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash, }; @@ -81,7 +80,7 @@ impl Serialize for PublicKey { where S: Serializer, { - serializer.serialize_str(&hex_encode(ssz_encode(self))) + serializer.serialize_str(&hex_encode(self.as_raw().as_bytes())) } } @@ -90,10 
+89,10 @@ impl<'de> Deserialize<'de> for PublicKey { where D: Deserializer<'de>, { - let bytes = deserializer.deserialize_str(HexVisitor)?; - let (pubkey, _) = <_>::ssz_decode(&bytes[..], 0) - .map_err(|e| serde::de::Error::custom(format!("invalid ssz ({:?})", e)))?; - Ok(pubkey) + let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; + let obj = PublicKey::from_bytes(&bytes[..]) + .map_err(|e| serde::de::Error::custom(format!("invalid pubkey ({:?})", e)))?; + Ok(obj) } } diff --git a/eth2/utils/boolean-bitfield/Cargo.toml b/eth2/utils/boolean-bitfield/Cargo.toml index d94b9f7b1..cf037c5d7 100644 --- a/eth2/utils/boolean-bitfield/Cargo.toml +++ b/eth2/utils/boolean-bitfield/Cargo.toml @@ -5,6 +5,7 @@ authors = ["Paul Hauner "] edition = "2018" [dependencies] +serde_hex = { path = "../serde_hex" } ssz = { path = "../ssz" } bit-vec = "0.5.0" serde = "1.0" diff --git a/eth2/utils/boolean-bitfield/src/lib.rs b/eth2/utils/boolean-bitfield/src/lib.rs index a0fce1f0a..443cd06da 100644 --- a/eth2/utils/boolean-bitfield/src/lib.rs +++ b/eth2/utils/boolean-bitfield/src/lib.rs @@ -3,7 +3,10 @@ extern crate ssz; use bit_vec::BitVec; +use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; +use serde_hex::{encode, PrefixedHexVisitor}; +use ssz::Decodable; use std::cmp; use std::default; @@ -178,11 +181,25 @@ impl ssz::Decodable for BooleanBitfield { } impl Serialize for BooleanBitfield { + /// Serde serialization is compliant the Ethereum YAML test format. fn serialize(&self, serializer: S) -> Result where S: Serializer, { - serializer.serialize_bytes(&ssz::ssz_encode(self)) + serializer.serialize_str(&encode(&ssz::ssz_encode(self))) + } +} + +impl<'de> Deserialize<'de> for BooleanBitfield { + /// Serde serialization is compliant the Ethereum YAML test format. + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; + let (bitfield, _) = <_>::ssz_decode(&bytes[..], 0) + .map_err(|e| serde::de::Error::custom(format!("invalid ssz ({:?})", e)))?; + Ok(bitfield) } } diff --git a/eth2/utils/serde_hex/Cargo.toml b/eth2/utils/serde_hex/Cargo.toml new file mode 100644 index 000000000..b28194dd6 --- /dev/null +++ b/eth2/utils/serde_hex/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "serde_hex" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dependencies] +serde = "1.0" +hex = "0.3" diff --git a/eth2/utils/serde_hex/src/lib.rs b/eth2/utils/serde_hex/src/lib.rs new file mode 100644 index 000000000..3be20d93f --- /dev/null +++ b/eth2/utils/serde_hex/src/lib.rs @@ -0,0 +1,59 @@ +use hex; +use hex::ToHex; +use serde::de::{self, Visitor}; +use std::fmt; + +pub fn encode>(data: T) -> String { + let mut hex = String::with_capacity(data.as_ref().len() * 2); + + // Writing to a string never errors, so we can unwrap here. + data.write_hex(&mut hex).unwrap(); + + let mut s = "0x".to_string(); + + s.push_str(hex.as_str()); + + s +} + +pub struct PrefixedHexVisitor; + +impl<'de> Visitor<'de> for PrefixedHexVisitor { + type Value = Vec; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a hex string with 0x prefix") + } + + fn visit_str(self, value: &str) -> Result + where + E: de::Error, + { + if value.starts_with("0x") { + Ok(hex::decode(&value[2..]) + .map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e)))?) 
+ } else { + Err(de::Error::custom("missing 0x prefix")) + } + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn encoding() { + let bytes = vec![0, 255]; + let hex = encode(&bytes); + assert_eq!(hex.as_str(), "0x00ff"); + + let bytes = vec![]; + let hex = encode(&bytes); + assert_eq!(hex.as_str(), "0x"); + + let bytes = vec![1, 2, 3]; + let hex = encode(&bytes); + assert_eq!(hex.as_str(), "0x010203"); + } +} diff --git a/eth2/utils/ssz/src/impl_decode.rs b/eth2/utils/ssz/src/impl_decode.rs index b13cbeb5d..152e36760 100644 --- a/eth2/utils/ssz/src/impl_decode.rs +++ b/eth2/utils/ssz/src/impl_decode.rs @@ -24,11 +24,30 @@ macro_rules! impl_decodable_for_uint { }; } +macro_rules! impl_decodable_for_u8_array { + ($len: expr) => { + impl Decodable for [u8; $len] { + fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError> { + if index + $len > bytes.len() { + Err(DecodeError::TooShort) + } else { + let mut array: [u8; $len] = [0; $len]; + array.copy_from_slice(&bytes[index..index + $len]); + + Ok((array, index + $len)) + } + } + } + }; +} + impl_decodable_for_uint!(u16, 16); impl_decodable_for_uint!(u32, 32); impl_decodable_for_uint!(u64, 64); impl_decodable_for_uint!(usize, 64); +impl_decodable_for_u8_array!(4); + impl Decodable for u8 { fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError> { if index >= bytes.len() { @@ -246,4 +265,12 @@ mod tests { let result: Result<(bool, usize), DecodeError> = decode_ssz(&ssz, 0); assert_eq!(result, Err(DecodeError::Invalid)); } + + #[test] + fn test_decode_u8_array() { + let ssz = vec![0, 1, 2, 3]; + let (result, index): ([u8; 4], usize) = decode_ssz(&ssz, 0).unwrap(); + assert_eq!(index, 4); + assert_eq!(result, [0, 1, 2, 3]); + } } diff --git a/eth2/utils/ssz/src/impl_encode.rs b/eth2/utils/ssz/src/impl_encode.rs index bb1ec42d5..b7d008ccf 100644 --- a/eth2/utils/ssz/src/impl_encode.rs +++ b/eth2/utils/ssz/src/impl_encode.rs @@ -40,12 +40,25 @@ macro_rules! impl_encodable_for_uint { }; } +macro_rules! impl_encodable_for_u8_array { + ($len: expr) => { + impl Encodable for [u8; $len] { + fn ssz_append(&self, s: &mut SszStream) { + let bytes: Vec = self.iter().cloned().collect(); + s.append_encoded_raw(&bytes); + } + } + }; +} + impl_encodable_for_uint!(u8, 8); impl_encodable_for_uint!(u16, 16); impl_encodable_for_uint!(u32, 32); impl_encodable_for_uint!(u64, 64); impl_encodable_for_uint!(usize, 64); +impl_encodable_for_u8_array!(4); + impl Encodable for bool { fn ssz_append(&self, s: &mut SszStream) { let byte = if *self { 0b1000_0000 } else { 0b0000_0000 }; @@ -77,6 +90,7 @@ where #[cfg(test)] mod tests { use super::*; + use crate::ssz_encode; #[test] fn test_ssz_encode_h256() { @@ -226,4 +240,15 @@ mod tests { ssz.append(&x); assert_eq!(ssz.drain(), vec![0b1000_0000]); } + + #[test] + fn test_ssz_encode_u8_array() { + let x: [u8; 4] = [0, 1, 7, 8]; + let ssz = ssz_encode(&x); + assert_eq!(ssz, vec![0, 1, 7, 8]); + + let x: [u8; 4] = [255, 255, 255, 255]; + let ssz = ssz_encode(&x); + assert_eq!(ssz, vec![255, 255, 255, 255]); + } } From ef86948259c11ba2129982dd53e0619ed2cdedd1 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 15 Mar 2019 13:32:23 +1100 Subject: [PATCH 062/154] Add bones of YAML state test parsing. 
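The YAML layout mirrors the py-spec sanity tests: a document header
(title, summary, test_suite, fork, version) followed by `test_cases`,
where each case carries a `config` of spec constants, a
`verify_signatures` flag and a full `initial_state` BeaconState. A
minimal sketch of the deserialization side (struct and field names here
are inferred from the YAML and are not final):

    use serde_derive::Deserialize;
    use types::{BeaconState, ChainSpec};

    #[derive(Debug, Deserialize)]
    struct TestDoc {
        title: String,
        summary: String,
        test_suite: String,
        fork: String,
        version: String,
        test_cases: Vec<TestCase>,
    }

    #[derive(Debug, Deserialize)]
    struct TestCase {
        name: String,
        // NOTE: the YAML config keys are SCREAMING_CASE, so mapping them
        // onto ChainSpec's snake_case fields still needs renames; with
        // #[serde(default)] unknown keys are simply ignored for now.
        config: ChainSpec,
        verify_signatures: bool,
        initial_state: BeaconState,
    }

    // e.g. let doc: TestDoc = serde_yaml::from_str(&yaml_string)?;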
Is incomplete, need to update all our types to v0.5.0 first --- eth2/state_processing/specs/example.yml | 347 ++++++++++++++++++++++++ eth2/state_processing/tests/tests.rs | 42 +++ 2 files changed, 389 insertions(+) create mode 100644 eth2/state_processing/specs/example.yml create mode 100644 eth2/state_processing/tests/tests.rs diff --git a/eth2/state_processing/specs/example.yml b/eth2/state_processing/specs/example.yml new file mode 100644 index 000000000..95b749bc4 --- /dev/null +++ b/eth2/state_processing/specs/example.yml @@ -0,0 +1,347 @@ +title: Sanity tests +summary: Basic sanity checks from phase 0 spec pythonization. All tests are run with + `verify_signatures` as set to False. +test_suite: beacon_state +fork: tchaikovsky +version: v0.5.0 +test_cases: +- name: test_empty_block_transition + config: {SHARD_COUNT: 8, TARGET_COMMITTEE_SIZE: 4, MAX_BALANCE_CHURN_QUOTIENT: 32, + MAX_INDICES_PER_SLASHABLE_VOTE: 4096, MAX_EXIT_DEQUEUES_PER_EPOCH: 4, SHUFFLE_ROUND_COUNT: 90, + DEPOSIT_CONTRACT_TREE_DEPTH: 32, MIN_DEPOSIT_AMOUNT: 1000000000, MAX_DEPOSIT_AMOUNT: 32000000000, + FORK_CHOICE_BALANCE_INCREMENT: 1000000000, EJECTION_BALANCE: 16000000000, GENESIS_FORK_VERSION: 0, + GENESIS_SLOT: 4294967296, GENESIS_EPOCH: 536870912, GENESIS_START_SHARD: 0, BLS_WITHDRAWAL_PREFIX_BYTE: 0, + SECONDS_PER_SLOT: 6, MIN_ATTESTATION_INCLUSION_DELAY: 2, SLOTS_PER_EPOCH: 8, MIN_SEED_LOOKAHEAD: 1, + ACTIVATION_EXIT_DELAY: 4, EPOCHS_PER_ETH1_VOTING_PERIOD: 16, SLOTS_PER_HISTORICAL_ROOT: 64, + MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256, PERSISTENT_COMMITTEE_PERIOD: 2048, LATEST_RANDAO_MIXES_LENGTH: 64, + LATEST_ACTIVE_INDEX_ROOTS_LENGTH: 64, LATEST_SLASHED_EXIT_LENGTH: 64, BASE_REWARD_QUOTIENT: 32, + WHISTLEBLOWER_REWARD_QUOTIENT: 512, ATTESTATION_INCLUSION_REWARD_QUOTIENT: 8, + INACTIVITY_PENALTY_QUOTIENT: 16777216, MIN_PENALTY_QUOTIENT: 32, MAX_PROPOSER_SLASHINGS: 16, + MAX_ATTESTER_SLASHINGS: 1, MAX_ATTESTATIONS: 128, MAX_DEPOSITS: 16, MAX_VOLUNTARY_EXITS: 16, + MAX_TRANSFERS: 16, DOMAIN_BEACON_BLOCK: 0, DOMAIN_RANDAO: 1, DOMAIN_ATTESTATION: 2, + DOMAIN_DEPOSIT: 3, DOMAIN_VOLUNTARY_EXIT: 4, DOMAIN_TRANSFER: 5} + verify_signatures: false + initial_state: + slot: 4294967296 + genesis_time: 0 + fork: {previous_version: 0, current_version: 0, epoch: 536870912} + validator_registry: + - {pubkey: '0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: 
'0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x0a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x0b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x0c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: 
'0x0d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x0e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x0f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x130000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x140000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x150000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x160000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: 
false, slashed: false} + - {pubkey: '0x170000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x180000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x190000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x1a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x1b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x1c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x1d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x1e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + - {pubkey: '0x1f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', + withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222', + activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615, + initiated_exit: false, slashed: false} + validator_balances: [32000000000, 32000000000, 32000000000, 32000000000, 32000000000, + 32000000000, 32000000000, 32000000000, 32000000000, 32000000000, 32000000000, + 32000000000, 32000000000, 32000000000, 32000000000, 32000000000, 32000000000, + 32000000000, 32000000000, 32000000000, 
32000000000, 32000000000, 32000000000, + 32000000000, 32000000000, 32000000000, 32000000000, 32000000000, 32000000000, + 32000000000, 32000000000, 32000000000] + validator_registry_update_epoch: 536870912 + latest_randao_mixes: ['0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', 
'0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000'] + previous_shuffling_start_shard: 0 + current_shuffling_start_shard: 0 + previous_shuffling_epoch: 536870912 + current_shuffling_epoch: 536870912 + previous_shuffling_seed: '0x0000000000000000000000000000000000000000000000000000000000000000' + current_shuffling_seed: '0x94ab448e948e6d501a2b48c1e9a0946f871100969f6fa70a990acf2348c9b185' + previous_epoch_attestations: [] + current_epoch_attestations: [] + previous_justified_epoch: 536870912 + current_justified_epoch: 536870912 + previous_justified_root: '0x0000000000000000000000000000000000000000000000000000000000000000' + current_justified_root: '0x0000000000000000000000000000000000000000000000000000000000000000' + justification_bitfield: 0 + finalized_epoch: 536870912 + finalized_root: '0x0000000000000000000000000000000000000000000000000000000000000000' + latest_crosslinks: + - {epoch: 536870912, crosslink_data_root: '0x0000000000000000000000000000000000000000000000000000000000000000'} + - {epoch: 536870912, crosslink_data_root: '0x0000000000000000000000000000000000000000000000000000000000000000'} + - {epoch: 536870912, crosslink_data_root: '0x0000000000000000000000000000000000000000000000000000000000000000'} + - {epoch: 536870912, crosslink_data_root: '0x0000000000000000000000000000000000000000000000000000000000000000'} + - {epoch: 536870912, crosslink_data_root: '0x0000000000000000000000000000000000000000000000000000000000000000'} + - {epoch: 536870912, crosslink_data_root: '0x0000000000000000000000000000000000000000000000000000000000000000'} + - {epoch: 536870912, crosslink_data_root: '0x0000000000000000000000000000000000000000000000000000000000000000'} + - {epoch: 536870912, crosslink_data_root: '0x0000000000000000000000000000000000000000000000000000000000000000'} + latest_block_roots: ['0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + 
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + 
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000'] + latest_state_roots: ['0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', 
'0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', + '0x0000000000000000000000000000000000000000000000000000000000000000'] + latest_active_index_roots: ['0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + 
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', + '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42'] + latest_slashed_balances: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + latest_block_header: {slot: 4294967296, previous_block_root: '0x0000000000000000000000000000000000000000000000000000000000000000', + state_root: '0x0000000000000000000000000000000000000000000000000000000000000000', + block_body_root: '0x5359b62990beb1d78e1cec479f5a4d80af84709886a8e16c535dff0556dc0e2d', + signature: '0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'} + historical_roots: [] + latest_eth1_data: {deposit_root: '0xb05de6a9059df0c9a2ab5f76708d256941dfe9eb89e6fda549b30713087d2a5e', + block_hash: 
'0x0000000000000000000000000000000000000000000000000000000000000000'} + eth1_data_votes: [] + deposit_index: 32 + blocks: + - slot: 4294967297 + previous_block_root: '0x92ed652508d2b4c109a857107101716b18e257e7ce0d199d4b16232956e9e27e' + state_root: '0x0000000000000000000000000000000000000000000000000000000000000000' + body: + randao_reveal: '0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' + eth1_data: {deposit_root: '0x0000000000000000000000000000000000000000000000000000000000000000', + block_hash: '0x0000000000000000000000000000000000000000000000000000000000000000'} + proposer_slashings: [] + attester_slashings: [] + attestations: [] + deposits: [] + voluntary_exits: [] + transfers: [] + signature: '0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' + expected_state: {slot: 4294967297} diff --git a/eth2/state_processing/tests/tests.rs b/eth2/state_processing/tests/tests.rs new file mode 100644 index 000000000..9cee7b34c --- /dev/null +++ b/eth2/state_processing/tests/tests.rs @@ -0,0 +1,42 @@ +use serde_derive::Deserialize; +use types::*; + +#[derive(Debug, Deserialize)] +pub struct TestCase { + pub name: String, + pub config: ChainSpec, + pub verify_signatures: bool, + pub initial_state: BeaconState, + pub blocks: Vec, +} + +#[derive(Debug, Deserialize)] +pub struct TestDoc { + pub title: String, + pub summary: String, + pub fork: String, + pub version: String, + pub test_cases: Vec, +} + +#[test] +#[ignore] +fn yaml() { + use serde_yaml; + use std::{fs::File, io::prelude::*, path::PathBuf}; + + let mut file = { + let mut file_path_buf = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + file_path_buf.push("specs/example.yml"); + + File::open(file_path_buf).unwrap() + }; + + let mut yaml_str = String::new(); + + file.read_to_string(&mut yaml_str).unwrap(); + + let yaml_str = yaml_str.to_lowercase(); + + let _doc: TestDoc = serde_yaml::from_str(&yaml_str.as_str()).unwrap(); +} From f9964ebd8b04439ff222db5b2e803413141b73ba Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 15 Mar 2019 13:33:07 +1100 Subject: [PATCH 063/154] Update Fork struct to v0.5.0 --- beacon_node/src/main.rs | 6 +-- eth2/types/src/chain_spec.rs | 35 ++++++++++++++-- eth2/types/src/fork.rs | 45 +++++++++++++++++++++ validator_client/src/duties/epoch_duties.rs | 4 +- 4 files changed, 79 insertions(+), 11 deletions(-) diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index c3182c789..eacbffa3e 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -119,11 +119,7 @@ fn main() { // Get domain from genesis fork_version spec.genesis_epoch, Domain::Deposit, - &Fork { - previous_version: spec.genesis_fork_version, - current_version: spec.genesis_fork_version, - epoch: spec.genesis_epoch, - }, + &Fork::genesis(&spec), ), ), }, diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index 108516695..ae521cc92 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -1,4 +1,4 @@ -use crate::{Address, Epoch, Fork, Hash256, Slot}; +use crate::*; use bls::Signature; use int_to_bytes::int_to_bytes4; use serde_derive::Deserialize; @@ -127,7 +127,7 @@ impl ChainSpec { /// Get the domain number that represents the fork meta and signature domain. 
/// - /// Spec v0.4.0 + /// Spec v0.5.0 pub fn get_domain(&self, epoch: Epoch, domain: Domain, fork: &Fork) -> u64 { let domain_constant = match domain { Domain::Deposit => self.domain_deposit, @@ -138,9 +138,11 @@ impl ChainSpec { Domain::Transfer => self.domain_transfer, }; + let mut bytes: Vec = fork.get_fork_version(epoch).to_vec(); + bytes.append(&mut int_to_bytes4(domain_constant)); + let mut fork_and_domain = [0; 8]; - fork_and_domain.copy_from_slice(&fork.get_fork_version(epoch)); - fork_and_domain.copy_from_slice(&int_to_bytes4(domain_constant)); + fork_and_domain.copy_from_slice(&bytes); u64::from_le_bytes(fork_and_domain) } @@ -269,9 +271,34 @@ impl Default for ChainSpec { #[cfg(test)] mod tests { use super::*; + use int_to_bytes::int_to_bytes8; #[test] fn test_foundation_spec_can_be_constructed() { let _ = ChainSpec::foundation(); } + + fn test_domain(domain_type: Domain, raw_domain: u32, spec: &ChainSpec) { + let fork = Fork::genesis(&spec); + let epoch = Epoch::new(0); + + let domain = spec.get_domain(epoch, domain_type, &fork); + + let mut expected = fork.get_fork_version(epoch).to_vec(); + expected.append(&mut int_to_bytes4(raw_domain)); + + assert_eq!(int_to_bytes8(domain), expected); + } + + #[test] + fn test_get_domain() { + let spec = ChainSpec::foundation(); + + test_domain(Domain::Deposit, spec.domain_deposit, &spec); + test_domain(Domain::Attestation, spec.domain_attestation, &spec); + test_domain(Domain::Proposal, spec.domain_proposal, &spec); + test_domain(Domain::Exit, spec.domain_exit, &spec); + test_domain(Domain::Randao, spec.domain_randao, &spec); + test_domain(Domain::Transfer, spec.domain_transfer, &spec); + } } diff --git a/eth2/types/src/fork.rs b/eth2/types/src/fork.rs index f0e3d1046..b780b95ef 100644 --- a/eth2/types/src/fork.rs +++ b/eth2/types/src/fork.rs @@ -48,4 +48,49 @@ mod tests { use super::*; ssz_tests!(Fork); + + fn test_genesis(version: u32, epoch: Epoch) { + let mut spec = ChainSpec::foundation(); + + spec.genesis_fork_version = version; + spec.genesis_epoch = epoch; + + let fork = Fork::genesis(&spec); + + assert_eq!(fork.epoch, spec.genesis_epoch, "epoch incorrect"); + assert_eq!( + fork.previous_version, fork.current_version, + "previous and current are not identical" + ); + assert_eq!( + fork.current_version, + version.to_le_bytes(), + "current version incorrect" + ); + } + + #[test] + fn genesis() { + test_genesis(0, Epoch::new(0)); + test_genesis(9, Epoch::new(11)); + test_genesis(2_u32.pow(31), Epoch::new(2_u64.pow(63))); + test_genesis(u32::max_value(), Epoch::max_value()); + } + + #[test] + fn get_fork_version() { + let previous_version = [1; 4]; + let current_version = [2; 4]; + let epoch = Epoch::new(10); + + let fork = Fork { + previous_version, + current_version, + epoch, + }; + + assert_eq!(fork.get_fork_version(epoch - 1), previous_version); + assert_eq!(fork.get_fork_version(epoch), current_version); + assert_eq!(fork.get_fork_version(epoch + 1), current_version); + } } diff --git a/validator_client/src/duties/epoch_duties.rs b/validator_client/src/duties/epoch_duties.rs index 35668b4a9..71f5f26ab 100644 --- a/validator_client/src/duties/epoch_duties.rs +++ b/validator_client/src/duties/epoch_duties.rs @@ -81,8 +81,8 @@ impl DutiesReader for EpochDutiesMap { // // It will almost certainly cause signatures to fail verification. 
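For illustration, the eight-byte domain assembled by the updated `get_domain` above can be reproduced in isolation as follows. This is a standalone sketch, not the crate's API; it assumes `int_to_bytes4`/`int_to_bytes8` use a little-endian layout, which the `u64::from_le_bytes` call and the new byte-comparison test suggest.

    // Sketch of the v0.5.0 domain computation: the 4-byte fork version for the
    // epoch is concatenated with the 4-byte little-endian domain constant, and
    // the result is read as a little-endian u64.
    fn domain_from_parts(fork_version: [u8; 4], domain_constant: u32) -> u64 {
        let mut bytes = [0u8; 8];
        bytes[..4].copy_from_slice(&fork_version);
        bytes[4..].copy_from_slice(&domain_constant.to_le_bytes());
        u64::from_le_bytes(bytes)
    }

    fn main() {
        // With the all-zero genesis fork version, the domain value is the raw
        // constant shifted into the upper four bytes.
        assert_eq!(domain_from_parts([0; 4], 3), 3u64 << 32);
    }
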
Ok(Fork { - previous_version: 0, - current_version: 0, + previous_version: [0; 4], + current_version: [0; 4], epoch: Epoch::new(0), }) } From 8050ed7a2600ef17bab8c1cc43305dbad08456ec Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 15 Mar 2019 14:33:40 +1100 Subject: [PATCH 064/154] Fast-forward unchanged 0.4.0 structs to 0.5.0 --- eth2/types/src/crosslink.rs | 2 +- eth2/types/src/eth1_data.rs | 2 +- eth2/types/src/eth1_data_vote.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/eth2/types/src/crosslink.rs b/eth2/types/src/crosslink.rs index dfa0311ef..f91680c75 100644 --- a/eth2/types/src/crosslink.rs +++ b/eth2/types/src/crosslink.rs @@ -7,7 +7,7 @@ use test_random_derive::TestRandom; /// Specifies the block hash for a shard at an epoch. /// -/// Spec v0.4.0 +/// Spec v0.5.0 #[derive( Debug, Clone, diff --git a/eth2/types/src/eth1_data.rs b/eth2/types/src/eth1_data.rs index 0f1dbfec5..deced19fb 100644 --- a/eth2/types/src/eth1_data.rs +++ b/eth2/types/src/eth1_data.rs @@ -7,7 +7,7 @@ use test_random_derive::TestRandom; /// Contains data obtained from the Eth1 chain. /// -/// Spec v0.4.0 +/// Spec v0.5.0 #[derive( Debug, PartialEq, Clone, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] diff --git a/eth2/types/src/eth1_data_vote.rs b/eth2/types/src/eth1_data_vote.rs index d709608d5..2f3a1ade1 100644 --- a/eth2/types/src/eth1_data_vote.rs +++ b/eth2/types/src/eth1_data_vote.rs @@ -7,7 +7,7 @@ use test_random_derive::TestRandom; /// A summation of votes for some `Eth1Data`. /// -/// Spec v0.4.0 +/// Spec v0.5.0 #[derive( Debug, PartialEq, Clone, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] From 20a439101e57dd55293721c01a7024384ccc6d10 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 15 Mar 2019 15:19:17 +1100 Subject: [PATCH 065/154] Update "attestation" family of structs in types Also adds/splits up some testing builders. --- eth2/types/src/attestation_data.rs | 17 +++-- .../src/attestation_data_and_custody_bit.rs | 2 +- eth2/types/src/slashable_attestation.rs | 14 ++-- eth2/types/src/test_utils/mod.rs | 4 + .../test_utils/testing_attestation_builder.rs | 38 +--------- .../testing_attestation_data_builder.rs | 66 ++++++++++++++++ .../testing_attester_slashing_builder.rs | 48 +++++------- .../testing_beacon_state_builder.rs | 76 ++----------------- .../testing_pending_attestation_builder.rs | 55 ++++++++++++++ 9 files changed, 175 insertions(+), 145 deletions(-) create mode 100644 eth2/types/src/test_utils/testing_attestation_data_builder.rs create mode 100644 eth2/types/src/test_utils/testing_pending_attestation_builder.rs diff --git a/eth2/types/src/attestation_data.rs b/eth2/types/src/attestation_data.rs index 791ba00d2..4a6b57823 100644 --- a/eth2/types/src/attestation_data.rs +++ b/eth2/types/src/attestation_data.rs @@ -8,7 +8,7 @@ use test_random_derive::TestRandom; /// The data upon which an attestation is based. 
/// -/// Spec v0.4.0 +/// Spec v0.5.0 #[derive( Debug, Clone, @@ -24,14 +24,19 @@ use test_random_derive::TestRandom; SignedRoot, )] pub struct AttestationData { + // LMD GHOST vote pub slot: Slot, - pub shard: u64, pub beacon_block_root: Hash256, - pub epoch_boundary_root: Hash256, + + // FFG Vote + pub source_epoch: Epoch, + pub source_root: Hash256, + pub target_root: Hash256, + + // Crosslink Vote + pub shard: u64, + pub previous_crosslink: Crosslink, pub crosslink_data_root: Hash256, - pub latest_crosslink: Crosslink, - pub justified_epoch: Epoch, - pub justified_block_root: Hash256, } impl Eq for AttestationData {} diff --git a/eth2/types/src/attestation_data_and_custody_bit.rs b/eth2/types/src/attestation_data_and_custody_bit.rs index 020b07d28..2cc6bc80c 100644 --- a/eth2/types/src/attestation_data_and_custody_bit.rs +++ b/eth2/types/src/attestation_data_and_custody_bit.rs @@ -6,7 +6,7 @@ use ssz_derive::{Decode, Encode, TreeHash}; /// Used for pairing an attestation with a proof-of-custody. /// -/// Spec v0.4.0 +/// Spec v0.5.0 #[derive(Debug, Clone, PartialEq, Default, Serialize, Encode, Decode, TreeHash)] pub struct AttestationDataAndCustodyBit { pub data: AttestationData, diff --git a/eth2/types/src/slashable_attestation.rs b/eth2/types/src/slashable_attestation.rs index bc9b2769a..05c41a72b 100644 --- a/eth2/types/src/slashable_attestation.rs +++ b/eth2/types/src/slashable_attestation.rs @@ -9,7 +9,7 @@ use test_random_derive::TestRandom; /// /// To be included in an `AttesterSlashing`. /// -/// Spec v0.4.0 +/// Spec v0.5.0 #[derive( Debug, PartialEq, @@ -33,17 +33,17 @@ pub struct SlashableAttestation { impl SlashableAttestation { /// Check if ``attestation_data_1`` and ``attestation_data_2`` have the same target. /// - /// Spec v0.4.0 + /// Spec v0.5.0 pub fn is_double_vote(&self, other: &SlashableAttestation, spec: &ChainSpec) -> bool { self.data.slot.epoch(spec.slots_per_epoch) == other.data.slot.epoch(spec.slots_per_epoch) } /// Check if ``attestation_data_1`` surrounds ``attestation_data_2``. 
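The epoch relationships these two predicates capture can be sketched with plain integers (illustration only; the crate's methods below operate on `SlashableAttestation` and derive the target epochs from slots):

    // A double vote: two attestations with the same target epoch.
    fn is_double_vote(target_1: u64, target_2: u64) -> bool {
        target_1 == target_2
    }

    // A surround vote: attestation 1's source..target span strictly encloses
    // attestation 2's span (the spec's second slashing condition).
    fn is_surround_vote(source_1: u64, target_1: u64, source_2: u64, target_2: u64) -> bool {
        source_1 < source_2 && target_2 < target_1
    }

    fn main() {
        assert!(is_double_vote(5, 5));
        // Source 1 -> target 6 surrounds source 3 -> target 4.
        assert!(is_surround_vote(1, 6, 3, 4));
        // Overlapping but not surrounding spans are not slashable here.
        assert!(!is_surround_vote(1, 4, 3, 6));
    }
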
/// - /// Spec v0.4.0 + /// Spec v0.5.0 pub fn is_surround_vote(&self, other: &SlashableAttestation, spec: &ChainSpec) -> bool { - let source_epoch_1 = self.data.justified_epoch; - let source_epoch_2 = other.data.justified_epoch; + let source_epoch_1 = self.data.source_epoch; + let source_epoch_2 = other.data.source_epoch; let target_epoch_1 = self.data.slot.epoch(spec.slots_per_epoch); let target_epoch_2 = other.data.slot.epoch(spec.slots_per_epoch); @@ -134,14 +134,14 @@ mod tests { fn create_slashable_attestation( slot_factor: u64, - justified_epoch: u64, + source_epoch: u64, spec: &ChainSpec, ) -> SlashableAttestation { let mut rng = XorShiftRng::from_seed([42; 16]); let mut slashable_vote = SlashableAttestation::random_for_test(&mut rng); slashable_vote.data.slot = Slot::new(slot_factor * spec.slots_per_epoch); - slashable_vote.data.justified_epoch = Epoch::new(justified_epoch); + slashable_vote.data.source_epoch = Epoch::new(source_epoch); slashable_vote } } diff --git a/eth2/types/src/test_utils/mod.rs b/eth2/types/src/test_utils/mod.rs index 9d04d1ca7..bc8da0548 100644 --- a/eth2/types/src/test_utils/mod.rs +++ b/eth2/types/src/test_utils/mod.rs @@ -4,10 +4,12 @@ mod generate_deterministic_keypairs; mod keypairs_file; mod test_random; mod testing_attestation_builder; +mod testing_attestation_data_builder; mod testing_attester_slashing_builder; mod testing_beacon_block_builder; mod testing_beacon_state_builder; mod testing_deposit_builder; +mod testing_pending_attestation_builder; mod testing_proposer_slashing_builder; mod testing_transfer_builder; mod testing_voluntary_exit_builder; @@ -17,10 +19,12 @@ pub use keypairs_file::KeypairsFile; pub use rand::{prng::XorShiftRng, SeedableRng}; pub use test_random::TestRandom; pub use testing_attestation_builder::TestingAttestationBuilder; +pub use testing_attestation_data_builder::TestingAttestationDataBuilder; pub use testing_attester_slashing_builder::TestingAttesterSlashingBuilder; pub use testing_beacon_block_builder::TestingBeaconBlockBuilder; pub use testing_beacon_state_builder::{keypairs_path, TestingBeaconStateBuilder}; pub use testing_deposit_builder::TestingDepositBuilder; +pub use testing_pending_attestation_builder::TestingPendingAttestationBuilder; pub use testing_proposer_slashing_builder::TestingProposerSlashingBuilder; pub use testing_transfer_builder::TestingTransferBuilder; pub use testing_voluntary_exit_builder::TestingVoluntaryExitBuilder; diff --git a/eth2/types/src/test_utils/testing_attestation_builder.rs b/eth2/types/src/test_utils/testing_attestation_builder.rs index 8c86d756d..60624b48d 100644 --- a/eth2/types/src/test_utils/testing_attestation_builder.rs +++ b/eth2/types/src/test_utils/testing_attestation_builder.rs @@ -1,3 +1,4 @@ +use crate::test_utils::TestingAttestationDataBuilder; use crate::*; use ssz::TreeHash; @@ -18,31 +19,7 @@ impl TestingAttestationBuilder { shard: u64, spec: &ChainSpec, ) -> Self { - let current_epoch = state.current_epoch(spec); - let previous_epoch = state.previous_epoch(spec); - - let is_previous_epoch = - state.slot.epoch(spec.slots_per_epoch) != slot.epoch(spec.slots_per_epoch); - - let justified_epoch = if is_previous_epoch { - state.previous_justified_epoch - } else { - state.justified_epoch - }; - - let epoch_boundary_root = if is_previous_epoch { - *state - .get_block_root(previous_epoch.start_slot(spec.slots_per_epoch), spec) - .unwrap() - } else { - *state - .get_block_root(current_epoch.start_slot(spec.slots_per_epoch), spec) - .unwrap() - }; - - let justified_block_root 
= *state - .get_block_root(justified_epoch.start_slot(spec.slots_per_epoch), spec) - .unwrap(); + let data_builder = TestingAttestationDataBuilder::new(state, shard, slot, spec); let mut aggregation_bitfield = Bitfield::new(); let mut custody_bitfield = Bitfield::new(); @@ -54,16 +31,7 @@ impl TestingAttestationBuilder { let attestation = Attestation { aggregation_bitfield, - data: AttestationData { - slot, - shard, - beacon_block_root: *state.get_block_root(slot, spec).unwrap(), - epoch_boundary_root, - crosslink_data_root: Hash256::zero(), - latest_crosslink: state.latest_crosslinks[shard as usize].clone(), - justified_epoch, - justified_block_root, - }, + data: data_builder.build(), custody_bitfield, aggregate_signature: AggregateSignature::new(), }; diff --git a/eth2/types/src/test_utils/testing_attestation_data_builder.rs b/eth2/types/src/test_utils/testing_attestation_data_builder.rs new file mode 100644 index 000000000..f31de2fbd --- /dev/null +++ b/eth2/types/src/test_utils/testing_attestation_data_builder.rs @@ -0,0 +1,66 @@ +use crate::*; + +/// Builds an `AttestationData` to be used for testing purposes. +/// +/// This struct should **never be used for production purposes.** +pub struct TestingAttestationDataBuilder { + data: AttestationData, +} + +impl TestingAttestationDataBuilder { + /// Configures a new `AttestationData` which attests to all of the same parameters as the + /// state. + pub fn new(state: &BeaconState, shard: u64, slot: Slot, spec: &ChainSpec) -> Self { + let current_epoch = state.current_epoch(spec); + let previous_epoch = state.previous_epoch(spec); + + let is_previous_epoch = + state.slot.epoch(spec.slots_per_epoch) != slot.epoch(spec.slots_per_epoch); + + let source_epoch = if is_previous_epoch { + state.previous_justified_epoch + } else { + state.justified_epoch + }; + + let target_root = if is_previous_epoch { + *state + .get_block_root(previous_epoch.start_slot(spec.slots_per_epoch), spec) + .unwrap() + } else { + *state + .get_block_root(current_epoch.start_slot(spec.slots_per_epoch), spec) + .unwrap() + }; + + let source_root = *state + .get_block_root(source_epoch.start_slot(spec.slots_per_epoch), spec) + .unwrap(); + + let data = AttestationData { + // LMD GHOST vote + slot, + beacon_block_root: *state.get_block_root(slot, spec).unwrap(), + + // FFG Vote + source_epoch, + source_root, + target_root, + + // Crosslink vote + shard, + previous_crosslink: Crosslink { + epoch: slot.epoch(spec.slots_per_epoch), + crosslink_data_root: spec.zero_hash, + }, + crosslink_data_root: spec.zero_hash, + }; + + Self { data } + } + + /// Returns the `AttestationData`, consuming the builder. 
+ pub fn build(self) -> AttestationData { + self.data + } +} diff --git a/eth2/types/src/test_utils/testing_attester_slashing_builder.rs b/eth2/types/src/test_utils/testing_attester_slashing_builder.rs index 92c7fe814..fcaa3285b 100644 --- a/eth2/types/src/test_utils/testing_attester_slashing_builder.rs +++ b/eth2/types/src/test_utils/testing_attester_slashing_builder.rs @@ -23,45 +23,39 @@ impl TestingAttesterSlashingBuilder { { let double_voted_slot = Slot::new(0); let shard = 0; - let justified_epoch = Epoch::new(0); let epoch = Epoch::new(0); let hash_1 = Hash256::from_low_u64_le(1); let hash_2 = Hash256::from_low_u64_le(2); + let data_1 = AttestationData { + slot: double_voted_slot, + beacon_block_root: hash_1, + source_epoch: epoch, + source_root: hash_1, + target_root: hash_1, + shard, + previous_crosslink: Crosslink { + epoch, + crosslink_data_root: hash_1, + }, + crosslink_data_root: hash_1, + }; + + let data_2 = AttestationData { + beacon_block_root: hash_2, + ..data_1.clone() + }; + let mut slashable_attestation_1 = SlashableAttestation { validator_indices: validator_indices.to_vec(), - data: AttestationData { - slot: double_voted_slot, - shard, - beacon_block_root: hash_1, - epoch_boundary_root: hash_1, - crosslink_data_root: hash_1, - latest_crosslink: Crosslink { - epoch, - crosslink_data_root: hash_1, - }, - justified_epoch, - justified_block_root: hash_1, - }, + data: data_1, custody_bitfield: Bitfield::new(), aggregate_signature: AggregateSignature::new(), }; let mut slashable_attestation_2 = SlashableAttestation { validator_indices: validator_indices.to_vec(), - data: AttestationData { - slot: double_voted_slot, - shard, - beacon_block_root: hash_2, - epoch_boundary_root: hash_2, - crosslink_data_root: hash_2, - latest_crosslink: Crosslink { - epoch, - crosslink_data_root: hash_2, - }, - justified_epoch, - justified_block_root: hash_2, - }, + data: data_2, custody_bitfield: Bitfield::new(), aggregate_signature: AggregateSignature::new(), }; diff --git a/eth2/types/src/test_utils/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/testing_beacon_state_builder.rs index d3033634a..5b96dc455 100644 --- a/eth2/types/src/test_utils/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_state_builder.rs @@ -1,5 +1,6 @@ use super::{generate_deterministic_keypairs, KeypairsFile}; use crate::beacon_state::BeaconStateBuilder; +use crate::test_utils::TestingPendingAttestationBuilder; use crate::*; use bls::get_withdrawal_credentials; use dirs; @@ -227,76 +228,13 @@ impl TestingBeaconStateBuilder { .clone(); for (committee, shard) in committees { - state - .latest_attestations - .push(committee_to_pending_attestation( - state, &committee, shard, slot, spec, - )) + let mut builder = TestingPendingAttestationBuilder::new(state, shard, slot, spec); + // The entire committee should have signed the pending attestation. + let signers = vec![true; committee.len()]; + builder.add_committee_participation(signers); + + state.latest_attestations.push(builder.build()) } } } } - -/// Maps a committee to a `PendingAttestation`. -/// -/// The committee will be signed by all validators in the committee. 
-fn committee_to_pending_attestation( - state: &BeaconState, - committee: &[usize], - shard: u64, - slot: Slot, - spec: &ChainSpec, -) -> PendingAttestation { - let current_epoch = state.current_epoch(spec); - let previous_epoch = state.previous_epoch(spec); - - let mut aggregation_bitfield = Bitfield::new(); - let mut custody_bitfield = Bitfield::new(); - - for (i, _) in committee.iter().enumerate() { - aggregation_bitfield.set(i, true); - custody_bitfield.set(i, true); - } - - let is_previous_epoch = - state.slot.epoch(spec.slots_per_epoch) != slot.epoch(spec.slots_per_epoch); - - let justified_epoch = if is_previous_epoch { - state.previous_justified_epoch - } else { - state.justified_epoch - }; - - let epoch_boundary_root = if is_previous_epoch { - *state - .get_block_root(previous_epoch.start_slot(spec.slots_per_epoch), spec) - .unwrap() - } else { - *state - .get_block_root(current_epoch.start_slot(spec.slots_per_epoch), spec) - .unwrap() - }; - - let justified_block_root = *state - .get_block_root(justified_epoch.start_slot(spec.slots_per_epoch), spec) - .unwrap(); - - PendingAttestation { - aggregation_bitfield, - data: AttestationData { - slot, - shard, - beacon_block_root: *state.get_block_root(slot, spec).unwrap(), - epoch_boundary_root, - crosslink_data_root: Hash256::zero(), - latest_crosslink: Crosslink { - epoch: slot.epoch(spec.slots_per_epoch), - crosslink_data_root: Hash256::zero(), - }, - justified_epoch, - justified_block_root, - }, - custody_bitfield, - inclusion_slot: slot + spec.min_attestation_inclusion_delay, - } -} diff --git a/eth2/types/src/test_utils/testing_pending_attestation_builder.rs b/eth2/types/src/test_utils/testing_pending_attestation_builder.rs new file mode 100644 index 000000000..655b3d1e8 --- /dev/null +++ b/eth2/types/src/test_utils/testing_pending_attestation_builder.rs @@ -0,0 +1,55 @@ +use crate::test_utils::TestingAttestationDataBuilder; +use crate::*; + +/// Builds an `AttesterSlashing` to be used for testing purposes. +/// +/// This struct should **never be used for production purposes.** +pub struct TestingPendingAttestationBuilder { + pending_attestation: PendingAttestation, +} + +impl TestingPendingAttestationBuilder { + /// Create a new valid* `PendingAttestation` for the given parameters. + /// + /// The `inclusion_slot` will be set to be the earliest possible slot the `Attestation` could + /// have been included (`slot + MIN_ATTESTATION_INCLUSION_DELAY`). + /// + /// * The aggregation and custody bitfields will all be empty, they need to be set with + /// `Self::add_committee_participation`. + pub fn new(state: &BeaconState, shard: u64, slot: Slot, spec: &ChainSpec) -> Self { + let data_builder = TestingAttestationDataBuilder::new(state, shard, slot, spec); + + let pending_attestation = PendingAttestation { + aggregation_bitfield: Bitfield::new(), + data: data_builder.build(), + custody_bitfield: Bitfield::new(), + inclusion_slot: slot + spec.min_attestation_inclusion_delay, + }; + + Self { + pending_attestation, + } + } + + /// Sets the committee participation in the `PendingAttestation`. + /// + /// The `PendingAttestation` will appear to be signed by each committee member who's value in + /// `signers` is true. + pub fn add_committee_participation(&mut self, signers: Vec) { + let mut aggregation_bitfield = Bitfield::new(); + let mut custody_bitfield = Bitfield::new(); + + for (i, signed) in signers.iter().enumerate() { + aggregation_bitfield.set(i, *signed); + custody_bitfield.set(i, false); // Fixed to `false` for phase 0. 
+ } + + self.pending_attestation.aggregation_bitfield = aggregation_bitfield; + self.pending_attestation.custody_bitfield = custody_bitfield; + } + + /// Returns the `PendingAttestation`, consuming the builder. + pub fn build(self) -> PendingAttestation { + self.pending_attestation + } +} From 2295322e3c3d1921b205fab3cf13ec5bde5ad85e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 15 Mar 2019 15:33:14 +1100 Subject: [PATCH 066/154] Update DepositInput to spec v0.5.0 Also modifies the API for creating a proof of possession and adds a test --- eth2/types/src/deposit_data.rs | 2 +- eth2/types/src/deposit_input.rs | 43 +++++++++++++------ .../src/test_utils/testing_deposit_builder.rs | 9 ++-- 3 files changed, 37 insertions(+), 17 deletions(-) diff --git a/eth2/types/src/deposit_data.rs b/eth2/types/src/deposit_data.rs index 9d6c1bda7..a1e30032f 100644 --- a/eth2/types/src/deposit_data.rs +++ b/eth2/types/src/deposit_data.rs @@ -7,7 +7,7 @@ use test_random_derive::TestRandom; /// Data generated by the deposit contract. /// -/// Spec v0.4.0 +/// Spec v0.5.0 #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct DepositData { pub amount: u64, diff --git a/eth2/types/src/deposit_input.rs b/eth2/types/src/deposit_input.rs index 9a9031901..9da53a042 100644 --- a/eth2/types/src/deposit_input.rs +++ b/eth2/types/src/deposit_input.rs @@ -9,7 +9,7 @@ use test_random_derive::TestRandom; /// The data supplied by the user to the deposit contract. /// -/// Spec v0.4.0 +/// Spec v0.5.0 #[derive( Debug, PartialEq, @@ -31,25 +31,23 @@ pub struct DepositInput { impl DepositInput { /// Generate the 'proof_of_posession' signature for a given DepositInput details. /// - /// Spec v0.4.0 + /// Spec v0.5.0 pub fn create_proof_of_possession( - keypair: &Keypair, - withdrawal_credentials: &Hash256, - domain: u64, + &self, + secret_key: &SecretKey, + epoch: Epoch, + fork: &Fork, + spec: &ChainSpec, ) -> Signature { - let signable_deposit_input = DepositInput { - pubkey: keypair.pk.clone(), - withdrawal_credentials: withdrawal_credentials.clone(), - proof_of_possession: Signature::empty_signature(), - }; - let msg = signable_deposit_input.signed_root(); + let msg = self.signed_root(); + let domain = spec.get_domain(epoch, Domain::Deposit, fork); - Signature::new(msg.as_slice(), domain, &keypair.sk) + Signature::new(msg.as_slice(), domain, secret_key) } /// Verify that proof-of-possession is valid. 
/// - /// Spec v0.4.0 + /// Spec v0.5.0 pub fn validate_proof_of_possession( &self, epoch: Epoch, @@ -68,4 +66,23 @@ mod tests { use super::*; ssz_tests!(DepositInput); + + #[test] + fn can_create_and_validate() { + let spec = ChainSpec::foundation(); + let fork = Fork::genesis(&spec); + let keypair = Keypair::random(); + let epoch = Epoch::new(0); + + let mut deposit_input = DepositInput { + pubkey: keypair.pk.clone(), + withdrawal_credentials: Hash256::zero(), + proof_of_possession: Signature::empty_signature(), + }; + + deposit_input.proof_of_possession = + deposit_input.create_proof_of_possession(&keypair.sk, epoch, &fork, &spec); + + assert!(deposit_input.validate_proof_of_possession(epoch, &fork, &spec)); + } } diff --git a/eth2/types/src/test_utils/testing_deposit_builder.rs b/eth2/types/src/test_utils/testing_deposit_builder.rs index 729311468..90c8d325d 100644 --- a/eth2/types/src/test_utils/testing_deposit_builder.rs +++ b/eth2/types/src/test_utils/testing_deposit_builder.rs @@ -46,15 +46,18 @@ impl TestingDepositBuilder { ); let epoch = state.current_epoch(spec); - let domain = spec.get_domain(epoch, Domain::Deposit, &state.fork); self.deposit.deposit_data.deposit_input.pubkey = keypair.pk.clone(); self.deposit .deposit_data .deposit_input .withdrawal_credentials = withdrawal_credentials.clone(); - self.deposit.deposit_data.deposit_input.proof_of_possession = - DepositInput::create_proof_of_possession(&keypair, &withdrawal_credentials, domain); + + self.deposit.deposit_data.deposit_input.proof_of_possession = self + .deposit + .deposit_data + .deposit_input + .create_proof_of_possession(&keypair.sk, epoch, &state.fork, spec); } /// Builds the deposit, consuming the builder. From d84850b892ba2bc5ffaafb4d9b6f267e64ad88c5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 15 Mar 2019 15:40:28 +1100 Subject: [PATCH 067/154] Add `BeaconBlockHeader` type. --- eth2/types/src/beacon_block_header.rs | 38 +++++++++++++++++++++++++++ eth2/types/src/lib.rs | 2 ++ 2 files changed, 40 insertions(+) create mode 100644 eth2/types/src/beacon_block_header.rs diff --git a/eth2/types/src/beacon_block_header.rs b/eth2/types/src/beacon_block_header.rs new file mode 100644 index 000000000..029c7e56b --- /dev/null +++ b/eth2/types/src/beacon_block_header.rs @@ -0,0 +1,38 @@ +use crate::test_utils::TestRandom; +use crate::*; +use bls::Signature; +use rand::RngCore; +use serde_derive::{Deserialize, Serialize}; +use ssz::TreeHash; +use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; +use test_random_derive::TestRandom; + +/// A header of a `BeaconBlock`. 
+/// +/// Spec v0.5.0 +#[derive( + Debug, + PartialEq, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, + SignedRoot, +)] +pub struct BeaconBlockHeader { + pub slot: Slot, + pub previous_block_root: Hash256, + pub state_root: Hash256, + pub block_body_root: Hash256, + pub signature: Signature, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_tests!(BeaconBlockHeader); +} diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index 7b1d84837..2fcb3237d 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -9,6 +9,7 @@ pub mod attestation_data_and_custody_bit; pub mod attester_slashing; pub mod beacon_block; pub mod beacon_block_body; +pub mod beacon_block_header; pub mod beacon_state; pub mod chain_spec; pub mod crosslink; @@ -43,6 +44,7 @@ pub use crate::attestation_data_and_custody_bit::AttestationDataAndCustodyBit; pub use crate::attester_slashing::AttesterSlashing; pub use crate::beacon_block::BeaconBlock; pub use crate::beacon_block_body::BeaconBlockBody; +pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_state::{BeaconState, Error as BeaconStateError, RelativeEpoch}; pub use crate::chain_spec::{ChainSpec, Domain}; pub use crate::crosslink::Crosslink; From 4ffdfbc9938f3fa253a4d6bf944e2e24e82d7ef0 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 15 Mar 2019 15:48:33 +1100 Subject: [PATCH 068/154] Update `Validator` to 0.5.0, add tests --- eth2/types/src/validator.rs | 69 +++++++++++++++++++++++++++---------- 1 file changed, 50 insertions(+), 19 deletions(-) diff --git a/eth2/types/src/validator.rs b/eth2/types/src/validator.rs index 6d1936bfd..f57261175 100644 --- a/eth2/types/src/validator.rs +++ b/eth2/types/src/validator.rs @@ -6,7 +6,7 @@ use test_random_derive::TestRandom; /// Information about a `BeaconChain` validator. 
/// -/// Spec v0.4.0 +/// Spec v0.5.0 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] pub struct Validator { pub pubkey: PublicKey, @@ -53,29 +53,60 @@ impl Default for Validator { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; #[test] - fn test_validator_can_be_active() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let mut validator = Validator::random_for_test(&mut rng); + fn default() { + let v = Validator::default(); - let activation_epoch = u64::random_for_test(&mut rng); - let exit_epoch = activation_epoch + 234; + let epoch = Epoch::new(0); - validator.activation_epoch = Epoch::from(activation_epoch); - validator.exit_epoch = Epoch::from(exit_epoch); + assert_eq!(v.is_active_at(epoch), false); + assert_eq!(v.is_exited_at(epoch), false); + assert_eq!(v.is_withdrawable_at(epoch), false); + assert_eq!(v.initiated_exit, false); + assert_eq!(v.slashed, false); + } - for slot in (activation_epoch - 100)..(exit_epoch + 100) { - let slot = Epoch::from(slot); - if slot < activation_epoch { - assert!(!validator.is_active_at(slot)); - } else if slot >= exit_epoch { - assert!(!validator.is_active_at(slot)); - } else { - assert!(validator.is_active_at(slot)); - } - } + #[test] + fn is_active_at() { + let epoch = Epoch::new(10); + + let v = Validator { + activation_epoch: epoch, + ..Validator::default() + }; + + assert_eq!(v.is_active_at(epoch - 1), false); + assert_eq!(v.is_active_at(epoch), true); + assert_eq!(v.is_active_at(epoch + 1), true); + } + + #[test] + fn is_exited_at() { + let epoch = Epoch::new(10); + + let v = Validator { + exit_epoch: epoch, + ..Validator::default() + }; + + assert_eq!(v.is_exited_at(epoch - 1), false); + assert_eq!(v.is_exited_at(epoch), true); + assert_eq!(v.is_exited_at(epoch + 1), true); + } + + #[test] + fn is_withdrawable_at() { + let epoch = Epoch::new(10); + + let v = Validator { + withdrawable_epoch: epoch, + ..Validator::default() + }; + + assert_eq!(v.is_withdrawable_at(epoch - 1), false); + assert_eq!(v.is_withdrawable_at(epoch), true); + assert_eq!(v.is_withdrawable_at(epoch + 1), true); } ssz_tests!(Validator); From 142aaae8ce6cedaefdd57a670568bae22e712369 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 15 Mar 2019 15:51:57 +1100 Subject: [PATCH 069/154] Fast-forward PendingAttestation to v0.5.0 --- eth2/types/src/pending_attestation.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth2/types/src/pending_attestation.rs b/eth2/types/src/pending_attestation.rs index 70907c29d..ca50b6d1c 100644 --- a/eth2/types/src/pending_attestation.rs +++ b/eth2/types/src/pending_attestation.rs @@ -7,7 +7,7 @@ use test_random_derive::TestRandom; /// An attestation that has been included in the state but not yet fully processed. 
/// -/// Spec v0.4.0 +/// Spec v0.5.0 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct PendingAttestation { pub aggregation_bitfield: Bitfield, From 15c3e5eab586feccd1082664395cd6227c6c8af2 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 15 Mar 2019 15:52:17 +1100 Subject: [PATCH 070/154] Add HistoricalBatch type --- eth2/types/src/historical_batch.rs | 22 ++++++++++++++++++++++ eth2/types/src/lib.rs | 2 ++ 2 files changed, 24 insertions(+) create mode 100644 eth2/types/src/historical_batch.rs diff --git a/eth2/types/src/historical_batch.rs b/eth2/types/src/historical_batch.rs new file mode 100644 index 000000000..77859ed1a --- /dev/null +++ b/eth2/types/src/historical_batch.rs @@ -0,0 +1,22 @@ +use crate::test_utils::TestRandom; +use crate::Hash256; +use rand::RngCore; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode, TreeHash}; +use test_random_derive::TestRandom; + +/// Historical block and state roots. +/// +/// Spec v0.5.0 +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +pub struct HistoricalBatch { + pub block_roots: Vec, + pub state_roots: Vec, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_tests!(HistoricalBatch); +} diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index 2fcb3237d..4f50e0ea5 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -20,6 +20,7 @@ pub mod eth1_data; pub mod eth1_data_vote; pub mod fork; pub mod free_attestation; +pub mod historical_batch; pub mod pending_attestation; pub mod proposal; pub mod proposer_slashing; @@ -55,6 +56,7 @@ pub use crate::eth1_data::Eth1Data; pub use crate::eth1_data_vote::Eth1DataVote; pub use crate::fork::Fork; pub use crate::free_attestation::FreeAttestation; +pub use crate::historical_batch::HistoricalBatch; pub use crate::pending_attestation::PendingAttestation; pub use crate::proposal::Proposal; pub use crate::proposer_slashing::ProposerSlashing; From dc2755c4ea1c7d96dacf0b361ef5685cae59dd89 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 15 Mar 2019 15:59:04 +1100 Subject: [PATCH 071/154] Update ProposerSlashing to v0.5.0 --- eth2/types/src/deposit_input.rs | 2 +- eth2/types/src/proposer_slashing.rs | 8 ++++---- .../testing_proposer_slashing_builder.rs | 18 +++++++++--------- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/eth2/types/src/deposit_input.rs b/eth2/types/src/deposit_input.rs index 9da53a042..3f8a6177a 100644 --- a/eth2/types/src/deposit_input.rs +++ b/eth2/types/src/deposit_input.rs @@ -1,6 +1,6 @@ use crate::test_utils::TestRandom; use crate::*; -use bls::{Keypair, PublicKey, Signature}; +use bls::{PublicKey, Signature}; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; use ssz::{SignedRoot, TreeHash}; diff --git a/eth2/types/src/proposer_slashing.rs b/eth2/types/src/proposer_slashing.rs index bc5b8665e..881f0e405 100644 --- a/eth2/types/src/proposer_slashing.rs +++ b/eth2/types/src/proposer_slashing.rs @@ -1,4 +1,4 @@ -use super::Proposal; +use super::BeaconBlockHeader; use crate::test_utils::TestRandom; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; @@ -7,12 +7,12 @@ use test_random_derive::TestRandom; /// Two conflicting proposals from the same proposer (validator). 
/// -/// Spec v0.4.0 +/// Spec v0.5.0 #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct ProposerSlashing { pub proposer_index: u64, - pub proposal_1: Proposal, - pub proposal_2: Proposal, + pub proposal_1: BeaconBlockHeader, + pub proposal_2: BeaconBlockHeader, } #[cfg(test)] diff --git a/eth2/types/src/test_utils/testing_proposer_slashing_builder.rs b/eth2/types/src/test_utils/testing_proposer_slashing_builder.rs index 7f16b679f..fc38c185d 100644 --- a/eth2/types/src/test_utils/testing_proposer_slashing_builder.rs +++ b/eth2/types/src/test_utils/testing_proposer_slashing_builder.rs @@ -22,20 +22,20 @@ impl TestingProposerSlashingBuilder { F: Fn(u64, &[u8], Epoch, Domain) -> Signature, { let slot = Slot::new(0); - let shard = 0; + let hash_1 = Hash256::from([1; 32]); + let hash_2 = Hash256::from([2; 32]); - let mut proposal_1 = Proposal { + let mut proposal_1 = BeaconBlockHeader { slot, - shard, - block_root: Hash256::from_low_u64_le(1), + previous_block_root: hash_1, + state_root: hash_1, + block_body_root: hash_1, signature: Signature::empty_signature(), }; - let mut proposal_2 = Proposal { - slot, - shard, - block_root: Hash256::from_low_u64_le(2), - signature: Signature::empty_signature(), + let mut proposal_2 = BeaconBlockHeader { + previous_block_root: hash_2, + ..proposal_1.clone() }; proposal_1.signature = { From 77db79900e5911fcd2027bbe9e87aae4555194af Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 15 Mar 2019 16:02:23 +1100 Subject: [PATCH 072/154] Fast-forward some unchanged types to v0.5.0 --- eth2/types/src/attestation.rs | 2 +- eth2/types/src/attester_slashing.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/eth2/types/src/attestation.rs b/eth2/types/src/attestation.rs index 4b3c2e89c..0b660466e 100644 --- a/eth2/types/src/attestation.rs +++ b/eth2/types/src/attestation.rs @@ -8,7 +8,7 @@ use test_random_derive::TestRandom; /// Details an attestation that can be slashable. /// -/// Spec v0.4.0 +/// Spec v0.5.0 #[derive( Debug, Clone, diff --git a/eth2/types/src/attester_slashing.rs b/eth2/types/src/attester_slashing.rs index 195c0fdcc..6fc404f42 100644 --- a/eth2/types/src/attester_slashing.rs +++ b/eth2/types/src/attester_slashing.rs @@ -6,7 +6,7 @@ use test_random_derive::TestRandom; /// Two conflicting attestations. /// -/// Spec v0.4.0 +/// Spec v0.5.0 #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct AttesterSlashing { pub slashable_attestation_1: SlashableAttestation, From db26b8fde732167839ed53ed84a0545fbbbe1fc7 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 15 Mar 2019 16:02:47 +1100 Subject: [PATCH 073/154] Update Deposit to v0.5.0 --- eth2/types/src/deposit.rs | 4 ++-- eth2/types/src/test_utils/testing_deposit_builder.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/eth2/types/src/deposit.rs b/eth2/types/src/deposit.rs index 14eb19ad6..ff8d83d77 100644 --- a/eth2/types/src/deposit.rs +++ b/eth2/types/src/deposit.rs @@ -7,10 +7,10 @@ use test_random_derive::TestRandom; /// A deposit to potentially become a beacon chain validator. 
/// -/// Spec v0.4.0 +/// Spec v0.5.0 #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct Deposit { - pub branch: Vec, + pub proof: Vec, pub index: u64, pub deposit_data: DepositData, } diff --git a/eth2/types/src/test_utils/testing_deposit_builder.rs b/eth2/types/src/test_utils/testing_deposit_builder.rs index 90c8d325d..0d1c962f0 100644 --- a/eth2/types/src/test_utils/testing_deposit_builder.rs +++ b/eth2/types/src/test_utils/testing_deposit_builder.rs @@ -14,7 +14,7 @@ impl TestingDepositBuilder { let keypair = Keypair::random(); let deposit = Deposit { - branch: vec![], + proof: vec![], index: 0, deposit_data: DepositData { amount, From a1ee1a45237bb769c3bf0abb7636f8c6038c05b2 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 15 Mar 2019 16:05:53 +1100 Subject: [PATCH 074/154] Update Transfer to v0.5.0 --- eth2/types/src/test_utils/testing_transfer_builder.rs | 6 +++--- eth2/types/src/transfer.rs | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/eth2/types/src/test_utils/testing_transfer_builder.rs b/eth2/types/src/test_utils/testing_transfer_builder.rs index c4256ebea..354e29aa5 100644 --- a/eth2/types/src/test_utils/testing_transfer_builder.rs +++ b/eth2/types/src/test_utils/testing_transfer_builder.rs @@ -10,12 +10,12 @@ pub struct TestingTransferBuilder { impl TestingTransferBuilder { /// Instantiates a new builder. - pub fn new(from: u64, to: u64, amount: u64, slot: Slot) -> Self { + pub fn new(sender: u64, recipient: u64, amount: u64, slot: Slot) -> Self { let keypair = Keypair::random(); let transfer = Transfer { - from, - to, + sender, + recipient, amount, fee: 0, slot, diff --git a/eth2/types/src/transfer.rs b/eth2/types/src/transfer.rs index a46e24e24..1c9968702 100644 --- a/eth2/types/src/transfer.rs +++ b/eth2/types/src/transfer.rs @@ -23,8 +23,8 @@ use test_random_derive::TestRandom; SignedRoot, )] pub struct Transfer { - pub from: u64, - pub to: u64, + pub sender: u64, + pub recipient: u64, pub amount: u64, pub fee: u64, pub slot: Slot, From a51de99d4093704a8013326f0e959f44632b26d3 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 15 Mar 2019 16:06:02 +1100 Subject: [PATCH 075/154] Fast-forward unchanged exit to v0.5.0 --- eth2/types/src/voluntary_exit.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth2/types/src/voluntary_exit.rs b/eth2/types/src/voluntary_exit.rs index 5fdfcdd82..f64f950cb 100644 --- a/eth2/types/src/voluntary_exit.rs +++ b/eth2/types/src/voluntary_exit.rs @@ -8,7 +8,7 @@ use test_random_derive::TestRandom; /// An exit voluntarily submitted a validator who wishes to withdraw. /// -/// Spec v0.4.0 +/// Spec v0.5.0 #[derive( Debug, PartialEq, From 563304c8d7e9b33dd3bd1d2d92baa5ab2a2a76f9 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 15 Mar 2019 16:30:05 +1100 Subject: [PATCH 076/154] Update "block" family types to 0.5.0 - Removes Proposal - Removes "readers" as they aren't actually being used anywhere. 
--- eth2/types/src/beacon_block.rs | 40 ++++------- eth2/types/src/beacon_block_body.rs | 10 +-- eth2/types/src/lib.rs | 3 - eth2/types/src/proposal.rs | 67 ------------------- eth2/types/src/readers/block_reader.rs | 35 ---------- eth2/types/src/readers/mod.rs | 5 -- eth2/types/src/readers/state_reader.rs | 25 ------- .../testing_beacon_block_builder.rs | 5 +- 8 files changed, 20 insertions(+), 170 deletions(-) delete mode 100644 eth2/types/src/proposal.rs delete mode 100644 eth2/types/src/readers/block_reader.rs delete mode 100644 eth2/types/src/readers/mod.rs delete mode 100644 eth2/types/src/readers/state_reader.rs diff --git a/eth2/types/src/beacon_block.rs b/eth2/types/src/beacon_block.rs index 56f77c8d2..7fa3f5e11 100644 --- a/eth2/types/src/beacon_block.rs +++ b/eth2/types/src/beacon_block.rs @@ -1,15 +1,15 @@ use crate::test_utils::TestRandom; -use crate::{BeaconBlockBody, ChainSpec, Eth1Data, Hash256, Proposal, Slot}; +use crate::{BeaconBlockBody, ChainSpec, Eth1Data, Hash256, Slot}; use bls::Signature; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz::{SignedRoot, TreeHash}; +use ssz::TreeHash; use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; use test_random_derive::TestRandom; /// A block of the `BeaconChain`. /// -/// Spec v0.4.0 +/// Spec v0.5.0 #[derive( Debug, PartialEq, @@ -24,29 +24,27 @@ use test_random_derive::TestRandom; )] pub struct BeaconBlock { pub slot: Slot, - pub parent_root: Hash256, + pub previous_block_root: Hash256, pub state_root: Hash256, - pub randao_reveal: Signature, - pub eth1_data: Eth1Data, pub body: BeaconBlockBody, pub signature: Signature, } impl BeaconBlock { - /// Produce the first block of the Beacon Chain. + /// The first block of the Beacon Chain. /// - /// Spec v0.4.0 + /// Spec v0.5.0 pub fn genesis(state_root: Hash256, spec: &ChainSpec) -> BeaconBlock { BeaconBlock { slot: spec.genesis_slot, - parent_root: spec.zero_hash, + previous_block_root: spec.zero_hash, state_root, - randao_reveal: spec.empty_signature.clone(), - eth1_data: Eth1Data { - deposit_root: spec.zero_hash, - block_hash: spec.zero_hash, - }, body: BeaconBlockBody { + randao_reveal: spec.empty_signature.clone(), + eth1_data: Eth1Data { + deposit_root: spec.zero_hash, + block_hash: spec.zero_hash, + }, proposer_slashings: vec![], attester_slashings: vec![], attestations: vec![], @@ -60,22 +58,10 @@ impl BeaconBlock { /// Returns the `hash_tree_root` of the block. /// - /// Spec v0.4.0 + /// Spec v0.5.0 pub fn canonical_root(&self) -> Hash256 { Hash256::from_slice(&self.hash_tree_root()[..]) } - - /// Returns an unsigned proposal for block. - /// - /// Spec v0.4.0 - pub fn proposal(&self, spec: &ChainSpec) -> Proposal { - Proposal { - slot: self.slot, - shard: spec.beacon_chain_shard_number, - block_root: Hash256::from_slice(&self.signed_root()), - signature: spec.empty_signature.clone(), - } - } } #[cfg(test)] diff --git a/eth2/types/src/beacon_block_body.rs b/eth2/types/src/beacon_block_body.rs index ce8020fec..677e24cec 100644 --- a/eth2/types/src/beacon_block_body.rs +++ b/eth2/types/src/beacon_block_body.rs @@ -1,5 +1,5 @@ -use super::{Attestation, AttesterSlashing, Deposit, ProposerSlashing, Transfer, VoluntaryExit}; use crate::test_utils::TestRandom; +use crate::*; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode, TreeHash}; @@ -7,11 +7,11 @@ use test_random_derive::TestRandom; /// The body of a `BeaconChain` block, containing operations. 
/// -/// Spec v0.4.0 -#[derive( - Debug, PartialEq, Clone, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, -)] +/// Spec v0.5.0 +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct BeaconBlockBody { + pub randao_reveal: Signature, + pub eth1_data: Eth1Data, pub proposer_slashings: Vec, pub attester_slashings: Vec, pub attestations: Vec, diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index 4f50e0ea5..a1a58198b 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -22,9 +22,7 @@ pub mod fork; pub mod free_attestation; pub mod historical_batch; pub mod pending_attestation; -pub mod proposal; pub mod proposer_slashing; -pub mod readers; pub mod shard_reassignment_record; pub mod slashable_attestation; pub mod transfer; @@ -58,7 +56,6 @@ pub use crate::fork::Fork; pub use crate::free_attestation::FreeAttestation; pub use crate::historical_batch::HistoricalBatch; pub use crate::pending_attestation::PendingAttestation; -pub use crate::proposal::Proposal; pub use crate::proposer_slashing::ProposerSlashing; pub use crate::slashable_attestation::SlashableAttestation; pub use crate::slot_epoch::{Epoch, Slot}; diff --git a/eth2/types/src/proposal.rs b/eth2/types/src/proposal.rs deleted file mode 100644 index 36fba5603..000000000 --- a/eth2/types/src/proposal.rs +++ /dev/null @@ -1,67 +0,0 @@ -use crate::test_utils::TestRandom; -use crate::{Hash256, Slot}; -use bls::Signature; -use rand::RngCore; -use serde_derive::{Deserialize, Serialize}; -use ssz::TreeHash; -use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; -use test_random_derive::TestRandom; - -/// A proposal for some shard or beacon block. -/// -/// Spec v0.4.0 -#[derive( - Debug, - PartialEq, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, - SignedRoot, -)] -pub struct Proposal { - pub slot: Slot, - /// Shard number (spec.beacon_chain_shard_number for beacon chain) - pub shard: u64, - pub block_root: Hash256, - pub signature: Signature, -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{SignedRoot, TreeHash}; - - #[derive(TreeHash)] - struct SignedProposal { - pub slot: Slot, - pub shard: u64, - pub block_root: Hash256, - } - - impl Into for Proposal { - fn into(self) -> SignedProposal { - SignedProposal { - slot: self.slot, - shard: self.shard, - block_root: self.block_root, - } - } - } - - #[test] - pub fn test_signed_root() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Proposal::random_for_test(&mut rng); - - let other: SignedProposal = original.clone().into(); - - assert_eq!(original.signed_root(), other.hash_tree_root()); - } - - ssz_tests!(Proposal); -} diff --git a/eth2/types/src/readers/block_reader.rs b/eth2/types/src/readers/block_reader.rs deleted file mode 100644 index 93157a1a3..000000000 --- a/eth2/types/src/readers/block_reader.rs +++ /dev/null @@ -1,35 +0,0 @@ -use crate::{BeaconBlock, Hash256, Slot}; -use std::fmt::Debug; - -/// The `BeaconBlockReader` provides interfaces for reading a subset of fields of a `BeaconBlock`. -/// -/// The purpose of this trait is to allow reading from either; -/// - a standard `BeaconBlock` struct, or -/// - a SSZ serialized byte array. -/// -/// Note: presently, direct SSZ reading has not been implemented so this trait is being used for -/// "future proofing". 
-pub trait BeaconBlockReader: Debug + PartialEq { - fn slot(&self) -> Slot; - fn parent_root(&self) -> Hash256; - fn state_root(&self) -> Hash256; - fn into_beacon_block(self) -> Option; -} - -impl BeaconBlockReader for BeaconBlock { - fn slot(&self) -> Slot { - self.slot - } - - fn parent_root(&self) -> Hash256 { - self.parent_root - } - - fn state_root(&self) -> Hash256 { - self.state_root - } - - fn into_beacon_block(self) -> Option { - Some(self) - } -} diff --git a/eth2/types/src/readers/mod.rs b/eth2/types/src/readers/mod.rs deleted file mode 100644 index 4ccb14a8c..000000000 --- a/eth2/types/src/readers/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -mod block_reader; -mod state_reader; - -pub use self::block_reader::BeaconBlockReader; -pub use self::state_reader::BeaconStateReader; diff --git a/eth2/types/src/readers/state_reader.rs b/eth2/types/src/readers/state_reader.rs deleted file mode 100644 index e469bee57..000000000 --- a/eth2/types/src/readers/state_reader.rs +++ /dev/null @@ -1,25 +0,0 @@ -use crate::{BeaconState, Slot}; -use std::fmt::Debug; - -/// The `BeaconStateReader` provides interfaces for reading a subset of fields of a `BeaconState`. -/// -/// The purpose of this trait is to allow reading from either; -/// - a standard `BeaconState` struct, or -/// - a SSZ serialized byte array. -/// -/// Note: presently, direct SSZ reading has not been implemented so this trait is being used for -/// "future proofing". -pub trait BeaconStateReader: Debug + PartialEq { - fn slot(&self) -> Slot; - fn into_beacon_state(self) -> Option; -} - -impl BeaconStateReader for BeaconState { - fn slot(&self) -> Slot { - self.slot - } - - fn into_beacon_state(self) -> Option { - Some(self) - } -} diff --git a/eth2/types/src/test_utils/testing_beacon_block_builder.rs b/eth2/types/src/test_utils/testing_beacon_block_builder.rs index 58633b5ce..e0e4677d4 100644 --- a/eth2/types/src/test_utils/testing_beacon_block_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_block_builder.rs @@ -32,8 +32,7 @@ impl TestingBeaconBlockBuilder { /// /// Modifying the block after signing may invalidate the signature. pub fn sign(&mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) { - let proposal = self.block.proposal(spec); - let message = proposal.signed_root(); + let message = self.block.signed_root(); let epoch = self.block.slot.epoch(spec.slots_per_epoch); let domain = spec.get_domain(epoch, Domain::Proposal, fork); self.block.signature = Signature::new(&message, domain, sk); @@ -46,7 +45,7 @@ impl TestingBeaconBlockBuilder { let epoch = self.block.slot.epoch(spec.slots_per_epoch); let message = epoch.hash_tree_root(); let domain = spec.get_domain(epoch, Domain::Randao, fork); - self.block.randao_reveal = Signature::new(&message, domain, sk); + self.block.body.randao_reveal = Signature::new(&message, domain, sk); } /// Inserts a signed, valid `ProposerSlashing` for the validator. From dffc26a466c10c9f1397f6ab6e226f519a1eea5e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 15 Mar 2019 18:33:32 +1100 Subject: [PATCH 077/154] Add field idents support to ssz_derive. 
- Adds idents to skip ser, deser and tree hashing --- eth2/utils/ssz_derive/src/lib.rs | 123 +++++++++++++++++++++++++++---- 1 file changed, 110 insertions(+), 13 deletions(-) diff --git a/eth2/utils/ssz_derive/src/lib.rs b/eth2/utils/ssz_derive/src/lib.rs index a7802a274..9ba1de416 100644 --- a/eth2/utils/ssz_derive/src/lib.rs +++ b/eth2/utils/ssz_derive/src/lib.rs @@ -56,10 +56,46 @@ fn get_named_field_idents<'a>(struct_data: &'a syn::DataStruct) -> Vec<&'a syn:: .collect() } +/// Returns a Vec of `syn::Ident` for each named field in the struct, whilst filtering out fields +/// that should not be serialized. +/// +/// # Panics +/// Any unnamed struct field (like in a tuple struct) will raise a panic at compile time. +fn get_serializable_named_field_idents<'a>( + struct_data: &'a syn::DataStruct, +) -> Vec<&'a syn::Ident> { + struct_data + .fields + .iter() + .filter_map(|f| { + if should_skip_serializing(&f) { + None + } else { + Some(match &f.ident { + Some(ref ident) => ident, + _ => panic!("ssz_derive only supports named struct fields."), + }) + } + }) + .collect() +} + +/// Returns true if some field has an attribute declaring it should not be serialized. +/// +/// The field attribute is: `#[ssz(skip_serializing)]` +fn should_skip_serializing(field: &syn::Field) -> bool { + for attr in &field.attrs { + if attr.tts.to_string() == "( skip_serializing )" { + return true; + } + } + false +} + /// Implements `ssz::Encodable` for some `struct`. /// /// Fields are encoded in the order they are defined. -#[proc_macro_derive(Encode)] +#[proc_macro_derive(Encode, attributes(ssz))] pub fn ssz_encode_derive(input: TokenStream) -> TokenStream { let item = parse_macro_input!(input as DeriveInput); @@ -70,7 +106,7 @@ pub fn ssz_encode_derive(input: TokenStream) -> TokenStream { _ => panic!("ssz_derive only supports structs."), }; - let field_idents = get_named_field_idents(&struct_data); + let field_idents = get_serializable_named_field_idents(&struct_data); let output = quote! { impl ssz::Encodable for #name { @@ -84,6 +120,18 @@ pub fn ssz_encode_derive(input: TokenStream) -> TokenStream { output.into() } +/// Returns true if some field has an attribute declaring it should not be deserialized. +/// +/// The field attribute is: `#[ssz(skip_deserializing)]` +fn should_skip_deserializing(field: &syn::Field) -> bool { + for attr in &field.attrs { + if attr.tts.to_string() == "( skip_deserializing )" { + return true; + } + } + false +} + /// Implements `ssz::Decodable` for some `struct`. /// /// Fields are decoded in the order they are defined. @@ -98,26 +146,39 @@ pub fn ssz_decode_derive(input: TokenStream) -> TokenStream { _ => panic!("ssz_derive only supports structs."), }; - let field_idents = get_named_field_idents(&struct_data); + let all_idents = get_named_field_idents(&struct_data); - // Using a var in an iteration always consumes the var, therefore we must make a `fields_a` and - // a `fields_b` in order to perform two loops. - // - // https://github.com/dtolnay/quote/issues/8 - let field_idents_a = &field_idents; - let field_idents_b = &field_idents; + // Build quotes for fields that should be deserialized and those that should be built from + // `Default`. + let mut quotes = vec![]; + for field in &struct_data.fields { + match &field.ident { + Some(ref ident) => { + if should_skip_deserializing(field) { + quotes.push(quote! { + let #ident = <_>::default(); + }); + } else { + quotes.push(quote! 
{ + let (#ident, i) = <_>::ssz_decode(bytes, i)?; + }); + } + } + _ => panic!("ssz_derive only supports named struct fields."), + }; + } let output = quote! { impl ssz::Decodable for #name { fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), ssz::DecodeError> { #( - let (#field_idents_a, i) = <_>::ssz_decode(bytes, i)?; + #quotes )* Ok(( Self { #( - #field_idents_b, + #all_idents, )* }, i @@ -128,10 +189,46 @@ pub fn ssz_decode_derive(input: TokenStream) -> TokenStream { output.into() } +/// Returns a Vec of `syn::Ident` for each named field in the struct, whilst filtering out fields +/// that should not be tree hashed. +/// +/// # Panics +/// Any unnamed struct field (like in a tuple struct) will raise a panic at compile time. +fn get_tree_hashable_named_field_idents<'a>( + struct_data: &'a syn::DataStruct, +) -> Vec<&'a syn::Ident> { + struct_data + .fields + .iter() + .filter_map(|f| { + if should_skip_tree_hash(&f) { + None + } else { + Some(match &f.ident { + Some(ref ident) => ident, + _ => panic!("ssz_derive only supports named struct fields."), + }) + } + }) + .collect() +} + +/// Returns true if some field has an attribute declaring it should not be tree-hashed. +/// +/// The field attribute is: `#[tree_hash(skip_hashing)]` +fn should_skip_tree_hash(field: &syn::Field) -> bool { + for attr in &field.attrs { + if attr.tts.to_string() == "( skip_hashing )" { + return true; + } + } + false +} + /// Implements `ssz::TreeHash` for some `struct`. /// /// Fields are processed in the order they are defined. -#[proc_macro_derive(TreeHash)] +#[proc_macro_derive(TreeHash, attributes(tree_hash))] pub fn ssz_tree_hash_derive(input: TokenStream) -> TokenStream { let item = parse_macro_input!(input as DeriveInput); @@ -142,7 +239,7 @@ pub fn ssz_tree_hash_derive(input: TokenStream) -> TokenStream { _ => panic!("ssz_derive only supports structs."), }; - let field_idents = get_named_field_idents(&struct_data); + let field_idents = get_tree_hashable_named_field_idents(&struct_data); let output = quote! { impl ssz::TreeHash for #name { From 57c4389f9c476c96cbad13484642e6bacb101e8e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 15 Mar 2019 18:34:54 +1100 Subject: [PATCH 078/154] Add further BeaconBlock v0.5.0 updates --- eth2/types/src/beacon_block.rs | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/eth2/types/src/beacon_block.rs b/eth2/types/src/beacon_block.rs index 7fa3f5e11..bfe266cbd 100644 --- a/eth2/types/src/beacon_block.rs +++ b/eth2/types/src/beacon_block.rs @@ -1,5 +1,5 @@ use crate::test_utils::TestRandom; -use crate::{BeaconBlockBody, ChainSpec, Eth1Data, Hash256, Slot}; +use crate::*; use bls::Signature; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; @@ -31,14 +31,14 @@ pub struct BeaconBlock { } impl BeaconBlock { - /// The first block of the Beacon Chain. + /// Returns an empty block to be used during genesis. /// /// Spec v0.5.0 - pub fn genesis(state_root: Hash256, spec: &ChainSpec) -> BeaconBlock { + pub fn empty(spec: &ChainSpec) -> BeaconBlock { BeaconBlock { slot: spec.genesis_slot, previous_block_root: spec.zero_hash, - state_root, + state_root: spec.zero_hash, body: BeaconBlockBody { randao_reveal: spec.empty_signature.clone(), eth1_data: Eth1Data { @@ -62,6 +62,19 @@ impl BeaconBlock { pub fn canonical_root(&self) -> Hash256 { Hash256::from_slice(&self.hash_tree_root()[..]) } + + /// Returns a "temporary" header, where the `state_root` is `spec.zero_hash`. 
+ /// + /// Spec v0.5.0 + pub fn into_temporary_header(&self, spec: &ChainSpec) -> BeaconBlockHeader { + BeaconBlockHeader { + slot: self.slot, + previous_block_root: self.previous_block_root, + state_root: spec.zero_hash, + block_body_root: Hash256::from_slice(&self.hash_tree_root()), + signature: self.signature, + } + } } #[cfg(test)] From 7660cbd4190acabafe64a4ded1668a7568c842c5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 15 Mar 2019 18:35:27 +1100 Subject: [PATCH 079/154] Update ChainSpec to v0.5.0 --- eth2/types/src/chain_spec.rs | 59 ++++++++++++++++++------------------ 1 file changed, 30 insertions(+), 29 deletions(-) diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index ae521cc92..f4b113056 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -5,18 +5,21 @@ use serde_derive::Deserialize; const GWEI: u64 = 1_000_000_000; +/// Each of the BLS signature domains. +/// +/// Spec v0.5.0 pub enum Domain { - Deposit, - Attestation, - Proposal, - Exit, + BeaconBlock, Randao, + Attestation, + Deposit, + Exit, Transfer, } /// Holds all the "constants" for a BeaconChain. /// -/// Spec v0.4.0 +/// Spec v0.5.0 #[derive(PartialEq, Debug, Clone, Deserialize)] #[serde(default)] pub struct ChainSpec { @@ -26,7 +29,6 @@ pub struct ChainSpec { pub shard_count: u64, pub target_committee_size: u64, pub max_balance_churn_quotient: u64, - pub beacon_chain_shard_number: u64, pub max_indices_per_slashable_vote: u64, pub max_exit_dequeues_per_epoch: u64, pub shuffle_round_count: u8, @@ -66,12 +68,13 @@ pub struct ChainSpec { pub min_seed_lookahead: Epoch, pub activation_exit_delay: u64, pub epochs_per_eth1_voting_period: u64, + pub slots_per_historical_root: usize, pub min_validator_withdrawability_delay: Epoch, + pub persistent_committee_period: u64, /* * State list lengths */ - pub latest_block_roots_length: usize, pub latest_randao_mixes_length: usize, pub latest_active_index_roots_length: usize, pub latest_slashed_exit_length: usize, @@ -103,11 +106,11 @@ pub struct ChainSpec { * * Use `ChainSpec::get_domain(..)` to access these values. */ - domain_deposit: u32, - domain_attestation: u32, - domain_proposal: u32, - domain_exit: u32, + domain_beacon_block: u32, domain_randao: u32, + domain_attestation: u32, + domain_deposit: u32, + domain_exit: u32, domain_transfer: u32, } @@ -130,11 +133,11 @@ impl ChainSpec { /// Spec v0.5.0 pub fn get_domain(&self, epoch: Epoch, domain: Domain, fork: &Fork) -> u64 { let domain_constant = match domain { - Domain::Deposit => self.domain_deposit, - Domain::Attestation => self.domain_attestation, - Domain::Proposal => self.domain_proposal, - Domain::Exit => self.domain_exit, + Domain::BeaconBlock => self.domain_beacon_block, Domain::Randao => self.domain_randao, + Domain::Attestation => self.domain_attestation, + Domain::Deposit => self.domain_deposit, + Domain::Exit => self.domain_exit, Domain::Transfer => self.domain_transfer, }; @@ -149,7 +152,7 @@ impl ChainSpec { /// Returns a `ChainSpec` compatible with the Ethereum Foundation specification. 
/// - /// Spec v0.4.0 + /// Spec v0.5.0 pub fn foundation() -> Self { let genesis_slot = Slot::new(2_u64.pow(32)); let slots_per_epoch = 64; @@ -162,7 +165,6 @@ impl ChainSpec { shard_count: 1_024, target_committee_size: 128, max_balance_churn_quotient: 32, - beacon_chain_shard_number: u64::max_value(), max_indices_per_slashable_vote: 4_096, max_exit_dequeues_per_epoch: 4, shuffle_round_count: 90, @@ -202,12 +204,13 @@ impl ChainSpec { min_seed_lookahead: Epoch::new(1), activation_exit_delay: 4, epochs_per_eth1_voting_period: 16, + slots_per_historical_root: 8_192, min_validator_withdrawability_delay: Epoch::new(256), + persistent_committee_period: 2_048, /* * State list lengths */ - latest_block_roots_length: 8_192, latest_randao_mixes_length: 8_192, latest_active_index_roots_length: 8_192, latest_slashed_exit_length: 8_192, @@ -234,18 +237,16 @@ impl ChainSpec { /* * Signature domains */ - domain_deposit: 0, - domain_attestation: 1, - domain_proposal: 2, - domain_exit: 3, - domain_randao: 4, + domain_beacon_block: 0, + domain_randao: 1, + domain_attestation: 2, + domain_deposit: 3, + domain_exit: 4, domain_transfer: 5, } } /// Returns a `ChainSpec` compatible with the specification suitable for 8 validators. - /// - /// Spec v0.4.0 pub fn few_validators() -> Self { let genesis_slot = Slot::new(2_u64.pow(32)); let slots_per_epoch = 8; @@ -294,11 +295,11 @@ mod tests { fn test_get_domain() { let spec = ChainSpec::foundation(); - test_domain(Domain::Deposit, spec.domain_deposit, &spec); - test_domain(Domain::Attestation, spec.domain_attestation, &spec); - test_domain(Domain::Proposal, spec.domain_proposal, &spec); - test_domain(Domain::Exit, spec.domain_exit, &spec); + test_domain(Domain::BeaconBlock, spec.domain_beacon_block, &spec); test_domain(Domain::Randao, spec.domain_randao, &spec); + test_domain(Domain::Attestation, spec.domain_attestation, &spec); + test_domain(Domain::Deposit, spec.domain_deposit, &spec); + test_domain(Domain::Exit, spec.domain_exit, &spec); test_domain(Domain::Transfer, spec.domain_transfer, &spec); } } From b37cf3a2698562e3c523e798e580b44ed42d2646 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 15 Mar 2019 18:36:16 +1100 Subject: [PATCH 080/154] Add TreeHash derives for cache objects. This allows us to avoid a verbose manual impl for BeaconState --- eth2/types/src/beacon_state/epoch_cache.rs | 13 +++++++++++++ eth2/types/src/beacon_state/pubkey_cache.rs | 9 +++++++++ 2 files changed, 22 insertions(+) diff --git a/eth2/types/src/beacon_state/epoch_cache.rs b/eth2/types/src/beacon_state/epoch_cache.rs index ddcca0a9a..3e580eee1 100644 --- a/eth2/types/src/beacon_state/epoch_cache.rs +++ b/eth2/types/src/beacon_state/epoch_cache.rs @@ -1,5 +1,7 @@ use super::{AttestationDuty, BeaconState, CrosslinkCommittees, Error}; +use crate::test_utils::TestRandom; use crate::{ChainSpec, Epoch}; +use rand::RngCore; use serde_derive::{Deserialize, Serialize}; #[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] @@ -67,3 +69,14 @@ impl EpochCache { }) } } + +impl TestRandom for [EpochCache; 3] { + /// Test random should generate an empty cache. 
+ fn random_for_test(rng: &mut T) -> Self { + [ + EpochCache::default(), + EpochCache::default(), + EpochCache::default(), + ] + } +} diff --git a/eth2/types/src/beacon_state/pubkey_cache.rs b/eth2/types/src/beacon_state/pubkey_cache.rs index 340bdb311..22fe32694 100644 --- a/eth2/types/src/beacon_state/pubkey_cache.rs +++ b/eth2/types/src/beacon_state/pubkey_cache.rs @@ -1,4 +1,6 @@ +use crate::test_utils::TestRandom; use crate::*; +use rand::RngCore; use serde_derive::{Deserialize, Serialize}; use std::collections::HashMap; @@ -36,3 +38,10 @@ impl PubkeyCache { self.map.get(pubkey).cloned() } } + +impl TestRandom for PubkeyCache { + /// Test random should generate an empty cache. + fn random_for_test(rng: &mut T) -> Self { + Self::default() + } +} From 49d3f3d1d64126dd926ca5a3a26247542704882c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 15 Mar 2019 18:36:55 +1100 Subject: [PATCH 081/154] Partially update BeaconState to v0.5.0 - Uses new ssz attributes to remove manual impl of ssz. - Updates struct def to v0.5.0 - Does not update all methods --- eth2/types/src/beacon_state.rs | 261 ++++++++------------------------- 1 file changed, 58 insertions(+), 203 deletions(-) diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 2644b3e73..9bcfa5cec 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -8,9 +8,11 @@ use log::{debug, error, trace}; use pubkey_cache::PubkeyCache; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz::{hash, Decodable, DecodeError, Encodable, SignedRoot, SszStream, TreeHash}; +use ssz::{hash, Decodable, SignedRoot, TreeHash}; +use ssz_derive::{Decode, Encode, TreeHash}; use std::collections::HashMap; use swap_or_not_shuffle::shuffle_list; +use test_random_derive::TestRandom; pub use builder::BeaconStateBuilder; @@ -72,7 +74,10 @@ macro_rules! safe_sub_assign { }; } -#[derive(Debug, PartialEq, Clone, Default, Serialize, Deserialize)] +/// The state of the `BeaconChain` at some slot. 
+/// +/// Spec v0.5.0 +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, TestRandom, Encode, Decode, TreeHash)] pub struct BeaconState { // Misc pub slot: Slot, @@ -94,18 +99,24 @@ pub struct BeaconState { pub current_shuffling_seed: Hash256, // Finality + pub previous_epoch_attestations: Vec, + pub current_epoch_attestations: Vec, pub previous_justified_epoch: Epoch, - pub justified_epoch: Epoch, + pub current_justified_epoch: Epoch, + pub previous_justified_root: Hash256, + pub current_justified_root: Hash256, pub justification_bitfield: u64, pub finalized_epoch: Epoch, + pub finalized_root: Hash256, // Recent state pub latest_crosslinks: Vec, pub latest_block_roots: Vec, + pub latest_state_roots: Vec, pub latest_active_index_roots: Vec, pub latest_slashed_balances: Vec, - pub latest_attestations: Vec, - pub batched_block_roots: Vec, + pub latest_block_header: BeaconBlockHeader, + pub historical_roots: Vec, // Ethereum 1.0 chain data pub latest_eth1_data: Eth1Data, @@ -113,10 +124,19 @@ pub struct BeaconState { pub deposit_index: u64, // Caching (not in the spec) + #[serde(default)] + #[ssz(skip_serializing)] + #[ssz(skip_deserializing)] + #[tree_hash(skip_hashing)] pub cache_index_offset: usize, + #[ssz(skip_serializing)] + #[ssz(skip_deserializing)] + #[tree_hash(skip_hashing)] + pub caches: [EpochCache; CACHED_EPOCHS], #[serde(default)] - pub caches: Vec, - #[serde(default)] + #[ssz(skip_serializing)] + #[ssz(skip_deserializing)] + #[tree_hash(skip_hashing)] pub pubkey_cache: PubkeyCache, } @@ -126,7 +146,7 @@ impl BeaconState { /// This does not fully build a genesis beacon state, it omits processing of initial validator /// deposits. To obtain a full genesis beacon state, use the `BeaconStateBuilder`. /// - /// Spec v0.4.0 + /// Spec v0.5.0 pub fn genesis(genesis_time: u64, latest_eth1_data: Eth1Data, spec: &ChainSpec) -> BeaconState { let initial_crosslink = Crosslink { epoch: spec.genesis_epoch, @@ -134,23 +154,17 @@ impl BeaconState { }; BeaconState { - /* - * Misc - */ + // Misc slot: spec.genesis_slot, genesis_time, fork: Fork::genesis(spec), - /* - * Validator registry - */ + // Validator registry validator_registry: vec![], // Set later in the function. validator_balances: vec![], // Set later in the function. 
validator_registry_update_epoch: spec.genesis_epoch, - /* - * Randomness and committees - */ + // Randomness and committees latest_randao_mixes: vec![spec.zero_hash; spec.latest_randao_mixes_length as usize], previous_shuffling_start_shard: spec.genesis_start_shard, current_shuffling_start_shard: spec.genesis_start_shard, @@ -159,26 +173,25 @@ impl BeaconState { previous_shuffling_seed: spec.zero_hash, current_shuffling_seed: spec.zero_hash, - /* - * Finality - */ + // Finality + previous_epoch_attestations: vec![], + current_epoch_attestations: vec![], previous_justified_epoch: spec.genesis_epoch, - justified_epoch: spec.genesis_epoch, + current_justified_epoch: spec.genesis_epoch, + previous_justified_root: spec.zero_hash, + current_justified_root: spec.zero_hash, justification_bitfield: 0, finalized_epoch: spec.genesis_epoch, + finalized_root: spec.zero_hash, - /* - * Recent state - */ + // Recent state latest_crosslinks: vec![initial_crosslink; spec.shard_count as usize], - latest_block_roots: vec![spec.zero_hash; spec.latest_block_roots_length as usize], - latest_active_index_roots: vec![ - spec.zero_hash; - spec.latest_active_index_roots_length as usize - ], - latest_slashed_balances: vec![0; spec.latest_slashed_exit_length as usize], - latest_attestations: vec![], - batched_block_roots: vec![], + latest_block_roots: vec![spec.zero_hash; spec.slots_per_historical_root], + latest_state_roots: vec![spec.zero_hash; spec.slots_per_historical_root], + latest_active_index_roots: vec![spec.zero_hash; spec.latest_active_index_roots_length], + latest_slashed_balances: vec![0; spec.latest_slashed_exit_length], + latest_block_header: BeaconBlock::empty(spec).into_temporary_header(spec), + historical_roots: vec![], /* * PoW receipt root @@ -191,18 +204,26 @@ impl BeaconState { * Caching (not in spec) */ cache_index_offset: 0, - caches: vec![EpochCache::default(); CACHED_EPOCHS], + caches: [ + EpochCache::default(), + EpochCache::default(), + EpochCache::default(), + ], pubkey_cache: PubkeyCache::default(), } } + /* + /// Returns the `hash_tree_root` of the state. /// - /// Spec v0.4.0 + /// Spec v0.5.0 pub fn canonical_root(&self) -> Hash256 { Hash256::from_slice(&self.hash_tree_root()[..]) } + */ + /// Build an epoch cache, unless it is has already been built. pub fn build_epoch_cache( &mut self, @@ -426,11 +447,11 @@ impl BeaconState { /// Return the block root at a recent `slot`. 
/// - /// Spec v0.4.0 + /// Spec v0.5.0 pub fn get_block_root(&self, slot: Slot, spec: &ChainSpec) -> Option<&Hash256> { - if (self.slot <= slot + spec.latest_block_roots_length as u64) && (slot < self.slot) { + if (self.slot <= slot + spec.slots_per_historical_root as u64) && (slot < self.slot) { self.latest_block_roots - .get(slot.as_usize() % spec.latest_block_roots_length) + .get(slot.as_usize() % spec.slots_per_historical_root) } else { None } @@ -1135,169 +1156,3 @@ impl BeaconState { Ok(all_participants) } } - -impl Encodable for BeaconState { - fn ssz_append(&self, s: &mut SszStream) { - s.append(&self.slot); - s.append(&self.genesis_time); - s.append(&self.fork); - s.append(&self.validator_registry); - s.append(&self.validator_balances); - s.append(&self.validator_registry_update_epoch); - s.append(&self.latest_randao_mixes); - s.append(&self.previous_shuffling_start_shard); - s.append(&self.current_shuffling_start_shard); - s.append(&self.previous_shuffling_epoch); - s.append(&self.current_shuffling_epoch); - s.append(&self.previous_shuffling_seed); - s.append(&self.current_shuffling_seed); - s.append(&self.previous_justified_epoch); - s.append(&self.justified_epoch); - s.append(&self.justification_bitfield); - s.append(&self.finalized_epoch); - s.append(&self.latest_crosslinks); - s.append(&self.latest_block_roots); - s.append(&self.latest_active_index_roots); - s.append(&self.latest_slashed_balances); - s.append(&self.latest_attestations); - s.append(&self.batched_block_roots); - s.append(&self.latest_eth1_data); - s.append(&self.eth1_data_votes); - s.append(&self.deposit_index); - } -} - -impl Decodable for BeaconState { - fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { - let (slot, i) = <_>::ssz_decode(bytes, i)?; - let (genesis_time, i) = <_>::ssz_decode(bytes, i)?; - let (fork, i) = <_>::ssz_decode(bytes, i)?; - let (validator_registry, i) = <_>::ssz_decode(bytes, i)?; - let (validator_balances, i) = <_>::ssz_decode(bytes, i)?; - let (validator_registry_update_epoch, i) = <_>::ssz_decode(bytes, i)?; - let (latest_randao_mixes, i) = <_>::ssz_decode(bytes, i)?; - let (previous_shuffling_start_shard, i) = <_>::ssz_decode(bytes, i)?; - let (current_shuffling_start_shard, i) = <_>::ssz_decode(bytes, i)?; - let (previous_shuffling_epoch, i) = <_>::ssz_decode(bytes, i)?; - let (current_shuffling_epoch, i) = <_>::ssz_decode(bytes, i)?; - let (previous_shuffling_seed, i) = <_>::ssz_decode(bytes, i)?; - let (current_shuffling_seed, i) = <_>::ssz_decode(bytes, i)?; - let (previous_justified_epoch, i) = <_>::ssz_decode(bytes, i)?; - let (justified_epoch, i) = <_>::ssz_decode(bytes, i)?; - let (justification_bitfield, i) = <_>::ssz_decode(bytes, i)?; - let (finalized_epoch, i) = <_>::ssz_decode(bytes, i)?; - let (latest_crosslinks, i) = <_>::ssz_decode(bytes, i)?; - let (latest_block_roots, i) = <_>::ssz_decode(bytes, i)?; - let (latest_active_index_roots, i) = <_>::ssz_decode(bytes, i)?; - let (latest_slashed_balances, i) = <_>::ssz_decode(bytes, i)?; - let (latest_attestations, i) = <_>::ssz_decode(bytes, i)?; - let (batched_block_roots, i) = <_>::ssz_decode(bytes, i)?; - let (latest_eth1_data, i) = <_>::ssz_decode(bytes, i)?; - let (eth1_data_votes, i) = <_>::ssz_decode(bytes, i)?; - let (deposit_index, i) = <_>::ssz_decode(bytes, i)?; - - Ok(( - Self { - slot, - genesis_time, - fork, - validator_registry, - validator_balances, - validator_registry_update_epoch, - latest_randao_mixes, - previous_shuffling_start_shard, - current_shuffling_start_shard, - 
previous_shuffling_epoch, - current_shuffling_epoch, - previous_shuffling_seed, - current_shuffling_seed, - previous_justified_epoch, - justified_epoch, - justification_bitfield, - finalized_epoch, - latest_crosslinks, - latest_block_roots, - latest_active_index_roots, - latest_slashed_balances, - latest_attestations, - batched_block_roots, - latest_eth1_data, - eth1_data_votes, - deposit_index, - cache_index_offset: 0, - caches: vec![EpochCache::default(); CACHED_EPOCHS], - pubkey_cache: PubkeyCache::default(), - }, - i, - )) - } -} - -impl TreeHash for BeaconState { - fn hash_tree_root(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.slot.hash_tree_root()); - result.append(&mut self.genesis_time.hash_tree_root()); - result.append(&mut self.fork.hash_tree_root()); - result.append(&mut self.validator_registry.hash_tree_root()); - result.append(&mut self.validator_balances.hash_tree_root()); - result.append(&mut self.validator_registry_update_epoch.hash_tree_root()); - result.append(&mut self.latest_randao_mixes.hash_tree_root()); - result.append(&mut self.previous_shuffling_start_shard.hash_tree_root()); - result.append(&mut self.current_shuffling_start_shard.hash_tree_root()); - result.append(&mut self.previous_shuffling_epoch.hash_tree_root()); - result.append(&mut self.current_shuffling_epoch.hash_tree_root()); - result.append(&mut self.previous_shuffling_seed.hash_tree_root()); - result.append(&mut self.current_shuffling_seed.hash_tree_root()); - result.append(&mut self.previous_justified_epoch.hash_tree_root()); - result.append(&mut self.justified_epoch.hash_tree_root()); - result.append(&mut self.justification_bitfield.hash_tree_root()); - result.append(&mut self.finalized_epoch.hash_tree_root()); - result.append(&mut self.latest_crosslinks.hash_tree_root()); - result.append(&mut self.latest_block_roots.hash_tree_root()); - result.append(&mut self.latest_active_index_roots.hash_tree_root()); - result.append(&mut self.latest_slashed_balances.hash_tree_root()); - result.append(&mut self.latest_attestations.hash_tree_root()); - result.append(&mut self.batched_block_roots.hash_tree_root()); - result.append(&mut self.latest_eth1_data.hash_tree_root()); - result.append(&mut self.eth1_data_votes.hash_tree_root()); - result.append(&mut self.deposit_index.hash_tree_root()); - hash(&result) - } -} - -impl TestRandom for BeaconState { - fn random_for_test(rng: &mut T) -> Self { - Self { - slot: <_>::random_for_test(rng), - genesis_time: <_>::random_for_test(rng), - fork: <_>::random_for_test(rng), - validator_registry: <_>::random_for_test(rng), - validator_balances: <_>::random_for_test(rng), - validator_registry_update_epoch: <_>::random_for_test(rng), - latest_randao_mixes: <_>::random_for_test(rng), - previous_shuffling_start_shard: <_>::random_for_test(rng), - current_shuffling_start_shard: <_>::random_for_test(rng), - previous_shuffling_epoch: <_>::random_for_test(rng), - current_shuffling_epoch: <_>::random_for_test(rng), - previous_shuffling_seed: <_>::random_for_test(rng), - current_shuffling_seed: <_>::random_for_test(rng), - previous_justified_epoch: <_>::random_for_test(rng), - justified_epoch: <_>::random_for_test(rng), - justification_bitfield: <_>::random_for_test(rng), - finalized_epoch: <_>::random_for_test(rng), - latest_crosslinks: <_>::random_for_test(rng), - latest_block_roots: <_>::random_for_test(rng), - latest_active_index_roots: <_>::random_for_test(rng), - latest_slashed_balances: <_>::random_for_test(rng), - latest_attestations: 
<_>::random_for_test(rng), - batched_block_roots: <_>::random_for_test(rng), - latest_eth1_data: <_>::random_for_test(rng), - eth1_data_votes: <_>::random_for_test(rng), - deposit_index: <_>::random_for_test(rng), - cache_index_offset: 0, - caches: vec![EpochCache::default(); CACHED_EPOCHS], - pubkey_cache: PubkeyCache::default(), - } - } -} From f739bb55510b9683aa0721f8d5ee125c58eb096d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 15 Mar 2019 18:38:58 +1100 Subject: [PATCH 082/154] Add serde default field attr I accidentally deleted it in the last commit. --- eth2/types/src/beacon_state.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 9bcfa5cec..ec1008d3c 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -129,6 +129,7 @@ pub struct BeaconState { #[ssz(skip_deserializing)] #[tree_hash(skip_hashing)] pub cache_index_offset: usize, + #[serde(default)] #[ssz(skip_serializing)] #[ssz(skip_deserializing)] #[tree_hash(skip_hashing)] From 01bfd386375f69e20bc157c8bc26217077b8e97b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 16 Mar 2019 14:30:21 +1100 Subject: [PATCH 083/154] Add `default` attribute to `TestRandom` derive. Allows for generating the item from default instead of randomizing it. --- eth2/utils/test_random_derive/src/lib.rs | 48 +++++++++++++++++------- 1 file changed, 34 insertions(+), 14 deletions(-) diff --git a/eth2/utils/test_random_derive/src/lib.rs b/eth2/utils/test_random_derive/src/lib.rs index 9a456606c..7920ea695 100644 --- a/eth2/utils/test_random_derive/src/lib.rs +++ b/eth2/utils/test_random_derive/src/lib.rs @@ -4,7 +4,20 @@ use crate::proc_macro::TokenStream; use quote::quote; use syn::{parse_macro_input, DeriveInput}; -#[proc_macro_derive(TestRandom)] +/// Returns true if some field has an attribute declaring it should be generated from default (not +/// randomized). +/// +/// The field attribute is: `#[test_random(default)]` +fn should_use_default(field: &syn::Field) -> bool { + for attr in &field.attrs { + if attr.tts.to_string() == "( default )" { + return true; + } + } + false +} + +#[proc_macro_derive(TestRandom, attributes(test_random))] pub fn test_random_derive(input: TokenStream) -> TokenStream { let derived_input = parse_macro_input!(input as DeriveInput); let name = &derived_input.ident; @@ -14,14 +27,32 @@ pub fn test_random_derive(input: TokenStream) -> TokenStream { _ => panic!("test_random_derive only supports structs."), }; - let field_names = get_named_field_idents(&struct_data); + // Build quotes for fields that should be generated and those that should be built from + // `Default`. + let mut quotes = vec![]; + for field in &struct_data.fields { + match &field.ident { + Some(ref ident) => { + if should_use_default(field) { + quotes.push(quote! { + #ident: <_>::default(), + }); + } else { + quotes.push(quote! { + #ident: <_>::random_for_test(rng), + }); + } + } + _ => panic!("test_random_derive only supports named struct fields."), + }; + } let output = quote! 
{ impl TestRandom for #name { fn random_for_test(rng: &mut T) -> Self { Self { #( - #field_names: <_>::random_for_test(rng), + #quotes )* } } @@ -30,14 +61,3 @@ pub fn test_random_derive(input: TokenStream) -> TokenStream { output.into() } - -fn get_named_field_idents(struct_data: &syn::DataStruct) -> Vec<(&syn::Ident)> { - struct_data - .fields - .iter() - .map(|f| match &f.ident { - Some(ref ident) => ident, - _ => panic!("test_random_derive only supports named struct fields."), - }) - .collect() -} From 6ae5d34d0f3f1063841dab726514067c96e7dcf6 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 16 Mar 2019 14:31:43 +1100 Subject: [PATCH 084/154] Update testing builders as per state 0.5.0 update --- .../src/test_utils/testing_attestation_data_builder.rs | 2 +- .../types/src/test_utils/testing_beacon_block_builder.rs | 4 ++-- .../types/src/test_utils/testing_beacon_state_builder.rs | 9 +++++++-- .../src/test_utils/testing_proposer_slashing_builder.rs | 4 ++-- 4 files changed, 12 insertions(+), 7 deletions(-) diff --git a/eth2/types/src/test_utils/testing_attestation_data_builder.rs b/eth2/types/src/test_utils/testing_attestation_data_builder.rs index f31de2fbd..a270e3859 100644 --- a/eth2/types/src/test_utils/testing_attestation_data_builder.rs +++ b/eth2/types/src/test_utils/testing_attestation_data_builder.rs @@ -20,7 +20,7 @@ impl TestingAttestationDataBuilder { let source_epoch = if is_previous_epoch { state.previous_justified_epoch } else { - state.justified_epoch + state.current_justified_epoch }; let target_root = if is_previous_epoch { diff --git a/eth2/types/src/test_utils/testing_beacon_block_builder.rs b/eth2/types/src/test_utils/testing_beacon_block_builder.rs index e0e4677d4..7fb3d8e09 100644 --- a/eth2/types/src/test_utils/testing_beacon_block_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_block_builder.rs @@ -19,7 +19,7 @@ impl TestingBeaconBlockBuilder { /// Create a new builder from genesis. pub fn new(spec: &ChainSpec) -> Self { Self { - block: BeaconBlock::genesis(spec.zero_hash, spec), + block: BeaconBlock::empty(spec), } } @@ -34,7 +34,7 @@ impl TestingBeaconBlockBuilder { pub fn sign(&mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) { let message = self.block.signed_root(); let epoch = self.block.slot.epoch(spec.slots_per_epoch); - let domain = spec.get_domain(epoch, Domain::Proposal, fork); + let domain = spec.get_domain(epoch, Domain::BeaconBlock, fork); self.block.signature = Signature::new(&message, domain, sk); } diff --git a/eth2/types/src/test_utils/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/testing_beacon_state_builder.rs index 5b96dc455..8ef4f76ce 100644 --- a/eth2/types/src/test_utils/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_state_builder.rs @@ -190,7 +190,7 @@ impl TestingBeaconStateBuilder { state.current_shuffling_seed = Hash256::from_low_u64_le(1); state.previous_justified_epoch = epoch - 3; - state.justified_epoch = epoch - 2; + state.current_justified_epoch = epoch - 2; state.justification_bitfield = u64::max_value(); state.finalized_epoch = epoch - 3; @@ -232,8 +232,13 @@ impl TestingBeaconStateBuilder { // The entire committee should have signed the pending attestation. 
let signers = vec![true; committee.len()]; builder.add_committee_participation(signers); + let attestation = builder.build(); - state.latest_attestations.push(builder.build()) + if attestation.data.slot.epoch(spec.slots_per_epoch) < state.current_epoch(spec) { + state.previous_epoch_attestations.push(attestation) + } else { + state.current_epoch_attestations.push(attestation) + } } } } diff --git a/eth2/types/src/test_utils/testing_proposer_slashing_builder.rs b/eth2/types/src/test_utils/testing_proposer_slashing_builder.rs index fc38c185d..0773cd6da 100644 --- a/eth2/types/src/test_utils/testing_proposer_slashing_builder.rs +++ b/eth2/types/src/test_utils/testing_proposer_slashing_builder.rs @@ -41,13 +41,13 @@ impl TestingProposerSlashingBuilder { proposal_1.signature = { let message = proposal_1.signed_root(); let epoch = slot.epoch(spec.slots_per_epoch); - signer(proposer_index, &message[..], epoch, Domain::Proposal) + signer(proposer_index, &message[..], epoch, Domain::BeaconBlock) }; proposal_2.signature = { let message = proposal_2.signed_root(); let epoch = slot.epoch(spec.slots_per_epoch); - signer(proposer_index, &message[..], epoch, Domain::Proposal) + signer(proposer_index, &message[..], epoch, Domain::BeaconBlock) }; ProposerSlashing { From 33783d4baa2d7edb8be4aa75c6cf997df4da2c37 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 16 Mar 2019 14:32:07 +1100 Subject: [PATCH 085/154] Fix borrow issue in `BeaconBlock` --- eth2/types/src/beacon_block.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth2/types/src/beacon_block.rs b/eth2/types/src/beacon_block.rs index bfe266cbd..2dcf91d95 100644 --- a/eth2/types/src/beacon_block.rs +++ b/eth2/types/src/beacon_block.rs @@ -72,7 +72,7 @@ impl BeaconBlock { previous_block_root: self.previous_block_root, state_root: spec.zero_hash, block_body_root: Hash256::from_slice(&self.hash_tree_root()), - signature: self.signature, + signature: self.signature.clone(), } } } From c648491c45fdf858c55006a57cd4b48db6e08a91 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 16 Mar 2019 14:32:23 +1100 Subject: [PATCH 086/154] Add TestRandom `default` field attr to BeaconState --- eth2/types/src/beacon_state.rs | 5 ++++- eth2/types/src/beacon_state/epoch_cache.rs | 13 ------------- eth2/types/src/beacon_state/pubkey_cache.rs | 9 --------- 3 files changed, 4 insertions(+), 23 deletions(-) diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index ec1008d3c..ba9c5cd4d 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -8,7 +8,7 @@ use log::{debug, error, trace}; use pubkey_cache::PubkeyCache; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz::{hash, Decodable, SignedRoot, TreeHash}; +use ssz::{hash, SignedRoot}; use ssz_derive::{Decode, Encode, TreeHash}; use std::collections::HashMap; use swap_or_not_shuffle::shuffle_list; @@ -128,16 +128,19 @@ pub struct BeaconState { #[ssz(skip_serializing)] #[ssz(skip_deserializing)] #[tree_hash(skip_hashing)] + #[test_random(default)] pub cache_index_offset: usize, #[serde(default)] #[ssz(skip_serializing)] #[ssz(skip_deserializing)] #[tree_hash(skip_hashing)] + #[test_random(default)] pub caches: [EpochCache; CACHED_EPOCHS], #[serde(default)] #[ssz(skip_serializing)] #[ssz(skip_deserializing)] #[tree_hash(skip_hashing)] + #[test_random(default)] pub pubkey_cache: PubkeyCache, } diff --git a/eth2/types/src/beacon_state/epoch_cache.rs b/eth2/types/src/beacon_state/epoch_cache.rs index 3e580eee1..ddcca0a9a 100644 
--- a/eth2/types/src/beacon_state/epoch_cache.rs +++ b/eth2/types/src/beacon_state/epoch_cache.rs @@ -1,7 +1,5 @@ use super::{AttestationDuty, BeaconState, CrosslinkCommittees, Error}; -use crate::test_utils::TestRandom; use crate::{ChainSpec, Epoch}; -use rand::RngCore; use serde_derive::{Deserialize, Serialize}; #[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] @@ -69,14 +67,3 @@ impl EpochCache { }) } } - -impl TestRandom for [EpochCache; 3] { - /// Test random should generate an empty cache. - fn random_for_test(rng: &mut T) -> Self { - [ - EpochCache::default(), - EpochCache::default(), - EpochCache::default(), - ] - } -} diff --git a/eth2/types/src/beacon_state/pubkey_cache.rs b/eth2/types/src/beacon_state/pubkey_cache.rs index 22fe32694..340bdb311 100644 --- a/eth2/types/src/beacon_state/pubkey_cache.rs +++ b/eth2/types/src/beacon_state/pubkey_cache.rs @@ -1,6 +1,4 @@ -use crate::test_utils::TestRandom; use crate::*; -use rand::RngCore; use serde_derive::{Deserialize, Serialize}; use std::collections::HashMap; @@ -38,10 +36,3 @@ impl PubkeyCache { self.map.get(pubkey).cloned() } } - -impl TestRandom for PubkeyCache { - /// Test random should generate an empty cache. - fn random_for_test(rng: &mut T) -> Self { - Self::default() - } -} From d6456a948685af408c913229db1b626e18226123 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 16 Mar 2019 14:35:45 +1100 Subject: [PATCH 087/154] Remove old types structs. They've been outdated be v0.5.0 or prior --- eth2/types/src/lib.rs | 1 - eth2/types/src/proposal_signed_data.rs | 20 --- eth2/types/src/shard_reassignment_record.rs | 19 --- eth2/types/src/slashable_vote_data.rs | 132 ------------------ .../src/validator_registry_delta_block.rs | 36 ----- 5 files changed, 208 deletions(-) delete mode 100644 eth2/types/src/proposal_signed_data.rs delete mode 100644 eth2/types/src/shard_reassignment_record.rs delete mode 100644 eth2/types/src/slashable_vote_data.rs delete mode 100644 eth2/types/src/validator_registry_delta_block.rs diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index a1a58198b..c38fa8031 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -23,7 +23,6 @@ pub mod free_attestation; pub mod historical_batch; pub mod pending_attestation; pub mod proposer_slashing; -pub mod shard_reassignment_record; pub mod slashable_attestation; pub mod transfer; pub mod voluntary_exit; diff --git a/eth2/types/src/proposal_signed_data.rs b/eth2/types/src/proposal_signed_data.rs deleted file mode 100644 index 58f45a41d..000000000 --- a/eth2/types/src/proposal_signed_data.rs +++ /dev/null @@ -1,20 +0,0 @@ -use crate::test_utils::TestRandom; -use crate::{Hash256, Slot}; -use rand::RngCore; -use serde_derive::Serialize; -use ssz_derive::{Decode, Encode, TreeHash}; -use test_random_derive::TestRandom; - -#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode, TreeHash, TestRandom)] -pub struct ProposalSignedData { - pub slot: Slot, - pub shard: u64, - pub block_root: Hash256, -} - -#[cfg(test)] -mod tests { - use super::*; - - ssz_tests!(ProposalSignedData); -} diff --git a/eth2/types/src/shard_reassignment_record.rs b/eth2/types/src/shard_reassignment_record.rs deleted file mode 100644 index 9f1705f16..000000000 --- a/eth2/types/src/shard_reassignment_record.rs +++ /dev/null @@ -1,19 +0,0 @@ -use crate::{test_utils::TestRandom, Slot}; -use rand::RngCore; -use serde_derive::Serialize; -use ssz_derive::{Decode, Encode, TreeHash}; -use test_random_derive::TestRandom; - -#[derive(Debug, PartialEq, 
Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] -pub struct ShardReassignmentRecord { - pub validator_index: u64, - pub shard: u64, - pub slot: Slot, -} - -#[cfg(test)] -mod tests { - use super::*; - - ssz_tests!(ShardReassignmentRecord); -} diff --git a/eth2/types/src/slashable_vote_data.rs b/eth2/types/src/slashable_vote_data.rs deleted file mode 100644 index 73cf91c61..000000000 --- a/eth2/types/src/slashable_vote_data.rs +++ /dev/null @@ -1,132 +0,0 @@ -use super::AttestationData; -use crate::chain_spec::ChainSpec; -use crate::test_utils::TestRandom; -use bls::AggregateSignature; -use rand::RngCore; -use serde_derive::Serialize; -use ssz_derive::{Decode, Encode, TreeHash}; -use test_random_derive::TestRandom; - -#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] -pub struct SlashableVoteData { - pub custody_bit_0_indices: Vec, - pub custody_bit_1_indices: Vec, - pub data: AttestationData, - pub aggregate_signature: AggregateSignature, -} - -impl SlashableVoteData { - /// Check if ``attestation_data_1`` and ``attestation_data_2`` have the same target. - /// - /// Spec v0.3.0 - pub fn is_double_vote(&self, other: &SlashableVoteData, spec: &ChainSpec) -> bool { - self.data.slot.epoch(spec.epoch_length) == other.data.slot.epoch(spec.epoch_length) - } - - /// Check if ``attestation_data_1`` surrounds ``attestation_data_2``. - /// - /// Spec v0.3.0 - pub fn is_surround_vote(&self, other: &SlashableVoteData, spec: &ChainSpec) -> bool { - let source_epoch_1 = self.data.justified_epoch; - let source_epoch_2 = other.data.justified_epoch; - let target_epoch_1 = self.data.slot.epoch(spec.epoch_length); - let target_epoch_2 = other.data.slot.epoch(spec.epoch_length); - - (source_epoch_1 < source_epoch_2) && (target_epoch_2 < target_epoch_1) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::chain_spec::ChainSpec; - use crate::slot_epoch::{Epoch, Slot}; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - - #[test] - pub fn test_is_double_vote_true() { - let spec = ChainSpec::foundation(); - let slashable_vote_first = create_slashable_vote_data(1, 1, &spec); - let slashable_vote_second = create_slashable_vote_data(1, 1, &spec); - - assert_eq!( - slashable_vote_first.is_double_vote(&slashable_vote_second, &spec), - true - ) - } - - #[test] - pub fn test_is_double_vote_false() { - let spec = ChainSpec::foundation(); - let slashable_vote_first = create_slashable_vote_data(1, 1, &spec); - let slashable_vote_second = create_slashable_vote_data(2, 1, &spec); - - assert_eq!( - slashable_vote_first.is_double_vote(&slashable_vote_second, &spec), - false - ); - } - - #[test] - pub fn test_is_surround_vote_true() { - let spec = ChainSpec::foundation(); - let slashable_vote_first = create_slashable_vote_data(2, 1, &spec); - let slashable_vote_second = create_slashable_vote_data(1, 2, &spec); - - assert_eq!( - slashable_vote_first.is_surround_vote(&slashable_vote_second, &spec), - true - ); - } - - #[test] - pub fn test_is_surround_vote_true_realistic() { - let spec = ChainSpec::foundation(); - let slashable_vote_first = create_slashable_vote_data(4, 1, &spec); - let slashable_vote_second = create_slashable_vote_data(3, 2, &spec); - - assert_eq!( - slashable_vote_first.is_surround_vote(&slashable_vote_second, &spec), - true - ); - } - - #[test] - pub fn test_is_surround_vote_false_source_epoch_fails() { - let spec = ChainSpec::foundation(); - let slashable_vote_first = create_slashable_vote_data(2, 2, &spec); - let slashable_vote_second = 
create_slashable_vote_data(1, 1, &spec); - - assert_eq!( - slashable_vote_first.is_surround_vote(&slashable_vote_second, &spec), - false - ); - } - - #[test] - pub fn test_is_surround_vote_false_target_epoch_fails() { - let spec = ChainSpec::foundation(); - let slashable_vote_first = create_slashable_vote_data(1, 1, &spec); - let slashable_vote_second = create_slashable_vote_data(2, 2, &spec); - - assert_eq!( - slashable_vote_first.is_surround_vote(&slashable_vote_second, &spec), - false - ); - } - - ssz_tests!(SlashableVoteData); - - fn create_slashable_vote_data( - slot_factor: u64, - justified_epoch: u64, - spec: &ChainSpec, - ) -> SlashableVoteData { - let mut rng = XorShiftRng::from_seed([42; 16]); - let mut slashable_vote = SlashableVoteData::random_for_test(&mut rng); - - slashable_vote.data.slot = Slot::new(slot_factor * spec.epoch_length); - slashable_vote.data.justified_epoch = Epoch::new(justified_epoch); - slashable_vote - } -} diff --git a/eth2/types/src/validator_registry_delta_block.rs b/eth2/types/src/validator_registry_delta_block.rs deleted file mode 100644 index e9a075052..000000000 --- a/eth2/types/src/validator_registry_delta_block.rs +++ /dev/null @@ -1,36 +0,0 @@ -use crate::{test_utils::TestRandom, Hash256, Slot}; -use bls::PublicKey; -use rand::RngCore; -use serde_derive::Serialize; -use ssz_derive::{Decode, Encode, TreeHash}; -use test_random_derive::TestRandom; - -// The information gathered from the PoW chain validator registration function. -#[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode, TreeHash, TestRandom)] -pub struct ValidatorRegistryDeltaBlock { - pub latest_registry_delta_root: Hash256, - pub validator_index: u32, - pub pubkey: PublicKey, - pub slot: Slot, - pub flag: u64, -} - -impl Default for ValidatorRegistryDeltaBlock { - /// Yields a "default" `Validator`. Primarily used for testing. 
- fn default() -> Self { - Self { - latest_registry_delta_root: Hash256::zero(), - validator_index: std::u32::MAX, - pubkey: PublicKey::default(), - slot: Slot::from(std::u64::MAX), - flag: std::u64::MAX, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - ssz_tests!(ValidatorRegistryDeltaBlock); -} From 7f4af20212c4b07dbb4c682b163f9c99cf3144f5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 16 Mar 2019 19:14:49 +1100 Subject: [PATCH 088/154] Refactor shuffling generation --- eth2/types/src/attestation_duty.rs | 9 + eth2/types/src/beacon_state.rs | 459 ++++-------------- eth2/types/src/beacon_state/builder.rs | 6 +- eth2/types/src/beacon_state/epoch_cache.rs | 292 +++++++++-- eth2/types/src/beacon_state/tests.rs | 48 -- eth2/types/src/crosslink_committee.rs | 9 + eth2/types/src/epoch_cache.rs | 0 eth2/types/src/lib.rs | 12 +- eth2/types/src/relative_epoch.rs | 76 +++ .../testing_beacon_block_builder.rs | 12 +- .../testing_beacon_state_builder.rs | 17 +- 11 files changed, 476 insertions(+), 464 deletions(-) create mode 100644 eth2/types/src/attestation_duty.rs create mode 100644 eth2/types/src/crosslink_committee.rs create mode 100644 eth2/types/src/epoch_cache.rs create mode 100644 eth2/types/src/relative_epoch.rs diff --git a/eth2/types/src/attestation_duty.rs b/eth2/types/src/attestation_duty.rs new file mode 100644 index 000000000..f6e86d263 --- /dev/null +++ b/eth2/types/src/attestation_duty.rs @@ -0,0 +1,9 @@ +use crate::*; +use serde_derive::{Deserialize, Serialize}; + +#[derive(Debug, PartialEq, Clone, Default, Serialize, Deserialize)] +pub struct AttestationDuty { + pub slot: Slot, + pub shard: Shard, + pub committee_index: usize, +} diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index ba9c5cd4d..32f8204e3 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -1,17 +1,14 @@ use self::epoch_cache::EpochCache; use crate::test_utils::TestRandom; use crate::{validator_registry::get_active_validator_indices, *}; -use helpers::*; -use honey_badger_split::SplitExt; use int_to_bytes::int_to_bytes32; -use log::{debug, error, trace}; +use log::{debug, trace}; use pubkey_cache::PubkeyCache; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz::{hash, SignedRoot}; +use ssz::{hash, SignedRoot, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash}; use std::collections::HashMap; -use swap_or_not_shuffle::shuffle_list; use test_random_derive::TestRandom; pub use builder::BeaconStateBuilder; @@ -22,22 +19,7 @@ pub mod helpers; mod pubkey_cache; mod tests; -pub type Committee = Vec; -pub type CrosslinkCommittees = Vec<(Committee, u64)>; -pub type Shard = u64; -pub type CommitteeIndex = u64; -pub type AttestationDuty = (Slot, Shard, CommitteeIndex); -pub type AttestationDutyMap = HashMap; -pub type ShardCommitteeIndexMap = HashMap; - -pub const CACHED_EPOCHS: usize = 3; - -#[derive(Debug, PartialEq, Clone, Copy)] -pub enum RelativeEpoch { - Previous, - Current, - Next, -} +pub const CACHED_EPOCHS: usize = 4; #[derive(Debug, PartialEq)] pub enum Error { @@ -61,6 +43,7 @@ pub enum Error { cache_len: usize, registry_len: usize, }, + RelativeEpochError(RelativeEpochError), } macro_rules! safe_add_assign { @@ -212,13 +195,12 @@ impl BeaconState { EpochCache::default(), EpochCache::default(), EpochCache::default(), + EpochCache::default(), ], pubkey_cache: PubkeyCache::default(), } } - /* - /// Returns the `hash_tree_root` of the state. 
/// /// Spec v0.5.0 @@ -226,8 +208,6 @@ impl BeaconState { Hash256::from_slice(&self.hash_tree_root()[..]) } - */ - /// Build an epoch cache, unless it is has already been built. pub fn build_epoch_cache( &mut self, @@ -236,7 +216,8 @@ impl BeaconState { ) -> Result<(), Error> { let cache_index = self.cache_index(relative_epoch); - if self.caches[cache_index].initialized { + if self.caches[cache_index].initialized_epoch == Some(self.slot.epoch(spec.slots_per_epoch)) + { Ok(()) } else { self.force_build_epoch_cache(relative_epoch, spec) @@ -249,36 +230,13 @@ impl BeaconState { relative_epoch: RelativeEpoch, spec: &ChainSpec, ) -> Result<(), Error> { - let epoch = self.absolute_epoch(relative_epoch, spec); let cache_index = self.cache_index(relative_epoch); - self.caches[cache_index] = EpochCache::initialized(&self, epoch, spec)?; + self.caches[cache_index] = EpochCache::initialized(&self, relative_epoch, spec)?; Ok(()) } - /// Converts a `RelativeEpoch` into an `Epoch` with respect to the epoch of this state. - fn absolute_epoch(&self, relative_epoch: RelativeEpoch, spec: &ChainSpec) -> Epoch { - match relative_epoch { - RelativeEpoch::Previous => self.previous_epoch(spec), - RelativeEpoch::Current => self.current_epoch(spec), - RelativeEpoch::Next => self.next_epoch(spec), - } - } - - /// Converts an `Epoch` into a `RelativeEpoch` with respect to the epoch of this state. - /// - /// Returns an error if the given `epoch` not "previous", "current" or "next" compared to the - /// epoch of this tate. - fn relative_epoch(&self, epoch: Epoch, spec: &ChainSpec) -> Result { - match epoch { - e if e == self.current_epoch(spec) => Ok(RelativeEpoch::Current), - e if e == self.previous_epoch(spec) => Ok(RelativeEpoch::Previous), - e if e == self.next_epoch(spec) => Ok(RelativeEpoch::Next), - _ => Err(Error::EpochOutOfBounds), - } - } - /// Advances the cache for this state into the next epoch. /// /// This should be used if the `slot` of this state is advanced beyond an epoch boundary. @@ -305,9 +263,10 @@ impl BeaconState { /// Returns the index of `self.caches` for some `RelativeEpoch`. fn cache_index(&self, relative_epoch: RelativeEpoch) -> usize { let base_index = match relative_epoch { - RelativeEpoch::Current => 1, RelativeEpoch::Previous => 0, - RelativeEpoch::Next => 2, + RelativeEpoch::Current => 1, + RelativeEpoch::NextWithoutRegistryChange => 2, + RelativeEpoch::NextWithRegistryChange => 3, }; (base_index + self.cache_index_offset) % CACHED_EPOCHS @@ -315,10 +274,10 @@ impl BeaconState { /// Returns the cache for some `RelativeEpoch`. Returns an error if the cache has not been /// initialized. - fn cache(&self, relative_epoch: RelativeEpoch) -> Result<&EpochCache, Error> { + fn cache(&self, relative_epoch: RelativeEpoch, spec: &ChainSpec) -> Result<&EpochCache, Error> { let cache = &self.caches[self.cache_index(relative_epoch)]; - if cache.initialized { + if cache.initialized_epoch == Some(self.slot.epoch(spec.slots_per_epoch)) { Ok(cache) } else { Err(Error::EpochCacheUninitialized(relative_epoch)) @@ -367,7 +326,7 @@ impl BeaconState { /// The epoch corresponding to `self.slot`. /// - /// Spec v0.4.0 + /// Spec v0.5.0 pub fn current_epoch(&self, spec: &ChainSpec) -> Epoch { self.slot.epoch(spec.slots_per_epoch) } @@ -376,58 +335,16 @@ impl BeaconState { /// /// If the current epoch is the genesis epoch, the genesis_epoch is returned. 
/// - /// Spec v0.4.0 + /// Spec v0.5.0 pub fn previous_epoch(&self, spec: &ChainSpec) -> Epoch { - let current_epoch = self.current_epoch(&spec); - std::cmp::max(current_epoch - 1, spec.genesis_epoch) + self.current_epoch(&spec) - 1 } /// The epoch following `self.current_epoch()`. /// - /// Spec v0.4.0 + /// Spec v0.5.0 pub fn next_epoch(&self, spec: &ChainSpec) -> Epoch { - self.current_epoch(spec).saturating_add(1_u64) - } - - /// The first slot of the epoch corresponding to `self.slot`. - /// - /// Spec v0.4.0 - pub fn current_epoch_start_slot(&self, spec: &ChainSpec) -> Slot { - self.current_epoch(spec).start_slot(spec.slots_per_epoch) - } - - /// The first slot of the epoch preceding the one corresponding to `self.slot`. - /// - /// Spec v0.4.0 - pub fn previous_epoch_start_slot(&self, spec: &ChainSpec) -> Slot { - self.previous_epoch(spec).start_slot(spec.slots_per_epoch) - } - - /// Return the number of committees in the previous epoch. - /// - /// Spec v0.4.0 - fn get_previous_epoch_committee_count(&self, spec: &ChainSpec) -> u64 { - let previous_active_validators = - get_active_validator_indices(&self.validator_registry, self.previous_shuffling_epoch); - spec.get_epoch_committee_count(previous_active_validators.len()) - } - - /// Return the number of committees in the current epoch. - /// - /// Spec v0.4.0 - pub fn get_current_epoch_committee_count(&self, spec: &ChainSpec) -> u64 { - let current_active_validators = - get_active_validator_indices(&self.validator_registry, self.current_shuffling_epoch); - spec.get_epoch_committee_count(current_active_validators.len()) - } - - /// Return the number of committees in the next epoch. - /// - /// Spec v0.4.0 - pub fn get_next_epoch_committee_count(&self, spec: &ChainSpec) -> u64 { - let next_active_validators = - get_active_validator_indices(&self.validator_registry, self.next_epoch(spec)); - spec.get_epoch_committee_count(next_active_validators.len()) + self.current_epoch(spec) + 1 } /// Returns the crosslink committees for some slot. @@ -438,15 +355,14 @@ impl BeaconState { pub fn get_crosslink_committees_at_slot( &self, slot: Slot, + relative_epoch: RelativeEpoch, spec: &ChainSpec, - ) -> Result<&CrosslinkCommittees, Error> { - let epoch = slot.epoch(spec.slots_per_epoch); - let relative_epoch = self.relative_epoch(epoch, spec)?; - let cache = self.cache(relative_epoch)?; + ) -> Result<&Vec, Error> { + let cache = self.cache(relative_epoch, spec)?; - let slot_offset = slot - epoch.start_slot(spec.slots_per_epoch); - - Ok(&cache.committees[slot_offset.as_usize()]) + Ok(cache + .get_crosslink_committees_at_slot(slot, spec) + .ok_or_else(|| Error::SlotOutOfBounds)?) } /// Return the block root at a recent `slot`. @@ -525,8 +441,13 @@ impl BeaconState { /// If the state does not contain an index for a beacon proposer at the requested `slot`, then `None` is returned. 
/// /// Spec v0.4.0 - pub fn get_beacon_proposer_index(&self, slot: Slot, spec: &ChainSpec) -> Result { - let committees = self.get_crosslink_committees_at_slot(slot, spec)?; + pub fn get_beacon_proposer_index( + &self, + slot: Slot, + relative_epoch: RelativeEpoch, + spec: &ChainSpec, + ) -> Result { + let committees = self.get_crosslink_committees_at_slot(slot, relative_epoch, spec)?; trace!( "get_beacon_proposer_index: slot: {}, committees_count: {}", slot, @@ -535,71 +456,28 @@ impl BeaconState { committees .first() .ok_or(Error::InsufficientValidators) - .and_then(|(first_committee, _)| { + .and_then(|first| { let index = slot .as_usize() - .checked_rem(first_committee.len()) + .checked_rem(first.committee.len()) .ok_or(Error::InsufficientValidators)?; - Ok(first_committee[index]) + Ok(first.committee[index]) }) } - /// Returns the list of validator indices which participiated in the attestation. - /// - /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. - /// - /// Spec v0.4.0 - pub fn get_attestation_participants( - &self, - attestation_data: &AttestationData, - bitfield: &Bitfield, - spec: &ChainSpec, - ) -> Result, Error> { - let epoch = attestation_data.slot.epoch(spec.slots_per_epoch); - let relative_epoch = self.relative_epoch(epoch, spec)?; - let cache = self.cache(relative_epoch)?; - - let (committee_slot_index, committee_index) = cache - .shard_committee_indices - .get(attestation_data.shard as usize) - .ok_or_else(|| Error::ShardOutOfBounds)?; - let (committee, shard) = &cache.committees[*committee_slot_index][*committee_index]; - - assert_eq!(*shard, attestation_data.shard, "Bad epoch cache build."); - - if !verify_bitfield_length(&bitfield, committee.len()) { - return Err(Error::InvalidBitfield); - } - - let mut participants = Vec::with_capacity(committee.len()); - for (i, validator_index) in committee.iter().enumerate() { - match bitfield.get(i) { - Ok(bit) if bit == true => participants.push(*validator_index), - _ => {} - } - } - participants.shrink_to_fit(); - - Ok(participants) - } - /// Return the effective balance (also known as "balance at stake") for a validator with the given ``index``. /// /// Spec v0.4.0 - pub fn get_effective_balance(&self, validator_index: usize, spec: &ChainSpec) -> u64 { - std::cmp::min( - self.validator_balances[validator_index], - spec.max_deposit_amount, - ) - } - - /// Return the combined effective balance of an array of validators. - /// - /// Spec v0.4.0 - pub fn get_total_balance(&self, validator_indices: &[usize], spec: &ChainSpec) -> u64 { - validator_indices - .iter() - .fold(0, |acc, i| acc + self.get_effective_balance(*i, spec)) + pub fn get_effective_balance( + &self, + validator_index: usize, + spec: &ChainSpec, + ) -> Result { + let balance = self + .validator_balances + .get(validator_index) + .ok_or_else(|| Error::UnknownValidator)?; + Ok(std::cmp::min(*balance, spec.max_deposit_amount)) } /// Return the epoch at which an activation or exit triggered in ``epoch`` takes effect. 
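As a standalone illustration of the proposer-selection rule kept by the hunk above (the proposer is drawn from the slot's first crosslink committee, at index slot modulo the committee length), the sketch below restates it with simplified local types and plain integers instead of the crate's BeaconState, Slot and ChainSpec, so it is only an approximation of the real code path, not the crate's API.

// Standalone sketch; simplified stand-ins for the lighthouse types.
#[derive(Debug)]
struct CrosslinkCommittee {
    shard: u64,
    committee: Vec<usize>, // validator indices
}

/// The proposer for `slot` is the member of the slot's first committee at
/// index `slot % committee.len()`; `None` if there is no usable committee.
fn beacon_proposer_index(slot: u64, committees_at_slot: &[CrosslinkCommittee]) -> Option<usize> {
    let first = committees_at_slot.first()?;
    if first.committee.is_empty() {
        return None;
    }
    Some(first.committee[slot as usize % first.committee.len()])
}

fn main() {
    let committees = vec![CrosslinkCommittee {
        shard: 0,
        committee: vec![10, 11, 12, 13],
    }];
    // 6 % 4 == 2, so validator 12 proposes at slot 6.
    assert_eq!(beacon_proposer_index(6, &committees), Some(12));
}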
@@ -767,11 +645,15 @@ impl BeaconState { self.exit_validator(validator_index, spec); - self.latest_slashed_balances[current_epoch.as_usize() % spec.latest_slashed_exit_length] += - self.get_effective_balance(validator_index, spec); + let effective_balance = self.get_effective_balance(validator_index, spec)?; - let whistleblower_index = self.get_beacon_proposer_index(self.slot, spec)?; - let whistleblower_reward = self.get_effective_balance(validator_index, spec); + self.latest_slashed_balances[current_epoch.as_usize() % spec.latest_slashed_exit_length] += + effective_balance; + + let whistleblower_index = + self.get_beacon_proposer_index(self.slot, RelativeEpoch::Current, spec)?; + + let whistleblower_reward = effective_balance; safe_add_assign!( self.validator_balances[whistleblower_index as usize], whistleblower_reward @@ -801,166 +683,6 @@ impl BeaconState { self.current_epoch(spec) + spec.min_validator_withdrawability_delay; } - /// Returns the crosslink committees for some slot. - /// - /// Utilizes the cache and will fail if the appropriate cache is not initialized. - /// - /// Spec v0.4.0 - pub(crate) fn get_shuffling_for_slot( - &self, - slot: Slot, - registry_change: bool, - spec: &ChainSpec, - ) -> Result>, Error> { - let (_committees_per_epoch, seed, shuffling_epoch, _shuffling_start_shard) = - self.get_committee_params_at_slot(slot, registry_change, spec)?; - - self.get_shuffling(seed, shuffling_epoch, spec) - } - - /// Shuffle ``validators`` into crosslink committees seeded by ``seed`` and ``epoch``. - /// - /// Return a list of ``committees_per_epoch`` committees where each - /// committee is itself a list of validator indices. - /// - /// Spec v0.4.0 - pub(crate) fn get_shuffling( - &self, - seed: Hash256, - epoch: Epoch, - spec: &ChainSpec, - ) -> Result>, Error> { - let active_validator_indices = - get_active_validator_indices(&self.validator_registry, epoch); - if active_validator_indices.is_empty() { - error!("get_shuffling: no validators."); - return Err(Error::InsufficientValidators); - } - - debug!("Shuffling {} validators...", active_validator_indices.len()); - - let committees_per_epoch = spec.get_epoch_committee_count(active_validator_indices.len()); - - trace!( - "get_shuffling: active_validator_indices.len() == {}, committees_per_epoch: {}", - active_validator_indices.len(), - committees_per_epoch - ); - - let active_validator_indices: Vec = active_validator_indices.to_vec(); - - let shuffled_active_validator_indices = shuffle_list( - active_validator_indices, - spec.shuffle_round_count, - &seed[..], - true, - ) - .ok_or_else(|| Error::UnableToShuffle)?; - - Ok(shuffled_active_validator_indices - .honey_badger_split(committees_per_epoch as usize) - .map(|slice: &[usize]| slice.to_vec()) - .collect()) - } - - /// Returns the following params for the given slot: - /// - /// - epoch committee count - /// - epoch seed - /// - calculation epoch - /// - start shard - /// - /// In the spec, this functionality is included in the `get_crosslink_committees_at_slot(..)` - /// function. It is separated here to allow the division of shuffling and committee building, - /// as is required for efficient operations. 
- /// - /// Spec v0.4.0 - pub(crate) fn get_committee_params_at_slot( - &self, - slot: Slot, - registry_change: bool, - spec: &ChainSpec, - ) -> Result<(u64, Hash256, Epoch, u64), Error> { - let epoch = slot.epoch(spec.slots_per_epoch); - let current_epoch = self.current_epoch(spec); - let previous_epoch = self.previous_epoch(spec); - let next_epoch = self.next_epoch(spec); - - if epoch == current_epoch { - Ok(( - self.get_current_epoch_committee_count(spec), - self.current_shuffling_seed, - self.current_shuffling_epoch, - self.current_shuffling_start_shard, - )) - } else if epoch == previous_epoch { - Ok(( - self.get_previous_epoch_committee_count(spec), - self.previous_shuffling_seed, - self.previous_shuffling_epoch, - self.previous_shuffling_start_shard, - )) - } else if epoch == next_epoch { - let current_committees_per_epoch = self.get_current_epoch_committee_count(spec); - let epochs_since_last_registry_update = - current_epoch - self.validator_registry_update_epoch; - let (seed, shuffling_start_shard) = if registry_change { - let next_seed = self.generate_seed(next_epoch, spec)?; - ( - next_seed, - (self.current_shuffling_start_shard + current_committees_per_epoch) - % spec.shard_count, - ) - } else if (epochs_since_last_registry_update > 1) - & epochs_since_last_registry_update.is_power_of_two() - { - let next_seed = self.generate_seed(next_epoch, spec)?; - (next_seed, self.current_shuffling_start_shard) - } else { - ( - self.current_shuffling_seed, - self.current_shuffling_start_shard, - ) - }; - Ok(( - self.get_next_epoch_committee_count(spec), - seed, - next_epoch, - shuffling_start_shard, - )) - } else { - Err(Error::EpochOutOfBounds) - } - } - - /// Return the ordered list of shards tuples for the `slot`. - /// - /// Note: There are two possible shufflings for crosslink committees for a - /// `slot` in the next epoch: with and without a `registry_change` - /// - /// Spec v0.4.0 - pub(crate) fn get_shards_for_slot( - &self, - slot: Slot, - registry_change: bool, - spec: &ChainSpec, - ) -> Result, Error> { - let (committees_per_epoch, _seed, _shuffling_epoch, shuffling_start_shard) = - self.get_committee_params_at_slot(slot, registry_change, spec)?; - - let offset = slot.as_u64() % spec.slots_per_epoch; - let committees_per_slot = committees_per_epoch / spec.slots_per_epoch; - let slot_start_shard = - (shuffling_start_shard + committees_per_slot * offset) % spec.shard_count; - - let mut shards_at_slot = vec![]; - for i in 0..committees_per_slot { - shards_at_slot.push((slot_start_shard + i) % spec.shard_count) - } - - Ok(shards_at_slot) - } - /// Returns the `slot`, `shard` and `committee_index` for which a validator must produce an /// attestation. /// @@ -969,14 +691,14 @@ impl BeaconState { /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. /// /// Spec v0.4.0 - pub fn attestation_slot_and_shard_for_validator( + pub fn get_attestation_duties( &self, validator_index: usize, - _spec: &ChainSpec, - ) -> Result, Error> { - let cache = self.cache(RelativeEpoch::Current)?; + spec: &ChainSpec, + ) -> Result<&Option, Error> { + let cache = self.cache(RelativeEpoch::Current, spec)?; - Ok(*cache + Ok(cache .attestation_duties .get(validator_index) .ok_or_else(|| Error::UnknownValidator)?) @@ -985,11 +707,11 @@ impl BeaconState { /// Process the slashings. 
/// /// Spec v0.4.0 - pub fn process_slashings(&mut self, spec: &ChainSpec) { + pub fn process_slashings(&mut self, spec: &ChainSpec) -> Result<(), Error> { let current_epoch = self.current_epoch(spec); let active_validator_indices = get_active_validator_indices(&self.validator_registry, current_epoch); - let total_balance = self.get_total_balance(&active_validator_indices[..], spec); + let total_balance = self.get_total_balance(&active_validator_indices[..], spec)?; for (index, validator) in self.validator_registry.iter().enumerate() { if validator.slashed @@ -1003,16 +725,19 @@ impl BeaconState { [(epoch_index + 1) % spec.latest_slashed_exit_length]; let total_at_end = self.latest_slashed_balances[epoch_index]; let total_penalities = total_at_end.saturating_sub(total_at_start); + + let effective_balance = self.get_effective_balance(index, spec)?; let penalty = std::cmp::max( - self.get_effective_balance(index, spec) - * std::cmp::min(total_penalities * 3, total_balance) + effective_balance * std::cmp::min(total_penalities * 3, total_balance) / total_balance, - self.get_effective_balance(index, spec) / spec.min_penalty_quotient, + effective_balance / spec.min_penalty_quotient, ); safe_sub_assign!(self.validator_balances[index], penalty); } } + + Ok(()) } /// Process the exit queue. @@ -1047,11 +772,11 @@ impl BeaconState { /// Update validator registry, activating/exiting validators if possible. /// /// Spec v0.4.0 - pub fn update_validator_registry(&mut self, spec: &ChainSpec) { + pub fn update_validator_registry(&mut self, spec: &ChainSpec) -> Result<(), Error> { let current_epoch = self.current_epoch(spec); let active_validator_indices = get_active_validator_indices(&self.validator_registry, current_epoch); - let total_balance = self.get_total_balance(&active_validator_indices[..], spec); + let total_balance = self.get_total_balance(&active_validator_indices[..], spec)?; let max_balance_churn = std::cmp::max( spec.max_deposit_amount, @@ -1065,7 +790,7 @@ impl BeaconState { if (validator.activation_epoch == spec.far_future_epoch) & (self.validator_balances[index] == spec.max_deposit_amount) { - balance_churn += self.get_effective_balance(index, spec); + balance_churn += self.get_effective_balance(index, spec)?; if balance_churn > max_balance_churn { break; } @@ -1078,7 +803,7 @@ impl BeaconState { let validator = &self.validator_registry[index]; if (validator.exit_epoch == spec.far_future_epoch) & (validator.initiated_exit) { - balance_churn += self.get_effective_balance(index, spec); + balance_churn += self.get_effective_balance(index, spec)?; if balance_churn > max_balance_churn { break; } @@ -1088,6 +813,8 @@ impl BeaconState { } self.validator_registry_update_epoch = current_epoch; + + Ok(()) } /// Iterate through the validator registry and eject active validators with balance below @@ -1115,12 +842,13 @@ impl BeaconState { epochs_since_finality: Epoch, base_reward_quotient: u64, spec: &ChainSpec, - ) -> u64 { - let effective_balance = self.get_effective_balance(validator_index, spec); - self.base_reward(validator_index, base_reward_quotient, spec) + ) -> Result { + let effective_balance = self.get_effective_balance(validator_index, spec)?; + let base_reward = self.base_reward(validator_index, base_reward_quotient, spec)?; + Ok(base_reward + effective_balance * epochs_since_finality.as_u64() / spec.inactivity_penalty_quotient - / 2 + / 2) } /// Returns the base reward for some validator. 
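The arithmetic kept by the hunk above can be checked in isolation: base_reward = effective_balance / base_reward_quotient / 5, and inactivity_penalty = base_reward + effective_balance * epochs_since_finality / inactivity_penalty_quotient / 2. The sketch below mirrors those two expressions with hard-coded quotients chosen purely for illustration; in the real code both values come from ChainSpec.

// Illustrative constants only; the real values are read from the chain spec.
const BASE_REWARD_QUOTIENT: u64 = 32;
const INACTIVITY_PENALTY_QUOTIENT: u64 = 16_777_216;

fn base_reward(effective_balance: u64) -> u64 {
    effective_balance / BASE_REWARD_QUOTIENT / 5
}

fn inactivity_penalty(effective_balance: u64, epochs_since_finality: u64) -> u64 {
    base_reward(effective_balance)
        + effective_balance * epochs_since_finality / INACTIVITY_PENALTY_QUOTIENT / 2
}

fn main() {
    let effective_balance = 32_000_000_000; // 32 ETH in Gwei
    println!("base reward: {}", base_reward(effective_balance));
    println!(
        "inactivity penalty after 64 epochs: {}",
        inactivity_penalty(effective_balance, 64)
    );
}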
@@ -1133,30 +861,27 @@ impl BeaconState { validator_index: usize, base_reward_quotient: u64, spec: &ChainSpec, - ) -> u64 { - self.get_effective_balance(validator_index, spec) / base_reward_quotient / 5 + ) -> Result { + Ok(self.get_effective_balance(validator_index, spec)? / base_reward_quotient / 5) } - /// Returns the union of all participants in the provided attestations + /// Return the combined effective balance of an array of validators. /// /// Spec v0.4.0 - pub fn get_attestation_participants_union( + pub fn get_total_balance( &self, - attestations: &[&PendingAttestation], + validator_indices: &[usize], spec: &ChainSpec, - ) -> Result, Error> { - let mut all_participants = attestations - .iter() - .try_fold::<_, _, Result, Error>>(vec![], |mut acc, a| { - acc.append(&mut self.get_attestation_participants( - &a.data, - &a.aggregation_bitfield, - spec, - )?); - Ok(acc) - })?; - all_participants.sort_unstable(); - all_participants.dedup(); - Ok(all_participants) + ) -> Result { + validator_indices.iter().try_fold(0_u64, |acc, i| { + self.get_effective_balance(*i, spec) + .and_then(|bal| Ok(bal + acc)) + }) + } +} + +impl From for Error { + fn from(e: RelativeEpochError) -> Error { + Error::RelativeEpochError(e) } } diff --git a/eth2/types/src/beacon_state/builder.rs b/eth2/types/src/beacon_state/builder.rs index 22ca3e622..780ec9b8b 100644 --- a/eth2/types/src/beacon_state/builder.rs +++ b/eth2/types/src/beacon_state/builder.rs @@ -43,12 +43,14 @@ impl BeaconStateBuilder { self.state.deposit_index = initial_validator_deposits.len() as u64; } - fn activate_genesis_validators(&mut self, spec: &ChainSpec) { + fn activate_genesis_validators(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> { for validator_index in 0..self.state.validator_registry.len() { - if self.state.get_effective_balance(validator_index, spec) >= spec.max_deposit_amount { + if self.state.get_effective_balance(validator_index, spec)? >= spec.max_deposit_amount { self.state.activate_validator(validator_index, true, spec); } } + + Ok(()) } /// Instantiate the validator registry from a YAML file. diff --git a/eth2/types/src/beacon_state/epoch_cache.rs b/eth2/types/src/beacon_state/epoch_cache.rs index ddcca0a9a..6312ea5a5 100644 --- a/eth2/types/src/beacon_state/epoch_cache.rs +++ b/eth2/types/src/beacon_state/epoch_cache.rs @@ -1,69 +1,283 @@ -use super::{AttestationDuty, BeaconState, CrosslinkCommittees, Error}; -use crate::{ChainSpec, Epoch}; +use super::{BeaconState, Error}; +use crate::*; +use honey_badger_split::SplitExt; use serde_derive::{Deserialize, Serialize}; +use swap_or_not_shuffle::shuffle_list; #[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] pub struct EpochCache { - /// True if this cache has been initialized. - pub initialized: bool, - /// The crosslink committees for an epoch. - pub committees: Vec, + /// `Some(epoch)` if the cache is initialized, where `epoch` is the cache it holds. + pub initialized_epoch: Option, + /// All crosslink committees for an epoch. + pub epoch_crosslink_committees: EpochCrosslinkCommittees, /// Maps validator index to a slot, shard and committee index for attestation. pub attestation_duties: Vec>, /// Maps a shard to an index of `self.committees`. - pub shard_committee_indices: Vec<(usize, usize)>, + pub shard_committee_indices: Vec<(Slot, usize)>, } impl EpochCache { /// Return a new, fully initialized cache. 
pub fn initialized( state: &BeaconState, - epoch: Epoch, + relative_epoch: RelativeEpoch, spec: &ChainSpec, ) -> Result { - let mut epoch_committees: Vec = - Vec::with_capacity(spec.slots_per_epoch as usize); + let epoch = relative_epoch.into_epoch(state.slot.epoch(spec.slots_per_epoch)); + let active_validator_indices = + get_active_validator_indices(&state.validator_registry, epoch); + + let builder = match relative_epoch { + RelativeEpoch::Previous => EpochCrosslinkCommitteesBuilder::for_previous_epoch( + state, + active_validator_indices, + spec, + ), + RelativeEpoch::Current => EpochCrosslinkCommitteesBuilder::for_current_epoch( + state, + active_validator_indices, + spec, + ), + RelativeEpoch::NextWithRegistryChange => { + EpochCrosslinkCommitteesBuilder::for_next_epoch( + state, + active_validator_indices, + true, + spec, + )? + } + RelativeEpoch::NextWithoutRegistryChange => { + EpochCrosslinkCommitteesBuilder::for_next_epoch( + state, + active_validator_indices, + false, + spec, + )? + } + }; + let epoch_crosslink_committees = builder.build(spec)?; + + // Loop through all the validators in the committees and create the following maps: + // + // 1. `attestation_duties`: maps `ValidatorIndex` to `AttestationDuty`. + // 2. `shard_committee_indices`: maps `Shard` into a `CrosslinkCommittee` in + // `EpochCrosslinkCommittees`. let mut attestation_duties = vec![None; state.validator_registry.len()]; + let mut shard_committee_indices = vec![(Slot::default(), 0); spec.shard_count as usize]; + for (i, slot_committees) in epoch_crosslink_committees + .crosslink_committees + .iter() + .enumerate() + { + let slot = epoch.start_slot(spec.slots_per_epoch) + i as u64; - let mut shard_committee_indices = vec![(0, 0); spec.shard_count as usize]; + for (j, crosslink_committee) in slot_committees.iter().enumerate() { + let shard = crosslink_committee.shard; - let mut shuffling = - state.get_shuffling_for_slot(epoch.start_slot(spec.slots_per_epoch), false, spec)?; + shard_committee_indices[shard as usize] = (slot, j); - for (epoch_committees_index, slot) in epoch.slot_iter(spec.slots_per_epoch).enumerate() { - let mut slot_committees: Vec<(Vec, u64)> = vec![]; - - let shards = state.get_shards_for_slot(slot, false, spec)?; - for shard in shards { - let committee = shuffling.remove(0); - slot_committees.push((committee, shard)); - } - - for (slot_committees_index, (committee, shard)) in slot_committees.iter().enumerate() { - if committee.is_empty() { - return Err(Error::InsufficientValidators); - } - - // Store the slot and committee index for this shard. - shard_committee_indices[*shard as usize] = - (epoch_committees_index, slot_committees_index); - - // For each validator, store their attestation duties. 
- for (committee_index, validator_index) in committee.iter().enumerate() { - attestation_duties[*validator_index] = - Some((slot, *shard, committee_index as u64)) + for (k, validator_index) in crosslink_committee.committee.iter().enumerate() { + let attestation_duty = AttestationDuty { + slot, + shard, + committee_index: k, + }; + attestation_duties[*validator_index] = Some(attestation_duty) } } - - epoch_committees.push(slot_committees) } Ok(EpochCache { - initialized: true, - committees: epoch_committees, + initialized_epoch: Some(epoch), + epoch_crosslink_committees, attestation_duties, shard_committee_indices, }) } + + pub fn get_crosslink_committees_at_slot( + &self, + slot: Slot, + spec: &ChainSpec, + ) -> Option<&Vec> { + self.epoch_crosslink_committees + .get_crosslink_committees_at_slot(slot, spec) + } + + pub fn get_crosslink_committee_for_shard( + &self, + shard: Shard, + spec: &ChainSpec, + ) -> Option<&CrosslinkCommittee> { + let (slot, committee) = self.shard_committee_indices.get(shard as usize)?; + let slot_committees = self.get_crosslink_committees_at_slot(*slot, spec)?; + slot_committees.get(*committee) + } +} + +pub fn get_active_validator_indices(validators: &[Validator], epoch: Epoch) -> Vec { + let mut active = Vec::with_capacity(validators.len()); + + for (index, validator) in validators.iter().enumerate() { + if validator.is_active_at(epoch) { + active.push(index) + } + } + + active.shrink_to_fit(); + + active +} + +#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] +pub struct EpochCrosslinkCommittees { + epoch: Epoch, + pub crosslink_committees: Vec>, +} + +impl EpochCrosslinkCommittees { + fn new(epoch: Epoch, spec: &ChainSpec) -> Self { + Self { + epoch, + crosslink_committees: vec![vec![]; spec.slots_per_epoch as usize], + } + } + + fn get_crosslink_committees_at_slot( + &self, + slot: Slot, + spec: &ChainSpec, + ) -> Option<&Vec> { + let epoch_start_slot = self.epoch.start_slot(spec.slots_per_epoch); + let epoch_end_slot = self.epoch.end_slot(spec.slots_per_epoch); + + if (epoch_start_slot < slot) && (slot <= epoch_end_slot) { + let index = slot - epoch_start_slot; + self.crosslink_committees.get(index.as_usize()) + } else { + None + } + } +} + +pub struct EpochCrosslinkCommitteesBuilder { + epoch: Epoch, + shuffling_start_shard: Shard, + shuffling_seed: Hash256, + active_validator_indices: Vec, + committees_per_epoch: u64, +} + +impl EpochCrosslinkCommitteesBuilder { + pub fn for_previous_epoch( + state: &BeaconState, + active_validator_indices: Vec, + spec: &ChainSpec, + ) -> Self { + Self { + epoch: state.previous_epoch(spec), + shuffling_start_shard: state.previous_shuffling_start_shard, + shuffling_seed: state.previous_shuffling_seed, + committees_per_epoch: spec.get_epoch_committee_count(active_validator_indices.len()), + active_validator_indices, + } + } + + pub fn for_current_epoch( + state: &BeaconState, + active_validator_indices: Vec, + spec: &ChainSpec, + ) -> Self { + Self { + epoch: state.current_epoch(spec), + shuffling_start_shard: state.current_shuffling_start_shard, + shuffling_seed: state.current_shuffling_seed, + committees_per_epoch: spec.get_epoch_committee_count(active_validator_indices.len()), + active_validator_indices, + } + } + + pub fn for_next_epoch( + state: &BeaconState, + active_validator_indices: Vec, + registry_change: bool, + spec: &ChainSpec, + ) -> Result { + let current_epoch = state.current_epoch(spec); + let next_epoch = state.next_epoch(spec); + let committees_per_epoch = 
spec.get_epoch_committee_count(active_validator_indices.len()); + + let epochs_since_last_registry_update = + current_epoch - state.validator_registry_update_epoch; + + let (seed, shuffling_start_shard) = if registry_change { + let next_seed = state.generate_seed(next_epoch, spec)?; + ( + next_seed, + (state.current_shuffling_start_shard + committees_per_epoch) % spec.shard_count, + ) + } else if (epochs_since_last_registry_update > 1) + & epochs_since_last_registry_update.is_power_of_two() + { + let next_seed = state.generate_seed(next_epoch, spec)?; + (next_seed, state.current_shuffling_start_shard) + } else { + ( + state.current_shuffling_seed, + state.current_shuffling_start_shard, + ) + }; + + Ok(Self { + epoch: state.next_epoch(spec), + shuffling_start_shard, + shuffling_seed: seed, + active_validator_indices, + committees_per_epoch, + }) + } + + pub fn build(self, spec: &ChainSpec) -> Result { + if self.active_validator_indices.is_empty() { + return Err(Error::InsufficientValidators); + } + + let shuffled_active_validator_indices = shuffle_list( + self.active_validator_indices, + spec.shuffle_round_count, + &self.shuffling_seed[..], + true, + ) + .ok_or_else(|| Error::UnableToShuffle)?; + + let mut committees: Vec> = shuffled_active_validator_indices + .honey_badger_split(self.committees_per_epoch as usize) + .map(|slice: &[usize]| slice.to_vec()) + .collect(); + + let mut epoch_crosslink_committees = EpochCrosslinkCommittees::new(self.epoch, spec); + let mut shard = self.shuffling_start_shard; + + let committees_per_slot = (self.committees_per_epoch / spec.slots_per_epoch) as usize; + + for i in 0..spec.slots_per_epoch as usize { + for j in (0..committees.len()) + .into_iter() + .skip(i * committees_per_slot) + .take(committees_per_slot) + { + let crosslink_committee = CrosslinkCommittee { + shard, + committee: committees.remove(j), + }; + epoch_crosslink_committees.crosslink_committees[i].push(crosslink_committee); + + shard += 1; + shard %= spec.shard_count; + } + } + + Ok(epoch_crosslink_committees) + } } diff --git a/eth2/types/src/beacon_state/tests.rs b/eth2/types/src/beacon_state/tests.rs index 1e1a555fd..6c10ebe86 100644 --- a/eth2/types/src/beacon_state/tests.rs +++ b/eth2/types/src/beacon_state/tests.rs @@ -1,53 +1,5 @@ #![cfg(test)] use super::*; -use crate::test_utils::TestingBeaconStateBuilder; -use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; -use crate::{BeaconState, ChainSpec}; - -/// Tests that `get_attestation_participants` is consistent with the result of -/// get_crosslink_committees_at_slot` with a full bitfield. 
-#[test] -pub fn get_attestation_participants_consistency() { - let mut rng = XorShiftRng::from_seed([42; 16]); - - let spec = ChainSpec::few_validators(); - let builder = TestingBeaconStateBuilder::from_deterministic_keypairs(8, &spec); - let (mut state, _keypairs) = builder.build(); - - state - .build_epoch_cache(RelativeEpoch::Previous, &spec) - .unwrap(); - state - .build_epoch_cache(RelativeEpoch::Current, &spec) - .unwrap(); - state.build_epoch_cache(RelativeEpoch::Next, &spec).unwrap(); - - for slot in state - .slot - .epoch(spec.slots_per_epoch) - .slot_iter(spec.slots_per_epoch) - { - let committees = state.get_crosslink_committees_at_slot(slot, &spec).unwrap(); - - for (committee, shard) in committees { - let mut attestation_data = AttestationData::random_for_test(&mut rng); - attestation_data.slot = slot; - attestation_data.shard = *shard; - - let mut bitfield = Bitfield::new(); - for (i, _) in committee.iter().enumerate() { - bitfield.set(i, true); - } - - assert_eq!( - state - .get_attestation_participants(&attestation_data, &bitfield, &spec) - .unwrap(), - *committee - ); - } - } -} ssz_tests!(BeaconState); diff --git a/eth2/types/src/crosslink_committee.rs b/eth2/types/src/crosslink_committee.rs new file mode 100644 index 000000000..06a6562fc --- /dev/null +++ b/eth2/types/src/crosslink_committee.rs @@ -0,0 +1,9 @@ +use crate::*; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode, TreeHash}; + +#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize, Decode, Encode, TreeHash)] +pub struct CrosslinkCommittee { + pub shard: Shard, + pub committee: Vec, +} diff --git a/eth2/types/src/epoch_cache.rs b/eth2/types/src/epoch_cache.rs new file mode 100644 index 000000000..e69de29bb diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index c38fa8031..05f8254d5 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -6,6 +6,7 @@ pub mod test_utils; pub mod attestation; pub mod attestation_data; pub mod attestation_data_and_custody_bit; +pub mod attestation_duty; pub mod attester_slashing; pub mod beacon_block; pub mod beacon_block_body; @@ -13,6 +14,7 @@ pub mod beacon_block_header; pub mod beacon_state; pub mod chain_spec; pub mod crosslink; +pub mod crosslink_committee; pub mod deposit; pub mod deposit_data; pub mod deposit_input; @@ -28,6 +30,7 @@ pub mod transfer; pub mod voluntary_exit; #[macro_use] pub mod slot_epoch_macros; +pub mod relative_epoch; pub mod slot_epoch; pub mod slot_height; pub mod validator; @@ -39,13 +42,15 @@ use std::collections::HashMap; pub use crate::attestation::Attestation; pub use crate::attestation_data::AttestationData; pub use crate::attestation_data_and_custody_bit::AttestationDataAndCustodyBit; +pub use crate::attestation_duty::AttestationDuty; pub use crate::attester_slashing::AttesterSlashing; pub use crate::beacon_block::BeaconBlock; pub use crate::beacon_block_body::BeaconBlockBody; pub use crate::beacon_block_header::BeaconBlockHeader; -pub use crate::beacon_state::{BeaconState, Error as BeaconStateError, RelativeEpoch}; +pub use crate::beacon_state::{BeaconState, Error as BeaconStateError}; pub use crate::chain_spec::{ChainSpec, Domain}; pub use crate::crosslink::Crosslink; +pub use crate::crosslink_committee::CrosslinkCommittee; pub use crate::deposit::Deposit; pub use crate::deposit_data::DepositData; pub use crate::deposit_input::DepositInput; @@ -56,6 +61,7 @@ pub use crate::free_attestation::FreeAttestation; pub use crate::historical_batch::HistoricalBatch; pub use 
crate::pending_attestation::PendingAttestation; pub use crate::proposer_slashing::ProposerSlashing; +pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; pub use crate::slashable_attestation::SlashableAttestation; pub use crate::slot_epoch::{Epoch, Slot}; pub use crate::slot_height::SlotHeight; @@ -63,6 +69,10 @@ pub use crate::transfer::Transfer; pub use crate::validator::Validator; pub use crate::voluntary_exit::VoluntaryExit; +pub type Shard = u64; +pub type Committee = Vec; +pub type CrosslinkCommittees = Vec<(Committee, u64)>; + pub type Hash256 = H256; pub type Address = H160; pub type EthBalance = U256; diff --git a/eth2/types/src/relative_epoch.rs b/eth2/types/src/relative_epoch.rs new file mode 100644 index 000000000..943936605 --- /dev/null +++ b/eth2/types/src/relative_epoch.rs @@ -0,0 +1,76 @@ +use crate::*; + +#[derive(Debug, PartialEq, Clone, Copy)] +pub enum Error { + EpochTooLow { base: Epoch, other: Epoch }, + EpochTooHigh { base: Epoch, other: Epoch }, + AmbiguiousNextEpoch, +} + +/// Defines the epochs relative to some epoch. Most useful when referring to the committees prior +/// to and following some epoch. +/// +/// Spec v0.5.0 +#[derive(Debug, PartialEq, Clone, Copy)] +pub enum RelativeEpoch { + /// The prior epoch. + Previous, + /// The current epoch. + Current, + /// The next epoch if there _is_ a validator registry update. + /// + /// If the validator registry is updated during an epoch transition, a new shuffling seed is + /// generated, this changes the attestation and proposal roles. + NextWithRegistryChange, + /// The next epoch if there _is not_ a validator registry update. + /// + /// If the validator registry _is not_ updated during an epoch transition, the shuffling stays + /// the same. + NextWithoutRegistryChange, +} + +impl RelativeEpoch { + /// Returns the `epoch` that `self` refers to, with respect to the `base` epoch. + /// + /// Spec v0.5.0 + pub fn into_epoch(&self, base: Epoch) -> Epoch { + match self { + RelativeEpoch::Previous => base - 1, + RelativeEpoch::Current => base, + RelativeEpoch::NextWithoutRegistryChange => base + 1, + RelativeEpoch::NextWithRegistryChange => base + 1, + } + } + + /// Converts the `other` epoch into a `RelativeEpoch`, with respect to `base` + /// + /// ## Errors + /// Returns an error when: + /// - `EpochTooLow` when `other` is more than 1 prior to `base`. + /// - `EpochTooHigh` when `other` is more than 1 after `base`. + /// - `AmbiguiousNextEpoch` whenever `other` is one after `base`, because it's unknowable if + /// there will be a registry change. + /// + /// Spec v0.5.0 + pub fn from_epoch(base: Epoch, other: Epoch) -> Result { + if other == base - 1 { + Ok(RelativeEpoch::Previous) + } else if other == base { + Ok(RelativeEpoch::Current) + } else if other == base + 1 { + Err(Error::AmbiguiousNextEpoch) + } else if other < base { + Err(Error::EpochTooLow { base, other }) + } else { + Err(Error::EpochTooHigh { base, other }) + } + } + + /// Convenience function for `Self::from_epoch` where both slots are converted into epochs. 
+ pub fn from_slot(base: Slot, other: Slot, spec: &ChainSpec) -> Result { + Self::from_epoch( + base.epoch(spec.slots_per_epoch), + other.epoch(spec.slots_per_epoch), + ) + } +} diff --git a/eth2/types/src/test_utils/testing_beacon_block_builder.rs b/eth2/types/src/test_utils/testing_beacon_block_builder.rs index 7fb3d8e09..402bd79d6 100644 --- a/eth2/types/src/test_utils/testing_beacon_block_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_block_builder.rs @@ -109,12 +109,20 @@ impl TestingBeaconBlockBuilder { break; } - for (committee, shard) in state.get_crosslink_committees_at_slot(slot, spec)? { + let relative_epoch = RelativeEpoch::from_slot(state.slot, slot, spec).unwrap(); + for crosslink_committee in + state.get_crosslink_committees_at_slot(slot, relative_epoch, spec)? + { if attestations_added >= num_attestations { break; } - committees.push((slot, committee.clone(), committee.clone(), *shard)); + committees.push(( + slot, + crosslink_committee.committee.clone(), + crosslink_committee.committee.clone(), + crosslink_committee.shard, + )); attestations_added += 1; } diff --git a/eth2/types/src/test_utils/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/testing_beacon_state_builder.rs index 8ef4f76ce..9e613f0e9 100644 --- a/eth2/types/src/test_utils/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_state_builder.rs @@ -159,7 +159,8 @@ impl TestingBeaconStateBuilder { state.build_epoch_cache(RelativeEpoch::Previous, &spec)?; state.build_epoch_cache(RelativeEpoch::Current, &spec)?; - state.build_epoch_cache(RelativeEpoch::Next, &spec)?; + state.build_epoch_cache(RelativeEpoch::NextWithRegistryChange, &spec)?; + state.build_epoch_cache(RelativeEpoch::NextWithoutRegistryChange, &spec)?; state.update_pubkey_cache()?; @@ -222,15 +223,21 @@ impl TestingBeaconStateBuilder { for slot in first_slot..last_slot + 1 { let slot = Slot::from(slot); + let relative_epoch = RelativeEpoch::from_slot(state.slot, slot, spec).unwrap(); let committees = state - .get_crosslink_committees_at_slot(slot, spec) + .get_crosslink_committees_at_slot(slot, relative_epoch, spec) .unwrap() .clone(); - for (committee, shard) in committees { - let mut builder = TestingPendingAttestationBuilder::new(state, shard, slot, spec); + for crosslink_committee in committees { + let mut builder = TestingPendingAttestationBuilder::new( + state, + crosslink_committee.shard, + slot, + spec, + ); // The entire committee should have signed the pending attestation. 
- let signers = vec![true; committee.len()]; + let signers = vec![true; crosslink_committee.committee.len()]; builder.add_committee_participation(signers); let attestation = builder.build(); From 6bd2055a0ac2f15eb47ce837fa56e01f9e5e161a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 17 Mar 2019 12:25:37 +1100 Subject: [PATCH 089/154] Update block processing to v0.5.0 --- .../src/per_block_processing.rs | 131 +++++------- .../src/per_block_processing/errors.rs | 86 ++++---- .../validate_attestation.rs | 198 ++++++++++-------- .../verify_attester_slashing.rs | 16 +- .../per_block_processing/verify_deposit.rs | 33 ++- .../src/per_block_processing/verify_exit.rs | 30 ++- .../verify_proposer_slashing.rs | 50 ++--- .../per_block_processing/verify_transfer.rs | 74 ++++--- .../src/per_slot_processing.rs | 22 +- eth2/types/src/beacon_state.rs | 85 ++++++-- eth2/types/src/chain_spec.rs | 2 +- eth2/types/src/proposer_slashing.rs | 4 +- .../testing_proposer_slashing_builder.rs | 18 +- 13 files changed, 413 insertions(+), 336 deletions(-) diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index 13a47836b..377f92e8b 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -1,17 +1,14 @@ use self::verify_proposer_slashing::verify_proposer_slashing; use errors::{BlockInvalid as Invalid, BlockProcessingError as Error, IntoWithIndex}; -use hashing::hash; use rayon::prelude::*; -use ssz::{ssz_encode, SignedRoot, TreeHash}; +use ssz::{SignedRoot, TreeHash}; use types::*; pub use self::verify_attester_slashing::{ gather_attester_slashing_indices, verify_attester_slashing, }; pub use validate_attestation::{validate_attestation, validate_attestation_without_signature}; -pub use verify_deposit::{ - build_public_key_hashmap, get_existing_validator_index, verify_deposit, verify_deposit_index, -}; +pub use verify_deposit::{get_existing_validator_index, verify_deposit, verify_deposit_index}; pub use verify_exit::verify_exit; pub use verify_slashable_attestation::verify_slashable_attestation; pub use verify_transfer::{execute_transfer, verify_transfer}; @@ -72,8 +69,7 @@ fn per_block_processing_signature_optional( should_verify_block_signature: bool, spec: &ChainSpec, ) -> Result<(), Error> { - // Verify that `block.slot == state.slot`. - verify!(block.slot == state.slot, Invalid::StateSlotMismatch); + process_block_header(state, block, spec)?; // Ensure the current and previous epoch cache is built. state.build_epoch_cache(RelativeEpoch::Current, spec)?; @@ -83,7 +79,7 @@ fn per_block_processing_signature_optional( verify_block_signature(&state, &block, &spec)?; } process_randao(&mut state, &block, &spec)?; - process_eth1_data(&mut state, &block.eth1_data)?; + process_eth1_data(&mut state, &block.body.eth1_data)?; process_proposer_slashings(&mut state, &block.body.proposer_slashings, spec)?; process_attester_slashings(&mut state, &block.body.attester_slashings, spec)?; process_attestations(&mut state, &block.body.attestations, spec)?; @@ -94,33 +90,47 @@ fn per_block_processing_signature_optional( Ok(()) } +/// Processes the block header. 
+/// +/// Spec v0.5.0 +pub fn process_block_header( + state: &BeaconState, + block: &BeaconBlock, + spec: &ChainSpec, +) -> Result<(), Error> { + verify!(block.slot == state.slot, Invalid::StateSlotMismatch); + + verify!( + block.previous_block_root.as_bytes() == &state.latest_block_header.hash_tree_root()[..], + Invalid::ParentBlockRootMismatch + ); + + state.latest_block_header = block.into_temporary_header(spec); + + Ok(()) +} + /// Verifies the signature of a block. /// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn verify_block_signature( state: &BeaconState, block: &BeaconBlock, spec: &ChainSpec, ) -> Result<(), Error> { - let block_proposer = - &state.validator_registry[state.get_beacon_proposer_index(block.slot, spec)?]; + let block_proposer = &state.validator_registry + [state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?]; - let proposal = Proposal { - slot: block.slot, - shard: spec.beacon_chain_shard_number, - block_root: Hash256::from_slice(&block.signed_root()[..]), - signature: block.signature.clone(), - }; let domain = spec.get_domain( block.slot.epoch(spec.slots_per_epoch), - Domain::Proposal, + Domain::BeaconBlock, &state.fork, ); verify!( - proposal + block .signature - .verify(&proposal.signed_root()[..], domain, &block_proposer.pubkey), + .verify(&block.signed_root()[..], domain, &block_proposer.pubkey), Invalid::BadSignature ); @@ -130,21 +140,18 @@ pub fn verify_block_signature( /// Verifies the `randao_reveal` against the block's proposer pubkey and updates /// `state.latest_randao_mixes`. /// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn process_randao( state: &mut BeaconState, block: &BeaconBlock, spec: &ChainSpec, ) -> Result<(), Error> { - // Let `proposer = state.validator_registry[get_beacon_proposer_index(state, state.slot)]`. - let block_proposer = - &state.validator_registry[state.get_beacon_proposer_index(block.slot, spec)?]; + let block_proposer = &state.validator_registry + [state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?]; - // Verify that `bls_verify(pubkey=proposer.pubkey, - // message_hash=hash_tree_root(get_current_epoch(state)), signature=block.randao_reveal, - // domain=get_domain(state.fork, get_current_epoch(state), DOMAIN_RANDAO))`. + // Verify the RANDAO is a valid signature of the proposer. verify!( - block.randao_reveal.verify( + block.body.randao_reveal.verify( &state.current_epoch(spec).hash_tree_root()[..], spec.get_domain( block.slot.epoch(spec.slots_per_epoch), @@ -156,21 +163,23 @@ pub fn process_randao( Invalid::BadRandaoSignature ); - // Update the state's RANDAO mix with the one revealed in the block. - update_randao(state, &block.randao_reveal, spec)?; + // Update the current epoch RANDAO mix. + state.update_randao_mix(state.current_epoch(spec), &block.body.randao_reveal, spec)?; Ok(()) } /// Update the `state.eth1_data_votes` based upon the `eth1_data` provided. /// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn process_eth1_data(state: &mut BeaconState, eth1_data: &Eth1Data) -> Result<(), Error> { - // Either increment the eth1_data vote count, or add a new eth1_data. + // Attempt to find a `Eth1DataVote` with matching `Eth1Data`. let matching_eth1_vote_index = state .eth1_data_votes .iter() .position(|vote| vote.eth1_data == *eth1_data); + + // If a vote exists, increment it's `vote_count`. Otherwise, create a new `Eth1DataVote`. 
if let Some(index) = matching_eth1_vote_index { state.eth1_data_votes[index].vote_count += 1; } else { @@ -183,46 +192,12 @@ pub fn process_eth1_data(state: &mut BeaconState, eth1_data: &Eth1Data) -> Resul Ok(()) } -/// Updates the present randao mix. -/// -/// Set `state.latest_randao_mixes[get_current_epoch(state) % LATEST_RANDAO_MIXES_LENGTH] = -/// xor(get_randao_mix(state, get_current_epoch(state)), hash(block.randao_reveal))`. -/// -/// Spec v0.4.0 -pub fn update_randao( - state: &mut BeaconState, - reveal: &Signature, - spec: &ChainSpec, -) -> Result<(), BeaconStateError> { - let hashed_reveal = { - let encoded_signature = ssz_encode(reveal); - Hash256::from_slice(&hash(&encoded_signature[..])[..]) - }; - - let current_epoch = state.slot.epoch(spec.slots_per_epoch); - - let current_mix = state - .get_randao_mix(current_epoch, spec) - .ok_or_else(|| BeaconStateError::InsufficientRandaoMixes)?; - - let new_mix = *current_mix ^ hashed_reveal; - - let index = current_epoch.as_usize() % spec.latest_randao_mixes_length; - - if index < state.latest_randao_mixes.len() { - state.latest_randao_mixes[index] = new_mix; - Ok(()) - } else { - Err(BeaconStateError::InsufficientRandaoMixes) - } -} - /// Validates each `ProposerSlashing` and updates the state, short-circuiting on an invalid object. /// /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. /// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn process_proposer_slashings( state: &mut BeaconState, proposer_slashings: &[ProposerSlashing], @@ -242,6 +217,7 @@ pub fn process_proposer_slashings( .map_err(|e| e.into_with_index(i)) })?; + // Update the state. for proposer_slashing in proposer_slashings { state.slash_validator(proposer_slashing.proposer_index as usize, spec)?; } @@ -254,7 +230,7 @@ pub fn process_proposer_slashings( /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. /// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn process_attester_slashings( state: &mut BeaconState, attester_slashings: &[AttesterSlashing], @@ -296,7 +272,7 @@ pub fn process_attester_slashings( ) .map_err(|e| e.into_with_index(i))?; - let slashable_indices = gather_attester_slashing_indices(&state, &attester_slashing) + let slashable_indices = gather_attester_slashing_indices(&state, &attester_slashing, spec) .map_err(|e| e.into_with_index(i))?; for i in slashable_indices { @@ -312,7 +288,7 @@ pub fn process_attester_slashings( /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. 
/// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn process_attestations( state: &mut BeaconState, attestations: &[Attestation], @@ -342,7 +318,14 @@ pub fn process_attestations( custody_bitfield: attestation.custody_bitfield.clone(), inclusion_slot: state.slot, }; - state.latest_attestations.push(pending_attestation); + + let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch); + + if attestation_epoch == state.current_epoch(spec) { + state.current_epoch_attestations.push(pending_attestation) + } else if attestation_epoch == state.previous_epoch(spec) { + state.previous_epoch_attestations.push(pending_attestation) + } } Ok(()) @@ -353,7 +336,7 @@ pub fn process_attestations( /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. /// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn process_deposits( state: &mut BeaconState, deposits: &[Deposit], @@ -423,7 +406,7 @@ pub fn process_deposits( /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. /// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn process_exits( state: &mut BeaconState, voluntary_exits: &[VoluntaryExit], @@ -455,7 +438,7 @@ pub fn process_exits( /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. /// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn process_transfers( state: &mut BeaconState, transfers: &[Transfer], diff --git a/eth2/state_processing/src/per_block_processing/errors.rs b/eth2/state_processing/src/per_block_processing/errors.rs index 8366a6584..c0fe252de 100644 --- a/eth2/state_processing/src/per_block_processing/errors.rs +++ b/eth2/state_processing/src/per_block_processing/errors.rs @@ -67,6 +67,7 @@ impl_from_beacon_state_error!(BlockProcessingError); #[derive(Debug, PartialEq)] pub enum BlockInvalid { StateSlotMismatch, + ParentBlockRootMismatch, BadSignature, BadRandaoSignature, MaxAttestationsExceeded, @@ -112,45 +113,53 @@ pub enum AttestationValidationError { #[derive(Debug, PartialEq)] pub enum AttestationInvalid { /// Attestation references a pre-genesis slot. - /// - /// (genesis_slot, attestation_slot) - PreGenesis(Slot, Slot), + PreGenesis { genesis: Slot, attestation: Slot }, /// Attestation included before the inclusion delay. - /// - /// (state_slot, inclusion_delay, attestation_slot) - IncludedTooEarly(Slot, u64, Slot), + IncludedTooEarly { + state: Slot, + delay: u64, + attestation: Slot, + }, /// Attestation slot is too far in the past to be included in a block. - /// - /// (state_slot, attestation_slot) - IncludedTooLate(Slot, Slot), + IncludedTooLate { state: Slot, attestation: Slot }, /// Attestation justified epoch does not match the states current or previous justified epoch. /// - /// (attestation_justified_epoch, state_epoch, used_previous_epoch) - WrongJustifiedEpoch(Epoch, Epoch, bool), + /// `is_current` is `true` if the attestation was compared to the + /// `state.current_justified_epoch`, `false` if compared to `state.previous_justified_epoch`. + WrongJustifiedEpoch { + state: Epoch, + attestation: Epoch, + is_current: bool, + }, /// Attestation justified epoch root does not match root known to the state. 
/// - /// (state_justified_root, attestation_justified_root) - WrongJustifiedRoot(Hash256, Hash256), + /// `is_current` is `true` if the attestation was compared to the + /// `state.current_justified_epoch`, `false` if compared to `state.previous_justified_epoch`. + WrongJustifiedRoot { + state: Hash256, + attestation: Hash256, + is_current: bool, + }, /// Attestation crosslink root does not match the state crosslink root for the attestations /// slot. - BadLatestCrosslinkRoot, + BadPreviousCrosslink, /// The custody bitfield has some bits set `true`. This is not allowed in phase 0. CustodyBitfieldHasSetBits, /// There are no set bits on the attestation -- an attestation must be signed by at least one /// validator. AggregationBitfieldIsEmpty, /// The custody bitfield length is not the smallest possible size to represent the committee. - /// - /// (committee_len, bitfield_len) - BadCustodyBitfieldLength(usize, usize), + BadCustodyBitfieldLength { + committee_len: usize, + bitfield_len: usize, + }, /// The aggregation bitfield length is not the smallest possible size to represent the committee. - /// - /// (committee_len, bitfield_len) - BadAggregationBitfieldLength(usize, usize), - /// There was no known committee for the given shard in the given slot. - /// - /// (attestation_data_shard, attestation_data_slot) - NoCommitteeForShard(u64, Slot), + BadAggregationBitfieldLength { + committee_len: usize, + bitfield_len: usize, + }, + /// There was no known committee in this `epoch` for the given shard and slot. + NoCommitteeForShard { shard: u64, slot: Slot }, /// The validator index was unknown. UnknownValidator(u64), /// The attestation signature verification failed. @@ -188,6 +197,8 @@ pub enum AttesterSlashingInvalid { SlashableAttestation2Invalid(SlashableAttestationInvalid), /// The validator index is unknown. One cannot slash one who does not exist. UnknownValidator(u64), + /// The specified validator has already been withdrawn. + ValidatorAlreadyWithdrawn(u64), /// There were no indices able to be slashed. NoSlashableIndices, } @@ -264,16 +275,12 @@ pub enum ProposerSlashingInvalid { /// /// (proposal_1_slot, proposal_2_slot) ProposalSlotMismatch(Slot, Slot), - /// The two proposal have different shards. - /// - /// (proposal_1_shard, proposal_2_shard) - ProposalShardMismatch(u64, u64), - /// The two proposal have different block roots. - /// - /// (proposal_1_root, proposal_2_root) - ProposalBlockRootMismatch(Hash256, Hash256), + /// The proposals are identical and therefore not slashable. + ProposalsIdentical, /// The specified proposer has already been slashed. ProposerAlreadySlashed, + /// The specified proposer has already been withdrawn. + ProposerAlreadyWithdrawn(u64), /// The first proposal signature was invalid. BadProposal1Signature, /// The second proposal signature was invalid. @@ -302,9 +309,7 @@ pub enum DepositValidationError { #[derive(Debug, PartialEq)] pub enum DepositInvalid { /// The deposit index does not match the state index. - /// - /// (state_index, deposit_index) - BadIndex(u64, u64), + BadIndex { state: u64, deposit: u64 }, /// The proof-of-possession does not match the given pubkey. BadProofOfPossession, /// The withdrawal credentials for the depositing validator did not match the withdrawal @@ -334,11 +339,14 @@ pub enum ExitValidationError { pub enum ExitInvalid { /// The specified validator is not in the state's validator registry. ValidatorUnknown(u64), - AlreadyExited, + /// The specified validator has a non-maximum exit epoch. 
+    AlreadyExited(u64),
+    /// The specified validator has already initiated exit.
+    AlreadyInitiatedExited(u64),
     /// The exit is for a future epoch.
-    ///
-    /// (state_epoch, exit_epoch)
-    FutureEpoch(Epoch, Epoch),
+    FutureEpoch { state: Epoch, exit: Epoch },
+    /// The validator has not been active for long enough.
+    TooYoungToLeave { lifespan: Epoch, expected: u64 },
     /// The exit signature was not signed by the validator.
     BadSignature,
 }
diff --git a/eth2/state_processing/src/per_block_processing/validate_attestation.rs b/eth2/state_processing/src/per_block_processing/validate_attestation.rs
index b15360850..9d1321407 100644
--- a/eth2/state_processing/src/per_block_processing/validate_attestation.rs
+++ b/eth2/state_processing/src/per_block_processing/validate_attestation.rs
@@ -8,7 +8,7 @@ use types::*;
 ///
 /// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity.
 ///
-/// Spec v0.4.0
+/// Spec v0.5.0
 pub fn validate_attestation(
     state: &BeaconState,
     attestation: &Attestation,
@@ -22,7 +22,7 @@ pub fn validate_attestation(
 ///
 /// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity.
 ///
-/// Spec v0.4.0
+/// Spec v0.5.0
 pub fn validate_attestation_without_signature(
     state: &BeaconState,
     attestation: &Attestation,
@@ -35,74 +35,83 @@ pub fn validate_attestation_without_signature(
 /// given state, optionally validating the aggregate signature.
 ///
 ///
-/// Spec v0.4.0
+/// Spec v0.5.0
 fn validate_attestation_signature_optional(
     state: &BeaconState,
     attestation: &Attestation,
     spec: &ChainSpec,
     verify_signature: bool,
 ) -> Result<(), Error> {
-    // Verify that `attestation.data.slot >= GENESIS_SLOT`.
+    let state_epoch = state.slot.epoch(spec.slots_per_epoch);
+    let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch);
+
+    // Can't submit pre-historic attestations.
     verify!(
         attestation.data.slot >= spec.genesis_slot,
-        Invalid::PreGenesis(spec.genesis_slot, attestation.data.slot)
+        Invalid::PreGenesis {
+            genesis: spec.genesis_slot,
+            attestation: attestation.data.slot
+        }
     );
 
-    // Verify that `attestation.data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot`.
+    // Can't submit attestations too far in history.
+    verify!(
+        state.slot <= attestation.data.slot + spec.slots_per_epoch,
+        Invalid::IncludedTooLate {
+            state: state.slot,
+            attestation: attestation.data.slot
+        }
+    );
+
+    // Can't submit attestation too quickly.
     verify!(
         attestation.data.slot + spec.min_attestation_inclusion_delay <= state.slot,
-        Invalid::IncludedTooEarly(
-            state.slot,
-            spec.min_attestation_inclusion_delay,
-            attestation.data.slot
-        )
+        Invalid::IncludedTooEarly {
+            state: state.slot,
+            delay: spec.min_attestation_inclusion_delay,
+            attestation: attestation.data.slot
+        }
     );
 
-    // Verify that `state.slot < attestation.data.slot + SLOTS_PER_EPOCH`.
-    verify!(
-        state.slot < attestation.data.slot + spec.slots_per_epoch,
-        Invalid::IncludedTooLate(state.slot, attestation.data.slot)
-    );
-
-    // Verify that `attestation.data.justified_epoch` is equal to `state.justified_epoch` if
-    // `slot_to_epoch(attestation.data.slot + 1) >= get_current_epoch(state) else
-    // state.previous_justified_epoch`.
-    if (attestation.data.slot + 1).epoch(spec.slots_per_epoch) >= state.current_epoch(spec) {
+    // Verify that the justified epoch and root are correct.
+    if attestation_epoch >= state_epoch {
         verify!(
-            attestation.data.justified_epoch == state.justified_epoch,
-            Invalid::WrongJustifiedEpoch(
-                attestation.data.justified_epoch,
-                state.justified_epoch,
-                false
-            )
+            attestation.data.source_epoch == state.current_justified_epoch,
+            Invalid::WrongJustifiedEpoch {
+                state: state.current_justified_epoch,
+                attestation: attestation.data.source_epoch,
+                is_current: true,
+            }
+        );
+        verify!(
+            attestation.data.source_root == state.current_justified_root,
+            Invalid::WrongJustifiedRoot {
+                state: state.current_justified_root,
+                attestation: attestation.data.source_root,
+                is_current: true,
+            }
         );
     } else {
         verify!(
-            attestation.data.justified_epoch == state.previous_justified_epoch,
-            Invalid::WrongJustifiedEpoch(
-                attestation.data.justified_epoch,
-                state.previous_justified_epoch,
-                true
-            )
+            attestation.data.source_epoch == state.previous_justified_epoch,
+            Invalid::WrongJustifiedEpoch {
+                state: state.previous_justified_epoch,
+                attestation: attestation.data.source_epoch,
+                is_current: false,
+            }
+        );
+        verify!(
+            attestation.data.source_root == state.previous_justified_root,
+            Invalid::WrongJustifiedRoot {
+                state: state.previous_justified_root,
+                attestation: attestation.data.source_root,
+                is_current: false,
+            }
         );
     }
 
-    // Verify that `attestation.data.justified_block_root` is equal to `get_block_root(state,
-    // get_epoch_start_slot(attestation.data.justified_epoch))`.
-    let justified_block_root = *state
-        .get_block_root(
-            attestation
-                .data
-                .justified_epoch
-                .start_slot(spec.slots_per_epoch),
-            &spec,
-        )
-        .ok_or(BeaconStateError::InsufficientBlockRoots)?;
-    verify!(
-        attestation.data.justified_block_root == justified_block_root,
-        Invalid::WrongJustifiedRoot(justified_block_root, attestation.data.justified_block_root)
-    );
-
+    // Check that the crosslink data is valid.
+    //
     // Verify that either:
     //
     // (i)`state.latest_crosslinks[attestation.data.shard] == attestation.data.latest_crosslink`,
@@ -115,46 +124,59 @@ fn validate_attestation_signature_optional(
         epoch: attestation.data.slot.epoch(spec.slots_per_epoch),
     };
     verify!(
-        (attestation.data.latest_crosslink
+        (attestation.data.previous_crosslink
            == state.latest_crosslinks[attestation.data.shard as usize])
            | (state.latest_crosslinks[attestation.data.shard as usize] == potential_crosslink),
-        Invalid::BadLatestCrosslinkRoot
+        Invalid::BadPreviousCrosslink
     );
 
-    // Get the committee for this attestation
-    let (committee, _shard) = state
-        .get_crosslink_committees_at_slot(attestation.data.slot, spec)?
-        .iter()
-        .find(|(_committee, shard)| *shard == attestation.data.shard)
-        .ok_or_else(|| {
-            Error::Invalid(Invalid::NoCommitteeForShard(
-                attestation.data.shard,
-                attestation.data.slot,
-            ))
-        })?;
-
-    // Custody bitfield is all zeros (phase 0 requirement).
-    verify!(
-        attestation.custody_bitfield.num_set_bits() == 0,
-        Invalid::CustodyBitfieldHasSetBits
-    );
-    // Custody bitfield length is correct.
-    verify!(
-        verify_bitfield_length(&attestation.custody_bitfield, committee.len()),
-        Invalid::BadCustodyBitfieldLength(committee.len(), attestation.custody_bitfield.len())
-    );
-    // Aggregation bitfield isn't empty.
+    // Attestation must be non-empty!
     verify!(
         attestation.aggregation_bitfield.num_set_bits() != 0,
         Invalid::AggregationBitfieldIsEmpty
     );
+    // Custody bitfield must be empty (to be removed in phase 1)
+    verify!(
+        attestation.custody_bitfield.num_set_bits() == 0,
+        Invalid::CustodyBitfieldHasSetBits
+    );
+
+    // Get the committee for the specific shard that this attestation is for.
+    let crosslink_committee = state
+        .get_crosslink_committees_at_slot(
+            attestation.data.slot,
+            RelativeEpoch::NextWithoutRegistryChange,
+            spec,
+        )?
+        .iter()
+        .find(|c| c.shard == attestation.data.shard)
+        .ok_or_else(|| {
+            Error::Invalid(Invalid::NoCommitteeForShard {
+                shard: attestation.data.shard,
+                slot: attestation.data.slot,
+            })
+        })?;
+    let committee = &crosslink_committee.committee;
+
+    // Custody bitfield length is correct.
+    //
+    // This is not directly in the spec, but it is inferred.
+    verify!(
+        verify_bitfield_length(&attestation.custody_bitfield, committee.len()),
+        Invalid::BadCustodyBitfieldLength {
+            committee_len: committee.len(),
+            bitfield_len: attestation.custody_bitfield.len()
+        }
+    );
     // Aggregation bitfield length is correct.
+    //
+    // This is not directly in the spec, but it is inferred.
     verify!(
         verify_bitfield_length(&attestation.aggregation_bitfield, committee.len()),
-        Invalid::BadAggregationBitfieldLength(
-            committee.len(),
-            attestation.aggregation_bitfield.len()
-        )
+        Invalid::BadAggregationBitfieldLength {
+            committee_len: committee.len(),
+            bitfield_len: attestation.aggregation_bitfield.len()
+        }
     );
 
     if verify_signature {
@@ -171,7 +193,7 @@ fn validate_attestation_signature_optional(
         )?;
     }
 
-    // [TO BE REMOVED IN PHASE 1] Verify that `attestation.data.crosslink_data_root == ZERO_HASH`.
+    // Crosslink data root is zero (to be removed in phase 1).
     verify!(
         attestation.data.crosslink_data_root == spec.zero_hash,
         Invalid::ShardBlockRootNotZero
@@ -188,7 +210,7 @@ fn validate_attestation_signature_optional(
 /// - `custody_bitfield` does not have a bit for each index of `committee`.
 /// - A `validator_index` in `committee` is not in `state.validator_registry`.
 ///
-/// Spec v0.4.0
+/// Spec v0.5.0
 fn verify_attestation_signature(
     state: &BeaconState,
     committee: &[usize],
@@ -204,10 +226,10 @@ fn verify_attestation_signature(
 
     for (i, v) in committee.iter().enumerate() {
         let validator_signed = aggregation_bitfield.get(i).map_err(|_| {
-            Error::Invalid(Invalid::BadAggregationBitfieldLength(
-                committee.len(),
-                aggregation_bitfield.len(),
-            ))
+            Error::Invalid(Invalid::BadAggregationBitfieldLength {
+                committee_len: committee.len(),
+                bitfield_len: aggregation_bitfield.len(),
+            })
         })?;
 
         if validator_signed {
@@ -215,10 +237,10 @@ fn verify_attestation_signature(
                 Ok(bit) => bit,
                 // Invalidate signature if custody_bitfield.len() < committee
                 Err(_) => {
-                    return Err(Error::Invalid(Invalid::BadCustodyBitfieldLength(
-                        committee.len(),
-                        custody_bitfield.len(),
-                    )));
+                    return Err(Error::Invalid(Invalid::BadCustodyBitfieldLength {
+                        committee_len: committee.len(),
+                        bitfield_len: custody_bitfield.len(),
+                    }));
                }
            };
diff --git a/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs b/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs
index d126849b6..a198d2a3e 100644
--- a/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs
+++ b/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs
@@ -7,7 +7,7 @@ use types::*;
 ///
 /// Returns `Ok(())` if the `AttesterSlashing` is valid, otherwise indicates the reason for invalidity.
/// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn verify_attester_slashing( state: &BeaconState, attester_slashing: &AttesterSlashing, @@ -41,15 +41,16 @@ pub fn verify_attester_slashing( /// /// Returns Ok(indices) if `indices.len() > 0`. /// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn gather_attester_slashing_indices( state: &BeaconState, attester_slashing: &AttesterSlashing, + spec: &ChainSpec, ) -> Result, Error> { let slashable_attestation_1 = &attester_slashing.slashable_attestation_1; let slashable_attestation_2 = &attester_slashing.slashable_attestation_2; - let mut slashable_indices = vec![]; + let mut slashable_indices = Vec::with_capacity(spec.max_indices_per_slashable_vote); for i in &slashable_attestation_1.validator_indices { let validator = state .validator_registry @@ -57,11 +58,20 @@ pub fn gather_attester_slashing_indices( .ok_or_else(|| Error::Invalid(Invalid::UnknownValidator(*i)))?; if slashable_attestation_2.validator_indices.contains(&i) & !validator.slashed { + // TODO: verify that we should reject any slashable attestation which includes a + // withdrawn validator. PH has asked the question on gitter, awaiting response. + verify!( + validator.withdrawable_epoch > state.slot.epoch(spec.slots_per_epoch), + Invalid::ValidatorAlreadyWithdrawn(*i) + ); + slashable_indices.push(*i); } } verify!(!slashable_indices.is_empty(), Invalid::NoSlashableIndices); + slashable_indices.shrink_to_fit(); + Ok(slashable_indices) } diff --git a/eth2/state_processing/src/per_block_processing/verify_deposit.rs b/eth2/state_processing/src/per_block_processing/verify_deposit.rs index aad38f616..2aeab6c5a 100644 --- a/eth2/state_processing/src/per_block_processing/verify_deposit.rs +++ b/eth2/state_processing/src/per_block_processing/verify_deposit.rs @@ -18,7 +18,7 @@ pub type PublicKeyValidatorIndexHashmap = HashMap; /// /// Note: this function is incomplete. /// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn verify_deposit( state: &BeaconState, deposit: &Deposit, @@ -49,26 +49,25 @@ pub fn verify_deposit( /// Verify that the `Deposit` index is correct. /// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn verify_deposit_index(state: &BeaconState, deposit: &Deposit) -> Result<(), Error> { verify!( deposit.index == state.deposit_index, - Invalid::BadIndex(state.deposit_index, deposit.index) + Invalid::BadIndex { + state: state.deposit_index, + deposit: deposit.index + } ); Ok(()) } -pub fn build_public_key_hashmap(state: &BeaconState) -> PublicKeyValidatorIndexHashmap { - let mut hashmap = HashMap::with_capacity(state.validator_registry.len()); - - for (i, validator) in state.validator_registry.iter().enumerate() { - hashmap.insert(validator.pubkey.clone(), i as u64); - } - - hashmap -} - +/// Returns a `Some(validator index)` if a pubkey already exists in the `validator_registry`, +/// otherwise returns `None`. +/// +/// ## Errors +/// +/// Errors if the state's `pubkey_cache` is not current. pub fn get_existing_validator_index( state: &BeaconState, deposit: &Deposit, @@ -94,12 +93,12 @@ pub fn get_existing_validator_index( /// Verify that a deposit is included in the state's eth1 deposit root. 
/// -/// Spec v0.4.0 +/// Spec v0.5.0 fn verify_deposit_merkle_proof(state: &BeaconState, deposit: &Deposit, spec: &ChainSpec) -> bool { let leaf = hash(&get_serialized_deposit_data(deposit)); verify_merkle_proof( Hash256::from_slice(&leaf), - &deposit.branch, + &deposit.proof, spec.deposit_contract_tree_depth as usize, deposit.index as usize, state.latest_eth1_data.deposit_root, @@ -108,7 +107,7 @@ fn verify_deposit_merkle_proof(state: &BeaconState, deposit: &Deposit, spec: &Ch /// Helper struct for easily getting the serialized data generated by the deposit contract. /// -/// Spec v0.4.0 +/// Spec v0.5.0 #[derive(Encode)] struct SerializedDepositData { amount: u64, @@ -119,7 +118,7 @@ struct SerializedDepositData { /// Return the serialized data generated by the deposit contract that is used to generate the /// merkle proof. /// -/// Spec v0.4.0 +/// Spec v0.5.0 fn get_serialized_deposit_data(deposit: &Deposit) -> Vec { let serialized_deposit_data = SerializedDepositData { amount: deposit.deposit_data.amount, diff --git a/eth2/state_processing/src/per_block_processing/verify_exit.rs b/eth2/state_processing/src/per_block_processing/verify_exit.rs index 8cd54fb69..7893cea96 100644 --- a/eth2/state_processing/src/per_block_processing/verify_exit.rs +++ b/eth2/state_processing/src/per_block_processing/verify_exit.rs @@ -7,7 +7,7 @@ use types::*; /// /// Returns `Ok(())` if the `Exit` is valid, otherwise indicates the reason for invalidity. /// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn verify_exit( state: &BeaconState, exit: &VoluntaryExit, @@ -18,15 +18,35 @@ pub fn verify_exit( .get(exit.validator_index as usize) .ok_or_else(|| Error::Invalid(Invalid::ValidatorUnknown(exit.validator_index)))?; + // Verify that the validator has not yet exited. verify!( - validator.exit_epoch - > state.get_delayed_activation_exit_epoch(state.current_epoch(spec), spec), - Invalid::AlreadyExited + validator.exit_epoch == spec.far_future_epoch, + Invalid::AlreadyExited(exit.validator_index) ); + // Verify that the validator has not yet initiated. + verify!( + !validator.initiated_exit, + Invalid::AlreadyInitiatedExited(exit.validator_index) + ); + + // Exits must specify an epoch when they become valid; they are not valid before then. verify!( state.current_epoch(spec) >= exit.epoch, - Invalid::FutureEpoch(state.current_epoch(spec), exit.epoch) + Invalid::FutureEpoch { + state: state.current_epoch(spec), + exit: exit.epoch + } + ); + + // Must have been in the validator set long enough. + let lifespan = state.slot.epoch(spec.slots_per_epoch) - validator.activation_epoch; + verify!( + lifespan >= spec.persistent_committee_period, + Invalid::TooYoungToLeave { + lifespan, + expected: spec.persistent_committee_period, + } ); let message = exit.signed_root(); diff --git a/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs b/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs index c3c0079a9..dffb9d898 100644 --- a/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs +++ b/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs @@ -7,7 +7,7 @@ use types::*; /// /// Returns `Ok(())` if the `ProposerSlashing` is valid, otherwise indicates the reason for invalidity. 
/// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn verify_proposer_slashing( proposer_slashing: &ProposerSlashing, state: &BeaconState, @@ -21,34 +21,28 @@ pub fn verify_proposer_slashing( })?; verify!( - proposer_slashing.proposal_1.slot == proposer_slashing.proposal_2.slot, + proposer_slashing.header_1.slot == proposer_slashing.header_2.slot, Invalid::ProposalSlotMismatch( - proposer_slashing.proposal_1.slot, - proposer_slashing.proposal_2.slot + proposer_slashing.header_1.slot, + proposer_slashing.header_2.slot ) ); verify!( - proposer_slashing.proposal_1.shard == proposer_slashing.proposal_2.shard, - Invalid::ProposalShardMismatch( - proposer_slashing.proposal_1.shard, - proposer_slashing.proposal_2.shard - ) - ); - - verify!( - proposer_slashing.proposal_1.block_root != proposer_slashing.proposal_2.block_root, - Invalid::ProposalBlockRootMismatch( - proposer_slashing.proposal_1.block_root, - proposer_slashing.proposal_2.block_root - ) + proposer_slashing.header_1 != proposer_slashing.header_2, + Invalid::ProposalsIdentical ); verify!(!proposer.slashed, Invalid::ProposerAlreadySlashed); verify!( - verify_proposal_signature( - &proposer_slashing.proposal_1, + proposer.withdrawable_epoch > state.slot.epoch(spec.slots_per_epoch), + Invalid::ProposerAlreadyWithdrawn(proposer_slashing.proposer_index) + ); + + verify!( + verify_header_signature( + &proposer_slashing.header_1, &proposer.pubkey, &state.fork, spec @@ -56,8 +50,8 @@ pub fn verify_proposer_slashing( Invalid::BadProposal1Signature ); verify!( - verify_proposal_signature( - &proposer_slashing.proposal_2, + verify_header_signature( + &proposer_slashing.header_2, &proposer.pubkey, &state.fork, spec @@ -71,17 +65,19 @@ pub fn verify_proposer_slashing( /// Verifies the signature of a proposal. /// /// Returns `true` if the signature is valid. -fn verify_proposal_signature( - proposal: &Proposal, +/// +/// Spec v0.5.0 +fn verify_header_signature( + header: &BeaconBlockHeader, pubkey: &PublicKey, fork: &Fork, spec: &ChainSpec, ) -> bool { - let message = proposal.signed_root(); + let message = header.signed_root(); let domain = spec.get_domain( - proposal.slot.epoch(spec.slots_per_epoch), - Domain::Proposal, + header.slot.epoch(spec.slots_per_epoch), + Domain::BeaconBlock, fork, ); - proposal.signature.verify(&message[..], domain, pubkey) + header.signature.verify(&message[..], domain, pubkey) } diff --git a/eth2/state_processing/src/per_block_processing/verify_transfer.rs b/eth2/state_processing/src/per_block_processing/verify_transfer.rs index 4746fc75c..546760fd0 100644 --- a/eth2/state_processing/src/per_block_processing/verify_transfer.rs +++ b/eth2/state_processing/src/per_block_processing/verify_transfer.rs @@ -10,16 +10,16 @@ use types::*; /// /// Note: this function is incomplete. 
/// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn verify_transfer( state: &BeaconState, transfer: &Transfer, spec: &ChainSpec, ) -> Result<(), Error> { - let from_balance = *state + let sender_balance = *state .validator_balances - .get(transfer.from as usize) - .ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.from)))?; + .get(transfer.sender as usize) + .ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.sender)))?; let total_amount = transfer .amount @@ -27,19 +27,22 @@ pub fn verify_transfer( .ok_or_else(|| Error::Invalid(Invalid::FeeOverflow(transfer.amount, transfer.fee)))?; verify!( - from_balance >= transfer.amount, - Invalid::FromBalanceInsufficient(transfer.amount, from_balance) + sender_balance >= transfer.amount, + Invalid::FromBalanceInsufficient(transfer.amount, sender_balance) ); verify!( - from_balance >= transfer.fee, - Invalid::FromBalanceInsufficient(transfer.fee, from_balance) + sender_balance >= transfer.fee, + Invalid::FromBalanceInsufficient(transfer.fee, sender_balance) ); verify!( - (from_balance == total_amount) - || (from_balance >= (total_amount + spec.min_deposit_amount)), - Invalid::InvalidResultingFromBalance(from_balance - total_amount, spec.min_deposit_amount) + (sender_balance == total_amount) + || (sender_balance >= (total_amount + spec.min_deposit_amount)), + Invalid::InvalidResultingFromBalance( + sender_balance - total_amount, + spec.min_deposit_amount + ) ); verify!( @@ -47,25 +50,25 @@ pub fn verify_transfer( Invalid::StateSlotMismatch(state.slot, transfer.slot) ); - let from_validator = state + let sender_validator = state .validator_registry - .get(transfer.from as usize) - .ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.from)))?; + .get(transfer.sender as usize) + .ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.sender)))?; let epoch = state.slot.epoch(spec.slots_per_epoch); verify!( - from_validator.is_withdrawable_at(epoch) - || from_validator.activation_epoch == spec.far_future_epoch, - Invalid::FromValidatorIneligableForTransfer(transfer.from) + sender_validator.is_withdrawable_at(epoch) + || sender_validator.activation_epoch == spec.far_future_epoch, + Invalid::FromValidatorIneligableForTransfer(transfer.sender) ); let transfer_withdrawal_credentials = Hash256::from_slice( &get_withdrawal_credentials(&transfer.pubkey, spec.bls_withdrawal_prefix_byte)[..], ); verify!( - from_validator.withdrawal_credentials == transfer_withdrawal_credentials, + sender_validator.withdrawal_credentials == transfer_withdrawal_credentials, Invalid::WithdrawalCredentialsMismatch( - from_validator.withdrawal_credentials, + sender_validator.withdrawal_credentials, transfer_withdrawal_credentials ) ); @@ -97,16 +100,17 @@ pub fn execute_transfer( transfer: &Transfer, spec: &ChainSpec, ) -> Result<(), Error> { - let from_balance = *state + let sender_balance = *state .validator_balances - .get(transfer.from as usize) - .ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.from)))?; - let to_balance = *state + .get(transfer.sender as usize) + .ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.sender)))?; + let recipient_balance = *state .validator_balances - .get(transfer.to as usize) - .ok_or_else(|| Error::Invalid(Invalid::ToValidatorUnknown(transfer.to)))?; + .get(transfer.recipient as usize) + .ok_or_else(|| Error::Invalid(Invalid::ToValidatorUnknown(transfer.recipient)))?; - let proposer_index = state.get_beacon_proposer_index(state.slot, spec)?; + let 
proposer_index = + state.get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec)?; let proposer_balance = state.validator_balances[proposer_index]; let total_amount = transfer @@ -114,14 +118,22 @@ pub fn execute_transfer( .checked_add(transfer.fee) .ok_or_else(|| Error::Invalid(Invalid::FeeOverflow(transfer.amount, transfer.fee)))?; - state.validator_balances[transfer.from as usize] = - from_balance.checked_sub(total_amount).ok_or_else(|| { - Error::Invalid(Invalid::FromBalanceInsufficient(total_amount, from_balance)) + state.validator_balances[transfer.sender as usize] = + sender_balance.checked_sub(total_amount).ok_or_else(|| { + Error::Invalid(Invalid::FromBalanceInsufficient( + total_amount, + sender_balance, + )) })?; - state.validator_balances[transfer.to as usize] = to_balance + state.validator_balances[transfer.recipient as usize] = recipient_balance .checked_add(transfer.amount) - .ok_or_else(|| Error::Invalid(Invalid::ToBalanceOverflow(to_balance, transfer.amount)))?; + .ok_or_else(|| { + Error::Invalid(Invalid::ToBalanceOverflow( + recipient_balance, + transfer.amount, + )) + })?; state.validator_balances[proposer_index] = proposer_balance.checked_add(transfer.fee).ok_or_else(|| { diff --git a/eth2/state_processing/src/per_slot_processing.rs b/eth2/state_processing/src/per_slot_processing.rs index 0bb405c98..aafc7166a 100644 --- a/eth2/state_processing/src/per_slot_processing.rs +++ b/eth2/state_processing/src/per_slot_processing.rs @@ -9,7 +9,7 @@ pub enum Error { /// Advances a state forward by one slot, performing per-epoch processing if required. /// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn per_slot_processing( state: &mut BeaconState, previous_block_root: Hash256, @@ -22,29 +22,9 @@ pub fn per_slot_processing( state.slot += 1; - update_block_roots(state, previous_block_root, spec); - Ok(()) } -/// Updates the state's block roots as per-slot processing is performed. -/// -/// Spec v0.4.0 -pub fn update_block_roots(state: &mut BeaconState, previous_block_root: Hash256, spec: &ChainSpec) { - state.latest_block_roots[(state.slot.as_usize() - 1) % spec.latest_block_roots_length] = - previous_block_root; - - if state.slot.as_usize() % spec.latest_block_roots_length == 0 { - let root = merkle_root(&state.latest_block_roots[..]); - state.batched_block_roots.push(root); - } -} - -fn merkle_root(_input: &[Hash256]) -> Hash256 { - // TODO: implement correctly. 
- Hash256::zero() -} - impl From for Error { fn from(e: BeaconStateError) -> Error { Error::BeaconStateError(e) diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 32f8204e3..7c77a5a3e 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -2,11 +2,11 @@ use self::epoch_cache::EpochCache; use crate::test_utils::TestRandom; use crate::{validator_registry::get_active_validator_indices, *}; use int_to_bytes::int_to_bytes32; -use log::{debug, trace}; +use log::trace; use pubkey_cache::PubkeyCache; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz::{hash, SignedRoot, TreeHash}; +use ssz::{hash, ssz_encode, SignedRoot, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash}; use std::collections::HashMap; use test_random_derive::TestRandom; @@ -31,12 +31,14 @@ pub enum Error { UnableToShuffle, UnknownValidator, InvalidBitfield, + ValidatorIsWithdrawable, InsufficientRandaoMixes, InsufficientValidators, InsufficientBlockRoots, InsufficientIndexRoots, InsufficientAttestations, InsufficientCommittees, + InsufficientSlashedBalances, EpochCacheUninitialized(RelativeEpoch), PubkeyCacheInconsistent, PubkeyCacheIncomplete { @@ -377,10 +379,37 @@ impl BeaconState { } } + /// XOR-assigns the existing `epoch` randao mix with the hash of the `signature`. + /// + /// # Errors: + /// + /// See `Self::get_randao_mix`. + /// + /// Spec v0.5.0 + pub fn update_randao_mix( + &mut self, + epoch: Epoch, + signature: &Signature, + spec: &ChainSpec, + ) -> Result<(), Error> { + let i = epoch.as_usize() % spec.latest_randao_mixes_length; + + let signature_hash = Hash256::from_slice(&hash(&ssz_encode(signature))); + + self.latest_randao_mixes[i] = *self.get_randao_mix(epoch, spec)? ^ signature_hash; + + Ok(()) + } + /// Return the randao mix at a recent ``epoch``. /// - /// Spec v0.4.0 - pub fn get_randao_mix(&self, epoch: Epoch, spec: &ChainSpec) -> Option<&Hash256> { + /// # Errors: + /// - `InsufficientRandaoMixes` if `self.latest_randao_mixes` is shorter than + /// `spec.latest_randao_mixes_length`. + /// - `EpochOutOfBounds` if the state no longer stores randao mixes for the given `epoch`. + /// + /// Spec v0.5.0 + pub fn get_randao_mix(&self, epoch: Epoch, spec: &ChainSpec) -> Result<&Hash256, Error> { let current_epoch = self.current_epoch(spec); if (current_epoch - (spec.latest_randao_mixes_length as u64) < epoch) @@ -388,8 +417,9 @@ impl BeaconState { { self.latest_randao_mixes .get(epoch.as_usize() % spec.latest_randao_mixes_length) + .ok_or_else(|| Error::InsufficientRandaoMixes) } else { - None + Err(Error::EpochOutOfBounds) } } @@ -418,8 +448,7 @@ impl BeaconState { /// Spec v0.4.0 pub fn generate_seed(&self, epoch: Epoch, spec: &ChainSpec) -> Result { let mut input = self - .get_randao_mix(epoch - spec.min_seed_lookahead, spec) - .ok_or_else(|| Error::InsufficientRandaoMixes)? + .get_randao_mix(epoch - spec.min_seed_lookahead, spec)? .as_bytes() .to_vec(); @@ -601,7 +630,7 @@ impl BeaconState { /// Initiate an exit for the validator of the given `index`. /// - /// Spec v0.4.0 + /// Spec v0.5.0 pub fn initiate_validator_exit(&mut self, validator_index: usize) { self.validator_registry[validator_index].initiated_exit = true; } @@ -622,7 +651,7 @@ impl BeaconState { /// Slash the validator with index ``index``. 
/// - /// Spec v0.4.0 + /// Spec v0.5.0 pub fn slash_validator( &mut self, validator_index: usize, @@ -634,26 +663,27 @@ impl BeaconState { .validator_registry .get(validator_index) .ok_or_else(|| Error::UnknownValidator)?; + let effective_balance = self.get_effective_balance(validator_index, spec)?; + // A validator that is withdrawn cannot be slashed. + // + // This constraint will be lifted in Phase 0. if self.slot >= validator .withdrawable_epoch .start_slot(spec.slots_per_epoch) { - return Err(Error::SlotOutOfBounds); + return Err(Error::ValidatorIsWithdrawable); } self.exit_validator(validator_index, spec); - let effective_balance = self.get_effective_balance(validator_index, spec)?; - - self.latest_slashed_balances[current_epoch.as_usize() % spec.latest_slashed_exit_length] += - effective_balance; + self.increment_current_epoch_slashed_balances(effective_balance, spec)?; let whistleblower_index = self.get_beacon_proposer_index(self.slot, RelativeEpoch::Current, spec)?; + let whistleblower_reward = effective_balance / spec.whistleblower_reward_quotient; - let whistleblower_reward = effective_balance; safe_add_assign!( self.validator_balances[whistleblower_index as usize], whistleblower_reward @@ -662,14 +692,31 @@ impl BeaconState { self.validator_balances[validator_index], whistleblower_reward ); + self.validator_registry[validator_index].slashed = true; + self.validator_registry[validator_index].withdrawable_epoch = current_epoch + Epoch::from(spec.latest_slashed_exit_length); - debug!( - "Whistleblower {} penalized validator {}.", - whistleblower_index, validator_index - ); + Ok(()) + } + + /// Increment `self.latest_slashed_balances` with a slashing from the current epoch. + /// + /// Spec v0.5.0. + fn increment_current_epoch_slashed_balances( + &mut self, + increment: u64, + spec: &ChainSpec, + ) -> Result<(), Error> { + let current_epoch = self.current_epoch(spec); + + let slashed_balances_index = current_epoch.as_usize() % spec.latest_slashed_exit_length; + if slashed_balances_index >= self.latest_slashed_balances.len() { + return Err(Error::InsufficientSlashedBalances); + } + + self.latest_slashed_balances[slashed_balances_index] += increment; Ok(()) } diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index f4b113056..e9ade2c91 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -29,7 +29,7 @@ pub struct ChainSpec { pub shard_count: u64, pub target_committee_size: u64, pub max_balance_churn_quotient: u64, - pub max_indices_per_slashable_vote: u64, + pub max_indices_per_slashable_vote: usize, pub max_exit_dequeues_per_epoch: u64, pub shuffle_round_count: u8, diff --git a/eth2/types/src/proposer_slashing.rs b/eth2/types/src/proposer_slashing.rs index 881f0e405..02216a2fc 100644 --- a/eth2/types/src/proposer_slashing.rs +++ b/eth2/types/src/proposer_slashing.rs @@ -11,8 +11,8 @@ use test_random_derive::TestRandom; #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct ProposerSlashing { pub proposer_index: u64, - pub proposal_1: BeaconBlockHeader, - pub proposal_2: BeaconBlockHeader, + pub header_1: BeaconBlockHeader, + pub header_2: BeaconBlockHeader, } #[cfg(test)] diff --git a/eth2/types/src/test_utils/testing_proposer_slashing_builder.rs b/eth2/types/src/test_utils/testing_proposer_slashing_builder.rs index 0773cd6da..2cfebd915 100644 --- a/eth2/types/src/test_utils/testing_proposer_slashing_builder.rs +++ b/eth2/types/src/test_utils/testing_proposer_slashing_builder.rs @@ 
-25,7 +25,7 @@ impl TestingProposerSlashingBuilder { let hash_1 = Hash256::from([1; 32]); let hash_2 = Hash256::from([2; 32]); - let mut proposal_1 = BeaconBlockHeader { + let mut header_1 = BeaconBlockHeader { slot, previous_block_root: hash_1, state_root: hash_1, @@ -33,27 +33,27 @@ impl TestingProposerSlashingBuilder { signature: Signature::empty_signature(), }; - let mut proposal_2 = BeaconBlockHeader { + let mut header_2 = BeaconBlockHeader { previous_block_root: hash_2, - ..proposal_1.clone() + ..header_1.clone() }; - proposal_1.signature = { - let message = proposal_1.signed_root(); + header_1.signature = { + let message = header_1.signed_root(); let epoch = slot.epoch(spec.slots_per_epoch); signer(proposer_index, &message[..], epoch, Domain::BeaconBlock) }; - proposal_2.signature = { - let message = proposal_2.signed_root(); + header_2.signature = { + let message = header_2.signed_root(); let epoch = slot.epoch(spec.slots_per_epoch); signer(proposer_index, &message[..], epoch, Domain::BeaconBlock) }; ProposerSlashing { proposer_index, - proposal_1, - proposal_2, + header_1, + header_2, } } } From 3b8c1df5da123170c23482d86c1bb064ce557b09 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 17 Mar 2019 12:49:55 +1100 Subject: [PATCH 090/154] Fix bug in per block processing. --- .../src/per_block_processing/validate_attestation.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/eth2/state_processing/src/per_block_processing/validate_attestation.rs b/eth2/state_processing/src/per_block_processing/validate_attestation.rs index 9d1321407..68a51b2df 100644 --- a/eth2/state_processing/src/per_block_processing/validate_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/validate_attestation.rs @@ -142,12 +142,10 @@ fn validate_attestation_signature_optional( ); // Get the committee for the specific shard that this attestation is for. + let relative_epoch = RelativeEpoch::from_slot(state.slot, attestation.data.slot, spec) + .map_err(|_| BeaconStateError::EpochOutOfBounds)?; // Should not fail due to previous checks. let crosslink_committee = state - .get_crosslink_committees_at_slot( - attestation.data.slot, - RelativeEpoch::NextWithoutRegistryChange, - spec, - )? + .get_crosslink_committees_at_slot(attestation.data.slot, relative_epoch, spec)? 
.iter() .find(|c| c.shard == attestation.data.shard) .ok_or_else(|| { From 2871ad50558ad2f672cc0a6fb599267819cda7c1 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sun, 17 Mar 2019 14:25:47 +1100 Subject: [PATCH 091/154] Correct listening addresses and associated log --- beacon_node/Cargo.toml | 1 + beacon_node/client/src/client_config.rs | 6 ++++++ beacon_node/src/run.rs | 2 +- 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 56f5c654e..6be030fd5 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -5,6 +5,7 @@ authors = ["Paul Hauner ", "Age Manning () { config.net_conf.listen_port = port; + // update the listening multiaddrs + for address in &mut config.net_conf.listen_addresses { + address.pop(); + address.append(Protocol::Tcp(port)); + } } else { error!(log, "Invalid port"; "port" => port_str); return Err("Invalid port"); diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index cfae001a0..12d761d84 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -14,7 +14,7 @@ pub fn run_beacon_node(config: ClientConfig, log: slog::Logger) -> error::Result .map_err(|e| format!("{:?}", e))?; // Log configuration - info!(log, ""; + info!(log, "Listening on {:?}", &config.net_conf.listen_addresses; "data_dir" => &config.data_dir.to_str(), "port" => &config.net_conf.listen_port); From 7370306366d21abe8a52b549933d328aded2c5f9 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sun, 17 Mar 2019 14:38:20 +1100 Subject: [PATCH 092/154] Rename RPC reqeusts, correct RPC internals --- beacon_node/libp2p/src/rpc/handler.rs | 0 beacon_node/libp2p/src/rpc/methods.rs | 6 +++--- beacon_node/libp2p/src/rpc/mod.rs | 1 - beacon_node/libp2p/src/rpc/protocol.rs | 25 +++++++++++-------------- 4 files changed, 14 insertions(+), 18 deletions(-) delete mode 100644 beacon_node/libp2p/src/rpc/handler.rs diff --git a/beacon_node/libp2p/src/rpc/handler.rs b/beacon_node/libp2p/src/rpc/handler.rs deleted file mode 100644 index e69de29bb..000000000 diff --git a/beacon_node/libp2p/src/rpc/methods.rs b/beacon_node/libp2p/src/rpc/methods.rs index d299e9bb7..b6563ba64 100644 --- a/beacon_node/libp2p/src/rpc/methods.rs +++ b/beacon_node/libp2p/src/rpc/methods.rs @@ -19,17 +19,17 @@ impl From for RPCMethod { #[derive(Debug, Clone)] pub enum RPCRequest { - HelloRequest, + Hello(HelloBody), } #[derive(Debug, Clone)] pub enum RPCResponse { - HelloResponse(HelloResponse), + Hello(HelloBody), } // request/response structs for RPC methods #[derive(Encode, Decode, Clone, Debug)] -pub struct HelloResponse { +pub struct HelloBody { pub network_id: u8, pub latest_finalized_root: Hash256, pub latest_finalized_epoch: Epoch, diff --git a/beacon_node/libp2p/src/rpc/mod.rs b/beacon_node/libp2p/src/rpc/mod.rs index f66f531eb..4cebb1e39 100644 --- a/beacon_node/libp2p/src/rpc/mod.rs +++ b/beacon_node/libp2p/src/rpc/mod.rs @@ -2,7 +2,6 @@ /// /// This is purpose built for Ethereum 2.0 serenity and the protocol listens on /// `/eth/serenity/rpc/1.0.0` -mod handler; mod methods; mod protocol; diff --git a/beacon_node/libp2p/src/rpc/protocol.rs b/beacon_node/libp2p/src/rpc/protocol.rs index 2c6b3caa0..4b462bb77 100644 --- a/beacon_node/libp2p/src/rpc/protocol.rs +++ b/beacon_node/libp2p/src/rpc/protocol.rs @@ -1,11 +1,5 @@ -use super::methods::HelloResponse; -use super::methods::{RPCMethod, RPCRequest, RPCResponse}; -//use crate::rpc_proto; -//use byteorder::{BigEndian, ByteOrder}; -//use bytes::BytesMut; -use futures::{future, stream, Future, Stream}; 
-use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, PeerId, UpgradeInfo}; -//use std::{io, iter}; +use super::methods::{HelloBody, RPCMethod, RPCRequest, RPCResponse}; +use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use ssz::{ssz_encode, Decodable, Encodable, SszStream}; use std::io; use std::iter; @@ -83,7 +77,10 @@ fn decode(packet: Vec) -> Result { if request { let body = match RPCMethod::from(method_id) { - RPCMethod::Hello => RPCRequest::HelloRequest, + RPCMethod::Hello => { + let (hello_body, _index) = HelloBody::ssz_decode(&packet, index)?; + RPCRequest::Hello(hello_body) + } RPCMethod::Unknown => return Err(DecodeError::UnknownRPCMethod), }; @@ -97,8 +94,8 @@ fn decode(packet: Vec) -> Result { else { let result = match RPCMethod::from(method_id) { RPCMethod::Hello => { - let (hello_response, _index) = HelloResponse::ssz_decode(&packet, index)?; - RPCResponse::HelloResponse(hello_response) + let (body, _index) = HelloBody::ssz_decode(&packet, index)?; + RPCResponse::Hello(body) } RPCMethod::Unknown => return Err(DecodeError::UnknownRPCMethod), }; @@ -137,8 +134,8 @@ impl Encodable for RpcEvent { s.append(id); s.append(method_id); match body { - RPCRequest::HelloRequest => {} - } + RPCRequest::Hello(body) => s.append(body), + }; } RpcEvent::Response { id, @@ -149,7 +146,7 @@ impl Encodable for RpcEvent { s.append(id); s.append(method_id); match result { - RPCResponse::HelloResponse(response) => { + RPCResponse::Hello(response) => { s.append(response); } } From 3a384d93f85001bd316819571a635a4cfab1c2a4 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 17 Mar 2019 17:47:12 +1100 Subject: [PATCH 093/154] Allow state processing to compile under v0.5.0 --- .../src/per_block_processing.rs | 4 +- .../validate_attestation.rs | 4 +- .../src/per_epoch_processing.rs | 172 ++++++++---------- .../get_attestation_participants.rs | 37 ++++ .../inclusion_distance.rs | 15 +- .../process_validator_registry.rs | 72 ++++++++ .../validator_statuses.rs | 60 +++--- .../src/per_epoch_processing/winning_root.rs | 100 +++++++--- eth2/types/src/beacon_state.rs | 161 ++++++++++------ eth2/types/src/beacon_state/epoch_cache.rs | 30 +-- eth2/types/src/crosslink_committee.rs | 1 + .../testing_beacon_block_builder.rs | 5 +- .../testing_beacon_state_builder.rs | 3 +- 13 files changed, 422 insertions(+), 242 deletions(-) create mode 100644 eth2/state_processing/src/per_epoch_processing/get_attestation_participants.rs create mode 100644 eth2/state_processing/src/per_epoch_processing/process_validator_registry.rs diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index 377f92e8b..c6b22fa75 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -72,8 +72,8 @@ fn per_block_processing_signature_optional( process_block_header(state, block, spec)?; // Ensure the current and previous epoch cache is built. 
- state.build_epoch_cache(RelativeEpoch::Current, spec)?; state.build_epoch_cache(RelativeEpoch::Previous, spec)?; + state.build_epoch_cache(RelativeEpoch::Current, spec)?; if should_verify_block_signature { verify_block_signature(&state, &block, &spec)?; @@ -94,7 +94,7 @@ fn per_block_processing_signature_optional( /// /// Spec v0.5.0 pub fn process_block_header( - state: &BeaconState, + state: &mut BeaconState, block: &BeaconBlock, spec: &ChainSpec, ) -> Result<(), Error> { diff --git a/eth2/state_processing/src/per_block_processing/validate_attestation.rs b/eth2/state_processing/src/per_block_processing/validate_attestation.rs index 68a51b2df..272eeb18b 100644 --- a/eth2/state_processing/src/per_block_processing/validate_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/validate_attestation.rs @@ -142,10 +142,8 @@ fn validate_attestation_signature_optional( ); // Get the committee for the specific shard that this attestation is for. - let relative_epoch = RelativeEpoch::from_slot(state.slot, attestation.data.slot, spec) - .map_err(|_| BeaconStateError::EpochOutOfBounds)?; // Should not fail due to previous checks. let crosslink_committee = state - .get_crosslink_committees_at_slot(attestation.data.slot, relative_epoch, spec)? + .get_crosslink_committees_at_slot(attestation.data.slot, spec)? .iter() .find(|c| c.shard == attestation.data.shard) .ok_or_else(|| { diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index 8c4b8e88b..2f1cc3551 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -1,5 +1,6 @@ use errors::EpochProcessingError as Error; use integer_sqrt::IntegerSquareRoot; +use process_validator_registry::process_validator_registry; use rayon::prelude::*; use ssz::TreeHash; use std::collections::HashMap; @@ -8,7 +9,9 @@ use validator_statuses::{TotalBalances, ValidatorStatuses}; use winning_root::{winning_root, WinningRoot}; pub mod errors; +pub mod get_attestation_participants; pub mod inclusion_distance; +pub mod process_validator_registry; pub mod tests; pub mod validator_statuses; pub mod winning_root; @@ -25,10 +28,9 @@ pub type WinningRootHashSet = HashMap; /// /// Spec v0.4.0 pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { - // Ensure all of the caches are built. + // Ensure the previous and next epoch caches are built. state.build_epoch_cache(RelativeEpoch::Previous, spec)?; state.build_epoch_cache(RelativeEpoch::Current, spec)?; - state.build_epoch_cache(RelativeEpoch::Next, spec)?; let mut statuses = initialize_validator_statuses(&state, spec)?; @@ -61,7 +63,7 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result /// Returns a list of active validator indices for the state's current epoch. /// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn calculate_active_validator_indices(state: &BeaconState, spec: &ChainSpec) -> Vec { get_active_validator_indices( &state.validator_registry, @@ -76,26 +78,28 @@ pub fn calculate_active_validator_indices(state: &BeaconState, spec: &ChainSpec) /// - previous epoch attesters /// - etc. 
/// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn initialize_validator_statuses( state: &BeaconState, spec: &ChainSpec, ) -> Result { - let mut statuses = ValidatorStatuses::new(state, spec); + let mut statuses = ValidatorStatuses::new(state, spec)?; - statuses.process_attestations(&state, &state.latest_attestations, spec)?; + statuses.process_attestations(&state, spec)?; Ok(statuses) } -/// Spec v0.4.0 +/// Maybe resets the eth1 period. +/// +/// Spec v0.5.0 pub fn process_eth1_data(state: &mut BeaconState, spec: &ChainSpec) { let next_epoch = state.next_epoch(spec); let voting_period = spec.epochs_per_eth1_voting_period; if next_epoch % voting_period == 0 { for eth1_data_vote in &state.eth1_data_votes { - if eth1_data_vote.vote_count * 2 > voting_period { + if eth1_data_vote.vote_count * 2 > voting_period * spec.slots_per_epoch { state.latest_eth1_data = eth1_data_vote.eth1_data.clone(); } } @@ -119,7 +123,7 @@ pub fn process_justification( let previous_epoch = state.previous_epoch(spec); let current_epoch = state.current_epoch(spec); - let mut new_justified_epoch = state.justified_epoch; + let mut new_justified_epoch = state.current_justified_epoch; state.justification_bitfield <<= 1; // If > 2/3 of the total balance attested to the previous epoch boundary @@ -168,8 +172,10 @@ pub fn process_justification( // - The presently justified epoch was two epochs ago. // // Then, set the finalized epoch to two epochs ago. - if (state.justification_bitfield % 8 == 0b111) & (state.justified_epoch == previous_epoch - 1) { - state.finalized_epoch = state.justified_epoch; + if (state.justification_bitfield % 8 == 0b111) + & (state.current_justified_epoch == previous_epoch - 1) + { + state.finalized_epoch = state.current_justified_epoch; } // If: // @@ -177,12 +183,14 @@ pub fn process_justification( // - Set the previous epoch to be justified. // // Then, set the finalized epoch to be the previous epoch. - if (state.justification_bitfield % 4 == 0b11) & (state.justified_epoch == previous_epoch) { - state.finalized_epoch = state.justified_epoch; + if (state.justification_bitfield % 4 == 0b11) + & (state.current_justified_epoch == previous_epoch) + { + state.finalized_epoch = state.current_justified_epoch; } - state.previous_justified_epoch = state.justified_epoch; - state.justified_epoch = new_justified_epoch; + state.previous_justified_epoch = state.current_justified_epoch; + state.current_justified_epoch = new_justified_epoch; } /// Updates the following fields on the `BeaconState`: @@ -191,23 +199,11 @@ pub fn process_justification( /// /// Also returns a `WinningRootHashSet` for later use during epoch processing. 
/// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn process_crosslinks( state: &mut BeaconState, spec: &ChainSpec, ) -> Result { - let current_epoch_attestations: Vec<&PendingAttestation> = state - .latest_attestations - .par_iter() - .filter(|a| a.data.slot.epoch(spec.slots_per_epoch) == state.current_epoch(spec)) - .collect(); - - let previous_epoch_attestations: Vec<&PendingAttestation> = state - .latest_attestations - .par_iter() - .filter(|a| a.data.slot.epoch(spec.slots_per_epoch) == state.previous_epoch(spec)) - .collect(); - let mut winning_root_for_shards: WinningRootHashSet = HashMap::new(); let previous_and_current_epoch_slots: Vec = state @@ -221,24 +217,18 @@ pub fn process_crosslinks( let crosslink_committees_at_slot = state.get_crosslink_committees_at_slot(slot, spec)?.clone(); - for (crosslink_committee, shard) in crosslink_committees_at_slot { - let shard = shard as u64; + for c in crosslink_committees_at_slot { + let shard = c.shard as u64; - let winning_root = winning_root( - state, - shard, - ¤t_epoch_attestations[..], - &previous_epoch_attestations[..], - spec, - )?; + let winning_root = winning_root(state, shard, spec)?; if let Some(winning_root) = winning_root { - let total_committee_balance = state.get_total_balance(&crosslink_committee, spec); + let total_committee_balance = state.get_total_balance(&c.committee, spec)?; // TODO: I think this has a bug. if (3 * winning_root.total_attesting_balance) >= (2 * total_committee_balance) { state.latest_crosslinks[shard as usize] = Crosslink { - epoch: state.current_epoch(spec), + epoch: slot.epoch(spec.slots_per_epoch), crosslink_data_root: winning_root.crosslink_data_root, } } @@ -294,7 +284,10 @@ pub fn process_rewards_and_penalities( .map(|(index, &balance)| { let mut balance = balance; let status = &statuses.statuses[index]; - let base_reward = state.base_reward(index, base_reward_quotient, spec); + let base_reward = get_base_reward(state, index, total_balances.previous_epoch, spec) + .expect( + "Cannot fail to access a validator balance when iterating validator balances.", + ); if epochs_since_finality <= 4 { // Expected FFG source @@ -330,11 +323,15 @@ pub fn process_rewards_and_penalities( safe_sub_assign!(balance, base_reward); }; } else { - let inactivity_penalty = state.inactivity_penalty( + let inactivity_penalty = get_inactivity_penalty( + state, index, - epochs_since_finality, - base_reward_quotient, + epochs_since_finality.as_u64(), + total_balances.previous_epoch, spec, + ) + .expect( + "Cannot fail to access a validator balance when iterating validator balances.", ); if status.is_active_in_previous_epoch { @@ -349,7 +346,10 @@ pub fn process_rewards_and_penalities( } if state.validator_registry[index].slashed { - let base_reward = state.base_reward(index, base_reward_quotient, spec); + let base_reward = + get_base_reward(state, index, total_balances.previous_epoch, spec).expect( + "Cannot fail to access a validator balance when iterating validator balances.", + ); safe_sub_assign!(balance, 2 * inactivity_penalty + base_reward); } } @@ -384,7 +384,10 @@ pub fn process_rewards_and_penalities( let proposer_index = status.inclusion_info.proposer_index; let inclusion_distance = status.inclusion_info.distance; - let base_reward = state.base_reward(proposer_index, base_reward_quotient, spec); + let base_reward = + get_base_reward(state, proposer_index, total_balances.previous_epoch, spec).expect( + "Cannot fail to access a validator balance when iterating validator balances.", + ); if inclusion_distance > 0 && 
inclusion_distance < Slot::max_value() { safe_add_assign!( @@ -399,53 +402,37 @@ pub fn process_rewards_and_penalities( Ok(()) } -/// Peforms a validator registry update, if required. +/// Returns the base reward for some validator. /// -/// Spec v0.4.0 -pub fn process_validator_registry(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { - let current_epoch = state.current_epoch(spec); - let next_epoch = state.next_epoch(spec); - - state.previous_shuffling_epoch = state.current_shuffling_epoch; - state.previous_shuffling_start_shard = state.current_shuffling_start_shard; - - state.previous_shuffling_seed = state.current_shuffling_seed; - - let should_update_validator_registy = if state.finalized_epoch - > state.validator_registry_update_epoch - { - (0..state.get_current_epoch_committee_count(spec)).all(|i| { - let shard = (state.current_shuffling_start_shard + i as u64) % spec.shard_count; - state.latest_crosslinks[shard as usize].epoch > state.validator_registry_update_epoch - }) +/// Spec v0.5.0 +pub fn get_base_reward( + state: &BeaconState, + index: usize, + previous_total_balance: u64, + spec: &ChainSpec, +) -> Result { + if previous_total_balance == 0 { + Ok(0) } else { - false - }; - - if should_update_validator_registy { - state.update_validator_registry(spec); - - state.current_shuffling_epoch = next_epoch; - state.current_shuffling_start_shard = (state.current_shuffling_start_shard - + state.get_current_epoch_committee_count(spec) as u64) - % spec.shard_count; - state.current_shuffling_seed = state.generate_seed(state.current_shuffling_epoch, spec)? - } else { - let epochs_since_last_registry_update = - current_epoch - state.validator_registry_update_epoch; - if (epochs_since_last_registry_update > 1) - & epochs_since_last_registry_update.is_power_of_two() - { - state.current_shuffling_epoch = next_epoch; - state.current_shuffling_seed = - state.generate_seed(state.current_shuffling_epoch, spec)? - } + let adjusted_quotient = previous_total_balance.integer_sqrt() / spec.base_reward_quotient; + Ok(state.get_effective_balance(index, spec)? / adjusted_quotient / 5) } +} - state.process_slashings(spec); - state.process_exit_queue(spec); - - Ok(()) +/// Returns the inactivity penalty for some validator. +/// +/// Spec v0.5.0 +pub fn get_inactivity_penalty( + state: &BeaconState, + index: usize, + epochs_since_finality: u64, + previous_total_balance: u64, + spec: &ChainSpec, +) -> Result { + Ok(get_base_reward(state, index, previous_total_balance, spec)? + + state.get_effective_balance(index, spec)? 
* epochs_since_finality + / spec.inactivity_penalty_quotient + / 2) } /// Updates the state's `latest_active_index_roots` field with a tree hash the active validator @@ -486,12 +473,5 @@ pub fn update_latest_slashed_balances(state: &mut BeaconState, spec: &ChainSpec) /// /// Spec v0.4.0 pub fn clean_attestations(state: &mut BeaconState, spec: &ChainSpec) { - let current_epoch = state.current_epoch(spec); - - state.latest_attestations = state - .latest_attestations - .iter() - .filter(|a| a.data.slot.epoch(spec.slots_per_epoch) >= current_epoch) - .cloned() - .collect(); + state.previous_epoch_attestations = vec![]; } diff --git a/eth2/state_processing/src/per_epoch_processing/get_attestation_participants.rs b/eth2/state_processing/src/per_epoch_processing/get_attestation_participants.rs new file mode 100644 index 000000000..d822e434d --- /dev/null +++ b/eth2/state_processing/src/per_epoch_processing/get_attestation_participants.rs @@ -0,0 +1,37 @@ +use types::{beacon_state::helpers::verify_bitfield_length, *}; + +/// Returns validator indices which participated in the attestation. +/// +/// Spec v0.5.0 +pub fn get_attestation_participants( + state: &BeaconState, + attestation_data: &AttestationData, + bitfield: &Bitfield, + spec: &ChainSpec, +) -> Result, BeaconStateError> { + let epoch = attestation_data.slot.epoch(spec.slots_per_epoch); + + let crosslink_committee = + state.get_crosslink_committee_for_shard(epoch, attestation_data.shard, spec)?; + + if crosslink_committee.slot != attestation_data.slot { + return Err(BeaconStateError::NoCommitteeForShard); + } + + let committee = &crosslink_committee.committee; + + if !verify_bitfield_length(&bitfield, committee.len()) { + return Err(BeaconStateError::InvalidBitfield); + } + + let mut participants = Vec::with_capacity(committee.len()); + for (i, validator_index) in committee.iter().enumerate() { + match bitfield.get(i) { + Ok(bit) if bit == true => participants.push(*validator_index), + _ => {} + } + } + participants.shrink_to_fit(); + + Ok(participants) +} diff --git a/eth2/state_processing/src/per_epoch_processing/inclusion_distance.rs b/eth2/state_processing/src/per_epoch_processing/inclusion_distance.rs index 243dc67f0..b52485947 100644 --- a/eth2/state_processing/src/per_epoch_processing/inclusion_distance.rs +++ b/eth2/state_processing/src/per_epoch_processing/inclusion_distance.rs @@ -1,12 +1,11 @@ use super::errors::InclusionError; +use super::get_attestation_participants::get_attestation_participants; use types::*; /// Returns the distance between the first included attestation for some validator and this /// slot. /// -/// Note: In the spec this is defined "inline", not as a helper function. -/// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn inclusion_distance( state: &BeaconState, attestations: &[&PendingAttestation], @@ -19,9 +18,7 @@ pub fn inclusion_distance( /// Returns the slot of the earliest included attestation for some validator. /// -/// Note: In the spec this is defined "inline", not as a helper function. -/// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn inclusion_slot( state: &BeaconState, attestations: &[&PendingAttestation], @@ -34,9 +31,7 @@ pub fn inclusion_slot( /// Finds the earliest included attestation for some validator. /// -/// Note: In the spec this is defined "inline", not as a helper function. 
-///
-/// Spec v0.4.0
+/// Spec v0.5.0
 fn earliest_included_attestation(
     state: &BeaconState,
     attestations: &[&PendingAttestation],
@@ -47,7 +42,7 @@ fn earliest_included_attestation(
 
     for (i, a) in attestations.iter().enumerate() {
         let participants =
-            state.get_attestation_participants(&a.data, &a.aggregation_bitfield, spec)?;
+            get_attestation_participants(state, &a.data, &a.aggregation_bitfield, spec)?;
         if participants.iter().any(|i| *i == validator_index) {
             included_attestations.push(i);
         }
diff --git a/eth2/state_processing/src/per_epoch_processing/process_validator_registry.rs b/eth2/state_processing/src/per_epoch_processing/process_validator_registry.rs
new file mode 100644
index 000000000..c830bfc24
--- /dev/null
+++ b/eth2/state_processing/src/per_epoch_processing/process_validator_registry.rs
@@ -0,0 +1,72 @@
+use super::Error;
+use types::*;
+
+/// Performs a validator registry update, if required.
+///
+/// Spec v0.4.0
+pub fn process_validator_registry(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> {
+    let current_epoch = state.current_epoch(spec);
+    let next_epoch = state.next_epoch(spec);
+
+    state.previous_shuffling_epoch = state.current_shuffling_epoch;
+    state.previous_shuffling_start_shard = state.current_shuffling_start_shard;
+
+    state.previous_shuffling_seed = state.current_shuffling_seed;
+
+    if should_update_validator_registry(state, spec)? {
+        state.update_validator_registry(spec);
+
+        state.current_shuffling_epoch = next_epoch;
+        state.current_shuffling_start_shard = (state.current_shuffling_start_shard
+            + spec.get_epoch_committee_count(
+                state
+                    .get_active_validator_indices(current_epoch, spec)?
+                    .len(),
+            ) as u64)
+            % spec.shard_count;
+        state.current_shuffling_seed = state.generate_seed(state.current_shuffling_epoch, spec)?
+    } else {
+        let epochs_since_last_registry_update =
+            current_epoch - state.validator_registry_update_epoch;
+        if (epochs_since_last_registry_update > 1)
+            & epochs_since_last_registry_update.is_power_of_two()
+        {
+            state.current_shuffling_epoch = next_epoch;
+            state.current_shuffling_seed =
+                state.generate_seed(state.current_shuffling_epoch, spec)?
+        }
+    }
+
+    state.process_slashings(spec);
+    state.process_exit_queue(spec);
+
+    Ok(())
+}
+
+/// Returns `true` if the validator registry should be updated during an epoch processing.
+///
+/// Spec v0.5.0
+pub fn should_update_validator_registry(
+    state: &BeaconState,
+    spec: &ChainSpec,
+) -> Result<bool, Error> {
+    if state.finalized_epoch <= state.validator_registry_update_epoch {
+        return Ok(false);
+    }
+
+    let num_active_validators = state
+        .get_active_validator_indices(state.current_epoch(spec), spec)?
+ .len(); + let current_epoch_committee_count = spec.get_epoch_committee_count(num_active_validators); + + for shard in (0..current_epoch_committee_count) + .into_iter() + .map(|i| (state.current_shuffling_start_shard + i as u64) % spec.shard_count) + { + if state.latest_crosslinks[shard as usize].epoch <= state.validator_registry_update_epoch { + return Ok(false); + } + } + + Ok(true) +} diff --git a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs index f76900f3b..bcbca8244 100644 --- a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs +++ b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs @@ -1,3 +1,4 @@ +use super::get_attestation_participants::get_attestation_participants; use super::WinningRootHashSet; use types::*; @@ -147,8 +148,8 @@ impl ValidatorStatuses { /// - Active validators /// - Total balances for the current and previous epochs. /// - /// Spec v0.4.0 - pub fn new(state: &BeaconState, spec: &ChainSpec) -> Self { + /// Spec v0.5.0 + pub fn new(state: &BeaconState, spec: &ChainSpec) -> Result { let mut statuses = Vec::with_capacity(state.validator_registry.len()); let mut total_balances = TotalBalances::default(); @@ -157,37 +158,40 @@ impl ValidatorStatuses { if validator.is_active_at(state.current_epoch(spec)) { status.is_active_in_current_epoch = true; - total_balances.current_epoch += state.get_effective_balance(i, spec); + total_balances.current_epoch += state.get_effective_balance(i, spec)?; } if validator.is_active_at(state.previous_epoch(spec)) { status.is_active_in_previous_epoch = true; - total_balances.previous_epoch += state.get_effective_balance(i, spec); + total_balances.previous_epoch += state.get_effective_balance(i, spec)?; } statuses.push(status); } - Self { + Ok(Self { statuses, total_balances, - } + }) } /// Process some attestations from the given `state` updating the `statuses` and /// `total_balances` fields. /// - /// Spec v0.4.0 + /// Spec v0.5.0 pub fn process_attestations( &mut self, state: &BeaconState, - attestations: &[PendingAttestation], spec: &ChainSpec, ) -> Result<(), BeaconStateError> { - for a in attestations { + for a in state + .previous_epoch_attestations + .iter() + .chain(state.current_epoch_attestations.iter()) + { let attesting_indices = - state.get_attestation_participants(&a.data, &a.aggregation_bitfield, spec)?; - let attesting_balance = state.get_total_balance(&attesting_indices, spec); + get_attestation_participants(state, &a.data, &a.aggregation_bitfield, spec)?; + let attesting_balance = state.get_total_balance(&attesting_indices, spec)?; let mut status = AttesterStatus::default(); @@ -206,10 +210,15 @@ impl ValidatorStatuses { status.is_previous_epoch_attester = true; // The inclusion slot and distance are only required for previous epoch attesters. + let relative_epoch = RelativeEpoch::from_slot(state.slot, a.data.slot, spec)?; status.inclusion_info = InclusionInfo { slot: a.inclusion_slot, distance: inclusion_distance(a), - proposer_index: state.get_beacon_proposer_index(a.inclusion_slot, spec)?, + proposer_index: state.get_beacon_proposer_index( + a.inclusion_slot, + relative_epoch, + spec, + )?, }; if has_common_epoch_boundary_root(a, state, state.previous_epoch(spec), spec)? { @@ -235,7 +244,7 @@ impl ValidatorStatuses { /// Update the `statuses` for each validator based upon whether or not they attested to the /// "winning" shard block root for the previous epoch. 
/// - /// Spec v0.4.0 + /// Spec v0.5.0 pub fn process_winning_roots( &mut self, state: &BeaconState, @@ -248,11 +257,10 @@ impl ValidatorStatuses { state.get_crosslink_committees_at_slot(slot, spec)?; // Loop through each committee in the slot. - for (crosslink_committee, shard) in crosslink_committees_at_slot { + for c in crosslink_committees_at_slot { // If there was some winning crosslink root for the committee's shard. - if let Some(winning_root) = winning_roots.get(&shard) { - let total_committee_balance = - state.get_total_balance(&crosslink_committee, spec); + if let Some(winning_root) = winning_roots.get(&c.shard) { + let total_committee_balance = state.get_total_balance(&c.committee, spec)?; for &validator_index in &winning_root.attesting_validator_indices { // Take note of the balance information for the winning root, it will be // used later to calculate rewards for that validator. @@ -272,14 +280,14 @@ impl ValidatorStatuses { /// Returns the distance between when the attestation was created and when it was included in a /// block. /// -/// Spec v0.4.0 +/// Spec v0.5.0 fn inclusion_distance(a: &PendingAttestation) -> Slot { a.inclusion_slot - a.data.slot } /// Returns `true` if some `PendingAttestation` is from the supplied `epoch`. /// -/// Spec v0.4.0 +/// Spec v0.5.0 fn is_from_epoch(a: &PendingAttestation, epoch: Epoch, spec: &ChainSpec) -> bool { a.data.slot.epoch(spec.slots_per_epoch) == epoch } @@ -287,7 +295,7 @@ fn is_from_epoch(a: &PendingAttestation, epoch: Epoch, spec: &ChainSpec) -> bool /// Returns `true` if a `PendingAttestation` and `BeaconState` share the same beacon block hash for /// the first slot of the given epoch. /// -/// Spec v0.4.0 +/// Spec v0.5.0 fn has_common_epoch_boundary_root( a: &PendingAttestation, state: &BeaconState, @@ -295,25 +303,21 @@ fn has_common_epoch_boundary_root( spec: &ChainSpec, ) -> Result { let slot = epoch.start_slot(spec.slots_per_epoch); - let state_boundary_root = *state - .get_block_root(slot, spec) - .ok_or_else(|| BeaconStateError::InsufficientBlockRoots)?; + let state_boundary_root = *state.get_block_root(slot, spec)?; - Ok(a.data.epoch_boundary_root == state_boundary_root) + Ok(a.data.target_root == state_boundary_root) } /// Returns `true` if a `PendingAttestation` and `BeaconState` share the same beacon block hash for /// the current slot of the `PendingAttestation`. /// -/// Spec v0.4.0 +/// Spec v0.5.0 fn has_common_beacon_block_root( a: &PendingAttestation, state: &BeaconState, spec: &ChainSpec, ) -> Result { - let state_block_root = *state - .get_block_root(a.data.slot, spec) - .ok_or_else(|| BeaconStateError::InsufficientBlockRoots)?; + let state_block_root = *state.get_block_root(a.data.slot, spec)?; Ok(a.data.beacon_block_root == state_block_root) } diff --git a/eth2/state_processing/src/per_epoch_processing/winning_root.rs b/eth2/state_processing/src/per_epoch_processing/winning_root.rs index 07678f93b..97cff3e13 100644 --- a/eth2/state_processing/src/per_epoch_processing/winning_root.rs +++ b/eth2/state_processing/src/per_epoch_processing/winning_root.rs @@ -1,3 +1,4 @@ +use super::get_attestation_participants::get_attestation_participants; use std::collections::HashSet; use std::iter::FromIterator; use types::*; @@ -13,14 +14,14 @@ impl WinningRoot { /// Returns `true` if `self` is a "better" candidate than `other`. /// /// A winning root is "better" than another if it has a higher `total_attesting_balance`. Ties - /// are broken by favouring the lower `crosslink_data_root` value. 
+ /// are broken by favouring the higher `crosslink_data_root` value. /// - /// Spec v0.4.0 + /// Spec v0.5.0 pub fn is_better_than(&self, other: &Self) -> bool { if self.total_attesting_balance > other.total_attesting_balance { true } else if self.total_attesting_balance == other.total_attesting_balance { - self.crosslink_data_root < other.crosslink_data_root + self.crosslink_data_root > other.crosslink_data_root } else { false } @@ -33,22 +34,21 @@ impl WinningRoot { /// The `WinningRoot` object also contains additional fields that are useful in later stages of /// per-epoch processing. /// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn winning_root( state: &BeaconState, shard: u64, - current_epoch_attestations: &[&PendingAttestation], - previous_epoch_attestations: &[&PendingAttestation], spec: &ChainSpec, ) -> Result, BeaconStateError> { let mut winning_root: Option = None; let crosslink_data_roots: HashSet = HashSet::from_iter( - previous_epoch_attestations + state + .previous_epoch_attestations .iter() - .chain(current_epoch_attestations.iter()) + .chain(state.current_epoch_attestations.iter()) .filter_map(|a| { - if a.data.shard == shard { + if is_eligible_for_winning_root(state, a, shard) { Some(a.data.crosslink_data_root) } else { None @@ -57,18 +57,17 @@ pub fn winning_root( ); for crosslink_data_root in crosslink_data_roots { - let attesting_validator_indices = get_attesting_validator_indices( - state, - shard, - current_epoch_attestations, - previous_epoch_attestations, - &crosslink_data_root, - spec, - )?; + let attesting_validator_indices = + get_attesting_validator_indices(state, shard, &crosslink_data_root, spec)?; - let total_attesting_balance: u64 = attesting_validator_indices - .iter() - .fold(0, |acc, i| acc + state.get_effective_balance(*i, spec)); + let total_attesting_balance: u64 = + attesting_validator_indices + .iter() + .try_fold(0_u64, |acc, i| { + state + .get_effective_balance(*i, spec) + .and_then(|bal| Ok(acc + bal)) + })?; let candidate = WinningRoot { crosslink_data_root, @@ -88,25 +87,36 @@ pub fn winning_root( Ok(winning_root) } -/// Returns all indices which voted for a given crosslink. May contain duplicates. +/// Returns `true` if pending attestation `a` is eligible to become a winning root. /// -/// Spec v0.4.0 +/// Spec v0.5.0 +fn is_eligible_for_winning_root(state: &BeaconState, a: &PendingAttestation, shard: Shard) -> bool { + if shard >= state.latest_crosslinks.len() as u64 { + return false; + } + + a.data.previous_crosslink == state.latest_crosslinks[shard as usize] +} + +/// Returns all indices which voted for a given crosslink. Does not contain duplicates. +/// +/// Spec v0.5.0 fn get_attesting_validator_indices( state: &BeaconState, shard: u64, - current_epoch_attestations: &[&PendingAttestation], - previous_epoch_attestations: &[&PendingAttestation], crosslink_data_root: &Hash256, spec: &ChainSpec, ) -> Result, BeaconStateError> { let mut indices = vec![]; - for a in current_epoch_attestations + for a in state + .current_epoch_attestations .iter() - .chain(previous_epoch_attestations.iter()) + .chain(state.previous_epoch_attestations.iter()) { if (a.data.shard == shard) && (a.data.crosslink_data_root == *crosslink_data_root) { - indices.append(&mut state.get_attestation_participants( + indices.append(&mut get_attestation_participants( + state, &a.data, &a.aggregation_bitfield, spec, @@ -114,5 +124,41 @@ fn get_attesting_validator_indices( } } + // Sort the list (required for dedup). 
"Unstable" means the sort may re-order equal elements, + // this causes no issue here. + // + // These sort + dedup ops are potentially good CPU time optimisation targets. + indices.sort_unstable(); + // Remove all duplicate indices (requires a sorted list). + indices.dedup(); + Ok(indices) } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn is_better_than() { + let worse = WinningRoot { + crosslink_data_root: Hash256::from_slice(&[1; 32]), + attesting_validator_indices: vec![], + total_attesting_balance: 42, + }; + + let better = WinningRoot { + crosslink_data_root: Hash256::from_slice(&[2; 32]), + ..worse.clone() + }; + + assert!(better.is_better_than(&worse)); + + let better = WinningRoot { + total_attesting_balance: worse.total_attesting_balance + 1, + ..worse.clone() + }; + + assert!(better.is_better_than(&worse)); + } +} diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 7c77a5a3e..a90f09759 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -1,8 +1,7 @@ use self::epoch_cache::EpochCache; use crate::test_utils::TestRandom; -use crate::{validator_registry::get_active_validator_indices, *}; +use crate::*; use int_to_bytes::int_to_bytes32; -use log::trace; use pubkey_cache::PubkeyCache; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; @@ -39,6 +38,7 @@ pub enum Error { InsufficientAttestations, InsufficientCommittees, InsufficientSlashedBalances, + NoCommitteeForShard, EpochCacheUninitialized(RelativeEpoch), PubkeyCacheInconsistent, PubkeyCacheIncomplete { @@ -349,17 +349,49 @@ impl BeaconState { self.current_epoch(spec) + 1 } + /// Returns the active validator indices for the given epoch, assuming there is no validator + /// registry update in the next epoch. + /// + /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. + /// + /// Spec v0.5.0 + pub fn get_active_validator_indices( + &self, + epoch: Epoch, + spec: &ChainSpec, + ) -> Result<&[usize], Error> { + // If the slot is in the next epoch, assume there was no validator registry update. + let relative_epoch = + match RelativeEpoch::from_epoch(self.slot.epoch(spec.slots_per_epoch), epoch) { + Err(RelativeEpochError::AmbiguiousNextEpoch) => { + Ok(RelativeEpoch::NextWithoutRegistryChange) + } + e => e, + }?; + + let cache = self.cache(relative_epoch, spec)?; + + Ok(&cache.active_validator_indices) + } + /// Returns the crosslink committees for some slot. /// /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. /// - /// Spec v0.4.0 + /// Spec v0.5.0 pub fn get_crosslink_committees_at_slot( &self, slot: Slot, - relative_epoch: RelativeEpoch, spec: &ChainSpec, ) -> Result<&Vec, Error> { + // If the slot is in the next epoch, assume there was no validator registry update. + let relative_epoch = match RelativeEpoch::from_slot(self.slot, slot, spec) { + Err(RelativeEpochError::AmbiguiousNextEpoch) => { + Ok(RelativeEpoch::NextWithoutRegistryChange) + } + e => e, + }?; + let cache = self.cache(relative_epoch, spec)?; Ok(cache @@ -367,15 +399,46 @@ impl BeaconState { .ok_or_else(|| Error::SlotOutOfBounds)?) } + /// Returns the crosslink committees for some shard in an epoch. + /// + /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. 
+ /// + /// Spec v0.4.0 + pub fn get_crosslink_committee_for_shard( + &self, + epoch: Epoch, + shard: Shard, + spec: &ChainSpec, + ) -> Result<&CrosslinkCommittee, Error> { + // If the slot is in the next epoch, assume there was no validator registry update. + let relative_epoch = match RelativeEpoch::from_epoch(self.current_epoch(spec), epoch) { + Err(RelativeEpochError::AmbiguiousNextEpoch) => { + Ok(RelativeEpoch::NextWithoutRegistryChange) + } + e => e, + }?; + + let cache = self.cache(relative_epoch, spec)?; + + Ok(cache + .get_crosslink_committee_for_shard(shard, spec) + .ok_or_else(|| Error::NoCommitteeForShard)?) + } + /// Return the block root at a recent `slot`. /// /// Spec v0.5.0 - pub fn get_block_root(&self, slot: Slot, spec: &ChainSpec) -> Option<&Hash256> { + pub fn get_block_root( + &self, + slot: Slot, + spec: &ChainSpec, + ) -> Result<&Hash256, BeaconStateError> { if (self.slot <= slot + spec.slots_per_historical_root as u64) && (slot < self.slot) { self.latest_block_roots .get(slot.as_usize() % spec.slots_per_historical_root) + .ok_or_else(|| Error::InsufficientBlockRoots) } else { - None + Err(Error::EpochOutOfBounds) } } @@ -476,12 +539,12 @@ impl BeaconState { relative_epoch: RelativeEpoch, spec: &ChainSpec, ) -> Result { - let committees = self.get_crosslink_committees_at_slot(slot, relative_epoch, spec)?; - trace!( - "get_beacon_proposer_index: slot: {}, committees_count: {}", - slot, - committees.len() - ); + let cache = self.cache(relative_epoch, spec)?; + + let committees = cache + .get_crosslink_committees_at_slot(slot, spec) + .ok_or_else(|| Error::SlotOutOfBounds)?; + committees .first() .ok_or(Error::InsufficientValidators) @@ -751,13 +814,14 @@ impl BeaconState { .ok_or_else(|| Error::UnknownValidator)?) } - /// Process the slashings. + /// Process slashings. + /// + /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. /// /// Spec v0.4.0 pub fn process_slashings(&mut self, spec: &ChainSpec) -> Result<(), Error> { let current_epoch = self.current_epoch(spec); - let active_validator_indices = - get_active_validator_indices(&self.validator_registry, current_epoch); + let active_validator_indices = self.get_active_validator_indices(current_epoch, spec)?; let total_balance = self.get_total_balance(&active_validator_indices[..], spec)?; for (index, validator) in self.validator_registry.iter().enumerate() { @@ -818,11 +882,12 @@ impl BeaconState { /// Update validator registry, activating/exiting validators if possible. /// + /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. + /// /// Spec v0.4.0 pub fn update_validator_registry(&mut self, spec: &ChainSpec) -> Result<(), Error> { let current_epoch = self.current_epoch(spec); - let active_validator_indices = - get_active_validator_indices(&self.validator_registry, current_epoch); + let active_validator_indices = self.get_active_validator_indices(current_epoch, spec)?; let total_balance = self.get_total_balance(&active_validator_indices[..], spec)?; let max_balance_churn = std::cmp::max( @@ -867,54 +932,32 @@ impl BeaconState { /// Iterate through the validator registry and eject active validators with balance below /// ``EJECTION_BALANCE``. 
/// - /// Spec v0.4.0 - pub fn process_ejections(&mut self, spec: &ChainSpec) { - for validator_index in - get_active_validator_indices(&self.validator_registry, self.current_epoch(spec)) - { - if self.validator_balances[validator_index] < spec.ejection_balance { - self.exit_validator(validator_index, spec) - } + /// Spec v0.5.0 + pub fn process_ejections(&mut self, spec: &ChainSpec) -> Result<(), Error> { + // There is an awkward double (triple?) loop here because we can't loop across the borrowed + // active validator indices and mutate state in the one loop. + let exitable: Vec = self + .get_active_validator_indices(self.current_epoch(spec), spec)? + .iter() + .filter_map(|&i| { + if self.validator_balances[i as usize] < spec.ejection_balance { + Some(i) + } else { + None + } + }) + .collect(); + + for validator_index in exitable { + self.exit_validator(validator_index, spec) } - } - /// Returns the penality that should be applied to some validator for inactivity. - /// - /// Note: this is defined "inline" in the spec, not as a helper function. - /// - /// Spec v0.4.0 - pub fn inactivity_penalty( - &self, - validator_index: usize, - epochs_since_finality: Epoch, - base_reward_quotient: u64, - spec: &ChainSpec, - ) -> Result { - let effective_balance = self.get_effective_balance(validator_index, spec)?; - let base_reward = self.base_reward(validator_index, base_reward_quotient, spec)?; - Ok(base_reward - + effective_balance * epochs_since_finality.as_u64() - / spec.inactivity_penalty_quotient - / 2) - } - - /// Returns the base reward for some validator. - /// - /// Note: In the spec this is defined "inline", not as a helper function. - /// - /// Spec v0.4.0 - pub fn base_reward( - &self, - validator_index: usize, - base_reward_quotient: u64, - spec: &ChainSpec, - ) -> Result { - Ok(self.get_effective_balance(validator_index, spec)? / base_reward_quotient / 5) + Ok(()) } /// Return the combined effective balance of an array of validators. /// - /// Spec v0.4.0 + /// Spec v0.5.0 pub fn get_total_balance( &self, validator_indices: &[usize], diff --git a/eth2/types/src/beacon_state/epoch_cache.rs b/eth2/types/src/beacon_state/epoch_cache.rs index 6312ea5a5..0759a7617 100644 --- a/eth2/types/src/beacon_state/epoch_cache.rs +++ b/eth2/types/src/beacon_state/epoch_cache.rs @@ -13,7 +13,9 @@ pub struct EpochCache { /// Maps validator index to a slot, shard and committee index for attestation. pub attestation_duties: Vec>, /// Maps a shard to an index of `self.committees`. - pub shard_committee_indices: Vec<(Slot, usize)>, + pub shard_committee_indices: Vec>, + /// Indices of all active validators in the epoch + pub active_validator_indices: Vec, } impl EpochCache { @@ -31,18 +33,18 @@ impl EpochCache { let builder = match relative_epoch { RelativeEpoch::Previous => EpochCrosslinkCommitteesBuilder::for_previous_epoch( state, - active_validator_indices, + active_validator_indices.clone(), spec, ), RelativeEpoch::Current => EpochCrosslinkCommitteesBuilder::for_current_epoch( state, - active_validator_indices, + active_validator_indices.clone(), spec, ), RelativeEpoch::NextWithRegistryChange => { EpochCrosslinkCommitteesBuilder::for_next_epoch( state, - active_validator_indices, + active_validator_indices.clone(), true, spec, )? @@ -50,7 +52,7 @@ impl EpochCache { RelativeEpoch::NextWithoutRegistryChange => { EpochCrosslinkCommitteesBuilder::for_next_epoch( state, - active_validator_indices, + active_validator_indices.clone(), false, spec, )? @@ -64,7 +66,7 @@ impl EpochCache { // 2. 
`shard_committee_indices`: maps `Shard` into a `CrosslinkCommittee` in // `EpochCrosslinkCommittees`. let mut attestation_duties = vec![None; state.validator_registry.len()]; - let mut shard_committee_indices = vec![(Slot::default(), 0); spec.shard_count as usize]; + let mut shard_committee_indices = vec![None; spec.shard_count as usize]; for (i, slot_committees) in epoch_crosslink_committees .crosslink_committees .iter() @@ -75,7 +77,7 @@ impl EpochCache { for (j, crosslink_committee) in slot_committees.iter().enumerate() { let shard = crosslink_committee.shard; - shard_committee_indices[shard as usize] = (slot, j); + shard_committee_indices[shard as usize] = Some((slot, j)); for (k, validator_index) in crosslink_committee.committee.iter().enumerate() { let attestation_duty = AttestationDuty { @@ -93,6 +95,7 @@ impl EpochCache { epoch_crosslink_committees, attestation_duties, shard_committee_indices, + active_validator_indices, }) } @@ -110,9 +113,13 @@ impl EpochCache { shard: Shard, spec: &ChainSpec, ) -> Option<&CrosslinkCommittee> { - let (slot, committee) = self.shard_committee_indices.get(shard as usize)?; - let slot_committees = self.get_crosslink_committees_at_slot(*slot, spec)?; - slot_committees.get(*committee) + if shard > self.shard_committee_indices.len() as u64 { + None + } else { + let (slot, committee) = self.shard_committee_indices[shard as usize]?; + let slot_committees = self.get_crosslink_committees_at_slot(slot, spec)?; + slot_committees.get(committee) + } } } @@ -261,13 +268,14 @@ impl EpochCrosslinkCommitteesBuilder { let committees_per_slot = (self.committees_per_epoch / spec.slots_per_epoch) as usize; - for i in 0..spec.slots_per_epoch as usize { + for (i, slot) in self.epoch.slot_iter(spec.slots_per_epoch).enumerate() { for j in (0..committees.len()) .into_iter() .skip(i * committees_per_slot) .take(committees_per_slot) { let crosslink_committee = CrosslinkCommittee { + slot, shard, committee: committees.remove(j), }; diff --git a/eth2/types/src/crosslink_committee.rs b/eth2/types/src/crosslink_committee.rs index 06a6562fc..af1778a1b 100644 --- a/eth2/types/src/crosslink_committee.rs +++ b/eth2/types/src/crosslink_committee.rs @@ -4,6 +4,7 @@ use ssz_derive::{Decode, Encode, TreeHash}; #[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize, Decode, Encode, TreeHash)] pub struct CrosslinkCommittee { + pub slot: Slot, pub shard: Shard, pub committee: Vec, } diff --git a/eth2/types/src/test_utils/testing_beacon_block_builder.rs b/eth2/types/src/test_utils/testing_beacon_block_builder.rs index 402bd79d6..6e48c8c17 100644 --- a/eth2/types/src/test_utils/testing_beacon_block_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_block_builder.rs @@ -109,10 +109,7 @@ impl TestingBeaconBlockBuilder { break; } - let relative_epoch = RelativeEpoch::from_slot(state.slot, slot, spec).unwrap(); - for crosslink_committee in - state.get_crosslink_committees_at_slot(slot, relative_epoch, spec)? - { + for crosslink_committee in state.get_crosslink_committees_at_slot(slot, spec)? 
{ if attestations_added >= num_attestations { break; } diff --git a/eth2/types/src/test_utils/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/testing_beacon_state_builder.rs index 9e613f0e9..54e2fbe96 100644 --- a/eth2/types/src/test_utils/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_state_builder.rs @@ -223,9 +223,8 @@ impl TestingBeaconStateBuilder { for slot in first_slot..last_slot + 1 { let slot = Slot::from(slot); - let relative_epoch = RelativeEpoch::from_slot(state.slot, slot, spec).unwrap(); let committees = state - .get_crosslink_committees_at_slot(slot, relative_epoch, spec) + .get_crosslink_committees_at_slot(slot, spec) .unwrap() .clone(); From 6b3cdc34dd4197a87468ba67549da376a678c834 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 17 Mar 2019 17:50:22 +1100 Subject: [PATCH 094/154] Update block proposer to v0.5.0 --- eth2/block_proposer/src/lib.rs | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/eth2/block_proposer/src/lib.rs b/eth2/block_proposer/src/lib.rs index 5cddbaedc..e62c4b71d 100644 --- a/eth2/block_proposer/src/lib.rs +++ b/eth2/block_proposer/src/lib.rs @@ -4,7 +4,7 @@ mod traits; use slot_clock::SlotClock; use ssz::{SignedRoot, TreeHash}; use std::sync::Arc; -use types::{BeaconBlock, ChainSpec, Domain, Hash256, Proposal, Slot}; +use types::{BeaconBlock, ChainSpec, Domain, Slot}; pub use self::traits::{ BeaconNode, BeaconNodeError, DutiesReader, DutiesReaderError, PublishOutcome, Signer, @@ -158,7 +158,7 @@ impl BlockProducer BlockProducer Option { self.store_produce(&block); - let proposal = Proposal { - slot: block.slot, - shard: self.spec.beacon_chain_shard_number, - block_root: Hash256::from_slice(&block.signed_root()[..]), - signature: block.signature.clone(), - }; - match self .signer - .sign_block_proposal(&proposal.signed_root()[..], domain) + .sign_block_proposal(&block.signed_root()[..], domain) { None => None, Some(signature) => { From 8b08e9dd2e2fe90f3e5c9269397cbf82e24bd413 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 17 Mar 2019 17:54:43 +1100 Subject: [PATCH 095/154] Remove block and state readers from db crate --- .../db/src/stores/beacon_block_store.rs | 37 +++++-------------- .../db/src/stores/beacon_state_store.rs | 22 +---------- 2 files changed, 12 insertions(+), 47 deletions(-) diff --git a/beacon_node/db/src/stores/beacon_block_store.rs b/beacon_node/db/src/stores/beacon_block_store.rs index 92d296c37..e2e16e60b 100644 --- a/beacon_node/db/src/stores/beacon_block_store.rs +++ b/beacon_node/db/src/stores/beacon_block_store.rs @@ -2,7 +2,7 @@ use super::BLOCKS_DB_COLUMN as DB_COLUMN; use super::{ClientDB, DBError}; use ssz::Decodable; use std::sync::Arc; -use types::{readers::BeaconBlockReader, BeaconBlock, Hash256, Slot}; +use types::{BeaconBlock, Hash256, Slot}; #[derive(Clone, Debug, PartialEq)] pub enum BeaconBlockAtSlotError { @@ -38,23 +38,6 @@ impl BeaconBlockStore { } } - /// Retuns an object implementing `BeaconBlockReader`, or `None` (if hash not known). - /// - /// Note: Presently, this function fully deserializes a `BeaconBlock` and returns that. In the - /// future, it would be ideal to return an object capable of reading directly from serialized - /// SSZ bytes. - pub fn get_reader(&self, hash: &Hash256) -> Result, DBError> { - match self.get(&hash)? 
{ - None => Ok(None), - Some(ssz) => { - let (block, _) = BeaconBlock::ssz_decode(&ssz, 0).map_err(|_| DBError { - message: "Bad BeaconBlock SSZ.".to_string(), - })?; - Ok(Some(block)) - } - } - } - /// Retrieve the block at a slot given a "head_hash" and a slot. /// /// A "head_hash" must be a block hash with a slot number greater than or equal to the desired @@ -72,17 +55,17 @@ impl BeaconBlockStore { &self, head_hash: &Hash256, slot: Slot, - ) -> Result, BeaconBlockAtSlotError> { + ) -> Result, BeaconBlockAtSlotError> { let mut current_hash = *head_hash; loop { - if let Some(block_reader) = self.get_reader(¤t_hash)? { - if block_reader.slot() == slot { - break Ok(Some((current_hash, block_reader))); - } else if block_reader.slot() < slot { + if let Some(block) = self.get_deserialized(¤t_hash)? { + if block.slot == slot { + break Ok(Some((current_hash, block))); + } else if block.slot < slot { break Ok(None); } else { - current_hash = block_reader.parent_root(); + current_hash = block.previous_block_root; } } else { break Err(BeaconBlockAtSlotError::UnknownBeaconBlock(current_hash)); @@ -228,7 +211,7 @@ mod tests { for i in 0..block_count { let mut block = BeaconBlock::random_for_test(&mut rng); - block.parent_root = parent_hashes[i]; + block.previous_block_root = parent_hashes[i]; block.slot = slots[i]; let ssz = ssz_encode(&block); @@ -240,12 +223,12 @@ mod tests { // Test that certain slots can be reached from certain hashes. let test_cases = vec![(4, 4), (4, 3), (4, 2), (4, 1), (4, 0)]; for (hashes_index, slot_index) in test_cases { - let (matched_block_hash, reader) = bs + let (matched_block_hash, block) = bs .block_at_slot(&hashes[hashes_index], slots[slot_index]) .unwrap() .unwrap(); assert_eq!(matched_block_hash, hashes[slot_index]); - assert_eq!(reader.slot(), slots[slot_index]); + assert_eq!(block.slot, slots[slot_index]); } let ssz = bs.block_at_slot(&hashes[4], Slot::new(2)).unwrap(); diff --git a/beacon_node/db/src/stores/beacon_state_store.rs b/beacon_node/db/src/stores/beacon_state_store.rs index ed22696cb..fd6ff569a 100644 --- a/beacon_node/db/src/stores/beacon_state_store.rs +++ b/beacon_node/db/src/stores/beacon_state_store.rs @@ -2,7 +2,7 @@ use super::STATES_DB_COLUMN as DB_COLUMN; use super::{ClientDB, DBError}; use ssz::Decodable; use std::sync::Arc; -use types::{readers::BeaconStateReader, BeaconState, Hash256}; +use types::{BeaconState, Hash256}; pub struct BeaconStateStore where @@ -30,23 +30,6 @@ impl BeaconStateStore { } } } - - /// Retuns an object implementing `BeaconStateReader`, or `None` (if hash not known). - /// - /// Note: Presently, this function fully deserializes a `BeaconState` and returns that. In the - /// future, it would be ideal to return an object capable of reading directly from serialized - /// SSZ bytes. - pub fn get_reader(&self, hash: &Hash256) -> Result, DBError> { - match self.get(&hash)? 
{ - None => Ok(None), - Some(ssz) => { - let (state, _) = BeaconState::ssz_decode(&ssz, 0).map_err(|_| DBError { - message: "Bad State SSZ.".to_string(), - })?; - Ok(Some(state)) - } - } - } } #[cfg(test)] @@ -72,8 +55,7 @@ mod tests { store.put(&state_root, &ssz_encode(&state)).unwrap(); - let reader = store.get_reader(&state_root).unwrap().unwrap(); - let decoded = reader.into_beacon_state().unwrap(); + let decoded = store.get_deserialized(&state_root).unwrap().unwrap(); assert_eq!(state, decoded); } From d94540c85c6f98068c6a5cadac312c55eff22353 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 17 Mar 2019 17:59:29 +1100 Subject: [PATCH 096/154] Remove readers from fork choice crate. --- eth2/fork_choice/src/bitwise_lmd_ghost.rs | 24 ++++++++++----------- eth2/fork_choice/src/optimized_lmd_ghost.rs | 24 ++++++++++----------- eth2/fork_choice/src/slow_lmd_ghost.rs | 15 ++++++------- 3 files changed, 31 insertions(+), 32 deletions(-) diff --git a/eth2/fork_choice/src/bitwise_lmd_ghost.rs b/eth2/fork_choice/src/bitwise_lmd_ghost.rs index d7b10015b..9410fd203 100644 --- a/eth2/fork_choice/src/bitwise_lmd_ghost.rs +++ b/eth2/fork_choice/src/bitwise_lmd_ghost.rs @@ -11,8 +11,8 @@ use log::{debug, trace}; use std::collections::HashMap; use std::sync::Arc; use types::{ - readers::BeaconBlockReader, validator_registry::get_active_validator_indices, BeaconBlock, - ChainSpec, Hash256, Slot, SlotHeight, + validator_registry::get_active_validator_indices, BeaconBlock, ChainSpec, Hash256, Slot, + SlotHeight, }; //TODO: Pruning - Children @@ -255,17 +255,17 @@ impl ForkChoice for BitwiseLMDGhost { // get the height of the parent let parent_height = self .block_store - .get_deserialized(&block.parent_root)? - .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(block.parent_root))? - .slot() + .get_deserialized(&block.previous_block_root)? + .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(block.previous_block_root))? + .slot .height(spec.genesis_slot); - let parent_hash = &block.parent_root; + let parent_hash = &block.previous_block_root; // add the new block to the children of parent (*self .children - .entry(block.parent_root) + .entry(block.previous_block_root) .or_insert_with(|| vec![])) .push(block_hash.clone()); @@ -309,7 +309,7 @@ impl ForkChoice for BitwiseLMDGhost { .block_store .get_deserialized(&target_block_root)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*target_block_root))? - .slot() + .slot .height(spec.genesis_slot); // get the height of the past target block @@ -317,7 +317,7 @@ impl ForkChoice for BitwiseLMDGhost { .block_store .get_deserialized(&attestation_target)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*attestation_target))? - .slot() + .slot .height(spec.genesis_slot); // update the attestation only if the new target is higher if past_block_height < block_height { @@ -343,8 +343,8 @@ impl ForkChoice for BitwiseLMDGhost { .get_deserialized(&justified_block_start)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*justified_block_start))?; - let block_slot = block.slot(); - let state_root = block.state_root(); + let block_slot = block.slot; + let state_root = block.state_root; let mut block_height = block_slot.height(spec.genesis_slot); let mut current_head = *justified_block_start; @@ -434,7 +434,7 @@ impl ForkChoice for BitwiseLMDGhost { .block_store .get_deserialized(¤t_head)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(current_head))? 
- .slot() + .slot .height(spec.genesis_slot); // prune the latest votes for votes that are not part of current chosen chain // more specifically, only keep votes that have head as an ancestor diff --git a/eth2/fork_choice/src/optimized_lmd_ghost.rs b/eth2/fork_choice/src/optimized_lmd_ghost.rs index 30c84e9e1..e1b8914a6 100644 --- a/eth2/fork_choice/src/optimized_lmd_ghost.rs +++ b/eth2/fork_choice/src/optimized_lmd_ghost.rs @@ -11,8 +11,8 @@ use std::cmp::Ordering; use std::collections::HashMap; use std::sync::Arc; use types::{ - readers::BeaconBlockReader, validator_registry::get_active_validator_indices, BeaconBlock, - ChainSpec, Hash256, Slot, SlotHeight, + validator_registry::get_active_validator_indices, BeaconBlock, ChainSpec, Hash256, Slot, + SlotHeight, }; //TODO: Pruning - Children @@ -226,17 +226,17 @@ impl ForkChoice for OptimizedLMDGhost { // get the height of the parent let parent_height = self .block_store - .get_deserialized(&block.parent_root)? - .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(block.parent_root))? - .slot() + .get_deserialized(&block.previous_block_root)? + .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(block.previous_block_root))? + .slot .height(spec.genesis_slot); - let parent_hash = &block.parent_root; + let parent_hash = &block.previous_block_root; // add the new block to the children of parent (*self .children - .entry(block.parent_root) + .entry(block.previous_block_root) .or_insert_with(|| vec![])) .push(block_hash.clone()); @@ -280,7 +280,7 @@ impl ForkChoice for OptimizedLMDGhost { .block_store .get_deserialized(&target_block_root)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*target_block_root))? - .slot() + .slot .height(spec.genesis_slot); // get the height of the past target block @@ -288,7 +288,7 @@ impl ForkChoice for OptimizedLMDGhost { .block_store .get_deserialized(&attestation_target)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*attestation_target))? - .slot() + .slot .height(spec.genesis_slot); // update the attestation only if the new target is higher if past_block_height < block_height { @@ -314,8 +314,8 @@ impl ForkChoice for OptimizedLMDGhost { .get_deserialized(&justified_block_start)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*justified_block_start))?; - let block_slot = block.slot(); - let state_root = block.state_root(); + let block_slot = block.slot; + let state_root = block.state_root; let mut block_height = block_slot.height(spec.genesis_slot); let mut current_head = *justified_block_start; @@ -405,7 +405,7 @@ impl ForkChoice for OptimizedLMDGhost { .block_store .get_deserialized(¤t_head)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(current_head))? - .slot() + .slot .height(spec.genesis_slot); // prune the latest votes for votes that are not part of current chosen chain // more specifically, only keep votes that have head as an ancestor diff --git a/eth2/fork_choice/src/slow_lmd_ghost.rs b/eth2/fork_choice/src/slow_lmd_ghost.rs index abf13f21b..af58aa7b8 100644 --- a/eth2/fork_choice/src/slow_lmd_ghost.rs +++ b/eth2/fork_choice/src/slow_lmd_ghost.rs @@ -9,8 +9,7 @@ use log::{debug, trace}; use std::collections::HashMap; use std::sync::Arc; use types::{ - readers::BeaconBlockReader, validator_registry::get_active_validator_indices, BeaconBlock, - ChainSpec, Hash256, Slot, + validator_registry::get_active_validator_indices, BeaconBlock, ChainSpec, Hash256, Slot, }; //TODO: Pruning and syncing @@ -95,7 +94,7 @@ where .block_store .get_deserialized(&block_root)? 
.ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*block_root))? - .slot(); + .slot; for (vote_hash, votes) in latest_votes.iter() { let (root_at_slot, _) = self @@ -122,7 +121,7 @@ impl ForkChoice for SlowLMDGhost { // add the new block to the children of parent (*self .children - .entry(block.parent_root) + .entry(block.previous_block_root) .or_insert_with(|| vec![])) .push(block_hash.clone()); @@ -155,7 +154,7 @@ impl ForkChoice for SlowLMDGhost { .block_store .get_deserialized(&target_block_root)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*target_block_root))? - .slot() + .slot .height(spec.genesis_slot); // get the height of the past target block @@ -163,7 +162,7 @@ impl ForkChoice for SlowLMDGhost { .block_store .get_deserialized(&attestation_target)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*attestation_target))? - .slot() + .slot .height(spec.genesis_slot); // update the attestation only if the new target is higher if past_block_height < block_height { @@ -186,9 +185,9 @@ impl ForkChoice for SlowLMDGhost { .get_deserialized(&justified_block_start)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*justified_block_start))?; - let start_state_root = start.state_root(); + let start_state_root = start.state_root; - let latest_votes = self.get_latest_votes(&start_state_root, start.slot(), spec)?; + let latest_votes = self.get_latest_votes(&start_state_root, start.slot, spec)?; let mut head_hash = *justified_block_start; From 6df5eee7f49809da48868629c1567860c957d3fc Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 17 Mar 2019 18:10:20 +1100 Subject: [PATCH 097/154] Update beacon_chain crate with v0.5.0 updates --- .../src/attestation_aggregator.rs | 53 ++++----- beacon_node/beacon_chain/src/beacon_chain.rs | 104 ++++++++---------- 2 files changed, 67 insertions(+), 90 deletions(-) diff --git a/beacon_node/beacon_chain/src/attestation_aggregator.rs b/beacon_node/beacon_chain/src/attestation_aggregator.rs index 75cfd7ee5..9b4e5a687 100644 --- a/beacon_node/beacon_chain/src/attestation_aggregator.rs +++ b/beacon_node/beacon_chain/src/attestation_aggregator.rs @@ -1,4 +1,3 @@ -use log::trace; use ssz::TreeHash; use state_processing::per_block_processing::validate_attestation_without_signature; use std::collections::{HashMap, HashSet}; @@ -86,34 +85,22 @@ impl AttestationAggregator { free_attestation: &FreeAttestation, spec: &ChainSpec, ) -> Result { - let attestation_duties = match state.attestation_slot_and_shard_for_validator( - free_attestation.validator_index as usize, - spec, - ) { - Err(BeaconStateError::EpochCacheUninitialized(e)) => { - panic!("Attempted to access unbuilt cache {:?}.", e) - } - Err(BeaconStateError::EpochOutOfBounds) => invalid_outcome!(Message::TooOld), - Err(BeaconStateError::ShardOutOfBounds) => invalid_outcome!(Message::BadShard), - Err(e) => return Err(e), - Ok(None) => invalid_outcome!(Message::BadValidatorIndex), - Ok(Some(attestation_duties)) => attestation_duties, - }; + let duties = + match state.get_attestation_duties(free_attestation.validator_index as usize, spec) { + Err(BeaconStateError::EpochCacheUninitialized(e)) => { + panic!("Attempted to access unbuilt cache {:?}.", e) + } + Err(BeaconStateError::EpochOutOfBounds) => invalid_outcome!(Message::TooOld), + Err(BeaconStateError::ShardOutOfBounds) => invalid_outcome!(Message::BadShard), + Err(e) => return Err(e), + Ok(None) => invalid_outcome!(Message::BadValidatorIndex), + Ok(Some(attestation_duties)) => attestation_duties, + }; - let (slot, shard, committee_index) = 
attestation_duties; - - trace!( - "slot: {}, shard: {}, committee_index: {}, val_index: {}", - slot, - shard, - committee_index, - free_attestation.validator_index - ); - - if free_attestation.data.slot != slot { + if free_attestation.data.slot != duties.slot { invalid_outcome!(Message::BadSlot); } - if free_attestation.data.shard != shard { + if free_attestation.data.shard != duties.shard { invalid_outcome!(Message::BadShard); } @@ -143,7 +130,7 @@ impl AttestationAggregator { if let Some(updated_attestation) = aggregate_attestation( existing_attestation, &free_attestation.signature, - committee_index as usize, + duties.committee_index as usize, ) { self.store.insert(signable_message, updated_attestation); valid_outcome!(Message::Aggregated); @@ -154,7 +141,7 @@ impl AttestationAggregator { let mut aggregate_signature = AggregateSignature::new(); aggregate_signature.add(&free_attestation.signature); let mut aggregation_bitfield = Bitfield::new(); - aggregation_bitfield.set(committee_index as usize, true); + aggregation_bitfield.set(duties.committee_index as usize, true); let new_attestation = Attestation { data: free_attestation.data.clone(), aggregation_bitfield, @@ -177,9 +164,13 @@ impl AttestationAggregator { ) -> Vec { let mut known_attestation_data: HashSet = HashSet::new(); - state.latest_attestations.iter().for_each(|attestation| { - known_attestation_data.insert(attestation.data.clone()); - }); + state + .previous_epoch_attestations + .iter() + .chain(state.current_epoch_attestations.iter()) + .for_each(|attestation| { + known_attestation_data.insert(attestation.data.clone()); + }); self.store .values() diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index b0e84e1e1..1082f6cab 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -15,10 +15,7 @@ use state_processing::{ per_slot_processing, BlockProcessingError, SlotProcessingError, }; use std::sync::Arc; -use types::{ - readers::{BeaconBlockReader, BeaconStateReader}, - *, -}; +use types::*; #[derive(Debug, PartialEq)] pub enum ValidBlock { @@ -106,7 +103,8 @@ where genesis_state.build_epoch_cache(RelativeEpoch::Previous, &spec)?; genesis_state.build_epoch_cache(RelativeEpoch::Current, &spec)?; - genesis_state.build_epoch_cache(RelativeEpoch::Next, &spec)?; + genesis_state.build_epoch_cache(RelativeEpoch::NextWithoutRegistryChange, &spec)?; + genesis_state.build_epoch_cache(RelativeEpoch::NextWithRegistryChange, &spec)?; Ok(Self { block_store, @@ -248,19 +246,15 @@ where /// present and prior epoch is available. pub fn block_proposer(&self, slot: Slot) -> Result { trace!("BeaconChain::block_proposer: slot: {}", slot); - let index = self - .state - .read() - .get_beacon_proposer_index(slot, &self.spec)?; + let index = self.state.read().get_beacon_proposer_index( + slot, + RelativeEpoch::Current, + &self.spec, + )?; Ok(index) } - /// Returns the justified slot for the present state. - pub fn justified_epoch(&self) -> Epoch { - self.state.read().justified_epoch - } - /// Returns the attestation slot and shard for a given validator index. 
/// /// Information is read from the current state, so only information from the present and prior @@ -273,12 +267,12 @@ where "BeaconChain::validator_attestion_slot_and_shard: validator_index: {}", validator_index ); - if let Some((slot, shard, _committee)) = self + if let Some(attestation_duty) = self .state .read() - .attestation_slot_and_shard_for_validator(validator_index, &self.spec)? + .get_attestation_duties(validator_index, &self.spec)? { - Ok(Some((slot, shard))) + Ok(Some((attestation_duty.slot, attestation_duty.shard))) } else { Ok(None) } @@ -287,37 +281,33 @@ where /// Produce an `AttestationData` that is valid for the present `slot` and given `shard`. pub fn produce_attestation_data(&self, shard: u64) -> Result { trace!("BeaconChain::produce_attestation_data: shard: {}", shard); - let justified_epoch = self.justified_epoch(); - let justified_block_root = *self - .state - .read() - .get_block_root( - justified_epoch.start_slot(self.spec.slots_per_epoch), - &self.spec, - ) - .ok_or_else(|| Error::BadRecentBlockRoots)?; + let source_epoch = self.state.read().current_justified_epoch; + let source_root = *self.state.read().get_block_root( + source_epoch.start_slot(self.spec.slots_per_epoch), + &self.spec, + )?; - let epoch_boundary_root = *self - .state - .read() - .get_block_root( - self.state.read().current_epoch_start_slot(&self.spec), - &self.spec, - ) - .ok_or_else(|| Error::BadRecentBlockRoots)?; + let target_root = *self.state.read().get_block_root( + self.state + .read() + .slot + .epoch(self.spec.slots_per_epoch) + .start_slot(self.spec.slots_per_epoch), + &self.spec, + )?; Ok(AttestationData { slot: self.state.read().slot, shard, beacon_block_root: self.head().beacon_block_root, - epoch_boundary_root, + target_root, crosslink_data_root: Hash256::zero(), - latest_crosslink: Crosslink { + previous_crosslink: Crosslink { epoch: self.state.read().slot.epoch(self.spec.slots_per_epoch), crosslink_data_root: Hash256::zero(), }, - justified_epoch, - justified_block_root, + source_epoch, + source_root, }) } @@ -581,7 +571,7 @@ where dump.push(last_slot.clone()); loop { - let beacon_block_root = last_slot.beacon_block.parent_root; + let beacon_block_root = last_slot.beacon_block.previous_block_root; if beacon_block_root == self.spec.zero_hash { break; // Genesis has been reached. @@ -621,7 +611,7 @@ where /// /// Will accept blocks from prior slots, however it will reject any block from a future slot. pub fn process_block(&self, block: BeaconBlock) -> Result { - debug!("Processing block with slot {}...", block.slot()); + debug!("Processing block with slot {}...", block.slot); let block_root = block.canonical_root(); @@ -635,9 +625,9 @@ where // Load the blocks parent block from the database, returning invalid if that block is not // found. - let parent_block_root = block.parent_root; - let parent_block = match self.block_store.get_reader(&parent_block_root)? { - Some(parent_root) => parent_root, + let parent_block_root = block.previous_block_root; + let parent_block = match self.block_store.get_deserialized(&parent_block_root)? { + Some(previous_block_root) => previous_block_root, None => { return Ok(BlockProcessingOutcome::InvalidBlock( InvalidBlock::ParentUnknown, @@ -647,15 +637,11 @@ where // Load the parent blocks state from the database, returning an error if it is not found. // It is an error because if know the parent block we should also know the parent state. 
- let parent_state_root = parent_block.state_root(); + let parent_state_root = parent_block.state_root; let parent_state = self .state_store - .get_reader(&parent_state_root)? - .ok_or_else(|| Error::DBInconsistent(format!("Missing state {}", parent_state_root)))? - .into_beacon_state() - .ok_or_else(|| { - Error::DBInconsistent(format!("State SSZ invalid {}", parent_state_root)) - })?; + .get_deserialized(&parent_state_root)? + .ok_or_else(|| Error::DBInconsistent(format!("Missing state {}", parent_state_root)))?; // TODO: check the block proposer signature BEFORE doing a state transition. This will // significantly lower exposure surface to DoS attacks. @@ -739,22 +725,22 @@ where attestations.len() ); - let parent_root = *state + let previous_block_root = *state .get_block_root(state.slot.saturating_sub(1_u64), &self.spec) - .ok_or_else(|| BlockProductionError::UnableToGetBlockRootFromState)?; + .map_err(|_| BlockProductionError::UnableToGetBlockRootFromState)?; let mut block = BeaconBlock { slot: state.slot, - parent_root, + previous_block_root, state_root: Hash256::zero(), // Updated after the state is calculated. - randao_reveal, - eth1_data: Eth1Data { - // TODO: replace with real data - deposit_root: Hash256::zero(), - block_hash: Hash256::zero(), - }, signature: self.spec.empty_signature.clone(), // To be completed by a validator. body: BeaconBlockBody { + randao_reveal, + eth1_data: Eth1Data { + // TODO: replace with real data + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + }, proposer_slashings: self.get_proposer_slashings_for_block(), attester_slashings: self.get_attester_slashings_for_block(), attestations, From df3f8df7bdd5b00c4d81bf89f3ee1350478ea04d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 17 Mar 2019 18:56:05 +1100 Subject: [PATCH 098/154] Ensure fork_choice tests pass under v0.5.0 --- eth2/fork_choice/src/longest_chain.rs | 2 +- eth2/fork_choice/tests/tests.rs | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/eth2/fork_choice/src/longest_chain.rs b/eth2/fork_choice/src/longest_chain.rs index 333553c02..423edc567 100644 --- a/eth2/fork_choice/src/longest_chain.rs +++ b/eth2/fork_choice/src/longest_chain.rs @@ -34,7 +34,7 @@ impl ForkChoice for LongestChain { ) -> Result<(), ForkChoiceError> { // add the block hash to head_block_hashes removing the parent if it exists self.head_block_hashes - .retain(|hash| *hash != block.parent_root); + .retain(|hash| *hash != block.previous_block_root); self.head_block_hashes.push(*block_hash); Ok(()) } diff --git a/eth2/fork_choice/tests/tests.rs b/eth2/fork_choice/tests/tests.rs index cd5ff360f..80fbbbe20 100644 --- a/eth2/fork_choice/tests/tests.rs +++ b/eth2/fork_choice/tests/tests.rs @@ -90,6 +90,8 @@ fn test_yaml_vectors( let randao_reveal = Signature::empty_signature(); let signature = Signature::empty_signature(); let body = BeaconBlockBody { + eth1_data, + randao_reveal, proposer_slashings: vec![], attester_slashings: vec![], attestations: vec![], @@ -117,14 +119,14 @@ fn test_yaml_vectors( // default params for genesis let block_hash = id_to_hash(&block_id); let mut slot = spec.genesis_slot; - let parent_root = id_to_hash(&parent_id); + let previous_block_root = id_to_hash(&parent_id); // set the slot and parent based off the YAML. 
Start with genesis; // if not the genesis, update slot if parent_id != block_id { // find parent slot slot = *(block_slot - .get(&parent_root) + .get(&previous_block_root) .expect("Parent should have a slot number")) + 1; } else { @@ -137,10 +139,8 @@ fn test_yaml_vectors( // build the BeaconBlock let beacon_block = BeaconBlock { slot, - parent_root, + previous_block_root, state_root: state_root.clone(), - randao_reveal: randao_reveal.clone(), - eth1_data: eth1_data.clone(), signature: signature.clone(), body: body.clone(), }; From 446ff0c27e7f9a7a71d0135f3090e37ce8f745d7 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 17 Mar 2019 19:19:52 +1100 Subject: [PATCH 099/154] Ensure test_harness crate compiles under v0.5.0 --- .../test_harness/src/beacon_chain_harness.rs | 29 ++++++-- .../test_harness/src/test_case.rs | 67 +++++-------------- .../testing_beacon_block_builder.rs | 9 ++- .../src/test_utils/testing_deposit_builder.rs | 12 ++-- 4 files changed, 51 insertions(+), 66 deletions(-) diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs index d74464ad4..bc5c93b94 100644 --- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs @@ -46,8 +46,8 @@ impl BeaconChainHarness { TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec); let (genesis_state, keypairs) = state_builder.build(); - let state_root = Hash256::from_slice(&genesis_state.hash_tree_root()); - let genesis_block = BeaconBlock::genesis(state_root, &spec); + let mut genesis_block = BeaconBlock::empty(&spec); + genesis_block.state_root = Hash256::from_slice(&genesis_state.hash_tree_root()); // Create the Beacon Chain let beacon_chain = Arc::new( @@ -127,8 +127,8 @@ impl BeaconChainHarness { .get_crosslink_committees_at_slot(present_slot, &self.spec) .unwrap() .iter() - .fold(vec![], |mut acc, (committee, _slot)| { - acc.append(&mut committee.clone()); + .fold(vec![], |mut acc, c| { + acc.append(&mut c.committee.clone()); acc }); let attesting_validators: HashSet = @@ -233,6 +233,27 @@ impl BeaconChainHarness { Some(Signature::new(message, domain, &validator.keypair.sk)) } + /// Returns the current `Fork` of the `beacon_chain`. + pub fn fork(&self) -> Fork { + self.beacon_chain.state.read().fork.clone() + } + + /// Returns the current `epoch` of the `beacon_chain`. + pub fn epoch(&self) -> Epoch { + self.beacon_chain + .state + .read() + .slot + .epoch(self.spec.slots_per_epoch) + } + + /// Returns the keypair for some validator index. + pub fn validator_keypair(&self, validator_index: usize) -> Option<&Keypair> { + self.validators + .get(validator_index) + .and_then(|v| Some(&v.keypair)) + } + /// Submit a deposit to the `BeaconChain` and, if given a keypair, create a new /// `ValidatorHarness` instance for this validator. 
/// diff --git a/beacon_node/beacon_chain/test_harness/src/test_case.rs b/beacon_node/beacon_chain/test_harness/src/test_case.rs index 0a6206972..1361127a1 100644 --- a/beacon_node/beacon_chain/test_harness/src/test_case.rs +++ b/beacon_node/beacon_chain/test_harness/src/test_case.rs @@ -3,12 +3,11 @@ use crate::beacon_chain_harness::BeaconChainHarness; use beacon_chain::CheckPoint; -use bls::get_withdrawal_credentials; use log::{info, warn}; use ssz::SignedRoot; use types::*; -use types::test_utils::{TestingAttesterSlashingBuilder, TestingProposerSlashingBuilder}; +use types::test_utils::*; use yaml_rust::Yaml; mod config; @@ -222,27 +221,20 @@ impl TestCase { } /// Builds a `Deposit` this is valid for the given `BeaconChainHarness` at its next slot. -fn build_transfer(harness: &BeaconChainHarness, from: u64, to: u64, amount: u64) -> Transfer { +fn build_transfer( + harness: &BeaconChainHarness, + sender: u64, + recipient: u64, + amount: u64, +) -> Transfer { let slot = harness.beacon_chain.state.read().slot + 1; - let mut transfer = Transfer { - from, - to, - amount, - fee: 0, - slot, - pubkey: harness.validators[from as usize].keypair.pk.clone(), - signature: Signature::empty_signature(), - }; + let mut builder = TestingTransferBuilder::new(sender, recipient, amount, slot); - let message = transfer.signed_root(); - let epoch = slot.epoch(harness.spec.slots_per_epoch); + let keypair = harness.validator_keypair(sender as usize).unwrap(); + builder.sign(keypair.clone(), &harness.fork(), &harness.spec); - transfer.signature = harness - .validator_sign(from as usize, &message[..], epoch, Domain::Transfer) - .expect("Unable to sign Transfer"); - - transfer + builder.build() } /// Builds a `Deposit` this is valid for the given `BeaconChainHarness`. @@ -255,41 +247,12 @@ fn build_deposit( index_offset: u64, ) -> (Deposit, Keypair) { let keypair = Keypair::random(); - let withdrawal_credentials = Hash256::from_slice( - &get_withdrawal_credentials(&keypair.pk, harness.spec.bls_withdrawal_prefix_byte)[..], - ); - let proof_of_possession = DepositInput::create_proof_of_possession( - &keypair, - &withdrawal_credentials, - harness.spec.get_domain( - harness - .beacon_chain - .state - .read() - .current_epoch(&harness.spec), - Domain::Deposit, - &harness.beacon_chain.state.read().fork, - ), - ); - let index = harness.beacon_chain.state.read().deposit_index + index_offset; - let deposit = Deposit { - // Note: `branch` and `index` will need to be updated once the spec defines their - // validity. - branch: vec![], - index, - deposit_data: DepositData { - amount, - timestamp: 1, - deposit_input: DepositInput { - pubkey: keypair.pk.clone(), - withdrawal_credentials, - proof_of_possession, - }, - }, - }; + let mut builder = TestingDepositBuilder::new(keypair.pk.clone(), amount); + builder.set_index(harness.beacon_chain.state.read().deposit_index + index_offset); + builder.sign(&keypair, harness.epoch(), &harness.fork(), &harness.spec); - (deposit, keypair) + (builder.build(), keypair) } /// Builds a `VoluntaryExit` this is valid for the given `BeaconChainHarness`. 
diff --git a/eth2/types/src/test_utils/testing_beacon_block_builder.rs b/eth2/types/src/test_utils/testing_beacon_block_builder.rs index 6e48c8c17..c5cd22ed4 100644 --- a/eth2/types/src/test_utils/testing_beacon_block_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_block_builder.rs @@ -180,9 +180,14 @@ impl TestingBeaconBlockBuilder { ) { let keypair = Keypair::random(); - let mut builder = TestingDepositBuilder::new(amount); + let mut builder = TestingDepositBuilder::new(keypair.pk.clone(), amount); builder.set_index(index); - builder.sign(&keypair, state, spec); + builder.sign( + &keypair, + state.slot.epoch(spec.slots_per_epoch), + &state.fork, + spec, + ); self.block.body.deposits.push(builder.build()) } diff --git a/eth2/types/src/test_utils/testing_deposit_builder.rs b/eth2/types/src/test_utils/testing_deposit_builder.rs index 0d1c962f0..ee258e7fe 100644 --- a/eth2/types/src/test_utils/testing_deposit_builder.rs +++ b/eth2/types/src/test_utils/testing_deposit_builder.rs @@ -10,9 +10,7 @@ pub struct TestingDepositBuilder { impl TestingDepositBuilder { /// Instantiates a new builder. - pub fn new(amount: u64) -> Self { - let keypair = Keypair::random(); - + pub fn new(pubkey: PublicKey, amount: u64) -> Self { let deposit = Deposit { proof: vec![], index: 0, @@ -20,7 +18,7 @@ impl TestingDepositBuilder { amount, timestamp: 1, deposit_input: DepositInput { - pubkey: keypair.pk, + pubkey, withdrawal_credentials: Hash256::zero(), proof_of_possession: Signature::empty_signature(), }, @@ -40,13 +38,11 @@ impl TestingDepositBuilder { /// - `pubkey` to the signing pubkey. /// - `withdrawal_credentials` to the signing pubkey. /// - `proof_of_possesssion` - pub fn sign(&mut self, keypair: &Keypair, state: &BeaconState, spec: &ChainSpec) { + pub fn sign(&mut self, keypair: &Keypair, epoch: Epoch, fork: &Fork, spec: &ChainSpec) { let withdrawal_credentials = Hash256::from_slice( &get_withdrawal_credentials(&keypair.pk, spec.bls_withdrawal_prefix_byte)[..], ); - let epoch = state.current_epoch(spec); - self.deposit.deposit_data.deposit_input.pubkey = keypair.pk.clone(); self.deposit .deposit_data @@ -57,7 +53,7 @@ impl TestingDepositBuilder { .deposit .deposit_data .deposit_input - .create_proof_of_possession(&keypair.sk, epoch, &state.fork, spec); + .create_proof_of_possession(&keypair.sk, epoch, fork, spec); } /// Builds the deposit, consuming the builder. From 919a15de229fa97e90bb3082cc552dc9f2edb466 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 17 Mar 2019 19:21:19 +1100 Subject: [PATCH 100/154] Ensure validator client compiles under v0.5.0 --- .../beacon_block_grpc_client.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/validator_client/src/block_producer_service/beacon_block_grpc_client.rs b/validator_client/src/block_producer_service/beacon_block_grpc_client.rs index 6ce5c0fa0..04a02a221 100644 --- a/validator_client/src/block_producer_service/beacon_block_grpc_client.rs +++ b/validator_client/src/block_producer_service/beacon_block_grpc_client.rs @@ -50,15 +50,15 @@ impl BeaconNode for BeaconBlockGrpcClient { // TODO: this conversion is incomplete; fix it. 
Ok(Some(BeaconBlock { slot: Slot::new(block.get_slot()), - parent_root: Hash256::zero(), + previous_block_root: Hash256::zero(), state_root: Hash256::zero(), - randao_reveal, - eth1_data: Eth1Data { - deposit_root: Hash256::zero(), - block_hash: Hash256::zero(), - }, signature, body: BeaconBlockBody { + randao_reveal, + eth1_data: Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + }, proposer_slashings: vec![], attester_slashings: vec![], attestations: vec![], @@ -83,7 +83,7 @@ impl BeaconNode for BeaconBlockGrpcClient { let mut grpc_block = GrpcBeaconBlock::new(); grpc_block.set_slot(block.slot.as_u64()); grpc_block.set_block_root(vec![0]); - grpc_block.set_randao_reveal(ssz_encode(&block.randao_reveal)); + grpc_block.set_randao_reveal(ssz_encode(&block.body.randao_reveal)); grpc_block.set_signature(ssz_encode(&block.signature)); req.set_block(grpc_block); From f71cab8ba2525142019c8adfc1386145f90cb2f9 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 17 Mar 2019 19:28:29 +1100 Subject: [PATCH 101/154] Ensure project tests compile on v0.5.0 --- beacon_node/src/main.rs | 47 +++++++++++++++++++++-------------------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index eacbffa3e..780a3d338 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -20,7 +20,7 @@ use ssz::TreeHash; use std::sync::Arc; use types::{ beacon_state::BeaconStateBuilder, BeaconBlock, ChainSpec, Deposit, DepositData, DepositInput, - Domain, Eth1Data, Fork, Hash256, Keypair, + Eth1Data, Fork, Hash256, Keypair, }; fn main() { @@ -103,35 +103,36 @@ fn main() { let initial_validator_deposits: Vec = keypairs .iter() - .map(|keypair| Deposit { - branch: vec![], // branch verification is not specified. - index: 0, // index verification is not specified. - deposit_data: DepositData { - amount: 32_000_000_000, // 32 ETH (in Gwei) - timestamp: genesis_time - 1, - deposit_input: DepositInput { - pubkey: keypair.pk.clone(), - withdrawal_credentials: Hash256::zero(), // Withdrawal not possible. - proof_of_possession: DepositInput::create_proof_of_possession( - &keypair, - &Hash256::zero(), - spec.get_domain( - // Get domain from genesis fork_version - spec.genesis_epoch, - Domain::Deposit, - &Fork::genesis(&spec), - ), - ), + .map(|keypair| { + let mut deposit_input = DepositInput { + pubkey: keypair.pk.clone(), + withdrawal_credentials: Hash256::zero(), + proof_of_possession: spec.empty_signature.clone(), + }; + deposit_input.proof_of_possession = deposit_input.create_proof_of_possession( + &keypair.sk, + spec.genesis_epoch, + &Fork::genesis(&spec), + &spec, + ); + + Deposit { + proof: vec![], // branch verification is not specified. + index: 0, // index verification is not specified. 
+ deposit_data: DepositData { + amount: 32_000_000_000, // 32 ETH (in Gwei) + timestamp: genesis_time - 1, + deposit_input, }, - }, + } }) .collect(); let mut state_builder = BeaconStateBuilder::new(genesis_time, latest_eth1_data, &spec); state_builder.process_initial_deposits(&initial_validator_deposits, &spec); let genesis_state = state_builder.build(&spec).unwrap(); - let state_root = Hash256::from_slice(&genesis_state.hash_tree_root()); - let genesis_block = BeaconBlock::genesis(state_root, &spec); + let mut genesis_block = BeaconBlock::empty(&spec); + genesis_block.state_root = Hash256::from_slice(&genesis_state.hash_tree_root()); // Genesis chain let _chain_result = BeaconChain::from_genesis( From 8677b9e9cc61fd6792fb2ae4f18ee7be92b4d9da Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 17 Mar 2019 21:07:19 +1100 Subject: [PATCH 102/154] Fix bug with epoch caches, add tests --- .../src/per_block_processing.rs | 6 +- .../verify_slashable_attestation.rs | 2 +- .../per_block_processing/verify_transfer.rs | 2 +- eth2/types/src/beacon_state.rs | 4 +- eth2/types/src/beacon_state/epoch_cache.rs | 2 +- eth2/types/src/beacon_state/tests.rs | 54 ++++++++++++++++- eth2/types/src/relative_epoch.rs | 58 +++++++++++++++++++ 7 files changed, 120 insertions(+), 8 deletions(-) diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index c6b22fa75..78cf927f5 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -32,7 +32,7 @@ const VERIFY_DEPOSIT_MERKLE_PROOFS: bool = false; /// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise /// returns an error describing why the block was invalid or how the function failed to execute. /// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn per_block_processing( state: &mut BeaconState, block: &BeaconBlock, @@ -47,7 +47,7 @@ pub fn per_block_processing( /// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise /// returns an error describing why the block was invalid or how the function failed to execute. /// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn per_block_processing_without_verifying_block_signature( state: &mut BeaconState, block: &BeaconBlock, @@ -62,7 +62,7 @@ pub fn per_block_processing_without_verifying_block_signature( /// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise /// returns an error describing why the block was invalid or how the function failed to execute. /// -/// Spec v0.4.0 +/// Spec v0.5.0 fn per_block_processing_signature_optional( mut state: &mut BeaconState, block: &BeaconBlock, diff --git a/eth2/state_processing/src/per_block_processing/verify_slashable_attestation.rs b/eth2/state_processing/src/per_block_processing/verify_slashable_attestation.rs index f0d371043..aa9a32196 100644 --- a/eth2/state_processing/src/per_block_processing/verify_slashable_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/verify_slashable_attestation.rs @@ -10,7 +10,7 @@ use types::*; /// /// Returns `Ok(())` if the `SlashableAttestation` is valid, otherwise indicates the reason for invalidity. 
/// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn verify_slashable_attestation( state: &BeaconState, slashable_attestation: &SlashableAttestation, diff --git a/eth2/state_processing/src/per_block_processing/verify_transfer.rs b/eth2/state_processing/src/per_block_processing/verify_transfer.rs index 546760fd0..f873cd850 100644 --- a/eth2/state_processing/src/per_block_processing/verify_transfer.rs +++ b/eth2/state_processing/src/per_block_processing/verify_transfer.rs @@ -94,7 +94,7 @@ pub fn verify_transfer( /// /// Does not check that the transfer is valid, however checks for overflow in all actions. /// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn execute_transfer( state: &mut BeaconState, transfer: &Transfer, diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index a90f09759..8999d8be8 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -279,7 +279,9 @@ impl BeaconState { fn cache(&self, relative_epoch: RelativeEpoch, spec: &ChainSpec) -> Result<&EpochCache, Error> { let cache = &self.caches[self.cache_index(relative_epoch)]; - if cache.initialized_epoch == Some(self.slot.epoch(spec.slots_per_epoch)) { + let epoch = relative_epoch.into_epoch(self.slot.epoch(spec.slots_per_epoch)); + + if cache.initialized_epoch == Some(epoch) { Ok(cache) } else { Err(Error::EpochCacheUninitialized(relative_epoch)) diff --git a/eth2/types/src/beacon_state/epoch_cache.rs b/eth2/types/src/beacon_state/epoch_cache.rs index 0759a7617..4436972f1 100644 --- a/eth2/types/src/beacon_state/epoch_cache.rs +++ b/eth2/types/src/beacon_state/epoch_cache.rs @@ -159,7 +159,7 @@ impl EpochCrosslinkCommittees { let epoch_start_slot = self.epoch.start_slot(spec.slots_per_epoch); let epoch_end_slot = self.epoch.end_slot(spec.slots_per_epoch); - if (epoch_start_slot < slot) && (slot <= epoch_end_slot) { + if (epoch_start_slot <= slot) && (slot <= epoch_end_slot) { let index = slot - epoch_start_slot; self.crosslink_committees.get(index.as_usize()) } else { diff --git a/eth2/types/src/beacon_state/tests.rs b/eth2/types/src/beacon_state/tests.rs index 6c10ebe86..dc16a013b 100644 --- a/eth2/types/src/beacon_state/tests.rs +++ b/eth2/types/src/beacon_state/tests.rs @@ -1,5 +1,57 @@ #![cfg(test)] - use super::*; +use crate::test_utils::*; ssz_tests!(BeaconState); + +/// Test that +/// +/// 1. Using the cache before it's built fails. +/// 2. Using the cache after it's build passes. +/// 3. Using the cache after it's dropped fails. +fn test_cache_initialization<'a>( + state: &'a mut BeaconState, + relative_epoch: RelativeEpoch, + spec: &ChainSpec, +) { + let slot = relative_epoch + .into_epoch(state.slot.epoch(spec.slots_per_epoch)) + .start_slot(spec.slots_per_epoch); + + // Assuming the cache isn't already built, assert that a call to a cache-using function fails. + assert_eq!( + state.get_beacon_proposer_index(slot, relative_epoch, spec), + Err(BeaconStateError::EpochCacheUninitialized(relative_epoch)) + ); + + // Build the cache. + state.build_epoch_cache(relative_epoch, spec).unwrap(); + + // Assert a call to a cache-using function passes. + let _ = state + .get_beacon_proposer_index(slot, relative_epoch, spec) + .unwrap(); + + // Drop the cache. + state.drop_cache(relative_epoch); + + // Assert a call to a cache-using function fail. 
+ assert_eq!( + state.get_beacon_proposer_index(slot, relative_epoch, spec), + Err(BeaconStateError::EpochCacheUninitialized(relative_epoch)) + ); +} + +#[test] +fn cache_initialization() { + let spec = ChainSpec::few_validators(); + let (mut state, _keypairs) = + TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(16, &spec).build(); + + state.slot = (spec.genesis_epoch + 1).start_slot(spec.slots_per_epoch); + + test_cache_initialization(&mut state, RelativeEpoch::Previous, &spec); + test_cache_initialization(&mut state, RelativeEpoch::Current, &spec); + test_cache_initialization(&mut state, RelativeEpoch::NextWithRegistryChange, &spec); + test_cache_initialization(&mut state, RelativeEpoch::NextWithoutRegistryChange, &spec); +} diff --git a/eth2/types/src/relative_epoch.rs b/eth2/types/src/relative_epoch.rs index 943936605..6c135b1a6 100644 --- a/eth2/types/src/relative_epoch.rs +++ b/eth2/types/src/relative_epoch.rs @@ -74,3 +74,61 @@ impl RelativeEpoch { ) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_into_epoch() { + let base = Epoch::new(10); + + assert_eq!(RelativeEpoch::Current.into_epoch(base), base); + assert_eq!(RelativeEpoch::Previous.into_epoch(base), base - 1); + assert_eq!( + RelativeEpoch::NextWithRegistryChange.into_epoch(base), + base + 1 + ); + assert_eq!( + RelativeEpoch::NextWithoutRegistryChange.into_epoch(base), + base + 1 + ); + } + + #[test] + fn from_epoch() { + let base = Epoch::new(10); + + assert_eq!( + RelativeEpoch::from_epoch(base, base - 1), + Ok(RelativeEpoch::Previous) + ); + assert_eq!( + RelativeEpoch::from_epoch(base, base), + Ok(RelativeEpoch::Current) + ); + assert_eq!( + RelativeEpoch::from_epoch(base, base + 1), + Err(RelativeEpochError::AmbiguiousNextEpoch) + ); + } + + #[test] + fn from_slot() { + let spec = ChainSpec::foundation(); + let base = Epoch::new(10).start_slot(spec.slots_per_epoch); + + assert_eq!( + RelativeEpoch::from_slot(base, base - 1, &spec), + Ok(RelativeEpoch::Previous) + ); + assert_eq!( + RelativeEpoch::from_slot(base, base, &spec), + Ok(RelativeEpoch::Current) + ); + assert_eq!( + RelativeEpoch::from_slot(base, base + spec.slots_per_epoch, &spec), + Err(RelativeEpochError::AmbiguiousNextEpoch) + ); + } +} From 9803ab30f291405ce19310d27679d4736648c1fe Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sun, 17 Mar 2019 21:49:56 +1100 Subject: [PATCH 103/154] Propagate RPC through network service. 
- Basic network message handler threading - Correct references --- beacon_node/client/src/lib.rs | 2 +- beacon_node/libp2p/src/behaviour.rs | 29 ++-------------- beacon_node/libp2p/src/lib.rs | 4 ++- beacon_node/libp2p/src/rpc/methods.rs | 6 ++-- beacon_node/libp2p/src/rpc/mod.rs | 2 +- beacon_node/libp2p/src/rpc/protocol.rs | 6 ++-- beacon_node/libp2p/src/service.rs | 8 +++-- beacon_node/network/src/message_handler.rs | 39 ++++++++++++++++++---- beacon_node/network/src/messages.rs | 15 ++------- beacon_node/network/src/service.rs | 15 +++++++-- beacon_node/src/run.rs | 2 +- 11 files changed, 68 insertions(+), 60 deletions(-) diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 6600c9e39..7312cc6c8 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -34,7 +34,7 @@ impl Client { pub fn new( config: ClientConfig, log: slog::Logger, - executor: TaskExecutor, + executor: &TaskExecutor, ) -> error::Result { let (exit_signal, exit) = exit_future::signal(); diff --git a/beacon_node/libp2p/src/behaviour.rs b/beacon_node/libp2p/src/behaviour.rs index 2c0371095..96355cf3f 100644 --- a/beacon_node/libp2p/src/behaviour.rs +++ b/beacon_node/libp2p/src/behaviour.rs @@ -1,4 +1,4 @@ -use crate::rpc::{RPCMethod, RPCRequest, RPCResponse, Rpc, RpcEvent}; +use crate::rpc::{Rpc, RpcEvent}; use futures::prelude::*; use libp2p::{ core::swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess}, @@ -42,22 +42,7 @@ impl NetworkBehaviourEventProcess for Behaviour { fn inject_event(&mut self, event: RpcEvent) { - match event { - RpcEvent::Request { - id, - method_id, - body, - } => self.events.push(BehaviourEvent::RPCRequest { - id, - method: RPCMethod::from(method_id), - body, - }), - RpcEvent::Response { - id, - method_id, - result, - } => self.events.push(BehaviourEvent::RPCResponse { id, result }), - } + self.events.push(BehaviourEvent::RPC(event)); } } @@ -95,15 +80,7 @@ impl Behaviour { /// The types of events than can be obtained from polling the behaviour. 
pub enum BehaviourEvent { - RPCRequest { - id: u64, - method: RPCMethod, - body: RPCRequest, - }, - RPCResponse { - id: u64, - result: RPCResponse, - }, + RPC(RpcEvent), // TODO: This is a stub at the moment Message(String), } diff --git a/beacon_node/libp2p/src/lib.rs b/beacon_node/libp2p/src/lib.rs index 718b7fc22..69f6eb650 100644 --- a/beacon_node/libp2p/src/lib.rs +++ b/beacon_node/libp2p/src/lib.rs @@ -5,7 +5,7 @@ pub mod behaviour; pub mod error; mod network_config; -mod rpc; +pub mod rpc; mod service; pub use libp2p::{ @@ -13,6 +13,8 @@ pub use libp2p::{ PeerId, }; pub use network_config::NetworkConfig; +pub use rpc::HelloMessage; +pub use rpc::RpcEvent; pub use service::Libp2pEvent; pub use service::Service; pub use types::multiaddr; diff --git a/beacon_node/libp2p/src/rpc/methods.rs b/beacon_node/libp2p/src/rpc/methods.rs index b6563ba64..ea9932806 100644 --- a/beacon_node/libp2p/src/rpc/methods.rs +++ b/beacon_node/libp2p/src/rpc/methods.rs @@ -19,17 +19,17 @@ impl From for RPCMethod { #[derive(Debug, Clone)] pub enum RPCRequest { - Hello(HelloBody), + Hello(HelloMessage), } #[derive(Debug, Clone)] pub enum RPCResponse { - Hello(HelloBody), + Hello(HelloMessage), } // request/response structs for RPC methods #[derive(Encode, Decode, Clone, Debug)] -pub struct HelloBody { +pub struct HelloMessage { pub network_id: u8, pub latest_finalized_root: Hash256, pub latest_finalized_epoch: Epoch, diff --git a/beacon_node/libp2p/src/rpc/mod.rs b/beacon_node/libp2p/src/rpc/mod.rs index 4cebb1e39..3420217ce 100644 --- a/beacon_node/libp2p/src/rpc/mod.rs +++ b/beacon_node/libp2p/src/rpc/mod.rs @@ -11,7 +11,7 @@ use libp2p::core::swarm::{ ConnectedPoint, NetworkBehaviour, NetworkBehaviourAction, PollParameters, }; use libp2p::{Multiaddr, PeerId}; -pub use methods::{RPCMethod, RPCRequest, RPCResponse}; +pub use methods::{HelloMessage, RPCMethod, RPCRequest, RPCResponse}; pub use protocol::{RPCProtocol, RpcEvent}; use std::marker::PhantomData; use tokio::io::{AsyncRead, AsyncWrite}; diff --git a/beacon_node/libp2p/src/rpc/protocol.rs b/beacon_node/libp2p/src/rpc/protocol.rs index 4b462bb77..74b8322eb 100644 --- a/beacon_node/libp2p/src/rpc/protocol.rs +++ b/beacon_node/libp2p/src/rpc/protocol.rs @@ -1,4 +1,4 @@ -use super::methods::{HelloBody, RPCMethod, RPCRequest, RPCResponse}; +use super::methods::{HelloMessage, RPCMethod, RPCRequest, RPCResponse}; use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use ssz::{ssz_encode, Decodable, Encodable, SszStream}; use std::io; @@ -78,7 +78,7 @@ fn decode(packet: Vec) -> Result { if request { let body = match RPCMethod::from(method_id) { RPCMethod::Hello => { - let (hello_body, _index) = HelloBody::ssz_decode(&packet, index)?; + let (hello_body, _index) = HelloMessage::ssz_decode(&packet, index)?; RPCRequest::Hello(hello_body) } RPCMethod::Unknown => return Err(DecodeError::UnknownRPCMethod), @@ -94,7 +94,7 @@ fn decode(packet: Vec) -> Result { else { let result = match RPCMethod::from(method_id) { RPCMethod::Hello => { - let (body, _index) = HelloBody::ssz_decode(&packet, index)?; + let (body, _index) = HelloMessage::ssz_decode(&packet, index)?; RPCResponse::Hello(body) } RPCMethod::Unknown => return Err(DecodeError::UnknownRPCMethod), diff --git a/beacon_node/libp2p/src/service.rs b/beacon_node/libp2p/src/service.rs index 00c11101c..a672e153b 100644 --- a/beacon_node/libp2p/src/service.rs +++ b/beacon_node/libp2p/src/service.rs @@ -1,6 +1,7 @@ use crate::behaviour::{Behaviour, BehaviourEvent}; use crate::error; use 
crate::multiaddr::Protocol; +use crate::rpc::RpcEvent; use crate::NetworkConfig; use futures::prelude::*; use futures::Stream; @@ -104,8 +105,9 @@ impl Stream for Service { debug!(self.log, "Message received: {}", m); return Ok(Async::Ready(Some(Libp2pEvent::Message(m)))); } - // TODO: Fill with all behaviour events - _ => break, + Ok(Async::Ready(Some(BehaviourEvent::RPC(event)))) => { + return Ok(Async::Ready(Some(Libp2pEvent::RPC(event)))); + } Ok(Async::Ready(None)) => unreachable!("Swarm stream shouldn't end"), Ok(Async::NotReady) => break, _ => break, @@ -152,5 +154,7 @@ fn build_transport( /// Events that can be obtained from polling the Libp2p Service. pub enum Libp2pEvent { + // We have received an RPC event on the swarm + RPC(RpcEvent), Message(String), } diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 87935e899..fe9780ad5 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -1,7 +1,10 @@ use crate::error; use crate::messages::NodeMessage; -use crossbeam_channel::{unbounded as channel, Sender}; -use libp2p::PeerId; +use crossbeam_channel::{unbounded as channel, Sender, TryRecvError}; +use futures::future; +use futures::prelude::*; +use libp2p::rpc; +use libp2p::{PeerId, RpcEvent}; use slog::debug; use sync::SimpleSync; use types::Hash256; @@ -11,9 +14,11 @@ pub struct MessageHandler { sync: SimpleSync, //TODO: Implement beacon chain //chain: BeaconChain + log: slog::Logger, } /// Types of messages the handler can receive. +#[derive(Debug, Clone)] pub enum HandlerMessage { /// Peer has connected. PeerConnected(PeerId), @@ -21,11 +26,16 @@ pub enum HandlerMessage { PeerDisconnected(PeerId), /// A Node message has been received. Message(PeerId, NodeMessage), + /// An RPC response/request has been received. + RPC(RpcEvent), } impl MessageHandler { /// Initializes and runs the MessageHandler. - pub fn new(log: slog::Logger) -> error::Result> { + pub fn new( + executor: &tokio::runtime::TaskExecutor, + log: slog::Logger, + ) -> error::Result> { debug!(log, "Service starting"); let (handler_send, handler_recv) = channel(); @@ -33,12 +43,29 @@ impl MessageHandler { // Initialise sync and begin processing in thread //TODO: Load genesis from BeaconChain let temp_genesis = Hash256::zero(); + + // generate the Message handler let sync = SimpleSync::new(temp_genesis); + //TODO: Initialise beacon chain + let mut handler = MessageHandler { + sync, + log: log.clone(), + }; - let handler = MessageHandler { sync }; - - // spawn handler thread + // spawn handler task + // TODO: Handle manual termination of thread + executor.spawn(future::poll_fn(move || -> Result<_, _> { + loop { + handler.handle_message(handler_recv.recv().map_err(|_| { + debug!(log, "Handler channel closed. Handler terminating"); + })?); + } + })); Ok(handler_send) } + + fn handle_message(&mut self, message: HandlerMessage) { + debug!(self.log, "Message received {:?}", message); + } } diff --git a/beacon_node/network/src/messages.rs b/beacon_node/network/src/messages.rs index d3a83fd5c..064424a87 100644 --- a/beacon_node/network/src/messages.rs +++ b/beacon_node/network/src/messages.rs @@ -1,27 +1,16 @@ use libp2p::PeerId; +use libp2p::{HelloMessage, RpcEvent}; use types::{Hash256, Slot}; /// Messages between nodes across the network. 
#[derive(Debug, Clone)] pub enum NodeMessage { - Status(Status), + RPC(RpcEvent), BlockRequest, // TODO: only for testing - remove Message(String), } -#[derive(Debug, Clone)] -pub struct Status { - /// Current node version. - version: u8, - /// Genesis Hash. - genesis_hash: Hash256, - /// Best known slot number. - best_slot: Slot, - /// Best known slot hash. - best_slot_hash: Hash256, -} - /// Types of messages that the network service can receive. #[derive(Debug, Clone)] pub enum NetworkMessage { diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index e75b7e49a..bd01027e9 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -28,12 +28,12 @@ pub struct Service { impl Service { pub fn new( config: NetworkConfig, - executor: TaskExecutor, + executor: &TaskExecutor, log: slog::Logger, ) -> error::Result<(Arc, Sender)> { // launch message handler thread let message_handler_log = log.new(o!("Service" => "MessageHandler")); - let message_handler_send = MessageHandler::new(message_handler_log)?; + let message_handler_send = MessageHandler::new(executor, message_handler_log)?; // launch libp2p service let libp2p_log = log.new(o!("Service" => "Libp2p")); @@ -61,7 +61,7 @@ impl Service { fn spawn_service( libp2p_service: LibP2PService, message_handler_send: crossbeam_channel::Sender, - executor: TaskExecutor, + executor: &TaskExecutor, log: slog::Logger, ) -> error::Result<( crossbeam_channel::Sender, @@ -99,6 +99,15 @@ fn network_service( // poll the swarm loop { match libp2p_service.poll() { + Ok(Async::Ready(Some(Libp2pEvent::RPC(rpc_event)))) => { + debug!( + libp2p_service.log, + "RPC Event: Rpc message received: {:?}", rpc_event + ); + message_handler_send + .send(HandlerMessage::RPC(rpc_event)) + .map_err(|_| "failed to send rpc to handler"); + } Ok(Async::Ready(Some(Libp2pEvent::Message(m)))) => debug!( libp2p_service.log, "Network Service: Message received: {}", m diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index 12d761d84..810f2aeaf 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -32,7 +32,7 @@ pub fn run_beacon_node(config: ClientConfig, log: slog::Logger) -> error::Result let executor = runtime.executor(); // currently testing - using TestingNode type - let client: Client = Client::new(config, log.clone(), executor.clone())?; + let client: Client = Client::new(config, log.clone(), &executor)?; notifier::run(&client, executor, exit); runtime.block_on(ctrlc); From 816c2c651bc953472ecab72d2b44a5b0d1c8d417 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 17 Mar 2019 23:11:07 +1100 Subject: [PATCH 104/154] Modify genesis processing process. - Removed BeaconStateBuilder - Added genesis code to `state_processing`. 
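The commit below removes the hand-rolled deposit and genesis construction from `beacon_node/src/main.rs` (and later the `BeaconStateBuilder`), moving spec-side genesis logic into `state_processing::get_genesis_state`. As a rough sketch (not part of the patch, and the helper name `build_genesis` is ours), using only the crate APIs visible in the diff that follows, a node now derives its genesis state and block along these lines:

use ssz::TreeHash;
use types::test_utils::TestingBeaconStateBuilder;
use types::*;

// Build a genesis state from deterministic test keypairs, then derive the
// matching genesis block by embedding the state root (mirrors the updated
// beacon_node/src/main.rs shown below).
fn build_genesis(spec: &ChainSpec) -> (BeaconState, BeaconBlock) {
    let state_builder = TestingBeaconStateBuilder::from_deterministic_keypairs(8, spec);
    let (genesis_state, _keypairs) = state_builder.build();

    let mut genesis_block = BeaconBlock::empty(spec);
    genesis_block.state_root = Hash256::from_slice(&genesis_state.hash_tree_root());

    (genesis_state, genesis_block)
}

For non-test callers, the new `get_genesis_state` function in `eth2/state_processing` performs the spec-side steps instead: process the genesis deposits, activate validators whose effective balance is at least `max_deposit_amount`, build the current epoch cache, fill `latest_active_index_roots` from the genesis active index root, and generate the current shuffling seed.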
--- beacon_node/Cargo.toml | 1 + beacon_node/src/main.rs | 63 ++------ .../state_processing/src/get_genesis_state.rs | 59 ++++++++ eth2/state_processing/src/lib.rs | 2 + .../src/per_epoch_processing.rs | 6 +- eth2/types/src/beacon_state.rs | 112 ++------------ eth2/types/src/beacon_state/builder.rs | 101 ------------- eth2/types/src/beacon_state/epoch_cache.rs | 6 +- .../src/beacon_state/epoch_cache/tests.rs | 142 ++++++++++++++++++ .../testing_beacon_state_builder.rs | 16 +- 10 files changed, 234 insertions(+), 274 deletions(-) create mode 100644 eth2/state_processing/src/get_genesis_state.rs delete mode 100644 eth2/types/src/beacon_state/builder.rs create mode 100644 eth2/types/src/beacon_state/epoch_cache/tests.rs diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index a4804e07e..b76bc3e82 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -19,6 +19,7 @@ slog = "^2.2.3" slot_clock = { path = "../eth2/utils/slot_clock" } slog-term = "^2.4.0" slog-async = "^2.3.0" +state_processing = { path = "../eth2/state_processing" } types = { path = "../eth2/types" } ssz = { path = "../eth2/utils/ssz" } tokio = "0.1" diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 780a3d338..2436d4f7c 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -18,10 +18,8 @@ use slog::{error, info, o, Drain}; use slot_clock::SystemTimeSlotClock; use ssz::TreeHash; use std::sync::Arc; -use types::{ - beacon_state::BeaconStateBuilder, BeaconBlock, ChainSpec, Deposit, DepositData, DepositInput, - Eth1Data, Fork, Hash256, Keypair, -}; +use types::test_utils::TestingBeaconStateBuilder; +use types::*; fn main() { let decorator = slog_term::TermDecorator::new().build(); @@ -79,61 +77,18 @@ fn main() { let block_store = Arc::new(BeaconBlockStore::new(db.clone())); let state_store = Arc::new(BeaconStateStore::new(db.clone())); + let state_builder = TestingBeaconStateBuilder::from_deterministic_keypairs(8, &spec); + let (genesis_state, _keypairs) = state_builder.build(); + + let mut genesis_block = BeaconBlock::empty(&spec); + genesis_block.state_root = Hash256::from_slice(&genesis_state.hash_tree_root()); + // Slot clock - let genesis_time = 1_549_935_547; // 12th Feb 2018 (arbitrary value in the past). - let slot_clock = SystemTimeSlotClock::new(genesis_time, spec.seconds_per_slot) + let slot_clock = SystemTimeSlotClock::new(genesis_state.genesis_time, spec.seconds_per_slot) .expect("Unable to load SystemTimeSlotClock"); // Choose the fork choice let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone()); - /* - * Generate some random data to start a chain with. - * - * This is will need to be replace for production usage. - */ - let latest_eth1_data = Eth1Data { - deposit_root: Hash256::zero(), - block_hash: Hash256::zero(), - }; - let keypairs: Vec = (0..10) - .collect::>() - .iter() - .map(|_| Keypair::random()) - .collect(); - - let initial_validator_deposits: Vec = keypairs - .iter() - .map(|keypair| { - let mut deposit_input = DepositInput { - pubkey: keypair.pk.clone(), - withdrawal_credentials: Hash256::zero(), - proof_of_possession: spec.empty_signature.clone(), - }; - deposit_input.proof_of_possession = deposit_input.create_proof_of_possession( - &keypair.sk, - spec.genesis_epoch, - &Fork::genesis(&spec), - &spec, - ); - - Deposit { - proof: vec![], // branch verification is not specified. - index: 0, // index verification is not specified. 
- deposit_data: DepositData { - amount: 32_000_000_000, // 32 ETH (in Gwei) - timestamp: genesis_time - 1, - deposit_input, - }, - } - }) - .collect(); - - let mut state_builder = BeaconStateBuilder::new(genesis_time, latest_eth1_data, &spec); - state_builder.process_initial_deposits(&initial_validator_deposits, &spec); - let genesis_state = state_builder.build(&spec).unwrap(); - let mut genesis_block = BeaconBlock::empty(&spec); - genesis_block.state_root = Hash256::from_slice(&genesis_state.hash_tree_root()); - // Genesis chain let _chain_result = BeaconChain::from_genesis( state_store.clone(), diff --git a/eth2/state_processing/src/get_genesis_state.rs b/eth2/state_processing/src/get_genesis_state.rs new file mode 100644 index 000000000..3c6612349 --- /dev/null +++ b/eth2/state_processing/src/get_genesis_state.rs @@ -0,0 +1,59 @@ +use super::per_block_processing::{errors::BlockProcessingError, process_deposits}; +use ssz::TreeHash; +use types::*; + +pub enum GenesisError { + BlockProcessingError(BlockProcessingError), + BeaconStateError(BeaconStateError), +} + +/// Returns the genesis `BeaconState` +/// +/// Spec v0.5.0 +pub fn get_genesis_state( + genesis_validator_deposits: &[Deposit], + genesis_time: u64, + genesis_eth1_data: Eth1Data, + spec: &ChainSpec, +) -> Result<(), BlockProcessingError> { + // Get the genesis `BeaconState` + let mut state = BeaconState::genesis(genesis_time, genesis_eth1_data, spec); + + // Process genesis deposits. + process_deposits(&mut state, genesis_validator_deposits, spec)?; + + // Process genesis activations. + for i in 0..state.validator_registry.len() { + if state.get_effective_balance(i, spec)? >= spec.max_deposit_amount { + state.validator_registry[i].activation_epoch = spec.genesis_epoch; + } + } + + // Ensure the current epoch cache is built. + state.build_epoch_cache(RelativeEpoch::Current, spec)?; + + // Set all the active index roots to be the genesis active index root. + let active_validator_indices = state + .get_active_validator_indices(spec.genesis_epoch, spec)? + .to_vec(); + let genesis_active_index_root = Hash256::from_slice(&active_validator_indices.hash_tree_root()); + state.latest_active_index_roots = + vec![genesis_active_index_root; spec.latest_active_index_roots_length as usize]; + + // Generate the current shuffling seed. 
+ state.current_shuffling_seed = state.generate_seed(spec.genesis_epoch, spec)?; + + Ok(()) +} + +impl From for GenesisError { + fn from(e: BlockProcessingError) -> GenesisError { + GenesisError::BlockProcessingError(e) + } +} + +impl From for GenesisError { + fn from(e: BeaconStateError) -> GenesisError { + GenesisError::BeaconStateError(e) + } +} diff --git a/eth2/state_processing/src/lib.rs b/eth2/state_processing/src/lib.rs index 2b30844cb..78dc7270d 100644 --- a/eth2/state_processing/src/lib.rs +++ b/eth2/state_processing/src/lib.rs @@ -1,10 +1,12 @@ #[macro_use] mod macros; +pub mod get_genesis_state; pub mod per_block_processing; pub mod per_epoch_processing; pub mod per_slot_processing; +pub use get_genesis_state::get_genesis_state; pub use per_block_processing::{ errors::{BlockInvalid, BlockProcessingError}, per_block_processing, per_block_processing_without_verifying_block_signature, diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index 2f1cc3551..d1bb4269a 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -45,7 +45,7 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result process_rewards_and_penalities(state, &mut statuses, &winning_root_for_shards, spec)?; // Ejections - state.process_ejections(spec); + state.process_ejections(spec)?; // Validator Registry process_validator_registry(state, spec)?; @@ -53,7 +53,7 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result // Final updates update_active_tree_index_roots(state, spec)?; update_latest_slashed_balances(state, spec); - clean_attestations(state, spec); + clean_attestations(state); // Rotate the epoch caches to suit the epoch transition. state.advance_caches(); @@ -472,6 +472,6 @@ pub fn update_latest_slashed_balances(state: &mut BeaconState, spec: &ChainSpec) /// Removes all pending attestations from the previous epoch. /// /// Spec v0.4.0 -pub fn clean_attestations(state: &mut BeaconState, spec: &ChainSpec) { +pub fn clean_attestations(state: &mut BeaconState) { state.previous_epoch_attestations = vec![]; } diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 8999d8be8..d7dbda782 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -10,9 +10,6 @@ use ssz_derive::{Decode, Encode, TreeHash}; use std::collections::HashMap; use test_random_derive::TestRandom; -pub use builder::BeaconStateBuilder; - -mod builder; mod epoch_cache; pub mod helpers; mod pubkey_cache; @@ -32,7 +29,8 @@ pub enum Error { InvalidBitfield, ValidatorIsWithdrawable, InsufficientRandaoMixes, - InsufficientValidators, + NoValidators, + UnableToDetermineProducer, InsufficientBlockRoots, InsufficientIndexRoots, InsufficientAttestations, @@ -534,7 +532,7 @@ impl BeaconState { /// /// If the state does not contain an index for a beacon proposer at the requested `slot`, then `None` is returned. 
/// - /// Spec v0.4.0 + /// Spec v0.5.0 pub fn get_beacon_proposer_index( &self, slot: Slot, @@ -547,14 +545,16 @@ impl BeaconState { .get_crosslink_committees_at_slot(slot, spec) .ok_or_else(|| Error::SlotOutOfBounds)?; + let epoch = slot.epoch(spec.slots_per_epoch); + committees .first() - .ok_or(Error::InsufficientValidators) + .ok_or(Error::UnableToDetermineProducer) .and_then(|first| { - let index = slot + let index = epoch .as_usize() .checked_rem(first.committee.len()) - .ok_or(Error::InsufficientValidators)?; + .ok_or(Error::UnableToDetermineProducer)?; Ok(first.committee[index]) }) } @@ -581,103 +581,9 @@ impl BeaconState { epoch + 1 + spec.activation_exit_delay } - /// Process multiple deposits in sequence. - /// - /// Builds a hashmap of validator pubkeys to validator index and passes it to each successive - /// call to `process_deposit(..)`. This requires much less computation than successive calls to - /// `process_deposits(..)` without the hashmap. - /// - /// Spec v0.4.0 - pub fn process_deposits( - &mut self, - deposits: Vec<&DepositData>, - spec: &ChainSpec, - ) -> Vec { - let mut added_indices = vec![]; - let mut pubkey_map: HashMap = HashMap::new(); - - for (i, validator) in self.validator_registry.iter().enumerate() { - pubkey_map.insert(validator.pubkey.clone(), i); - } - - for deposit_data in deposits { - let result = self.process_deposit( - deposit_data.deposit_input.clone(), - deposit_data.amount, - Some(&pubkey_map), - spec, - ); - if let Ok(index) = result { - added_indices.push(index); - } - } - added_indices - } - - /// Process a validator deposit, returning the validator index if the deposit is valid. - /// - /// Optionally accepts a hashmap of all validator pubkeys to their validator index. Without - /// this hashmap, each call to `process_deposits` requires an iteration though - /// `self.validator_registry`. This becomes highly inefficient at scale. - /// - /// TODO: this function also exists in a more optimal form in the `state_processing` crate as - /// `process_deposits`; unify these two functions. - /// - /// Spec v0.4.0 - pub fn process_deposit( - &mut self, - deposit_input: DepositInput, - amount: u64, - pubkey_map: Option<&HashMap>, - spec: &ChainSpec, - ) -> Result { - let proof_is_valid = deposit_input.proof_of_possession.verify( - &deposit_input.signed_root(), - spec.get_domain(self.current_epoch(&spec), Domain::Deposit, &self.fork), - &deposit_input.pubkey, - ); - - if !proof_is_valid { - return Err(()); - } - - let pubkey = deposit_input.pubkey.clone(); - let withdrawal_credentials = deposit_input.withdrawal_credentials.clone(); - - let validator_index = if let Some(pubkey_map) = pubkey_map { - pubkey_map.get(&pubkey).and_then(|i| Some(*i)) - } else { - self.validator_registry - .iter() - .position(|v| v.pubkey == pubkey) - }; - - if let Some(index) = validator_index { - if self.validator_registry[index].withdrawal_credentials == withdrawal_credentials { - safe_add_assign!(self.validator_balances[index], amount); - Ok(index) - } else { - Err(()) - } - } else { - let validator = Validator { - pubkey, - withdrawal_credentials, - activation_epoch: spec.far_future_epoch, - exit_epoch: spec.far_future_epoch, - withdrawable_epoch: spec.far_future_epoch, - initiated_exit: false, - slashed: false, - }; - self.validator_registry.push(validator); - self.validator_balances.push(amount); - Ok(self.validator_registry.len() - 1) - } - } - /// Activate the validator of the given ``index``. 
/// - /// Spec v0.4.0 + /// Spec v0.5.0 pub fn activate_validator( &mut self, validator_index: usize, diff --git a/eth2/types/src/beacon_state/builder.rs b/eth2/types/src/beacon_state/builder.rs deleted file mode 100644 index 780ec9b8b..000000000 --- a/eth2/types/src/beacon_state/builder.rs +++ /dev/null @@ -1,101 +0,0 @@ -use super::BeaconStateError; -use crate::validator_registry::get_active_validator_indices; -use crate::*; -use rayon::prelude::*; -use ssz::TreeHash; - -/// Builds a `BeaconState` for use in production. -/// -/// This struct should _not_ be modified for use in testing scenarios. Use `TestingBeaconStateBuilder` for that purpose. -/// -/// This struct should remain safe and sensible for production usage. -pub struct BeaconStateBuilder { - pub state: BeaconState, -} - -impl BeaconStateBuilder { - /// Create a new builder with the given number of validators. - /// - /// Spec v0.4.0 - pub fn new(genesis_time: u64, latest_eth1_data: Eth1Data, spec: &ChainSpec) -> Self { - Self { - state: BeaconState::genesis(genesis_time, latest_eth1_data, spec), - } - } - - /// Process deposit objects. - /// - /// Spec v0.4.0 - pub fn process_initial_deposits( - &mut self, - initial_validator_deposits: &[Deposit], - spec: &ChainSpec, - ) { - let deposit_data = initial_validator_deposits - .par_iter() - .map(|deposit| &deposit.deposit_data) - .collect(); - - self.state.process_deposits(deposit_data, spec); - - self.activate_genesis_validators(spec); - - self.state.deposit_index = initial_validator_deposits.len() as u64; - } - - fn activate_genesis_validators(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> { - for validator_index in 0..self.state.validator_registry.len() { - if self.state.get_effective_balance(validator_index, spec)? >= spec.max_deposit_amount { - self.state.activate_validator(validator_index, true, spec); - } - } - - Ok(()) - } - - /// Instantiate the validator registry from a YAML file. - /// - /// This skips a lot of signing and verification, useful if signing and verification has been - /// completed previously. - /// - /// Spec v0.4.0 - pub fn import_existing_validators( - &mut self, - validators: Vec, - initial_balances: Vec, - deposit_index: u64, - spec: &ChainSpec, - ) { - self.state.validator_registry = validators; - - assert_eq!( - self.state.validator_registry.len(), - initial_balances.len(), - "Not enough balances for validators" - ); - - self.state.validator_balances = initial_balances; - - self.activate_genesis_validators(spec); - - self.state.deposit_index = deposit_index; - } - - /// Updates the final state variables and returns a fully built genesis state. 
- /// - /// Spec v0.4.0 - pub fn build(mut self, spec: &ChainSpec) -> Result { - let genesis_active_index_root = - get_active_validator_indices(&self.state.validator_registry, spec.genesis_epoch) - .hash_tree_root(); - - self.state.latest_active_index_roots = vec![ - Hash256::from_slice(&genesis_active_index_root); - spec.latest_active_index_roots_length - ]; - - self.state.current_shuffling_seed = self.state.generate_seed(spec.genesis_epoch, spec)?; - - Ok(self.state) - } -} diff --git a/eth2/types/src/beacon_state/epoch_cache.rs b/eth2/types/src/beacon_state/epoch_cache.rs index 4436972f1..0dbdf4054 100644 --- a/eth2/types/src/beacon_state/epoch_cache.rs +++ b/eth2/types/src/beacon_state/epoch_cache.rs @@ -4,6 +4,8 @@ use honey_badger_split::SplitExt; use serde_derive::{Deserialize, Serialize}; use swap_or_not_shuffle::shuffle_list; +mod tests; + #[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] pub struct EpochCache { /// `Some(epoch)` if the cache is initialized, where `epoch` is the cache it holds. @@ -247,7 +249,7 @@ impl EpochCrosslinkCommitteesBuilder { pub fn build(self, spec: &ChainSpec) -> Result { if self.active_validator_indices.is_empty() { - return Err(Error::InsufficientValidators); + return Err(Error::NoValidators); } let shuffled_active_validator_indices = shuffle_list( @@ -277,7 +279,7 @@ impl EpochCrosslinkCommitteesBuilder { let crosslink_committee = CrosslinkCommittee { slot, shard, - committee: committees.remove(j), + committee: committees[j].drain(..).collect(), }; epoch_crosslink_committees.crosslink_committees[i].push(crosslink_committee); diff --git a/eth2/types/src/beacon_state/epoch_cache/tests.rs b/eth2/types/src/beacon_state/epoch_cache/tests.rs new file mode 100644 index 000000000..10df635f2 --- /dev/null +++ b/eth2/types/src/beacon_state/epoch_cache/tests.rs @@ -0,0 +1,142 @@ +#![cfg(test)] + +use super::*; +use crate::test_utils::*; +use swap_or_not_shuffle::shuffle_list; + +fn do_sane_cache_test( + state: BeaconState, + epoch: Epoch, + validator_count: usize, + expected_seed: Hash256, + expected_shuffling_start: u64, + spec: &ChainSpec, +) { + let active_indices: Vec = (0..validator_count).collect(); + assert_eq!( + &active_indices[..], + state.get_active_validator_indices(epoch, &spec).unwrap(), + "Validator indices mismatch" + ); + + let shuffling = shuffle_list( + active_indices, + spec.shuffle_round_count, + &expected_seed[..], + true, + ) + .unwrap(); + + let committees_per_epoch = spec.get_epoch_committee_count(shuffling.len()); + let committees_per_slot = committees_per_epoch / spec.slots_per_epoch; + + let mut expected_indices_iter = shuffling.iter(); + let mut shard_counter = expected_shuffling_start; + + for (i, slot) in epoch.slot_iter(spec.slots_per_epoch).enumerate() { + let crosslink_committees_at_slot = + state.get_crosslink_committees_at_slot(slot, &spec).unwrap(); + + assert_eq!( + crosslink_committees_at_slot.len(), + committees_per_slot as usize, + "Bad committees per slot ({})", + i + ); + + for c in crosslink_committees_at_slot { + assert_eq!(c.shard, shard_counter, "Bad shard"); + shard_counter += 1; + shard_counter %= spec.shard_count; + + for &i in &c.committee { + assert_eq!( + i, + *expected_indices_iter.next().unwrap(), + "Non-sequential validators." 
+ ); + } + } + } +} + +fn setup_sane_cache_test(validator_count: usize, spec: &ChainSpec) -> BeaconState { + let mut builder = + TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, spec); + + let epoch = spec.genesis_epoch + 4; + let slot = epoch.start_slot(spec.slots_per_epoch); + builder.teleport_to_slot(slot, spec); + + let (mut state, _keypairs) = builder.build(); + + state.current_shuffling_start_shard = 0; + state.current_shuffling_seed = Hash256::from_slice(&[1; 32]); + + state.previous_shuffling_start_shard = spec.shard_count - 1; + state.previous_shuffling_seed = Hash256::from_slice(&[2; 32]); + + state + .build_epoch_cache(RelativeEpoch::Previous, spec) + .unwrap(); + state + .build_epoch_cache(RelativeEpoch::Current, spec) + .unwrap(); + state + .build_epoch_cache(RelativeEpoch::NextWithRegistryChange, spec) + .unwrap(); + state + .build_epoch_cache(RelativeEpoch::NextWithoutRegistryChange, spec) + .unwrap(); + + state +} + +#[test] +fn builds_sane_current_epoch_cache() { + let mut spec = ChainSpec::few_validators(); + spec.shard_count = 4; + let validator_count = (spec.shard_count * spec.target_committee_size) + 1; + let state = setup_sane_cache_test(validator_count as usize, &spec); + do_sane_cache_test( + state.clone(), + state.current_epoch(&spec), + validator_count as usize, + state.current_shuffling_seed, + state.current_shuffling_start_shard, + &spec, + ); +} + +#[test] +fn builds_sane_previous_epoch_cache() { + let mut spec = ChainSpec::few_validators(); + spec.shard_count = 2; + let validator_count = (spec.shard_count * spec.target_committee_size) + 1; + let state = setup_sane_cache_test(validator_count as usize, &spec); + do_sane_cache_test( + state.clone(), + state.previous_epoch(&spec), + validator_count as usize, + state.previous_shuffling_seed, + state.previous_shuffling_start_shard, + &spec, + ); +} + +#[test] +fn builds_sane_next_without_update_epoch_cache() { + let mut spec = ChainSpec::few_validators(); + spec.shard_count = 2; + let validator_count = (spec.shard_count * spec.target_committee_size) + 1; + let mut state = setup_sane_cache_test(validator_count as usize, &spec); + state.validator_registry_update_epoch = state.slot.epoch(spec.slots_per_epoch); + do_sane_cache_test( + state.clone(), + state.next_epoch(&spec), + validator_count as usize, + state.current_shuffling_seed, + state.current_shuffling_start_shard, + &spec, + ); +} diff --git a/eth2/types/src/test_utils/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/testing_beacon_state_builder.rs index 54e2fbe96..8180673d1 100644 --- a/eth2/types/src/test_utils/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_state_builder.rs @@ -1,5 +1,4 @@ use super::{generate_deterministic_keypairs, KeypairsFile}; -use crate::beacon_state::BeaconStateBuilder; use crate::test_utils::TestingPendingAttestationBuilder; use crate::*; use bls::get_withdrawal_credentials; @@ -110,7 +109,8 @@ impl TestingBeaconStateBuilder { Validator { pubkey: keypair.pk.clone(), withdrawal_credentials, - activation_epoch: spec.far_future_epoch, + // All validators start active. 
+ activation_epoch: spec.genesis_epoch, exit_epoch: spec.far_future_epoch, withdrawable_epoch: spec.far_future_epoch, initiated_exit: false, @@ -119,7 +119,7 @@ impl TestingBeaconStateBuilder { }) .collect(); - let mut state_builder = BeaconStateBuilder::new( + let mut state = BeaconState::genesis( 0, Eth1Data { deposit_root: Hash256::zero(), @@ -131,14 +131,8 @@ impl TestingBeaconStateBuilder { let balances = vec![32_000_000_000; validator_count]; debug!("Importing {} existing validators...", validator_count); - state_builder.import_existing_validators( - validators, - balances, - validator_count as u64, - spec, - ); - - let state = state_builder.build(spec).unwrap(); + state.validator_registry = validators; + state.validator_balances = balances; debug!("BeaconState built."); From 2e0c8e2e479ea9e22a303681d697a74cf6ca98ee Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sun, 17 Mar 2019 23:14:28 +1100 Subject: [PATCH 105/154] Handle peer dials and propagate to message handler --- beacon_node/libp2p/src/behaviour.rs | 20 ++++++---- beacon_node/libp2p/src/lib.rs | 3 +- beacon_node/libp2p/src/rpc/mod.rs | 44 ++++++++++++++++------ beacon_node/libp2p/src/rpc/protocol.rs | 22 +++++------ beacon_node/libp2p/src/service.rs | 10 +++-- beacon_node/network/src/message_handler.rs | 16 ++++++-- beacon_node/network/src/messages.rs | 4 +- beacon_node/network/src/service.rs | 6 +++ 8 files changed, 84 insertions(+), 41 deletions(-) diff --git a/beacon_node/libp2p/src/behaviour.rs b/beacon_node/libp2p/src/behaviour.rs index 96355cf3f..604b84c8f 100644 --- a/beacon_node/libp2p/src/behaviour.rs +++ b/beacon_node/libp2p/src/behaviour.rs @@ -1,4 +1,4 @@ -use crate::rpc::{Rpc, RpcEvent}; +use crate::rpc::{RPCEvent, RPCMessage, Rpc}; use futures::prelude::*; use libp2p::{ core::swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess}, @@ -38,19 +38,24 @@ impl NetworkBehaviourEventProcess NetworkBehaviourEventProcess +impl NetworkBehaviourEventProcess for Behaviour { - fn inject_event(&mut self, event: RpcEvent) { - self.events.push(BehaviourEvent::RPC(event)); + fn inject_event(&mut self, event: RPCMessage) { + match event { + RPCMessage::PeerDialed(peer_id) => { + self.events.push(BehaviourEvent::PeerDialed(peer_id)) + } + RPCMessage::RPC(rpc_event) => self.events.push(BehaviourEvent::RPC(rpc_event)), + } } } impl Behaviour { - pub fn new(local_peer_id: PeerId, gs_config: GossipsubConfig) -> Self { + pub fn new(local_peer_id: PeerId, gs_config: GossipsubConfig, log: &slog::Logger) -> Self { Behaviour { gossipsub: Gossipsub::new(local_peer_id, gs_config), - serenity_rpc: Rpc::new(), + serenity_rpc: Rpc::new(log), events: Vec::new(), } } @@ -80,7 +85,8 @@ impl Behaviour { /// The types of events than can be obtained from polling the behaviour. 
pub enum BehaviourEvent { - RPC(RpcEvent), + RPC(RPCEvent), + PeerDialed(PeerId), // TODO: This is a stub at the moment Message(String), } diff --git a/beacon_node/libp2p/src/lib.rs b/beacon_node/libp2p/src/lib.rs index 69f6eb650..f3e97355d 100644 --- a/beacon_node/libp2p/src/lib.rs +++ b/beacon_node/libp2p/src/lib.rs @@ -13,8 +13,7 @@ pub use libp2p::{ PeerId, }; pub use network_config::NetworkConfig; -pub use rpc::HelloMessage; -pub use rpc::RpcEvent; +pub use rpc::{HelloMessage, RPCEvent}; pub use service::Libp2pEvent; pub use service::Service; pub use types::multiaddr; diff --git a/beacon_node/libp2p/src/rpc/mod.rs b/beacon_node/libp2p/src/rpc/mod.rs index 3420217ce..d40e53935 100644 --- a/beacon_node/libp2p/src/rpc/mod.rs +++ b/beacon_node/libp2p/src/rpc/mod.rs @@ -12,7 +12,8 @@ use libp2p::core::swarm::{ }; use libp2p::{Multiaddr, PeerId}; pub use methods::{HelloMessage, RPCMethod, RPCRequest, RPCResponse}; -pub use protocol::{RPCProtocol, RpcEvent}; +pub use protocol::{RPCEvent, RPCProtocol}; +use slog::{debug, o, Logger}; use std::marker::PhantomData; use tokio::io::{AsyncRead, AsyncWrite}; @@ -21,22 +22,26 @@ use tokio::io::{AsyncRead, AsyncWrite}; pub struct Rpc { /// Queue of events to processed. - events: Vec>, + events: Vec>, /// Pins the generic substream. marker: PhantomData, + /// Slog logger for RPC behaviour. + log: slog::Logger, } impl Rpc { - pub fn new() -> Self { + pub fn new(log: &slog::Logger) -> Self { + let log = log.new(o!("Service" => "Libp2p-RPC")); Rpc { events: Vec::new(), marker: PhantomData, + log, } } /// Submits and RPC request. pub fn send_request(&mut self, peer_id: PeerId, id: u64, method_id: u16, body: RPCRequest) { - let request = RpcEvent::Request { + let request = RPCEvent::Request { id, method_id, body, @@ -52,8 +57,8 @@ impl NetworkBehaviour for Rpc where TSubstream: AsyncRead + AsyncWrite, { - type ProtocolsHandler = OneShotHandler; - type OutEvent = RpcEvent; + type ProtocolsHandler = OneShotHandler; + type OutEvent = RPCMessage; fn new_handler(&mut self) -> Self::ProtocolsHandler { Default::default() @@ -63,7 +68,14 @@ where Vec::new() } - fn inject_connected(&mut self, _: PeerId, _: ConnectedPoint) {} + fn inject_connected(&mut self, peer_id: PeerId, connected_point: ConnectedPoint) { + // if initialised the connection, report this upwards to send the HELLO request + if let ConnectedPoint::Dialer { address } = connected_point { + self.events.push(NetworkBehaviourAction::GenerateEvent( + RPCMessage::PeerDialed(peer_id), + )); + } + } fn inject_disconnected(&mut self, _: &PeerId, _: ConnectedPoint) {} @@ -80,7 +92,9 @@ where // send the event to the user self.events - .push(NetworkBehaviourAction::GenerateEvent(event)); + .push(NetworkBehaviourAction::GenerateEvent(RPCMessage::RPC( + event, + ))); } fn poll( @@ -99,18 +113,24 @@ where } } -/// Transmission between the `OneShotHandler` and the `RpcEvent`. +/// Messages sent to the user from the RPC protocol. +pub enum RPCMessage { + RPC(RPCEvent), + PeerDialed(PeerId), +} + +/// Transmission between the `OneShotHandler` and the `RPCEvent`. #[derive(Debug)] pub enum OneShotEvent { /// We received an RPC from a remote. - Rx(RpcEvent), + Rx(RPCEvent), /// We successfully sent an RPC request. 
Sent, } -impl From for OneShotEvent { +impl From for OneShotEvent { #[inline] - fn from(rpc: RpcEvent) -> OneShotEvent { + fn from(rpc: RPCEvent) -> OneShotEvent { OneShotEvent::Rx(rpc) } } diff --git a/beacon_node/libp2p/src/rpc/protocol.rs b/beacon_node/libp2p/src/rpc/protocol.rs index 74b8322eb..dce714429 100644 --- a/beacon_node/libp2p/src/rpc/protocol.rs +++ b/beacon_node/libp2p/src/rpc/protocol.rs @@ -31,7 +31,7 @@ impl Default for RPCProtocol { /// The RPC types which are sent/received in this protocol. #[derive(Debug, Clone)] -pub enum RpcEvent { +pub enum RPCEvent { Request { id: u64, method_id: u16, @@ -44,7 +44,7 @@ pub enum RpcEvent { }, } -impl UpgradeInfo for RpcEvent { +impl UpgradeInfo for RPCEvent { type Info = &'static [u8]; type InfoIter = iter::Once; @@ -58,17 +58,17 @@ impl InboundUpgrade for RPCProtocol where TSocket: AsyncRead + AsyncWrite, { - type Output = RpcEvent; + type Output = RPCEvent; type Error = DecodeError; type Future = - upgrade::ReadOneThen, ()) -> Result>; + upgrade::ReadOneThen, ()) -> Result>; fn upgrade_inbound(self, socket: TSocket, _: Self::Info) -> Self::Future { upgrade::read_one_then(socket, MAX_READ_SIZE, (), |packet, ()| Ok(decode(packet)?)) } } -fn decode(packet: Vec) -> Result { +fn decode(packet: Vec) -> Result { // decode the header of the rpc // request/response let (request, index) = bool::ssz_decode(&packet, 0)?; @@ -84,7 +84,7 @@ fn decode(packet: Vec) -> Result { RPCMethod::Unknown => return Err(DecodeError::UnknownRPCMethod), }; - return Ok(RpcEvent::Request { + return Ok(RPCEvent::Request { id, method_id, body, @@ -99,7 +99,7 @@ fn decode(packet: Vec) -> Result { } RPCMethod::Unknown => return Err(DecodeError::UnknownRPCMethod), }; - return Ok(RpcEvent::Response { + return Ok(RPCEvent::Response { id, method_id, result, @@ -107,7 +107,7 @@ fn decode(packet: Vec) -> Result { } } -impl OutboundUpgrade for RpcEvent +impl OutboundUpgrade for RPCEvent where TSocket: AsyncWrite, { @@ -122,10 +122,10 @@ where } } -impl Encodable for RpcEvent { +impl Encodable for RPCEvent { fn ssz_append(&self, s: &mut SszStream) { match self { - RpcEvent::Request { + RPCEvent::Request { id, method_id, body, @@ -137,7 +137,7 @@ impl Encodable for RpcEvent { RPCRequest::Hello(body) => s.append(body), }; } - RpcEvent::Response { + RPCEvent::Response { id, method_id, result, diff --git a/beacon_node/libp2p/src/service.rs b/beacon_node/libp2p/src/service.rs index a672e153b..dd6deabad 100644 --- a/beacon_node/libp2p/src/service.rs +++ b/beacon_node/libp2p/src/service.rs @@ -1,7 +1,7 @@ use crate::behaviour::{Behaviour, BehaviourEvent}; use crate::error; use crate::multiaddr::Protocol; -use crate::rpc::RpcEvent; +use crate::rpc::RPCEvent; use crate::NetworkConfig; use futures::prelude::*; use futures::Stream; @@ -41,7 +41,7 @@ impl Service { // Set up the transport let transport = build_transport(local_private_key); // Set up gossipsub routing - let behaviour = Behaviour::new(local_peer_id.clone(), config.gs_config); + let behaviour = Behaviour::new(local_peer_id.clone(), config.gs_config, &log); // Set up Topology let topology = local_peer_id.clone(); Swarm::new(transport, behaviour, topology) @@ -108,6 +108,9 @@ impl Stream for Service { Ok(Async::Ready(Some(BehaviourEvent::RPC(event)))) => { return Ok(Async::Ready(Some(Libp2pEvent::RPC(event)))); } + Ok(Async::Ready(Some(BehaviourEvent::PeerDialed(peer_id)))) => { + return Ok(Async::Ready(Some(Libp2pEvent::PeerDialed(peer_id)))); + } Ok(Async::Ready(None)) => unreachable!("Swarm stream shouldn't end"), 
Ok(Async::NotReady) => break, _ => break, @@ -155,6 +158,7 @@ fn build_transport( /// Events that can be obtained from polling the Libp2p Service. pub enum Libp2pEvent { // We have received an RPC event on the swarm - RPC(RpcEvent), + RPC(RPCEvent), + PeerDialed(PeerId), Message(String), } diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index fe9780ad5..c059795ed 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -4,30 +4,37 @@ use crossbeam_channel::{unbounded as channel, Sender, TryRecvError}; use futures::future; use futures::prelude::*; use libp2p::rpc; -use libp2p::{PeerId, RpcEvent}; +use libp2p::{PeerId, RPCEvent}; use slog::debug; +use std::collections::HashMap; +use std::time::{Duration, Instant}; use sync::SimpleSync; use types::Hash256; +/// Timeout for establishing a HELLO handshake. +const HELLO_TIMEOUT: Duration = Duration::from_secs(30); + /// Handles messages received from the network and client and organises syncing. pub struct MessageHandler { sync: SimpleSync, //TODO: Implement beacon chain //chain: BeaconChain + /// A mapping of peers we have sent a HELLO rpc request to + hello_requests: HashMap, log: slog::Logger, } /// Types of messages the handler can receive. #[derive(Debug, Clone)] pub enum HandlerMessage { - /// Peer has connected. - PeerConnected(PeerId), + /// We have initiated a connection to a new peer. + PeerDialed(PeerId), /// Peer has disconnected, PeerDisconnected(PeerId), /// A Node message has been received. Message(PeerId, NodeMessage), /// An RPC response/request has been received. - RPC(RpcEvent), + RPC(RPCEvent), } impl MessageHandler { @@ -49,6 +56,7 @@ impl MessageHandler { //TODO: Initialise beacon chain let mut handler = MessageHandler { sync, + hello_requests: HashMap::new(), log: log.clone(), }; diff --git a/beacon_node/network/src/messages.rs b/beacon_node/network/src/messages.rs index 064424a87..930c90b3e 100644 --- a/beacon_node/network/src/messages.rs +++ b/beacon_node/network/src/messages.rs @@ -1,11 +1,11 @@ use libp2p::PeerId; -use libp2p::{HelloMessage, RpcEvent}; +use libp2p::{HelloMessage, RPCEvent}; use types::{Hash256, Slot}; /// Messages between nodes across the network. #[derive(Debug, Clone)] pub enum NodeMessage { - RPC(RpcEvent), + RPC(RPCEvent), BlockRequest, // TODO: only for testing - remove Message(String), diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index bd01027e9..fc91cf53a 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -108,6 +108,12 @@ fn network_service( .send(HandlerMessage::RPC(rpc_event)) .map_err(|_| "failed to send rpc to handler"); } + Ok(Async::Ready(Some(Libp2pEvent::PeerDialed(peer_id)))) => { + debug!(libp2p_service.log, "Peer Dialed: {:?}", peer_id); + message_handler_send + .send(HandlerMessage::PeerDialed(peer_id)) + .map_err(|_| "failed to send rpc to handler"); + } Ok(Async::Ready(Some(Libp2pEvent::Message(m)))) => debug!( libp2p_service.log, "Network Service: Message received: {}", m From 979353f136b7c9c434050483c1a244c1bdb48d12 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 17 Mar 2019 23:23:45 +1100 Subject: [PATCH 106/154] Make separate errors for epoch cache. Helps with troubleshooting. 
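As a quick illustration of the pattern this patch introduces (a sketch only, not part of the diff; the type names are simplified and the callers below are hypothetical): the epoch cache gets its own error enum, and a `From` impl folds it into the beacon-state error type, so cache failures can be propagated with `?` while the failing subsystem stays visible when troubleshooting.

    #[derive(Debug, PartialEq)]
    pub enum EpochCacheError {
        UnableToShuffle,
        UnableToGenerateSeed,
    }

    #[derive(Debug, PartialEq)]
    pub enum BeaconStateError {
        EpochCacheError(EpochCacheError),
        // ...other state errors elided...
    }

    impl From<EpochCacheError> for BeaconStateError {
        fn from(e: EpochCacheError) -> BeaconStateError {
            BeaconStateError::EpochCacheError(e)
        }
    }

    // Hypothetical caller: it returns the outer error type, but `?` converts
    // the inner cache error automatically via the `From` impl above, and the
    // Debug output still names the epoch cache as the source of the failure.
    fn build_committees() -> Result<(), BeaconStateError> {
        shuffle_validators()?;
        Ok(())
    }

    // Hypothetical cache operation that can only fail with a cache error.
    fn shuffle_validators() -> Result<(), EpochCacheError> {
        Err(EpochCacheError::UnableToShuffle)
    }
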
--- eth2/types/src/beacon_state.rs | 12 ++++++++--- eth2/types/src/beacon_state/epoch_cache.rs | 23 ++++++++++++++++------ 2 files changed, 26 insertions(+), 9 deletions(-) diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index d7dbda782..1a77d3449 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -1,13 +1,12 @@ -use self::epoch_cache::EpochCache; +use self::epoch_cache::{EpochCache, Error as EpochCacheError}; use crate::test_utils::TestRandom; use crate::*; use int_to_bytes::int_to_bytes32; use pubkey_cache::PubkeyCache; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz::{hash, ssz_encode, SignedRoot, TreeHash}; +use ssz::{hash, ssz_encode, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash}; -use std::collections::HashMap; use test_random_derive::TestRandom; mod epoch_cache; @@ -44,6 +43,7 @@ pub enum Error { registry_len: usize, }, RelativeEpochError(RelativeEpochError), + EpochCacheError(EpochCacheError), } macro_rules! safe_add_assign { @@ -883,3 +883,9 @@ impl From for Error { Error::RelativeEpochError(e) } } + +impl From for Error { + fn from(e: EpochCacheError) -> Error { + Error::EpochCacheError(e) + } +} diff --git a/eth2/types/src/beacon_state/epoch_cache.rs b/eth2/types/src/beacon_state/epoch_cache.rs index 0dbdf4054..75d791e8f 100644 --- a/eth2/types/src/beacon_state/epoch_cache.rs +++ b/eth2/types/src/beacon_state/epoch_cache.rs @@ -1,9 +1,16 @@ -use super::{BeaconState, Error}; +use super::BeaconState; use crate::*; use honey_badger_split::SplitExt; use serde_derive::{Deserialize, Serialize}; use swap_or_not_shuffle::shuffle_list; +#[derive(Debug, PartialEq)] +pub enum Error { + UnableToShuffle, + NoValidators { epoch: Epoch }, + UnableToGenerateSeed, +} + mod tests; #[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] @@ -212,7 +219,7 @@ impl EpochCrosslinkCommitteesBuilder { active_validator_indices: Vec, registry_change: bool, spec: &ChainSpec, - ) -> Result { + ) -> Result { let current_epoch = state.current_epoch(spec); let next_epoch = state.next_epoch(spec); let committees_per_epoch = spec.get_epoch_committee_count(active_validator_indices.len()); @@ -221,7 +228,9 @@ impl EpochCrosslinkCommitteesBuilder { current_epoch - state.validator_registry_update_epoch; let (seed, shuffling_start_shard) = if registry_change { - let next_seed = state.generate_seed(next_epoch, spec)?; + let next_seed = state + .generate_seed(next_epoch, spec) + .map_err(|_| Error::UnableToGenerateSeed)?; ( next_seed, (state.current_shuffling_start_shard + committees_per_epoch) % spec.shard_count, @@ -229,7 +238,9 @@ impl EpochCrosslinkCommitteesBuilder { } else if (epochs_since_last_registry_update > 1) & epochs_since_last_registry_update.is_power_of_two() { - let next_seed = state.generate_seed(next_epoch, spec)?; + let next_seed = state + .generate_seed(next_epoch, spec) + .map_err(|_| Error::UnableToGenerateSeed)?; (next_seed, state.current_shuffling_start_shard) } else { ( @@ -247,9 +258,9 @@ impl EpochCrosslinkCommitteesBuilder { }) } - pub fn build(self, spec: &ChainSpec) -> Result { + pub fn build(self, spec: &ChainSpec) -> Result { if self.active_validator_indices.is_empty() { - return Err(Error::NoValidators); + return Err(Error::NoValidators { epoch: self.epoch }); } let shuffled_active_validator_indices = shuffle_list( From 191761f356bb72b2ed06790360f40d56e0ab3856 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 17 Mar 2019 23:32:27 +1100 Subject: [PATCH 107/154] Allow epoch cache 
with zero validators. --- .../per_block_processing/verify_deposit.rs | 3 --- .../process_validator_registry.rs | 4 +-- eth2/types/src/beacon_state/epoch_cache.rs | 25 ++++++++++--------- .../testing_beacon_state_builder.rs | 2 +- 4 files changed, 16 insertions(+), 18 deletions(-) diff --git a/eth2/state_processing/src/per_block_processing/verify_deposit.rs b/eth2/state_processing/src/per_block_processing/verify_deposit.rs index 2aeab6c5a..80d8bc24f 100644 --- a/eth2/state_processing/src/per_block_processing/verify_deposit.rs +++ b/eth2/state_processing/src/per_block_processing/verify_deposit.rs @@ -3,11 +3,8 @@ use hashing::hash; use merkle_proof::verify_merkle_proof; use ssz::ssz_encode; use ssz_derive::Encode; -use std::collections::HashMap; use types::*; -pub type PublicKeyValidatorIndexHashmap = HashMap; - /// Indicates if a `Deposit` is valid to be included in a block in the current epoch of the given /// state. /// diff --git a/eth2/state_processing/src/per_epoch_processing/process_validator_registry.rs b/eth2/state_processing/src/per_epoch_processing/process_validator_registry.rs index c830bfc24..26ebd60b3 100644 --- a/eth2/state_processing/src/per_epoch_processing/process_validator_registry.rs +++ b/eth2/state_processing/src/per_epoch_processing/process_validator_registry.rs @@ -14,7 +14,7 @@ pub fn process_validator_registry(state: &mut BeaconState, spec: &ChainSpec) -> state.previous_shuffling_seed = state.current_shuffling_seed; if should_update_validator_registry(state, spec)? { - state.update_validator_registry(spec); + state.update_validator_registry(spec)?; state.current_shuffling_epoch = next_epoch; state.current_shuffling_start_shard = (state.current_shuffling_start_shard @@ -37,7 +37,7 @@ pub fn process_validator_registry(state: &mut BeaconState, spec: &ChainSpec) -> } } - state.process_slashings(spec); + state.process_slashings(spec)?; state.process_exit_queue(spec); Ok(()) diff --git a/eth2/types/src/beacon_state/epoch_cache.rs b/eth2/types/src/beacon_state/epoch_cache.rs index 75d791e8f..ca8bcc70e 100644 --- a/eth2/types/src/beacon_state/epoch_cache.rs +++ b/eth2/types/src/beacon_state/epoch_cache.rs @@ -7,7 +7,6 @@ use swap_or_not_shuffle::shuffle_list; #[derive(Debug, PartialEq)] pub enum Error { UnableToShuffle, - NoValidators { epoch: Epoch }, UnableToGenerateSeed, } @@ -259,17 +258,19 @@ impl EpochCrosslinkCommitteesBuilder { } pub fn build(self, spec: &ChainSpec) -> Result { - if self.active_validator_indices.is_empty() { - return Err(Error::NoValidators { epoch: self.epoch }); - } - - let shuffled_active_validator_indices = shuffle_list( - self.active_validator_indices, - spec.shuffle_round_count, - &self.shuffling_seed[..], - true, - ) - .ok_or_else(|| Error::UnableToShuffle)?; + // The shuffler fails on a empty list, so if there are no active validator indices, simply + // return an empty list. + let shuffled_active_validator_indices = if self.active_validator_indices.is_empty() { + vec![] + } else { + shuffle_list( + self.active_validator_indices, + spec.shuffle_round_count, + &self.shuffling_seed[..], + true, + ) + .ok_or_else(|| Error::UnableToShuffle)? 
+ }; let mut committees: Vec> = shuffled_active_validator_indices .honey_badger_split(self.committees_per_epoch as usize) diff --git a/eth2/types/src/test_utils/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/testing_beacon_state_builder.rs index 8180673d1..e76a01e49 100644 --- a/eth2/types/src/test_utils/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_state_builder.rs @@ -134,7 +134,7 @@ impl TestingBeaconStateBuilder { state.validator_registry = validators; state.validator_balances = balances; - debug!("BeaconState built."); + debug!("BeaconState initialized."); Self { state, keypairs } } From bbad4bfa19854d63bbc3237d4d4e0dea45f70bdb Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 18 Mar 2019 16:16:54 +1100 Subject: [PATCH 108/154] Starts initialisation of beacon chain in the client --- beacon_node/beacon_chain/src/initialise.rs | 146 +++++++++++++++++++++ beacon_node/beacon_chain/src/initialize.rs | 56 -------- beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/client/src/client_types.rs | 18 ++- beacon_node/client/src/lib.rs | 17 ++- beacon_node/network/Cargo.toml | 1 + beacon_node/network/src/message_handler.rs | 27 +++- beacon_node/network/src/service.rs | 11 +- beacon_node/src/run.rs | 2 +- eth2/types/src/chain_spec.rs | 8 +- 10 files changed, 207 insertions(+), 80 deletions(-) create mode 100644 beacon_node/beacon_chain/src/initialise.rs delete mode 100644 beacon_node/beacon_chain/src/initialize.rs diff --git a/beacon_node/beacon_chain/src/initialise.rs b/beacon_node/beacon_chain/src/initialise.rs new file mode 100644 index 000000000..131782470 --- /dev/null +++ b/beacon_node/beacon_chain/src/initialise.rs @@ -0,0 +1,146 @@ +// Initialisation functions to generate a new BeaconChain. +// Note: A new version of ClientTypes may need to be implemented for the lighthouse +// testnet. These are examples. Also. there is code duplication which can/should be cleaned up. + +use crate::BeaconChain; +use bls; +use db::stores::{BeaconBlockStore, BeaconStateStore}; +use db::{DiskDB, MemoryDB}; +use fork_choice::BitwiseLMDGhost; +use slot_clock::SystemTimeSlotClock; +use std::path::PathBuf; +use std::sync::Arc; +use types::{ChainSpec, Deposit, DepositData, DepositInput, Eth1Data, Hash256, Keypair}; + +//TODO: Correct this for prod +//TODO: Account for historical db +pub fn initialise_beacon_chain( + chain_spec: &ChainSpec, + db_name: Option<&PathBuf>, +) -> Arc>> { + // set up the db + let db = Arc::new(DiskDB::open( + db_name.expect("Database directory must be included"), + None, + )); + let block_store = Arc::new(BeaconBlockStore::new(db.clone())); + let state_store = Arc::new(BeaconStateStore::new(db.clone())); + + // Slot clock + let genesis_time = 1_549_935_547; // 12th Feb 2018 (arbitrary value in the past). + let slot_clock = SystemTimeSlotClock::new(genesis_time, chain_spec.seconds_per_slot) + .expect("Unable to load SystemTimeSlotClock"); + // Choose the fork choice + let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone()); + + /* + * Generate some random data to start a chain with. + * + * This is will need to be replace for production usage. + */ + let latest_eth1_data = Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + }; + let keypairs: Vec = (0..10) + .collect::>() + .iter() + .map(|_| Keypair::random()) + .collect(); + let initial_validator_deposits = keypairs + .iter() + .map(|keypair| Deposit { + branch: vec![], // branch verification is not chain_specified. 
+ index: 0, // index verification is not chain_specified. + deposit_data: DepositData { + amount: 32_000_000_000, // 32 ETH (in Gwei) + timestamp: genesis_time - 1, + deposit_input: DepositInput { + pubkey: keypair.pk.clone(), + withdrawal_credentials: Hash256::zero(), // Withdrawal not possible. + proof_of_possession: bls::create_proof_of_possession(&keypair), + }, + }, + }) + .collect(); + + // Genesis chain + // TODO:Remove the expect here. Propagate errors and handle somewhat gracefully. + Arc::new( + BeaconChain::genesis( + state_store.clone(), + block_store.clone(), + slot_clock, + genesis_time, + latest_eth1_data, + initial_validator_deposits, + chain_spec.clone(), + fork_choice, + ) + .expect("Cannot initialise a beacon chain. Exiting"), + ) +} + +/// Initialisation of a test beacon chain, uses an in memory db with fixed genesis time. +pub fn initialise_test_beacon_chain( + chain_spec: &ChainSpec, + _db_name: Option<&PathBuf>, +) -> Arc>> { + let db = Arc::new(MemoryDB::open()); + let block_store = Arc::new(BeaconBlockStore::new(db.clone())); + let state_store = Arc::new(BeaconStateStore::new(db.clone())); + + // Slot clock + let genesis_time = 1_549_935_547; // 12th Feb 2018 (arbitrary value in the past). + let slot_clock = SystemTimeSlotClock::new(genesis_time, chain_spec.seconds_per_slot) + .expect("Unable to load SystemTimeSlotClock"); + // Choose the fork choice + let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone()); + + /* + * Generate some random data to start a chain with. + * + * This is will need to be replace for production usage. + */ + let latest_eth1_data = Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + }; + let keypairs: Vec = (0..10) + .collect::>() + .iter() + .map(|_| Keypair::random()) + .collect(); + let initial_validator_deposits = keypairs + .iter() + .map(|keypair| Deposit { + branch: vec![], // branch verification is not chain_specified. + index: 0, // index verification is not chain_specified. + deposit_data: DepositData { + amount: 32_000_000_000, // 32 ETH (in Gwei) + timestamp: genesis_time - 1, + deposit_input: DepositInput { + pubkey: keypair.pk.clone(), + withdrawal_credentials: Hash256::zero(), // Withdrawal not possible. + proof_of_possession: bls::create_proof_of_possession(&keypair), + }, + }, + }) + .collect(); + + // Genesis chain + // TODO: Handle error correctly + Arc::new( + BeaconChain::genesis( + state_store.clone(), + block_store.clone(), + slot_clock, + genesis_time, + latest_eth1_data, + initial_validator_deposits, + chain_spec.clone(), + fork_choice, + ) + .expect("Cannot generate beacon chain"), + ) +} diff --git a/beacon_node/beacon_chain/src/initialize.rs b/beacon_node/beacon_chain/src/initialize.rs deleted file mode 100644 index 14d0f81a6..000000000 --- a/beacon_node/beacon_chain/src/initialize.rs +++ /dev/null @@ -1,56 +0,0 @@ -// Initialisation functions to generate a new BeaconChain. - -pub fn initialise_test_chain( - config: &ClientConfig, -) -> Arc> { - let spec = config.spec; - // Slot clock - let genesis_time = 1_549_935_547; // 12th Feb 2018 (arbitrary value in the past). - let slot_clock = SystemTimeSlotClock::new(genesis_time, spec.slot_duration) - .expect("Unable to load SystemTimeSlotClock"); - // Choose the fork choice - let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone()); - - /* - * Generate some random data to start a chain with. - * - * This is will need to be replace for production usage. 
- */ - let latest_eth1_data = Eth1Data { - deposit_root: Hash256::zero(), - block_hash: Hash256::zero(), - }; - let keypairs: Vec = (0..10) - .collect::>() - .iter() - .map(|_| Keypair::random()) - .collect(); - let initial_validator_deposits = keypairs - .iter() - .map(|keypair| Deposit { - branch: vec![], // branch verification is not specified. - index: 0, // index verification is not specified. - deposit_data: DepositData { - amount: 32_000_000_000, // 32 ETH (in Gwei) - timestamp: genesis_time - 1, - deposit_input: DepositInput { - pubkey: keypair.pk.clone(), - withdrawal_credentials: Hash256::zero(), // Withdrawal not possible. - proof_of_possession: create_proof_of_possession(&keypair), - }, - }, - }) - .collect(); - - // Genesis chain - Arc::new(BeaconChain::genesis( - state_store.clone(), - block_store.clone(), - slot_clock, - genesis_time, - latest_eth1_data, - initial_validator_deposits, - spec, - fork_choice, - )); -} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 0e879a415..89ee2029c 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -2,6 +2,7 @@ mod attestation_aggregator; mod beacon_chain; mod checkpoint; mod errors; +pub mod initialise; pub use self::beacon_chain::{BeaconChain, BlockProcessingOutcome, InvalidBlock, ValidBlock}; pub use self::checkpoint::CheckPoint; diff --git a/beacon_node/client/src/client_types.rs b/beacon_node/client/src/client_types.rs index 38ae1c8c3..744c9ab98 100644 --- a/beacon_node/client/src/client_types.rs +++ b/beacon_node/client/src/client_types.rs @@ -1,25 +1,39 @@ use db::{ClientDB, DiskDB, MemoryDB}; use fork_choice::{BitwiseLMDGhost, ForkChoice}; use slot_clock::{SlotClock, SystemTimeSlotClock, TestingSlotClock}; +use beacon_chain::initialise; +use std::sync::Arc; +use crate::ClientConfig pub trait ClientTypes { type ForkChoice: ForkChoice; type DB: ClientDB; type SlotClock: SlotClock; + + pub fn initialise_beacon_chain(cchain_spec: &ClientConfig) -> Arc>); } -pub struct StandardClientType {} +pub struct StandardClientType impl ClientTypes for StandardClientType { type DB = DiskDB; type ForkChoice = BitwiseLMDGhost; type SlotClock = SystemTimeSlotClock; + + pub fn initialise_beacon_chain(config: &ClientConfig) -> Arc>) { + initialise::initialise_beacon_chain(config.chain_spec, config.db_name) + } + } -pub struct TestingClientType {} +pub struct TestingClientType impl ClientTypes for TestingClientType { type DB = MemoryDB; type SlotClock = TestingSlotClock; type ForkChoice = BitwiseLMDGhost; + + pub fn initialise_beacon_chain(config: &ClientConfig) -> Arc>) { + initialise::initialise_test_beacon_chain(config.chain_spec, None) + } } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 7312cc6c8..46221c200 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -21,31 +21,36 @@ use tokio::runtime::TaskExecutor; /// sub-services in multiple threads. pub struct Client { config: ClientConfig, - // beacon_chain: Arc>, + beacon_chain: Arc>, pub network: Arc, pub exit: exit_future::Exit, pub exit_signal: Signal, log: slog::Logger, - phantom: PhantomData, } impl Client { /// Generate an instance of the client. Spawn and link all internal subprocesses. pub fn new( config: ClientConfig, + client_type: T, log: slog::Logger, executor: &TaskExecutor, ) -> error::Result { let (exit_signal, exit) = exit_future::signal(); - // TODO: generate a beacon_chain service. 
+ // generate a beacon chain + let beacon_chain = client_type.initialise_beacon_chain(&config); // Start the network service, libp2p and syncing threads // TODO: Add beacon_chain reference to network parameters - let network_config = config.net_conf.clone(); + let network_config = &config.net_conf; let network_logger = log.new(o!("Service" => "Network")); - let (network, network_send) = - NetworkService::new(network_config, executor, network_logger)?; + let (network, network_send) = NetworkService::new( + beacon_chain.clone(), + network_config, + executor, + network_logger, + )?; Ok(Client { config, diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 19d3e82ad..f1a7ed258 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -5,6 +5,7 @@ authors = ["Age Manning "] edition = "2018" [dependencies] +beacon_chain = { path = "../beacon_chain" } libp2p = { path = "../libp2p" } version = { path = "../version" } types = { path = "../../eth2/types" } diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index c059795ed..4ebedb89a 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -1,5 +1,6 @@ use crate::error; use crate::messages::NodeMessage; +use beacon_chain::BeaconChain; use crossbeam_channel::{unbounded as channel, Sender, TryRecvError}; use futures::future; use futures::prelude::*; @@ -7,6 +8,7 @@ use libp2p::rpc; use libp2p::{PeerId, RPCEvent}; use slog::debug; use std::collections::HashMap; +use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; use sync::SimpleSync; use types::Hash256; @@ -15,12 +17,14 @@ use types::Hash256; const HELLO_TIMEOUT: Duration = Duration::from_secs(30); /// Handles messages received from the network and client and organises syncing. -pub struct MessageHandler { +pub struct MessageHandler { + /// Currently loaded and initialised beacon chain. + chain: BeaconChain, + /// The syncing framework. sync: SimpleSync, - //TODO: Implement beacon chain - //chain: BeaconChain - /// A mapping of peers we have sent a HELLO rpc request to + /// A mapping of peers we have sent a HELLO rpc request to. hello_requests: HashMap, + /// The `MessageHandler` logger. log: slog::Logger, } @@ -37,9 +41,10 @@ pub enum HandlerMessage { RPC(RPCEvent), } -impl MessageHandler { +impl MessageHandler { /// Initializes and runs the MessageHandler. pub fn new( + beacon_chain: Arc>, executor: &tokio::runtime::TaskExecutor, log: slog::Logger, ) -> error::Result> { @@ -49,12 +54,13 @@ impl MessageHandler { // Initialise sync and begin processing in thread //TODO: Load genesis from BeaconChain + //TODO: Initialise beacon chain let temp_genesis = Hash256::zero(); // generate the Message handler let sync = SimpleSync::new(temp_genesis); - //TODO: Initialise beacon chain let mut handler = MessageHandler { + chain: beacon_chain, sync, hello_requests: HashMap::new(), log: log.clone(), @@ -74,6 +80,13 @@ impl MessageHandler { } fn handle_message(&mut self, message: HandlerMessage) { - debug!(self.log, "Message received {:?}", message); + match message { + HandlerMessage::PeerDialed(peer_id) => self.send_hello(peer_id), + //TODO: Handle all messages + _ => {} + } } + + /// Sends a HELLO RPC request to a newly connected peer. 
+ fn send_hello(&self, peer_id: PeerId) {} } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index fc91cf53a..6b9c0aff0 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -15,25 +15,28 @@ use libp2p::{Libp2pEvent, PeerId}; use slog::{debug, info, o, trace, warn, Logger}; use std::sync::{Arc, Mutex}; use tokio::runtime::TaskExecutor; +use client::ClientTypes; /// Service that handles communication between internal services and the libp2p network service. -pub struct Service { +pub struct Service { //libp2p_service: Arc>, libp2p_exit: oneshot::Sender<()>, network_send: crossbeam_channel::Sender, //message_handler: MessageHandler, //message_handler_send: Sender, + PhantomData: T, } -impl Service { +impl Service { pub fn new( - config: NetworkConfig, + beacon_chain: Arc, + config: &NetworkConfig, executor: &TaskExecutor, log: slog::Logger, ) -> error::Result<(Arc, Sender)> { // launch message handler thread let message_handler_log = log.new(o!("Service" => "MessageHandler")); - let message_handler_send = MessageHandler::new(executor, message_handler_log)?; + let message_handler_send = MessageHandler::new(beacon_chain, executor, message_handler_log)?; // launch libp2p service let libp2p_log = log.new(o!("Service" => "Libp2p")); diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index 810f2aeaf..b7cbf5421 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -31,7 +31,7 @@ pub fn run_beacon_node(config: ClientConfig, log: slog::Logger) -> error::Result let executor = runtime.executor(); - // currently testing - using TestingNode type + // currently testing - using TestingClientType let client: Client = Client::new(config, log.clone(), &executor)?; notifier::run(&client, executor, exit); diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index ef2c94d65..c6093231b 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -257,10 +257,10 @@ impl ChainSpec { .parse() .expect("correct multiaddr")]; - let mut standard_spec = ChainSpec::foundation(); - standard_spec.boot_nodes = boot_nodes; - - standard_spec + Self { + boot_nodes, + ..ChainSpec::foundation() + } } /// Returns a `ChainSpec` compatible with the specification suitable for 8 validators. From edeace9e7596b93e30b0955b3b09a32e6b09e0e6 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 18 Mar 2019 16:53:59 +1100 Subject: [PATCH 109/154] Fix issues with building on genesis block --- beacon_node/beacon_chain/src/beacon_chain.rs | 129 +++++++++--------- .../src/per_slot_processing.rs | 38 +++++- eth2/types/src/beacon_block.rs | 24 +++- eth2/types/src/beacon_block_header.rs | 9 ++ eth2/types/src/beacon_state.rs | 76 ++++++++++- 5 files changed, 202 insertions(+), 74 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 1082f6cab..bf87adf10 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -82,20 +82,18 @@ where let state_root = genesis_state.canonical_root(); state_store.put(&state_root, &ssz_encode(&genesis_state)[..])?; - let block_root = genesis_block.canonical_root(); + let block_root = genesis_block.into_header().canonical_root(); block_store.put(&block_root, &ssz_encode(&genesis_block)[..])?; let finalized_head = RwLock::new(CheckPoint::new( genesis_block.clone(), block_root, - // TODO: this is a memory waste; remove full clone. 
genesis_state.clone(), state_root, )); let canonical_head = RwLock::new(CheckPoint::new( genesis_block.clone(), block_root, - // TODO: this is a memory waste; remove full clone. genesis_state.clone(), state_root, )); @@ -190,10 +188,13 @@ where /// processing applied to it. pub fn advance_state(&self, slot: Slot) -> Result<(), SlotProcessingError> { let state_slot = self.state.read().slot; - let head_block_root = self.head().beacon_block_root; + + let latest_block_header = self.head().beacon_block.into_header(); + for _ in state_slot.as_u64()..slot.as_u64() { - per_slot_processing(&mut *self.state.write(), head_block_root, &self.spec)?; + per_slot_processing(&mut *self.state.write(), &latest_block_header, &self.spec)?; } + Ok(()) } @@ -554,66 +555,13 @@ where } } - /// Dumps the entire canonical chain, from the head to genesis to a vector for analysis. - /// - /// This could be a very expensive operation and should only be done in testing/analysis - /// activities. - pub fn chain_dump(&self) -> Result, Error> { - let mut dump = vec![]; - - let mut last_slot = CheckPoint { - beacon_block: self.head().beacon_block.clone(), - beacon_block_root: self.head().beacon_block_root, - beacon_state: self.head().beacon_state.clone(), - beacon_state_root: self.head().beacon_state_root, - }; - - dump.push(last_slot.clone()); - - loop { - let beacon_block_root = last_slot.beacon_block.previous_block_root; - - if beacon_block_root == self.spec.zero_hash { - break; // Genesis has been reached. - } - - let beacon_block = self - .block_store - .get_deserialized(&beacon_block_root)? - .ok_or_else(|| { - Error::DBInconsistent(format!("Missing block {}", beacon_block_root)) - })?; - let beacon_state_root = beacon_block.state_root; - let beacon_state = self - .state_store - .get_deserialized(&beacon_state_root)? - .ok_or_else(|| { - Error::DBInconsistent(format!("Missing state {}", beacon_state_root)) - })?; - - let slot = CheckPoint { - beacon_block, - beacon_block_root, - beacon_state, - beacon_state_root, - }; - - dump.push(slot.clone()); - last_slot = slot; - } - - dump.reverse(); - - Ok(dump) - } - /// Accept some block and attempt to add it to block DAG. /// /// Will accept blocks from prior slots, however it will reject any block from a future slot. pub fn process_block(&self, block: BeaconBlock) -> Result { debug!("Processing block with slot {}...", block.slot); - let block_root = block.canonical_root(); + let block_root = block.into_header().canonical_root(); let present_slot = self.present_slot(); @@ -648,8 +596,10 @@ where // Transition the parent state to the present slot. 
let mut state = parent_state; + println!("parent process state: {:?}", state.latest_block_header); + let previous_block_header = parent_block.into_header(); for _ in state.slot.as_u64()..present_slot.as_u64() { - if let Err(e) = per_slot_processing(&mut state, parent_block_root, &self.spec) { + if let Err(e) = per_slot_processing(&mut state, &previous_block_header, &self.spec) { return Ok(BlockProcessingOutcome::InvalidBlock( InvalidBlock::SlotProcessingError(e), )); @@ -664,6 +614,8 @@ where )); } + println!("process state: {:?}", state.latest_block_header); + let state_root = state.canonical_root(); if block.state_root != state_root { @@ -726,7 +678,7 @@ where ); let previous_block_root = *state - .get_block_root(state.slot.saturating_sub(1_u64), &self.spec) + .get_block_root(state.slot - 1, &self.spec) .map_err(|_| BlockProductionError::UnableToGetBlockRootFromState)?; let mut block = BeaconBlock { @@ -754,6 +706,8 @@ where per_block_processing_without_verifying_block_signature(&mut state, &block, &self.spec)?; + println!("produce state: {:?}", state.latest_block_header); + let state_root = state.canonical_root(); block.state_root = state_root; @@ -788,6 +742,59 @@ where Ok(()) } + + /// Dumps the entire canonical chain, from the head to genesis to a vector for analysis. + /// + /// This could be a very expensive operation and should only be done in testing/analysis + /// activities. + pub fn chain_dump(&self) -> Result, Error> { + let mut dump = vec![]; + + let mut last_slot = CheckPoint { + beacon_block: self.head().beacon_block.clone(), + beacon_block_root: self.head().beacon_block_root, + beacon_state: self.head().beacon_state.clone(), + beacon_state_root: self.head().beacon_state_root, + }; + + dump.push(last_slot.clone()); + + loop { + let beacon_block_root = last_slot.beacon_block.previous_block_root; + + if beacon_block_root == self.spec.zero_hash { + break; // Genesis has been reached. + } + + let beacon_block = self + .block_store + .get_deserialized(&beacon_block_root)? + .ok_or_else(|| { + Error::DBInconsistent(format!("Missing block {}", beacon_block_root)) + })?; + let beacon_state_root = beacon_block.state_root; + let beacon_state = self + .state_store + .get_deserialized(&beacon_state_root)? 
+ .ok_or_else(|| { + Error::DBInconsistent(format!("Missing state {}", beacon_state_root)) + })?; + + let slot = CheckPoint { + beacon_block, + beacon_block_root, + beacon_state, + beacon_state_root, + }; + + dump.push(slot.clone()); + last_slot = slot; + } + + dump.reverse(); + + Ok(dump) + } } impl From for Error { diff --git a/eth2/state_processing/src/per_slot_processing.rs b/eth2/state_processing/src/per_slot_processing.rs index aafc7166a..a90c5b408 100644 --- a/eth2/state_processing/src/per_slot_processing.rs +++ b/eth2/state_processing/src/per_slot_processing.rs @@ -1,5 +1,6 @@ use crate::*; -use types::{BeaconState, BeaconStateError, ChainSpec, Hash256}; +use ssz::TreeHash; +use types::*; #[derive(Debug, PartialEq)] pub enum Error { @@ -12,9 +13,11 @@ pub enum Error { /// Spec v0.5.0 pub fn per_slot_processing( state: &mut BeaconState, - previous_block_root: Hash256, + latest_block_header: &BeaconBlockHeader, spec: &ChainSpec, ) -> Result<(), Error> { + cache_state(state, latest_block_header, spec)?; + if (state.slot + 1) % spec.slots_per_epoch == 0 { per_epoch_processing(state, spec)?; state.advance_caches(); @@ -22,6 +25,37 @@ pub fn per_slot_processing( state.slot += 1; + let latest_block_root = Hash256::from_slice(&state.latest_block_header.hash_tree_root()[..]); + state.set_block_root(state.slot - 1, latest_block_root, spec)?; + + Ok(()) +} + +fn cache_state( + state: &mut BeaconState, + latest_block_header: &BeaconBlockHeader, + spec: &ChainSpec, +) -> Result<(), Error> { + let previous_slot_state_root = Hash256::from_slice(&state.hash_tree_root()[..]); + + // Note: increment the state slot here to allow use of our `state_root` and `block_root` + // getter/setter functions. + // + // This is a bit hacky, however it gets the job safely without lots of code. + let previous_slot = state.slot; + state.slot += 1; + + // Store the previous slot's post-state transition root. + if state.latest_block_header.state_root == spec.zero_hash { + state.latest_block_header.state_root = previous_slot_state_root + } + + let latest_block_root = Hash256::from_slice(&latest_block_header.hash_tree_root()[..]); + state.set_block_root(previous_slot, latest_block_root, spec)?; + + // Set the state slot back to what it should be. + state.slot -= 1; + Ok(()) } diff --git a/eth2/types/src/beacon_block.rs b/eth2/types/src/beacon_block.rs index 2dcf91d95..b966751ed 100644 --- a/eth2/types/src/beacon_block.rs +++ b/eth2/types/src/beacon_block.rs @@ -63,16 +63,32 @@ impl BeaconBlock { Hash256::from_slice(&self.hash_tree_root()[..]) } + /// Returns a full `BeaconBlockHeader` of this block. + /// + /// Note: This method is used instead of an `Into` impl to avoid a `Clone` of an entire block + /// when you want to have the block _and_ the header. + /// + /// Note: performs a full tree-hash of `self.body`. + /// + /// Spec v0.5.0 + pub fn into_header(&self) -> BeaconBlockHeader { + BeaconBlockHeader { + slot: self.slot, + previous_block_root: self.previous_block_root, + state_root: self.state_root, + block_body_root: Hash256::from_slice(&self.body.hash_tree_root()[..]), + signature: self.signature.clone(), + } + } + /// Returns a "temporary" header, where the `state_root` is `spec.zero_hash`. 
/// /// Spec v0.5.0 pub fn into_temporary_header(&self, spec: &ChainSpec) -> BeaconBlockHeader { BeaconBlockHeader { - slot: self.slot, - previous_block_root: self.previous_block_root, state_root: spec.zero_hash, - block_body_root: Hash256::from_slice(&self.hash_tree_root()), - signature: self.signature.clone(), + signature: spec.empty_signature.clone(), + ..self.into_header() } } } diff --git a/eth2/types/src/beacon_block_header.rs b/eth2/types/src/beacon_block_header.rs index 029c7e56b..3d8b08cc8 100644 --- a/eth2/types/src/beacon_block_header.rs +++ b/eth2/types/src/beacon_block_header.rs @@ -30,6 +30,15 @@ pub struct BeaconBlockHeader { pub signature: Signature, } +impl BeaconBlockHeader { + /// Returns the `hash_tree_root` of the header. + /// + /// Spec v0.5.0 + pub fn canonical_root(&self) -> Hash256 { + Hash256::from_slice(&self.hash_tree_root()[..]) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 1a77d3449..1b2424774 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -35,6 +35,7 @@ pub enum Error { InsufficientAttestations, InsufficientCommittees, InsufficientSlashedBalances, + InsufficientStateRoots, NoCommitteeForShard, EpochCacheUninitialized(RelativeEpoch), PubkeyCacheInconsistent, @@ -425,6 +426,22 @@ impl BeaconState { .ok_or_else(|| Error::NoCommitteeForShard)?) } + /// Safely obtains the index for latest block roots, given some `slot`. + /// + /// Spec v0.5.0 + fn get_latest_block_roots_index(&self, slot: Slot, spec: &ChainSpec) -> Result { + if (slot < self.slot) && (self.slot <= slot + spec.slots_per_historical_root as u64) { + let i = slot.as_usize() % spec.slots_per_historical_root; + if i >= self.latest_block_roots.len() { + Err(Error::InsufficientStateRoots) + } else { + Ok(i) + } + } else { + Err(BeaconStateError::SlotOutOfBounds) + } + } + /// Return the block root at a recent `slot`. /// /// Spec v0.5.0 @@ -433,13 +450,21 @@ impl BeaconState { slot: Slot, spec: &ChainSpec, ) -> Result<&Hash256, BeaconStateError> { - if (self.slot <= slot + spec.slots_per_historical_root as u64) && (slot < self.slot) { - self.latest_block_roots - .get(slot.as_usize() % spec.slots_per_historical_root) - .ok_or_else(|| Error::InsufficientBlockRoots) - } else { - Err(Error::EpochOutOfBounds) - } + let i = self.get_latest_block_roots_index(slot, spec)?; + Ok(&self.latest_block_roots[i]) + } + + /// Sets the block root for some given slot. + /// + /// Spec v0.5.0 + pub fn set_block_root( + &mut self, + slot: Slot, + block_root: Hash256, + spec: &ChainSpec, + ) -> Result<(), BeaconStateError> { + let i = self.get_latest_block_roots_index(slot, spec)?; + Ok(self.latest_block_roots[i] = block_root) } /// XOR-assigns the existing `epoch` randao mix with the hash of the `signature`. @@ -506,6 +531,43 @@ impl BeaconState { } } + /// Safely obtains the index for latest state roots, given some `slot`. + /// + /// Spec v0.5.0 + fn get_latest_state_roots_index(&self, slot: Slot, spec: &ChainSpec) -> Result { + if (slot < self.slot) && (self.slot <= slot + spec.slots_per_historical_root as u64) { + let i = slot.as_usize() % spec.slots_per_historical_root; + if i >= self.latest_state_roots.len() { + Err(Error::InsufficientStateRoots) + } else { + Ok(i) + } + } else { + Err(BeaconStateError::SlotOutOfBounds) + } + } + + /// Gets the state root for some slot. 
+ /// + /// Spec v0.5.0 + pub fn get_state_root(&mut self, slot: Slot, spec: &ChainSpec) -> Result<&Hash256, Error> { + let i = self.get_latest_state_roots_index(slot, spec)?; + Ok(&self.latest_state_roots[i]) + } + + /// Sets the latest state root for slot. + /// + /// Spec v0.5.0 + pub fn set_state_root( + &mut self, + slot: Slot, + state_root: Hash256, + spec: &ChainSpec, + ) -> Result<(), Error> { + let i = self.get_latest_state_roots_index(slot, spec)?; + Ok(self.latest_state_roots[i] = state_root) + } + /// Generate a seed for the given `epoch`. /// /// Spec v0.4.0 From 6b5debe654fce0cca60e5a14cbbc54d2983e5b65 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 18 Mar 2019 17:38:23 +1100 Subject: [PATCH 110/154] Organize beacon_chain typing - Implements ClientTypes - New network BeaconChain type for the networking service --- beacon_node/beacon_chain/src/initialise.rs | 2 +- beacon_node/beacon_chain/src/lib.rs | 5 ++- beacon_node/client/src/client_types.rs | 46 +++++++++++++--------- beacon_node/client/src/lib.rs | 19 +++++++-- beacon_node/network/src/beacon_chain.rs | 27 +++++++++++++ beacon_node/network/src/lib.rs | 1 + beacon_node/network/src/message_handler.rs | 16 ++++---- beacon_node/network/src/service.rs | 22 +++++------ 8 files changed, 93 insertions(+), 45 deletions(-) create mode 100644 beacon_node/network/src/beacon_chain.rs diff --git a/beacon_node/beacon_chain/src/initialise.rs b/beacon_node/beacon_chain/src/initialise.rs index 131782470..a8289a062 100644 --- a/beacon_node/beacon_chain/src/initialise.rs +++ b/beacon_node/beacon_chain/src/initialise.rs @@ -106,7 +106,7 @@ pub fn initialise_test_beacon_chain( deposit_root: Hash256::zero(), block_hash: Hash256::zero(), }; - let keypairs: Vec = (0..10) + let keypairs: Vec = (0..50) .collect::>() .iter() .map(|_| Keypair::random()) diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 89ee2029c..5acac6ff2 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -7,4 +7,7 @@ pub mod initialise; pub use self::beacon_chain::{BeaconChain, BlockProcessingOutcome, InvalidBlock, ValidBlock}; pub use self::checkpoint::CheckPoint; pub use self::errors::BeaconChainError; -pub use fork_choice::{ForkChoice, ForkChoiceAlgorithm, ForkChoiceError}; +pub use db; +pub use fork_choice; +pub use parking_lot; +pub use slot_clock; diff --git a/beacon_node/client/src/client_types.rs b/beacon_node/client/src/client_types.rs index 744c9ab98..de0678fe7 100644 --- a/beacon_node/client/src/client_types.rs +++ b/beacon_node/client/src/client_types.rs @@ -1,39 +1,49 @@ -use db::{ClientDB, DiskDB, MemoryDB}; -use fork_choice::{BitwiseLMDGhost, ForkChoice}; -use slot_clock::{SlotClock, SystemTimeSlotClock, TestingSlotClock}; -use beacon_chain::initialise; +use crate::ClientConfig; +use beacon_chain::{ + db::{ClientDB, DiskDB, MemoryDB}, + fork_choice::BitwiseLMDGhost, + initialise, + slot_clock::{SlotClock, SystemTimeSlotClock, TestingSlotClock}, + BeaconChain, +}; +use fork_choice::ForkChoice; + use std::sync::Arc; -use crate::ClientConfig pub trait ClientTypes { - type ForkChoice: ForkChoice; - type DB: ClientDB; - type SlotClock: SlotClock; + type DB: ClientDB + 'static; + type SlotClock: SlotClock + 'static; + type ForkChoice: ForkChoice + 'static; - pub fn initialise_beacon_chain(cchain_spec: &ClientConfig) -> Arc>); + fn initialise_beacon_chain( + config: &ClientConfig, + ) -> Arc>; } -pub struct StandardClientType +pub struct StandardClientType; impl ClientTypes for 
StandardClientType { type DB = DiskDB; - type ForkChoice = BitwiseLMDGhost; type SlotClock = SystemTimeSlotClock; + type ForkChoice = BitwiseLMDGhost; - pub fn initialise_beacon_chain(config: &ClientConfig) -> Arc>) { - initialise::initialise_beacon_chain(config.chain_spec, config.db_name) + fn initialise_beacon_chain( + config: &ClientConfig, + ) -> Arc> { + initialise::initialise_beacon_chain(&config.spec, Some(&config.db_name)) } - } -pub struct TestingClientType +pub struct TestingClientType; impl ClientTypes for TestingClientType { type DB = MemoryDB; - type SlotClock = TestingSlotClock; + type SlotClock = SystemTimeSlotClock; type ForkChoice = BitwiseLMDGhost; - pub fn initialise_beacon_chain(config: &ClientConfig) -> Arc>) { - initialise::initialise_test_beacon_chain(config.chain_spec, None) + fn initialise_beacon_chain( + config: &ClientConfig, + ) -> Arc> { + initialise::initialise_test_beacon_chain(&config.spec, None) } } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 46221c200..f3178eaa6 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -10,6 +10,7 @@ pub use client_config::ClientConfig; pub use client_types::ClientTypes; //use beacon_chain::BeaconChain; +use beacon_chain::BeaconChain; use exit_future::{Exit, Signal}; use network::Service as NetworkService; use slog::o; @@ -20,26 +21,35 @@ use tokio::runtime::TaskExecutor; /// Main beacon node client service. This provides the connection and initialisation of the clients /// sub-services in multiple threads. pub struct Client { + /// Configuration for the lighthouse client. config: ClientConfig, + /// The beacon chain for the running client. beacon_chain: Arc>, + /// Reference to the network service. pub network: Arc, + /// Future to stop and begin shutdown of the Client. + //TODO: Decide best way to handle shutdown pub exit: exit_future::Exit, + /// The sending future to call to terminate the Client. + //TODO: Decide best way to handle shutdown pub exit_signal: Signal, + /// The clients logger. log: slog::Logger, + /// Marker to pin the beacon chain generics. + phantom: PhantomData, } -impl Client { - /// Generate an instance of the client. Spawn and link all internal subprocesses. +impl Client { + /// Generate an instance of the client. Spawn and link all internal sub-processes. pub fn new( config: ClientConfig, - client_type: T, log: slog::Logger, executor: &TaskExecutor, ) -> error::Result { let (exit_signal, exit) = exit_future::signal(); // generate a beacon chain - let beacon_chain = client_type.initialise_beacon_chain(&config); + let beacon_chain = TClientType::initialise_beacon_chain(&config); // Start the network service, libp2p and syncing threads // TODO: Add beacon_chain reference to network parameters @@ -54,6 +64,7 @@ impl Client { Ok(Client { config, + beacon_chain, exit, exit_signal: exit_signal, log, diff --git a/beacon_node/network/src/beacon_chain.rs b/beacon_node/network/src/beacon_chain.rs new file mode 100644 index 000000000..5e0857f47 --- /dev/null +++ b/beacon_node/network/src/beacon_chain.rs @@ -0,0 +1,27 @@ +use beacon_chain::BeaconChain as RawBeaconChain; +use beacon_chain::{ + db::ClientDB, fork_choice::ForkChoice, parking_lot::RwLockReadGuard, slot_clock::SlotClock, + CheckPoint, +}; + +/// The network's API to the beacon chain. 
+pub trait BeaconChain: Send + Sync { + fn head(&self) -> RwLockReadGuard; + + fn finalized_head(&self) -> RwLockReadGuard; +} + +impl BeaconChain for RawBeaconChain +where + T: ClientDB + Sized, + U: SlotClock, + F: ForkChoice, +{ + fn head(&self) -> RwLockReadGuard { + self.head() + } + + fn finalized_head(&self) -> RwLockReadGuard { + self.finalized_head() + } +} diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index 49b2abadd..dca83bb77 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -1,4 +1,5 @@ /// This crate provides the network server for Lighthouse. +pub mod beacon_chain; pub mod error; mod message_handler; mod messages; diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 4ebedb89a..0471e8ce5 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -1,14 +1,14 @@ +use crate::beacon_chain::BeaconChain; use crate::error; use crate::messages::NodeMessage; -use beacon_chain::BeaconChain; -use crossbeam_channel::{unbounded as channel, Sender, TryRecvError}; +use crossbeam_channel::{unbounded as channel, Sender}; use futures::future; use futures::prelude::*; use libp2p::rpc; use libp2p::{PeerId, RPCEvent}; use slog::debug; use std::collections::HashMap; -use std::sync::{Arc, Mutex}; +use std::sync::Arc; use std::time::{Duration, Instant}; use sync::SimpleSync; use types::Hash256; @@ -17,9 +17,9 @@ use types::Hash256; const HELLO_TIMEOUT: Duration = Duration::from_secs(30); /// Handles messages received from the network and client and organises syncing. -pub struct MessageHandler { +pub struct MessageHandler { /// Currently loaded and initialised beacon chain. - chain: BeaconChain, + chain: Arc, /// The syncing framework. sync: SimpleSync, /// A mapping of peers we have sent a HELLO rpc request to. @@ -41,10 +41,10 @@ pub enum HandlerMessage { RPC(RPCEvent), } -impl MessageHandler { +impl MessageHandler { /// Initializes and runs the MessageHandler. pub fn new( - beacon_chain: Arc>, + beacon_chain: Arc, executor: &tokio::runtime::TaskExecutor, log: slog::Logger, ) -> error::Result> { @@ -60,7 +60,7 @@ impl MessageHandler { // generate the Message handler let sync = SimpleSync::new(temp_genesis); let mut handler = MessageHandler { - chain: beacon_chain, + chain: beacon_chain.clone(), sync, hello_requests: HashMap::new(), log: log.clone(), diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 6b9c0aff0..e42b39105 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -1,46 +1,42 @@ +use crate::beacon_chain::BeaconChain; use crate::error; use crate::message_handler::{HandlerMessage, MessageHandler}; use crate::messages::{NetworkMessage, NodeMessage}; use crate::NetworkConfig; use crossbeam_channel::{unbounded as channel, Sender, TryRecvError}; -use futures::future::lazy; -use futures::future::poll_fn; use futures::prelude::*; use futures::sync::oneshot; use futures::Stream; -use libp2p::behaviour::BehaviourEvent; -use libp2p::error::Error as libp2pError; use libp2p::Service as LibP2PService; use libp2p::{Libp2pEvent, PeerId}; -use slog::{debug, info, o, trace, warn, Logger}; -use std::sync::{Arc, Mutex}; +use slog::{debug, o}; +use std::sync::Arc; use tokio::runtime::TaskExecutor; -use client::ClientTypes; /// Service that handles communication between internal services and the libp2p network service. 
-pub struct Service { +pub struct Service { //libp2p_service: Arc>, libp2p_exit: oneshot::Sender<()>, network_send: crossbeam_channel::Sender, //message_handler: MessageHandler, //message_handler_send: Sender, - PhantomData: T, } -impl Service { +impl Service { pub fn new( - beacon_chain: Arc, + beacon_chain: Arc, config: &NetworkConfig, executor: &TaskExecutor, log: slog::Logger, ) -> error::Result<(Arc, Sender)> { // launch message handler thread let message_handler_log = log.new(o!("Service" => "MessageHandler")); - let message_handler_send = MessageHandler::new(beacon_chain, executor, message_handler_log)?; + let message_handler_send = + MessageHandler::new(beacon_chain, executor, message_handler_log)?; // launch libp2p service let libp2p_log = log.new(o!("Service" => "Libp2p")); - let libp2p_service = LibP2PService::new(config, libp2p_log)?; + let libp2p_service = LibP2PService::new(config.clone(), libp2p_log)?; // TODO: Spawn thread to handle libp2p messages and pass to message handler thread. let (network_send, libp2p_exit) = From 2d52d2954dce534cf12671e74a7ba8bdcd7c0f77 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 18 Mar 2019 17:45:40 +1100 Subject: [PATCH 111/154] Modify testnet spec to have few validators --- beacon_node/beacon_chain/src/initialise.rs | 2 +- eth2/types/src/chain_spec.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/initialise.rs b/beacon_node/beacon_chain/src/initialise.rs index a8289a062..21b145d42 100644 --- a/beacon_node/beacon_chain/src/initialise.rs +++ b/beacon_node/beacon_chain/src/initialise.rs @@ -106,7 +106,7 @@ pub fn initialise_test_beacon_chain( deposit_root: Hash256::zero(), block_hash: Hash256::zero(), }; - let keypairs: Vec = (0..50) + let keypairs: Vec = (0..8) .collect::>() .iter() .map(|_| Keypair::random()) diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index c6093231b..089d40385 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -259,7 +259,7 @@ impl ChainSpec { Self { boot_nodes, - ..ChainSpec::foundation() + ..ChainSpec::few_validators() } } From 71d95ee9db698aa558dd78780202db7128de8ac1 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 18 Mar 2019 18:08:53 +1100 Subject: [PATCH 112/154] Add new field to test_harness YAML, remove prints --- beacon_node/beacon_chain/src/beacon_chain.rs | 5 ----- .../beacon_chain/test_harness/specs/validator_registry.yaml | 1 + beacon_node/beacon_chain/test_harness/src/test_case.rs | 4 ++++ .../beacon_chain/test_harness/src/test_case/config.rs | 3 +++ 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index bf87adf10..01787f95b 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -596,7 +596,6 @@ where // Transition the parent state to the present slot. 
let mut state = parent_state; - println!("parent process state: {:?}", state.latest_block_header); let previous_block_header = parent_block.into_header(); for _ in state.slot.as_u64()..present_slot.as_u64() { if let Err(e) = per_slot_processing(&mut state, &previous_block_header, &self.spec) { @@ -614,8 +613,6 @@ where )); } - println!("process state: {:?}", state.latest_block_header); - let state_root = state.canonical_root(); if block.state_root != state_root { @@ -706,8 +703,6 @@ where per_block_processing_without_verifying_block_signature(&mut state, &block, &self.spec)?; - println!("produce state: {:?}", state.latest_block_header); - let state_root = state.canonical_root(); block.state_root = state_root; diff --git a/beacon_node/beacon_chain/test_harness/specs/validator_registry.yaml b/beacon_node/beacon_chain/test_harness/specs/validator_registry.yaml index aea7dcf31..0c4f5004b 100644 --- a/beacon_node/beacon_chain/test_harness/specs/validator_registry.yaml +++ b/beacon_node/beacon_chain/test_harness/specs/validator_registry.yaml @@ -9,6 +9,7 @@ test_cases: deposits_for_chain_start: 1000 num_slots: 64 skip_slots: [2, 3] + persistent_committee_period: 0 deposits: # At slot 1, create a new validator deposit of 5 ETH. - slot: 1 diff --git a/beacon_node/beacon_chain/test_harness/src/test_case.rs b/beacon_node/beacon_chain/test_harness/src/test_case.rs index 1361127a1..f65b45505 100644 --- a/beacon_node/beacon_chain/test_harness/src/test_case.rs +++ b/beacon_node/beacon_chain/test_harness/src/test_case.rs @@ -62,6 +62,10 @@ impl TestCase { spec.slots_per_epoch = n; } + if let Some(n) = self.config.persistent_committee_period { + spec.persistent_committee_period = n; + } + spec } diff --git a/beacon_node/beacon_chain/test_harness/src/test_case/config.rs b/beacon_node/beacon_chain/test_harness/src/test_case/config.rs index f336b9d53..12d5da2d7 100644 --- a/beacon_node/beacon_chain/test_harness/src/test_case/config.rs +++ b/beacon_node/beacon_chain/test_harness/src/test_case/config.rs @@ -20,6 +20,8 @@ pub struct Config { pub deposits_for_chain_start: usize, /// Number of slots in an epoch. pub slots_per_epoch: Option, + /// Affects the number of epochs a validator must be active before they can withdraw. + pub persistent_committee_period: Option, /// Number of slots to build before ending execution. pub num_slots: u64, /// Number of slots that should be skipped due to inactive validator. 
@@ -45,6 +47,7 @@ impl Config { deposits_for_chain_start: as_usize(&yaml, "deposits_for_chain_start") .expect("Must specify validator count"), slots_per_epoch: as_u64(&yaml, "slots_per_epoch"), + persistent_committee_period: as_u64(&yaml, "persistent_committee_period"), num_slots: as_u64(&yaml, "num_slots").expect("Must specify `config.num_slots`"), skip_slots: as_vec_u64(yaml, "skip_slots"), deposits: parse_deposits(&yaml), From 7503f31ddc117b91debcfacd8d7b3e517a86899e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 18 Mar 2019 18:09:31 +1100 Subject: [PATCH 113/154] Fix bug with per-block processing --- eth2/state_processing/src/per_block_processing.rs | 5 ++++- eth2/state_processing/src/per_slot_processing.rs | 3 --- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index 78cf927f5..14c72c53b 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -100,8 +100,11 @@ pub fn process_block_header( ) -> Result<(), Error> { verify!(block.slot == state.slot, Invalid::StateSlotMismatch); + // NOTE: this is not to spec. I think spec is broken. See: + // + // https://github.com/ethereum/eth2.0-specs/issues/797 verify!( - block.previous_block_root.as_bytes() == &state.latest_block_header.hash_tree_root()[..], + block.previous_block_root == *state.get_block_root(state.slot - 1, spec)?, Invalid::ParentBlockRootMismatch ); diff --git a/eth2/state_processing/src/per_slot_processing.rs b/eth2/state_processing/src/per_slot_processing.rs index a90c5b408..8f02b70e3 100644 --- a/eth2/state_processing/src/per_slot_processing.rs +++ b/eth2/state_processing/src/per_slot_processing.rs @@ -25,9 +25,6 @@ pub fn per_slot_processing( state.slot += 1; - let latest_block_root = Hash256::from_slice(&state.latest_block_header.hash_tree_root()[..]); - state.set_block_root(state.slot - 1, latest_block_root, spec)?; - Ok(()) } From 6a89da43b71f94e1111fe601387f25f5362f5995 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 18 Mar 2019 18:22:01 +1100 Subject: [PATCH 114/154] Cleanup network shutdown messages --- beacon_node/network/src/message_handler.rs | 2 +- beacon_node/network/src/service.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 0471e8ce5..b904993bb 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -71,7 +71,7 @@ impl MessageHandler { executor.spawn(future::poll_fn(move || -> Result<_, _> { loop { handler.handle_message(handler_recv.recv().map_err(|_| { - debug!(log, "Handler channel closed. 
Handler terminating"); + debug!(log, "Network message handler terminated."); })?); } })); diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index e42b39105..21f948a71 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -9,7 +9,7 @@ use futures::sync::oneshot; use futures::Stream; use libp2p::Service as LibP2PService; use libp2p::{Libp2pEvent, PeerId}; -use slog::{debug, o}; +use slog::{debug, info, o}; use std::sync::Arc; use tokio::runtime::TaskExecutor; @@ -80,7 +80,7 @@ fn spawn_service( // allow for manual termination .select(exit_rx.then(|_| Ok(()))) .then(move |_| { - debug!(log.clone(), "Network service ended"); + info!(log.clone(), "Network service shutdown"); Ok(()) }), ); From 1028acf3f184bdf4982557ec65f0fa52d9eb92b6 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 18 Mar 2019 21:34:42 +1100 Subject: [PATCH 115/154] Move state trans fns into state_processing --- .../src/common/exit_validator.rs | 22 + eth2/state_processing/src/common/mod.rs | 7 + .../src/common/slash_validator.rs | 62 + .../src/common/verify_bitfield.rs} | 2 +- .../state_processing/src/get_genesis_state.rs | 3 +- eth2/state_processing/src/lib.rs | 1 + .../src/per_block_processing.rs | 5 +- .../validate_attestation.rs | 2 +- .../verify_slashable_attestation.rs | 2 +- .../src/per_epoch_processing.rs | 31 +- .../get_attestation_participants.rs | 3 +- .../per_epoch_processing/process_ejections.rs | 28 + .../process_exit_queue.rs | 42 + .../per_epoch_processing/process_slashings.rs | 36 + .../process_validator_registry.rs | 6 +- .../update_validator_registry.rs | 51 + eth2/types/src/beacon_state.rs | 1107 +++++++---------- 17 files changed, 741 insertions(+), 669 deletions(-) create mode 100644 eth2/state_processing/src/common/exit_validator.rs create mode 100644 eth2/state_processing/src/common/mod.rs create mode 100644 eth2/state_processing/src/common/slash_validator.rs rename eth2/{types/src/beacon_state/helpers.rs => state_processing/src/common/verify_bitfield.rs} (96%) create mode 100644 eth2/state_processing/src/per_epoch_processing/process_ejections.rs create mode 100644 eth2/state_processing/src/per_epoch_processing/process_exit_queue.rs create mode 100644 eth2/state_processing/src/per_epoch_processing/process_slashings.rs create mode 100644 eth2/state_processing/src/per_epoch_processing/update_validator_registry.rs diff --git a/eth2/state_processing/src/common/exit_validator.rs b/eth2/state_processing/src/common/exit_validator.rs new file mode 100644 index 000000000..8ab530b18 --- /dev/null +++ b/eth2/state_processing/src/common/exit_validator.rs @@ -0,0 +1,22 @@ +use types::{BeaconStateError as Error, *}; + +/// Exit the validator of the given `index`. 
+/// +/// Spec v0.5.0 +pub fn exit_validator( + state: &mut BeaconState, + validator_index: usize, + spec: &ChainSpec, +) -> Result<(), Error> { + if validator_index >= state.validator_registry.len() { + return Err(Error::UnknownValidator); + } + + let delayed_epoch = state.get_delayed_activation_exit_epoch(state.current_epoch(spec), spec); + + if state.validator_registry[validator_index].exit_epoch > delayed_epoch { + state.validator_registry[validator_index].exit_epoch = delayed_epoch; + } + + Ok(()) +} diff --git a/eth2/state_processing/src/common/mod.rs b/eth2/state_processing/src/common/mod.rs new file mode 100644 index 000000000..49898d10f --- /dev/null +++ b/eth2/state_processing/src/common/mod.rs @@ -0,0 +1,7 @@ +mod exit_validator; +mod slash_validator; +mod verify_bitfield; + +pub use exit_validator::exit_validator; +pub use slash_validator::slash_validator; +pub use verify_bitfield::verify_bitfield_length; diff --git a/eth2/state_processing/src/common/slash_validator.rs b/eth2/state_processing/src/common/slash_validator.rs new file mode 100644 index 000000000..9be87b978 --- /dev/null +++ b/eth2/state_processing/src/common/slash_validator.rs @@ -0,0 +1,62 @@ +use crate::common::exit_validator; +use types::{BeaconStateError as Error, *}; + +/// Slash the validator with index ``index``. +/// +/// Spec v0.5.0 +pub fn slash_validator( + state: &mut BeaconState, + validator_index: usize, + spec: &ChainSpec, +) -> Result<(), Error> { + let current_epoch = state.current_epoch(spec); + + if (validator_index >= state.validator_registry.len()) + | (validator_index >= state.validator_balances.len()) + { + return Err(BeaconStateError::UnknownValidator); + } + + let validator = &state.validator_registry[validator_index]; + + let effective_balance = state.get_effective_balance(validator_index, spec)?; + + // A validator that is withdrawn cannot be slashed. + // + // This constraint will be lifted in Phase 0. + if state.slot + >= validator + .withdrawable_epoch + .start_slot(spec.slots_per_epoch) + { + return Err(Error::ValidatorIsWithdrawable); + } + + exit_validator(state, validator_index, spec)?; + + state.set_slashed_balance( + current_epoch, + state.get_slashed_balance(current_epoch, spec)? + effective_balance, + spec, + )?; + + let whistleblower_index = + state.get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec)?; + let whistleblower_reward = effective_balance / spec.whistleblower_reward_quotient; + + safe_add_assign!( + state.validator_balances[whistleblower_index as usize], + whistleblower_reward + ); + safe_sub_assign!( + state.validator_balances[validator_index], + whistleblower_reward + ); + + state.validator_registry[validator_index].slashed = true; + + state.validator_registry[validator_index].withdrawable_epoch = + current_epoch + Epoch::from(spec.latest_slashed_exit_length); + + Ok(()) +} diff --git a/eth2/types/src/beacon_state/helpers.rs b/eth2/state_processing/src/common/verify_bitfield.rs similarity index 96% rename from eth2/types/src/beacon_state/helpers.rs rename to eth2/state_processing/src/common/verify_bitfield.rs index adae7bab4..8ff5c96ca 100644 --- a/eth2/types/src/beacon_state/helpers.rs +++ b/eth2/state_processing/src/common/verify_bitfield.rs @@ -1,4 +1,4 @@ -use crate::*; +use types::*; /// Verify ``bitfield`` against the ``committee_size``. 
/// diff --git a/eth2/state_processing/src/get_genesis_state.rs b/eth2/state_processing/src/get_genesis_state.rs index 3c6612349..bfcf82216 100644 --- a/eth2/state_processing/src/get_genesis_state.rs +++ b/eth2/state_processing/src/get_genesis_state.rs @@ -37,8 +37,7 @@ pub fn get_genesis_state( .get_active_validator_indices(spec.genesis_epoch, spec)? .to_vec(); let genesis_active_index_root = Hash256::from_slice(&active_validator_indices.hash_tree_root()); - state.latest_active_index_roots = - vec![genesis_active_index_root; spec.latest_active_index_roots_length as usize]; + state.fill_active_index_roots_with(genesis_active_index_root, spec); // Generate the current shuffling seed. state.current_shuffling_seed = state.generate_seed(spec.genesis_epoch, spec)?; diff --git a/eth2/state_processing/src/lib.rs b/eth2/state_processing/src/lib.rs index 78dc7270d..6757b5dbd 100644 --- a/eth2/state_processing/src/lib.rs +++ b/eth2/state_processing/src/lib.rs @@ -1,6 +1,7 @@ #[macro_use] mod macros; +pub mod common; pub mod get_genesis_state; pub mod per_block_processing; pub mod per_epoch_processing; diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index 14c72c53b..33f953b71 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -1,4 +1,5 @@ use self::verify_proposer_slashing::verify_proposer_slashing; +use crate::common::slash_validator; use errors::{BlockInvalid as Invalid, BlockProcessingError as Error, IntoWithIndex}; use rayon::prelude::*; use ssz::{SignedRoot, TreeHash}; @@ -222,7 +223,7 @@ pub fn process_proposer_slashings( // Update the state. for proposer_slashing in proposer_slashings { - state.slash_validator(proposer_slashing.proposer_index as usize, spec)?; + slash_validator(state, proposer_slashing.proposer_index as usize, spec)?; } Ok(()) @@ -279,7 +280,7 @@ pub fn process_attester_slashings( .map_err(|e| e.into_with_index(i))?; for i in slashable_indices { - state.slash_validator(i as usize, spec)?; + slash_validator(state, i as usize, spec)?; } } diff --git a/eth2/state_processing/src/per_block_processing/validate_attestation.rs b/eth2/state_processing/src/per_block_processing/validate_attestation.rs index 272eeb18b..113dbc4ce 100644 --- a/eth2/state_processing/src/per_block_processing/validate_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/validate_attestation.rs @@ -1,6 +1,6 @@ use super::errors::{AttestationInvalid as Invalid, AttestationValidationError as Error}; +use crate::common::verify_bitfield_length; use ssz::TreeHash; -use types::beacon_state::helpers::*; use types::*; /// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the diff --git a/eth2/state_processing/src/per_block_processing/verify_slashable_attestation.rs b/eth2/state_processing/src/per_block_processing/verify_slashable_attestation.rs index aa9a32196..d3ab5e398 100644 --- a/eth2/state_processing/src/per_block_processing/verify_slashable_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/verify_slashable_attestation.rs @@ -1,8 +1,8 @@ use super::errors::{ SlashableAttestationInvalid as Invalid, SlashableAttestationValidationError as Error, }; +use crate::common::verify_bitfield_length; use ssz::TreeHash; -use types::beacon_state::helpers::verify_bitfield_length; use types::*; /// Indicates if a `SlashableAttestation` is valid to be included in a block in the current epoch of the given diff --git 
a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index d1bb4269a..97a0e9987 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -1,5 +1,8 @@ use errors::EpochProcessingError as Error; use integer_sqrt::IntegerSquareRoot; +use process_ejections::process_ejections; +use process_exit_queue::process_exit_queue; +use process_slashings::process_slashings; use process_validator_registry::process_validator_registry; use rayon::prelude::*; use ssz::TreeHash; @@ -11,8 +14,12 @@ use winning_root::{winning_root, WinningRoot}; pub mod errors; pub mod get_attestation_participants; pub mod inclusion_distance; +pub mod process_ejections; +pub mod process_exit_queue; +pub mod process_slashings; pub mod process_validator_registry; pub mod tests; +pub mod update_validator_registry; pub mod validator_statuses; pub mod winning_root; @@ -45,14 +52,16 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result process_rewards_and_penalities(state, &mut statuses, &winning_root_for_shards, spec)?; // Ejections - state.process_ejections(spec)?; + process_ejections(state, spec)?; // Validator Registry process_validator_registry(state, spec)?; + process_slashings(state, spec)?; + process_exit_queue(state, spec); // Final updates update_active_tree_index_roots(state, spec)?; - update_latest_slashed_balances(state, spec); + update_latest_slashed_balances(state, spec)?; clean_attestations(state); // Rotate the epoch caches to suit the epoch transition. @@ -451,9 +460,7 @@ pub fn update_active_tree_index_roots( ) .hash_tree_root(); - state.latest_active_index_roots[(next_epoch.as_usize() - + spec.activation_exit_delay as usize) - % spec.latest_active_index_roots_length] = Hash256::from_slice(&active_tree_root[..]); + state.set_active_index_root(next_epoch, Hash256::from_slice(&active_tree_root[..]), spec)?; Ok(()) } @@ -461,12 +468,20 @@ pub fn update_active_tree_index_roots( /// Advances the state's `latest_slashed_balances` field. /// /// Spec v0.4.0 -pub fn update_latest_slashed_balances(state: &mut BeaconState, spec: &ChainSpec) { +pub fn update_latest_slashed_balances( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), Error> { let current_epoch = state.current_epoch(spec); let next_epoch = state.next_epoch(spec); - state.latest_slashed_balances[next_epoch.as_usize() % spec.latest_slashed_exit_length] = - state.latest_slashed_balances[current_epoch.as_usize() % spec.latest_slashed_exit_length]; + state.set_slashed_balance( + next_epoch, + state.get_slashed_balance(current_epoch, spec)?, + spec, + )?; + + Ok(()) } /// Removes all pending attestations from the previous epoch. diff --git a/eth2/state_processing/src/per_epoch_processing/get_attestation_participants.rs b/eth2/state_processing/src/per_epoch_processing/get_attestation_participants.rs index d822e434d..3e52776b1 100644 --- a/eth2/state_processing/src/per_epoch_processing/get_attestation_participants.rs +++ b/eth2/state_processing/src/per_epoch_processing/get_attestation_participants.rs @@ -1,4 +1,5 @@ -use types::{beacon_state::helpers::verify_bitfield_length, *}; +use crate::common::verify_bitfield_length; +use types::*; /// Returns validator indices which participated in the attestation. 
/// diff --git a/eth2/state_processing/src/per_epoch_processing/process_ejections.rs b/eth2/state_processing/src/per_epoch_processing/process_ejections.rs new file mode 100644 index 000000000..27dd37479 --- /dev/null +++ b/eth2/state_processing/src/per_epoch_processing/process_ejections.rs @@ -0,0 +1,28 @@ +use crate::common::exit_validator; +use types::{BeaconStateError as Error, *}; + +/// Iterate through the validator registry and eject active validators with balance below +/// ``EJECTION_BALANCE``. +/// +/// Spec v0.5.0 +pub fn process_ejections(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { + // There is an awkward double (triple?) loop here because we can't loop across the borrowed + // active validator indices and mutate state in the one loop. + let exitable: Vec = state + .get_active_validator_indices(state.current_epoch(spec), spec)? + .iter() + .filter_map(|&i| { + if state.validator_balances[i as usize] < spec.ejection_balance { + Some(i) + } else { + None + } + }) + .collect(); + + for validator_index in exitable { + exit_validator(state, validator_index, spec)? + } + + Ok(()) +} diff --git a/eth2/state_processing/src/per_epoch_processing/process_exit_queue.rs b/eth2/state_processing/src/per_epoch_processing/process_exit_queue.rs new file mode 100644 index 000000000..f672c97be --- /dev/null +++ b/eth2/state_processing/src/per_epoch_processing/process_exit_queue.rs @@ -0,0 +1,42 @@ +use types::*; + +/// Process the exit queue. +/// +/// Spec v0.5.0 +pub fn process_exit_queue(state: &mut BeaconState, spec: &ChainSpec) { + let current_epoch = state.current_epoch(spec); + + let eligible = |index: usize| { + let validator = &state.validator_registry[index]; + + if validator.withdrawable_epoch != spec.far_future_epoch { + false + } else { + current_epoch >= validator.exit_epoch + spec.min_validator_withdrawability_delay + } + }; + + let mut eligable_indices: Vec = (0..state.validator_registry.len()) + .filter(|i| eligible(*i)) + .collect(); + eligable_indices.sort_by_key(|i| state.validator_registry[*i].exit_epoch); + + for (withdrawn_so_far, index) in eligable_indices.iter().enumerate() { + if withdrawn_so_far as u64 >= spec.max_exit_dequeues_per_epoch { + break; + } + prepare_validator_for_withdrawal(state, *index, spec); + } +} + +/// Initiate an exit for the validator of the given `index`. +/// +/// Spec v0.5.0 +fn prepare_validator_for_withdrawal( + state: &mut BeaconState, + validator_index: usize, + spec: &ChainSpec, +) { + state.validator_registry[validator_index].withdrawable_epoch = + state.current_epoch(spec) + spec.min_validator_withdrawability_delay; +} diff --git a/eth2/state_processing/src/per_epoch_processing/process_slashings.rs b/eth2/state_processing/src/per_epoch_processing/process_slashings.rs new file mode 100644 index 000000000..b14a9ee37 --- /dev/null +++ b/eth2/state_processing/src/per_epoch_processing/process_slashings.rs @@ -0,0 +1,36 @@ +use types::{BeaconStateError as Error, *}; + +/// Process slashings. +/// +/// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. 
+/// +/// Spec v0.4.0 +pub fn process_slashings(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { + let current_epoch = state.current_epoch(spec); + let active_validator_indices = state.get_active_validator_indices(current_epoch, spec)?; + let total_balance = state.get_total_balance(&active_validator_indices[..], spec)?; + + for (index, validator) in state.validator_registry.iter().enumerate() { + if validator.slashed + && (current_epoch + == validator.withdrawable_epoch - Epoch::from(spec.latest_slashed_exit_length / 2)) + { + // TODO: check the following two lines are correct. + let total_at_start = state.get_slashed_balance(current_epoch + 1, spec)?; + let total_at_end = state.get_slashed_balance(current_epoch, spec)?; + + let total_penalities = total_at_end.saturating_sub(total_at_start); + + let effective_balance = state.get_effective_balance(index, spec)?; + let penalty = std::cmp::max( + effective_balance * std::cmp::min(total_penalities * 3, total_balance) + / total_balance, + effective_balance / spec.min_penalty_quotient, + ); + + safe_sub_assign!(state.validator_balances[index], penalty); + } + } + + Ok(()) +} diff --git a/eth2/state_processing/src/per_epoch_processing/process_validator_registry.rs b/eth2/state_processing/src/per_epoch_processing/process_validator_registry.rs index 26ebd60b3..2eb39711d 100644 --- a/eth2/state_processing/src/per_epoch_processing/process_validator_registry.rs +++ b/eth2/state_processing/src/per_epoch_processing/process_validator_registry.rs @@ -1,3 +1,4 @@ +use super::update_validator_registry::update_validator_registry; use super::Error; use types::*; @@ -14,7 +15,7 @@ pub fn process_validator_registry(state: &mut BeaconState, spec: &ChainSpec) -> state.previous_shuffling_seed = state.current_shuffling_seed; if should_update_validator_registry(state, spec)? { - state.update_validator_registry(spec)?; + update_validator_registry(state, spec)?; state.current_shuffling_epoch = next_epoch; state.current_shuffling_start_shard = (state.current_shuffling_start_shard @@ -37,9 +38,6 @@ pub fn process_validator_registry(state: &mut BeaconState, spec: &ChainSpec) -> } } - state.process_slashings(spec)?; - state.process_exit_queue(spec); - Ok(()) } diff --git a/eth2/state_processing/src/per_epoch_processing/update_validator_registry.rs b/eth2/state_processing/src/per_epoch_processing/update_validator_registry.rs new file mode 100644 index 000000000..8b612c346 --- /dev/null +++ b/eth2/state_processing/src/per_epoch_processing/update_validator_registry.rs @@ -0,0 +1,51 @@ +use crate::common::exit_validator; +use types::{BeaconStateError as Error, *}; + +/// Update validator registry, activating/exiting validators if possible. +/// +/// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. 
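As a worked illustration of the penalty formula in `process_slashings` above (not part of the patch), the sketch below reproduces the arithmetic with hypothetical balances; `min_penalty_quotient` is a `ChainSpec` parameter and the concrete numbers are assumptions for the example only.

// Mirrors the shape of the penalty expression used in process_slashings above.
fn slashing_penalty(
    effective_balance: u64,
    total_penalties: u64,
    total_balance: u64,
    min_penalty_quotient: u64,
) -> u64 {
    std::cmp::max(
        effective_balance * std::cmp::min(total_penalties * 3, total_balance) / total_balance,
        effective_balance / min_penalty_quotient,
    )
}

// Example (hypothetical values): effective_balance = 32 ETH, total_balance = 3_200 ETH,
// total_penalties = 64 ETH, min_penalty_quotient = 32.
// Proportional term: 32 * min(64 * 3, 3_200) / 3_200 = 32 * 192 / 3_200 = 1.92 ETH.
// Floor term:        32 / 32 = 1 ETH.
// Penalty applied:   max(1.92 ETH, 1 ETH) = 1.92 ETH.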
+/// +/// Spec v0.4.0 +pub fn update_validator_registry(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { + let current_epoch = state.current_epoch(spec); + let active_validator_indices = state.get_active_validator_indices(current_epoch, spec)?; + let total_balance = state.get_total_balance(&active_validator_indices[..], spec)?; + + let max_balance_churn = std::cmp::max( + spec.max_deposit_amount, + total_balance / (2 * spec.max_balance_churn_quotient), + ); + + let mut balance_churn = 0; + for index in 0..state.validator_registry.len() { + let validator = &state.validator_registry[index]; + + if (validator.activation_epoch == spec.far_future_epoch) + & (state.validator_balances[index] == spec.max_deposit_amount) + { + balance_churn += state.get_effective_balance(index, spec)?; + if balance_churn > max_balance_churn { + break; + } + state.activate_validator(index, false, spec); + } + } + + let mut balance_churn = 0; + for index in 0..state.validator_registry.len() { + let validator = &state.validator_registry[index]; + + if (validator.exit_epoch == spec.far_future_epoch) & (validator.initiated_exit) { + balance_churn += state.get_effective_balance(index, spec)?; + if balance_churn > max_balance_churn { + break; + } + + exit_validator(state, index, spec)?; + } + } + + state.validator_registry_update_epoch = current_epoch; + + Ok(()) +} diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 1b2424774..30f95c02c 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -10,7 +10,6 @@ use ssz_derive::{Decode, Encode, TreeHash}; use test_random_derive::TestRandom; mod epoch_cache; -pub mod helpers; mod pubkey_cache; mod tests; @@ -19,17 +18,13 @@ pub const CACHED_EPOCHS: usize = 4; #[derive(Debug, PartialEq)] pub enum Error { EpochOutOfBounds, - /// The supplied shard is unknown. It may be larger than the maximum shard count, or not in a - /// committee for the given slot. SlotOutOfBounds, ShardOutOfBounds, - UnableToShuffle, UnknownValidator, + UnableToDetermineProducer, InvalidBitfield, ValidatorIsWithdrawable, InsufficientRandaoMixes, - NoValidators, - UnableToDetermineProducer, InsufficientBlockRoots, InsufficientIndexRoots, InsufficientAttestations, @@ -37,27 +32,16 @@ pub enum Error { InsufficientSlashedBalances, InsufficientStateRoots, NoCommitteeForShard, - EpochCacheUninitialized(RelativeEpoch), PubkeyCacheInconsistent, PubkeyCacheIncomplete { cache_len: usize, registry_len: usize, }, + EpochCacheUninitialized(RelativeEpoch), RelativeEpochError(RelativeEpochError), EpochCacheError(EpochCacheError), } -macro_rules! safe_add_assign { - ($a: expr, $b: expr) => { - $a = $a.saturating_add($b); - }; -} -macro_rules! safe_sub_assign { - ($a: expr, $b: expr) => { - $a = $a.saturating_sub($b); - }; -} - /// The state of the `BeaconChain` at some slot. /// /// Spec v0.5.0 @@ -95,10 +79,10 @@ pub struct BeaconState { // Recent state pub latest_crosslinks: Vec, - pub latest_block_roots: Vec, - pub latest_state_roots: Vec, - pub latest_active_index_roots: Vec, - pub latest_slashed_balances: Vec, + latest_block_roots: Vec, + latest_state_roots: Vec, + latest_active_index_roots: Vec, + latest_slashed_balances: Vec, pub latest_block_header: BeaconBlockHeader, pub historical_roots: Vec, @@ -209,6 +193,458 @@ impl BeaconState { Hash256::from_slice(&self.hash_tree_root()[..]) } + /// If a validator pubkey exists in the validator registry, returns `Some(i)`, otherwise + /// returns `None`. 
+ /// + /// Requires a fully up-to-date `pubkey_cache`, returns an error if this is not the case. + pub fn get_validator_index(&self, pubkey: &PublicKey) -> Result, Error> { + if self.pubkey_cache.len() == self.validator_registry.len() { + Ok(self.pubkey_cache.get(pubkey)) + } else { + Err(Error::PubkeyCacheIncomplete { + cache_len: self.pubkey_cache.len(), + registry_len: self.validator_registry.len(), + }) + } + } + + /// The epoch corresponding to `self.slot`. + /// + /// Spec v0.5.0 + pub fn current_epoch(&self, spec: &ChainSpec) -> Epoch { + self.slot.epoch(spec.slots_per_epoch) + } + + /// The epoch prior to `self.current_epoch()`. + /// + /// If the current epoch is the genesis epoch, the genesis_epoch is returned. + /// + /// Spec v0.5.0 + pub fn previous_epoch(&self, spec: &ChainSpec) -> Epoch { + self.current_epoch(&spec) - 1 + } + + /// The epoch following `self.current_epoch()`. + /// + /// Spec v0.5.0 + pub fn next_epoch(&self, spec: &ChainSpec) -> Epoch { + self.current_epoch(spec) + 1 + } + + /// Returns the active validator indices for the given epoch, assuming there is no validator + /// registry update in the next epoch. + /// + /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. + /// + /// Spec v0.5.0 + pub fn get_active_validator_indices( + &self, + epoch: Epoch, + spec: &ChainSpec, + ) -> Result<&[usize], Error> { + // If the slot is in the next epoch, assume there was no validator registry update. + let relative_epoch = + match RelativeEpoch::from_epoch(self.slot.epoch(spec.slots_per_epoch), epoch) { + Err(RelativeEpochError::AmbiguiousNextEpoch) => { + Ok(RelativeEpoch::NextWithoutRegistryChange) + } + e => e, + }?; + + let cache = self.cache(relative_epoch, spec)?; + + Ok(&cache.active_validator_indices) + } + + /// Returns the crosslink committees for some slot. + /// + /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. + /// + /// Spec v0.5.0 + pub fn get_crosslink_committees_at_slot( + &self, + slot: Slot, + spec: &ChainSpec, + ) -> Result<&Vec, Error> { + // If the slot is in the next epoch, assume there was no validator registry update. + let relative_epoch = match RelativeEpoch::from_slot(self.slot, slot, spec) { + Err(RelativeEpochError::AmbiguiousNextEpoch) => { + Ok(RelativeEpoch::NextWithoutRegistryChange) + } + e => e, + }?; + + let cache = self.cache(relative_epoch, spec)?; + + Ok(cache + .get_crosslink_committees_at_slot(slot, spec) + .ok_or_else(|| Error::SlotOutOfBounds)?) + } + + /// Returns the crosslink committees for some shard in an epoch. + /// + /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. + /// + /// Spec v0.5.0 + pub fn get_crosslink_committee_for_shard( + &self, + epoch: Epoch, + shard: Shard, + spec: &ChainSpec, + ) -> Result<&CrosslinkCommittee, Error> { + // If the slot is in the next epoch, assume there was no validator registry update. + let relative_epoch = match RelativeEpoch::from_epoch(self.current_epoch(spec), epoch) { + Err(RelativeEpochError::AmbiguiousNextEpoch) => { + Ok(RelativeEpoch::NextWithoutRegistryChange) + } + e => e, + }?; + + let cache = self.cache(relative_epoch, spec)?; + + Ok(cache + .get_crosslink_committee_for_shard(shard, spec) + .ok_or_else(|| Error::NoCommitteeForShard)?) + } + + /// Returns the beacon proposer index for the `slot`. + /// + /// If the state does not contain an index for a beacon proposer at the requested `slot`, then `None` is returned. 
+ /// + /// Spec v0.5.0 + pub fn get_beacon_proposer_index( + &self, + slot: Slot, + relative_epoch: RelativeEpoch, + spec: &ChainSpec, + ) -> Result { + let cache = self.cache(relative_epoch, spec)?; + + let committees = cache + .get_crosslink_committees_at_slot(slot, spec) + .ok_or_else(|| Error::SlotOutOfBounds)?; + + let epoch = slot.epoch(spec.slots_per_epoch); + + committees + .first() + .ok_or(Error::UnableToDetermineProducer) + .and_then(|first| { + let index = epoch + .as_usize() + .checked_rem(first.committee.len()) + .ok_or(Error::UnableToDetermineProducer)?; + Ok(first.committee[index]) + }) + } + + /// Safely obtains the index for latest block roots, given some `slot`. + /// + /// Spec v0.5.0 + fn get_latest_block_roots_index(&self, slot: Slot, spec: &ChainSpec) -> Result { + if (slot < self.slot) && (self.slot <= slot + spec.slots_per_historical_root as u64) { + let i = slot.as_usize() % spec.slots_per_historical_root; + if i >= self.latest_block_roots.len() { + Err(Error::InsufficientStateRoots) + } else { + Ok(i) + } + } else { + Err(BeaconStateError::SlotOutOfBounds) + } + } + + /// Return the block root at a recent `slot`. + /// + /// Spec v0.5.0 + pub fn get_block_root( + &self, + slot: Slot, + spec: &ChainSpec, + ) -> Result<&Hash256, BeaconStateError> { + let i = self.get_latest_block_roots_index(slot, spec)?; + Ok(&self.latest_block_roots[i]) + } + + /// Sets the block root for some given slot. + /// + /// Spec v0.5.0 + pub fn set_block_root( + &mut self, + slot: Slot, + block_root: Hash256, + spec: &ChainSpec, + ) -> Result<(), BeaconStateError> { + let i = self.get_latest_block_roots_index(slot, spec)?; + Ok(self.latest_block_roots[i] = block_root) + } + + /// XOR-assigns the existing `epoch` randao mix with the hash of the `signature`. + /// + /// # Errors: + /// + /// See `Self::get_randao_mix`. + /// + /// Spec v0.5.0 + pub fn update_randao_mix( + &mut self, + epoch: Epoch, + signature: &Signature, + spec: &ChainSpec, + ) -> Result<(), Error> { + let i = epoch.as_usize() % spec.latest_randao_mixes_length; + + let signature_hash = Hash256::from_slice(&hash(&ssz_encode(signature))); + + self.latest_randao_mixes[i] = *self.get_randao_mix(epoch, spec)? ^ signature_hash; + + Ok(()) + } + + /// Return the randao mix at a recent ``epoch``. + /// + /// # Errors: + /// - `InsufficientRandaoMixes` if `self.latest_randao_mixes` is shorter than + /// `spec.latest_randao_mixes_length`. + /// - `EpochOutOfBounds` if the state no longer stores randao mixes for the given `epoch`. + /// + /// Spec v0.5.0 + pub fn get_randao_mix(&self, epoch: Epoch, spec: &ChainSpec) -> Result<&Hash256, Error> { + let current_epoch = self.current_epoch(spec); + + if (current_epoch - (spec.latest_randao_mixes_length as u64) < epoch) + & (epoch <= current_epoch) + { + self.latest_randao_mixes + .get(epoch.as_usize() % spec.latest_randao_mixes_length) + .ok_or_else(|| Error::InsufficientRandaoMixes) + } else { + Err(Error::EpochOutOfBounds) + } + } + + /// Safely obtains the index for `latest_active_index_roots`, given some `epoch`. 
+ /// + /// Spec v0.5.0 + fn get_active_index_root_index(&self, epoch: Epoch, spec: &ChainSpec) -> Result { + let current_epoch = self.current_epoch(spec); + + if (current_epoch - spec.latest_active_index_roots_length as u64 + + spec.activation_exit_delay + < epoch) + & (epoch <= current_epoch + spec.activation_exit_delay) + { + let i = epoch.as_usize() % spec.latest_active_index_roots_length; + if i < self.latest_active_index_roots.len() { + Ok(i) + } else { + Err(Error::InsufficientIndexRoots) + } + } else { + Err(Error::EpochOutOfBounds) + } + } + + /// Return the `active_index_root` at a recent `epoch`. + /// + /// Spec v0.5.0 + pub fn get_active_index_root(&self, epoch: Epoch, spec: &ChainSpec) -> Result { + let i = self.get_active_index_root_index(epoch, spec)?; + Ok(self.latest_active_index_roots[i]) + } + + /// Set the `active_index_root` at a recent `epoch`. + /// + /// Spec v0.5.0 + pub fn set_active_index_root( + &mut self, + epoch: Epoch, + index_root: Hash256, + spec: &ChainSpec, + ) -> Result<(), Error> { + let i = self.get_active_index_root_index(epoch, spec)?; + Ok(self.latest_active_index_roots[i] = index_root) + } + + /// Replace `active_index_roots` with clones of `index_root`. + /// + /// Spec v0.5.0 + pub fn fill_active_index_roots_with(&mut self, index_root: Hash256, spec: &ChainSpec) { + self.latest_active_index_roots = + vec![index_root; spec.latest_active_index_roots_length as usize] + } + + /// Safely obtains the index for latest state roots, given some `slot`. + /// + /// Spec v0.5.0 + fn get_latest_state_roots_index(&self, slot: Slot, spec: &ChainSpec) -> Result { + if (slot < self.slot) && (self.slot <= slot + spec.slots_per_historical_root as u64) { + let i = slot.as_usize() % spec.slots_per_historical_root; + if i >= self.latest_state_roots.len() { + Err(Error::InsufficientStateRoots) + } else { + Ok(i) + } + } else { + Err(BeaconStateError::SlotOutOfBounds) + } + } + + /// Gets the state root for some slot. + /// + /// Spec v0.5.0 + pub fn get_state_root(&mut self, slot: Slot, spec: &ChainSpec) -> Result<&Hash256, Error> { + let i = self.get_latest_state_roots_index(slot, spec)?; + Ok(&self.latest_state_roots[i]) + } + + /// Sets the latest state root for slot. + /// + /// Spec v0.5.0 + pub fn set_state_root( + &mut self, + slot: Slot, + state_root: Hash256, + spec: &ChainSpec, + ) -> Result<(), Error> { + let i = self.get_latest_state_roots_index(slot, spec)?; + Ok(self.latest_state_roots[i] = state_root) + } + + /// Safely obtains the index for `latest_slashed_balances`, given some `epoch`. + /// + /// Spec v0.5.0 + fn get_slashed_balance_index(&self, epoch: Epoch, spec: &ChainSpec) -> Result { + let i = epoch.as_usize() % spec.latest_slashed_exit_length; + + // NOTE: the validity of the epoch is not checked. It is not in the spec but it's probably + // useful to have. + if i < self.latest_slashed_balances.len() { + Ok(i) + } else { + Err(Error::InsufficientSlashedBalances) + } + } + + /// Gets the total slashed balances for some epoch. + /// + /// Spec v0.5.0 + pub fn get_slashed_balance(&self, epoch: Epoch, spec: &ChainSpec) -> Result { + let i = self.get_slashed_balance_index(epoch, spec)?; + Ok(self.latest_slashed_balances[i]) + } + + /// Sets the total slashed balances for some epoch. 
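The accessors above all follow the same bounds-checked ring-buffer pattern. As a rough stand-alone illustration (not part of the patch, with assumed parameter values), the index calculation behaves like this:

// Hypothetical stand-alone version of the check used by get_block_root /
// get_state_root above. Returns None where the real methods return SlotOutOfBounds.
fn historical_root_index(
    request_slot: u64,
    state_slot: u64,
    slots_per_historical_root: u64,
) -> Option<usize> {
    if request_slot < state_slot && state_slot <= request_slot + slots_per_historical_root {
        Some((request_slot % slots_per_historical_root) as usize)
    } else {
        None
    }
}

// With an assumed slots_per_historical_root of 8_192 and state.slot = 10_000:
//   historical_root_index(9_999, 10_000, 8_192)  == Some(1_807) // 9_999 % 8_192
//   historical_root_index(10_000, 10_000, 8_192) == None        // cannot read the current slot
//   historical_root_index(1_000, 10_000, 8_192)  == None        // older than the buffer keeps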
+ /// + /// Spec v0.5.0 + pub fn set_slashed_balance( + &mut self, + epoch: Epoch, + balance: u64, + spec: &ChainSpec, + ) -> Result<(), Error> { + let i = self.get_slashed_balance_index(epoch, spec)?; + Ok(self.latest_slashed_balances[i] = balance) + } + + /// Generate a seed for the given `epoch`. + /// + /// Spec v0.5.0 + pub fn generate_seed(&self, epoch: Epoch, spec: &ChainSpec) -> Result { + let mut input = self + .get_randao_mix(epoch - spec.min_seed_lookahead, spec)? + .as_bytes() + .to_vec(); + + input.append(&mut self.get_active_index_root(epoch, spec)?.as_bytes().to_vec()); + + input.append(&mut int_to_bytes32(epoch.as_u64())); + + Ok(Hash256::from_slice(&hash(&input[..])[..])) + } + + /// Return the effective balance (also known as "balance at stake") for a validator with the given ``index``. + /// + /// Spec v0.5.0 + pub fn get_effective_balance( + &self, + validator_index: usize, + spec: &ChainSpec, + ) -> Result { + let balance = self + .validator_balances + .get(validator_index) + .ok_or_else(|| Error::UnknownValidator)?; + Ok(std::cmp::min(*balance, spec.max_deposit_amount)) + } + + /// Return the epoch at which an activation or exit triggered in ``epoch`` takes effect. + /// + /// Spec v0.5.0 + pub fn get_delayed_activation_exit_epoch(&self, epoch: Epoch, spec: &ChainSpec) -> Epoch { + epoch + 1 + spec.activation_exit_delay + } + + /// Activate the validator of the given ``index``. + /// + /// Spec v0.5.0 + pub fn activate_validator( + &mut self, + validator_index: usize, + is_genesis: bool, + spec: &ChainSpec, + ) { + let current_epoch = self.current_epoch(spec); + + self.validator_registry[validator_index].activation_epoch = if is_genesis { + spec.genesis_epoch + } else { + self.get_delayed_activation_exit_epoch(current_epoch, spec) + } + } + + /// Initiate an exit for the validator of the given `index`. + /// + /// Spec v0.5.0 + pub fn initiate_validator_exit(&mut self, validator_index: usize) { + self.validator_registry[validator_index].initiated_exit = true; + } + + /// Returns the `slot`, `shard` and `committee_index` for which a validator must produce an + /// attestation. + /// + /// Only reads the current epoch. + /// + /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. + /// + /// Spec v0.5.0 + pub fn get_attestation_duties( + &self, + validator_index: usize, + spec: &ChainSpec, + ) -> Result<&Option, Error> { + let cache = self.cache(RelativeEpoch::Current, spec)?; + + Ok(cache + .attestation_duties + .get(validator_index) + .ok_or_else(|| Error::UnknownValidator)?) + } + + /// Return the combined effective balance of an array of validators. + /// + /// Spec v0.5.0 + pub fn get_total_balance( + &self, + validator_indices: &[usize], + spec: &ChainSpec, + ) -> Result { + validator_indices.iter().try_fold(0_u64, |acc, i| { + self.get_effective_balance(*i, spec) + .and_then(|bal| Ok(bal + acc)) + }) + } + /// Build an epoch cache, unless it is has already been built. pub fn build_epoch_cache( &mut self, @@ -311,633 +747,6 @@ impl BeaconState { pub fn drop_pubkey_cache(&mut self) { self.pubkey_cache = PubkeyCache::default() } - - /// If a validator pubkey exists in the validator registry, returns `Some(i)`, otherwise - /// returns `None`. - /// - /// Requires a fully up-to-date `pubkey_cache`, returns an error if this is not the case. 
- pub fn get_validator_index(&self, pubkey: &PublicKey) -> Result, Error> { - if self.pubkey_cache.len() == self.validator_registry.len() { - Ok(self.pubkey_cache.get(pubkey)) - } else { - Err(Error::PubkeyCacheIncomplete { - cache_len: self.pubkey_cache.len(), - registry_len: self.validator_registry.len(), - }) - } - } - - /// The epoch corresponding to `self.slot`. - /// - /// Spec v0.5.0 - pub fn current_epoch(&self, spec: &ChainSpec) -> Epoch { - self.slot.epoch(spec.slots_per_epoch) - } - - /// The epoch prior to `self.current_epoch()`. - /// - /// If the current epoch is the genesis epoch, the genesis_epoch is returned. - /// - /// Spec v0.5.0 - pub fn previous_epoch(&self, spec: &ChainSpec) -> Epoch { - self.current_epoch(&spec) - 1 - } - - /// The epoch following `self.current_epoch()`. - /// - /// Spec v0.5.0 - pub fn next_epoch(&self, spec: &ChainSpec) -> Epoch { - self.current_epoch(spec) + 1 - } - - /// Returns the active validator indices for the given epoch, assuming there is no validator - /// registry update in the next epoch. - /// - /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. - /// - /// Spec v0.5.0 - pub fn get_active_validator_indices( - &self, - epoch: Epoch, - spec: &ChainSpec, - ) -> Result<&[usize], Error> { - // If the slot is in the next epoch, assume there was no validator registry update. - let relative_epoch = - match RelativeEpoch::from_epoch(self.slot.epoch(spec.slots_per_epoch), epoch) { - Err(RelativeEpochError::AmbiguiousNextEpoch) => { - Ok(RelativeEpoch::NextWithoutRegistryChange) - } - e => e, - }?; - - let cache = self.cache(relative_epoch, spec)?; - - Ok(&cache.active_validator_indices) - } - - /// Returns the crosslink committees for some slot. - /// - /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. - /// - /// Spec v0.5.0 - pub fn get_crosslink_committees_at_slot( - &self, - slot: Slot, - spec: &ChainSpec, - ) -> Result<&Vec, Error> { - // If the slot is in the next epoch, assume there was no validator registry update. - let relative_epoch = match RelativeEpoch::from_slot(self.slot, slot, spec) { - Err(RelativeEpochError::AmbiguiousNextEpoch) => { - Ok(RelativeEpoch::NextWithoutRegistryChange) - } - e => e, - }?; - - let cache = self.cache(relative_epoch, spec)?; - - Ok(cache - .get_crosslink_committees_at_slot(slot, spec) - .ok_or_else(|| Error::SlotOutOfBounds)?) - } - - /// Returns the crosslink committees for some shard in an epoch. - /// - /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. - /// - /// Spec v0.4.0 - pub fn get_crosslink_committee_for_shard( - &self, - epoch: Epoch, - shard: Shard, - spec: &ChainSpec, - ) -> Result<&CrosslinkCommittee, Error> { - // If the slot is in the next epoch, assume there was no validator registry update. - let relative_epoch = match RelativeEpoch::from_epoch(self.current_epoch(spec), epoch) { - Err(RelativeEpochError::AmbiguiousNextEpoch) => { - Ok(RelativeEpoch::NextWithoutRegistryChange) - } - e => e, - }?; - - let cache = self.cache(relative_epoch, spec)?; - - Ok(cache - .get_crosslink_committee_for_shard(shard, spec) - .ok_or_else(|| Error::NoCommitteeForShard)?) - } - - /// Safely obtains the index for latest block roots, given some `slot`. 
- /// - /// Spec v0.5.0 - fn get_latest_block_roots_index(&self, slot: Slot, spec: &ChainSpec) -> Result { - if (slot < self.slot) && (self.slot <= slot + spec.slots_per_historical_root as u64) { - let i = slot.as_usize() % spec.slots_per_historical_root; - if i >= self.latest_block_roots.len() { - Err(Error::InsufficientStateRoots) - } else { - Ok(i) - } - } else { - Err(BeaconStateError::SlotOutOfBounds) - } - } - - /// Return the block root at a recent `slot`. - /// - /// Spec v0.5.0 - pub fn get_block_root( - &self, - slot: Slot, - spec: &ChainSpec, - ) -> Result<&Hash256, BeaconStateError> { - let i = self.get_latest_block_roots_index(slot, spec)?; - Ok(&self.latest_block_roots[i]) - } - - /// Sets the block root for some given slot. - /// - /// Spec v0.5.0 - pub fn set_block_root( - &mut self, - slot: Slot, - block_root: Hash256, - spec: &ChainSpec, - ) -> Result<(), BeaconStateError> { - let i = self.get_latest_block_roots_index(slot, spec)?; - Ok(self.latest_block_roots[i] = block_root) - } - - /// XOR-assigns the existing `epoch` randao mix with the hash of the `signature`. - /// - /// # Errors: - /// - /// See `Self::get_randao_mix`. - /// - /// Spec v0.5.0 - pub fn update_randao_mix( - &mut self, - epoch: Epoch, - signature: &Signature, - spec: &ChainSpec, - ) -> Result<(), Error> { - let i = epoch.as_usize() % spec.latest_randao_mixes_length; - - let signature_hash = Hash256::from_slice(&hash(&ssz_encode(signature))); - - self.latest_randao_mixes[i] = *self.get_randao_mix(epoch, spec)? ^ signature_hash; - - Ok(()) - } - - /// Return the randao mix at a recent ``epoch``. - /// - /// # Errors: - /// - `InsufficientRandaoMixes` if `self.latest_randao_mixes` is shorter than - /// `spec.latest_randao_mixes_length`. - /// - `EpochOutOfBounds` if the state no longer stores randao mixes for the given `epoch`. - /// - /// Spec v0.5.0 - pub fn get_randao_mix(&self, epoch: Epoch, spec: &ChainSpec) -> Result<&Hash256, Error> { - let current_epoch = self.current_epoch(spec); - - if (current_epoch - (spec.latest_randao_mixes_length as u64) < epoch) - & (epoch <= current_epoch) - { - self.latest_randao_mixes - .get(epoch.as_usize() % spec.latest_randao_mixes_length) - .ok_or_else(|| Error::InsufficientRandaoMixes) - } else { - Err(Error::EpochOutOfBounds) - } - } - - /// Return the index root at a recent `epoch`. - /// - /// Spec v0.4.0 - pub fn get_active_index_root(&self, epoch: Epoch, spec: &ChainSpec) -> Option { - let current_epoch = self.current_epoch(spec); - - if (current_epoch - spec.latest_active_index_roots_length as u64 - + spec.activation_exit_delay - < epoch) - & (epoch <= current_epoch + spec.activation_exit_delay) - { - Some( - self.latest_active_index_roots - [epoch.as_usize() % spec.latest_active_index_roots_length], - ) - } else { - None - } - } - - /// Safely obtains the index for latest state roots, given some `slot`. - /// - /// Spec v0.5.0 - fn get_latest_state_roots_index(&self, slot: Slot, spec: &ChainSpec) -> Result { - if (slot < self.slot) && (self.slot <= slot + spec.slots_per_historical_root as u64) { - let i = slot.as_usize() % spec.slots_per_historical_root; - if i >= self.latest_state_roots.len() { - Err(Error::InsufficientStateRoots) - } else { - Ok(i) - } - } else { - Err(BeaconStateError::SlotOutOfBounds) - } - } - - /// Gets the state root for some slot. 
- /// - /// Spec v0.5.0 - pub fn get_state_root(&mut self, slot: Slot, spec: &ChainSpec) -> Result<&Hash256, Error> { - let i = self.get_latest_state_roots_index(slot, spec)?; - Ok(&self.latest_state_roots[i]) - } - - /// Sets the latest state root for slot. - /// - /// Spec v0.5.0 - pub fn set_state_root( - &mut self, - slot: Slot, - state_root: Hash256, - spec: &ChainSpec, - ) -> Result<(), Error> { - let i = self.get_latest_state_roots_index(slot, spec)?; - Ok(self.latest_state_roots[i] = state_root) - } - - /// Generate a seed for the given `epoch`. - /// - /// Spec v0.4.0 - pub fn generate_seed(&self, epoch: Epoch, spec: &ChainSpec) -> Result { - let mut input = self - .get_randao_mix(epoch - spec.min_seed_lookahead, spec)? - .as_bytes() - .to_vec(); - - input.append( - &mut self - .get_active_index_root(epoch, spec) - .ok_or_else(|| Error::InsufficientIndexRoots)? - .as_bytes() - .to_vec(), - ); - - input.append(&mut int_to_bytes32(epoch.as_u64())); - - Ok(Hash256::from_slice(&hash(&input[..])[..])) - } - - /// Returns the beacon proposer index for the `slot`. - /// - /// If the state does not contain an index for a beacon proposer at the requested `slot`, then `None` is returned. - /// - /// Spec v0.5.0 - pub fn get_beacon_proposer_index( - &self, - slot: Slot, - relative_epoch: RelativeEpoch, - spec: &ChainSpec, - ) -> Result { - let cache = self.cache(relative_epoch, spec)?; - - let committees = cache - .get_crosslink_committees_at_slot(slot, spec) - .ok_or_else(|| Error::SlotOutOfBounds)?; - - let epoch = slot.epoch(spec.slots_per_epoch); - - committees - .first() - .ok_or(Error::UnableToDetermineProducer) - .and_then(|first| { - let index = epoch - .as_usize() - .checked_rem(first.committee.len()) - .ok_or(Error::UnableToDetermineProducer)?; - Ok(first.committee[index]) - }) - } - - /// Return the effective balance (also known as "balance at stake") for a validator with the given ``index``. - /// - /// Spec v0.4.0 - pub fn get_effective_balance( - &self, - validator_index: usize, - spec: &ChainSpec, - ) -> Result { - let balance = self - .validator_balances - .get(validator_index) - .ok_or_else(|| Error::UnknownValidator)?; - Ok(std::cmp::min(*balance, spec.max_deposit_amount)) - } - - /// Return the epoch at which an activation or exit triggered in ``epoch`` takes effect. - /// - /// Spec v0.4.0 - pub fn get_delayed_activation_exit_epoch(&self, epoch: Epoch, spec: &ChainSpec) -> Epoch { - epoch + 1 + spec.activation_exit_delay - } - - /// Activate the validator of the given ``index``. - /// - /// Spec v0.5.0 - pub fn activate_validator( - &mut self, - validator_index: usize, - is_genesis: bool, - spec: &ChainSpec, - ) { - let current_epoch = self.current_epoch(spec); - - self.validator_registry[validator_index].activation_epoch = if is_genesis { - spec.genesis_epoch - } else { - self.get_delayed_activation_exit_epoch(current_epoch, spec) - } - } - - /// Initiate an exit for the validator of the given `index`. - /// - /// Spec v0.5.0 - pub fn initiate_validator_exit(&mut self, validator_index: usize) { - self.validator_registry[validator_index].initiated_exit = true; - } - - /// Exit the validator of the given `index`. 
- /// - /// Spec v0.4.0 - fn exit_validator(&mut self, validator_index: usize, spec: &ChainSpec) { - let current_epoch = self.current_epoch(spec); - let delayed_epoch = self.get_delayed_activation_exit_epoch(current_epoch, spec); - - if self.validator_registry[validator_index].exit_epoch <= delayed_epoch { - return; - } - - self.validator_registry[validator_index].exit_epoch = delayed_epoch; - } - - /// Slash the validator with index ``index``. - /// - /// Spec v0.5.0 - pub fn slash_validator( - &mut self, - validator_index: usize, - spec: &ChainSpec, - ) -> Result<(), Error> { - let current_epoch = self.current_epoch(spec); - - let validator = &self - .validator_registry - .get(validator_index) - .ok_or_else(|| Error::UnknownValidator)?; - let effective_balance = self.get_effective_balance(validator_index, spec)?; - - // A validator that is withdrawn cannot be slashed. - // - // This constraint will be lifted in Phase 0. - if self.slot - >= validator - .withdrawable_epoch - .start_slot(spec.slots_per_epoch) - { - return Err(Error::ValidatorIsWithdrawable); - } - - self.exit_validator(validator_index, spec); - - self.increment_current_epoch_slashed_balances(effective_balance, spec)?; - - let whistleblower_index = - self.get_beacon_proposer_index(self.slot, RelativeEpoch::Current, spec)?; - let whistleblower_reward = effective_balance / spec.whistleblower_reward_quotient; - - safe_add_assign!( - self.validator_balances[whistleblower_index as usize], - whistleblower_reward - ); - safe_sub_assign!( - self.validator_balances[validator_index], - whistleblower_reward - ); - - self.validator_registry[validator_index].slashed = true; - - self.validator_registry[validator_index].withdrawable_epoch = - current_epoch + Epoch::from(spec.latest_slashed_exit_length); - - Ok(()) - } - - /// Increment `self.latest_slashed_balances` with a slashing from the current epoch. - /// - /// Spec v0.5.0. - fn increment_current_epoch_slashed_balances( - &mut self, - increment: u64, - spec: &ChainSpec, - ) -> Result<(), Error> { - let current_epoch = self.current_epoch(spec); - - let slashed_balances_index = current_epoch.as_usize() % spec.latest_slashed_exit_length; - if slashed_balances_index >= self.latest_slashed_balances.len() { - return Err(Error::InsufficientSlashedBalances); - } - - self.latest_slashed_balances[slashed_balances_index] += increment; - - Ok(()) - } - - /// Initiate an exit for the validator of the given `index`. - /// - /// Spec v0.4.0 - pub fn prepare_validator_for_withdrawal(&mut self, validator_index: usize, spec: &ChainSpec) { - //TODO: we're not ANDing here, we're setting. Potentially wrong. - self.validator_registry[validator_index].withdrawable_epoch = - self.current_epoch(spec) + spec.min_validator_withdrawability_delay; - } - - /// Returns the `slot`, `shard` and `committee_index` for which a validator must produce an - /// attestation. - /// - /// Only reads the current epoch. - /// - /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. - /// - /// Spec v0.4.0 - pub fn get_attestation_duties( - &self, - validator_index: usize, - spec: &ChainSpec, - ) -> Result<&Option, Error> { - let cache = self.cache(RelativeEpoch::Current, spec)?; - - Ok(cache - .attestation_duties - .get(validator_index) - .ok_or_else(|| Error::UnknownValidator)?) - } - - /// Process slashings. - /// - /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. 
- /// - /// Spec v0.4.0 - pub fn process_slashings(&mut self, spec: &ChainSpec) -> Result<(), Error> { - let current_epoch = self.current_epoch(spec); - let active_validator_indices = self.get_active_validator_indices(current_epoch, spec)?; - let total_balance = self.get_total_balance(&active_validator_indices[..], spec)?; - - for (index, validator) in self.validator_registry.iter().enumerate() { - if validator.slashed - && (current_epoch - == validator.withdrawable_epoch - - Epoch::from(spec.latest_slashed_exit_length / 2)) - { - let epoch_index: usize = current_epoch.as_usize() % spec.latest_slashed_exit_length; - - let total_at_start = self.latest_slashed_balances - [(epoch_index + 1) % spec.latest_slashed_exit_length]; - let total_at_end = self.latest_slashed_balances[epoch_index]; - let total_penalities = total_at_end.saturating_sub(total_at_start); - - let effective_balance = self.get_effective_balance(index, spec)?; - let penalty = std::cmp::max( - effective_balance * std::cmp::min(total_penalities * 3, total_balance) - / total_balance, - effective_balance / spec.min_penalty_quotient, - ); - - safe_sub_assign!(self.validator_balances[index], penalty); - } - } - - Ok(()) - } - - /// Process the exit queue. - /// - /// Spec v0.4.0 - pub fn process_exit_queue(&mut self, spec: &ChainSpec) { - let current_epoch = self.current_epoch(spec); - - let eligible = |index: usize| { - let validator = &self.validator_registry[index]; - - if validator.withdrawable_epoch != spec.far_future_epoch { - false - } else { - current_epoch >= validator.exit_epoch + spec.min_validator_withdrawability_delay - } - }; - - let mut eligable_indices: Vec = (0..self.validator_registry.len()) - .filter(|i| eligible(*i)) - .collect(); - eligable_indices.sort_by_key(|i| self.validator_registry[*i].exit_epoch); - - for (withdrawn_so_far, index) in eligable_indices.iter().enumerate() { - if withdrawn_so_far as u64 >= spec.max_exit_dequeues_per_epoch { - break; - } - self.prepare_validator_for_withdrawal(*index, spec); - } - } - - /// Update validator registry, activating/exiting validators if possible. - /// - /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. 
- /// - /// Spec v0.4.0 - pub fn update_validator_registry(&mut self, spec: &ChainSpec) -> Result<(), Error> { - let current_epoch = self.current_epoch(spec); - let active_validator_indices = self.get_active_validator_indices(current_epoch, spec)?; - let total_balance = self.get_total_balance(&active_validator_indices[..], spec)?; - - let max_balance_churn = std::cmp::max( - spec.max_deposit_amount, - total_balance / (2 * spec.max_balance_churn_quotient), - ); - - let mut balance_churn = 0; - for index in 0..self.validator_registry.len() { - let validator = &self.validator_registry[index]; - - if (validator.activation_epoch == spec.far_future_epoch) - & (self.validator_balances[index] == spec.max_deposit_amount) - { - balance_churn += self.get_effective_balance(index, spec)?; - if balance_churn > max_balance_churn { - break; - } - self.activate_validator(index, false, spec); - } - } - - let mut balance_churn = 0; - for index in 0..self.validator_registry.len() { - let validator = &self.validator_registry[index]; - - if (validator.exit_epoch == spec.far_future_epoch) & (validator.initiated_exit) { - balance_churn += self.get_effective_balance(index, spec)?; - if balance_churn > max_balance_churn { - break; - } - - self.exit_validator(index, spec); - } - } - - self.validator_registry_update_epoch = current_epoch; - - Ok(()) - } - - /// Iterate through the validator registry and eject active validators with balance below - /// ``EJECTION_BALANCE``. - /// - /// Spec v0.5.0 - pub fn process_ejections(&mut self, spec: &ChainSpec) -> Result<(), Error> { - // There is an awkward double (triple?) loop here because we can't loop across the borrowed - // active validator indices and mutate state in the one loop. - let exitable: Vec = self - .get_active_validator_indices(self.current_epoch(spec), spec)? - .iter() - .filter_map(|&i| { - if self.validator_balances[i as usize] < spec.ejection_balance { - Some(i) - } else { - None - } - }) - .collect(); - - for validator_index in exitable { - self.exit_validator(validator_index, spec) - } - - Ok(()) - } - - /// Return the combined effective balance of an array of validators. 
- /// - /// Spec v0.5.0 - pub fn get_total_balance( - &self, - validator_indices: &[usize], - spec: &ChainSpec, - ) -> Result { - validator_indices.iter().try_fold(0_u64, |acc, i| { - self.get_effective_balance(*i, spec) - .and_then(|bal| Ok(bal + acc)) - }) - } } impl From for Error { From be712f5b05d4171957e1554633ca6f4ee6719a16 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 18 Mar 2019 23:04:17 +1100 Subject: [PATCH 116/154] Add network id to chainspec --- eth2/types/src/chain_spec.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index 1607b85aa..0c6caaeee 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -118,6 +118,7 @@ pub struct ChainSpec { * */ pub boot_nodes: Vec, + pub network_id: u8, } impl ChainSpec { @@ -254,6 +255,7 @@ impl ChainSpec { * Boot nodes */ boot_nodes: vec![], + network_id: 1, // foundation network id } } @@ -270,6 +272,7 @@ impl ChainSpec { Self { boot_nodes, + network_id: 2, // lighthouse testnet network id ..ChainSpec::few_validators() } } From 0625bb6b03ecaed807db54208d1c6749ceecc52d Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 18 Mar 2019 23:18:25 +1100 Subject: [PATCH 117/154] Add network channel into message handler --- beacon_node/network/src/message_handler.rs | 16 +++++++-- beacon_node/network/src/messages.rs | 11 ++----- beacon_node/network/src/service.rs | 38 +++++++++++++++------- 3 files changed, 44 insertions(+), 21 deletions(-) diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index b904993bb..02234f326 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -1,6 +1,7 @@ use crate::beacon_chain::BeaconChain; use crate::error; use crate::messages::NodeMessage; +use crate::service::NetworkMessage; use crossbeam_channel::{unbounded as channel, Sender}; use futures::future; use futures::prelude::*; @@ -22,6 +23,8 @@ pub struct MessageHandler { chain: Arc, /// The syncing framework. sync: SimpleSync, + /// The network channel to relay messages to the Network service. + network_send: crossbeam_channel::Sender, /// A mapping of peers we have sent a HELLO rpc request to. hello_requests: HashMap, /// The `MessageHandler` logger. @@ -45,6 +48,7 @@ impl MessageHandler { /// Initializes and runs the MessageHandler. pub fn new( beacon_chain: Arc, + network_send: crossbeam_channel::Sender, executor: &tokio::runtime::TaskExecutor, log: slog::Logger, ) -> error::Result> { @@ -62,6 +66,7 @@ impl MessageHandler { let mut handler = MessageHandler { chain: beacon_chain.clone(), sync, + network_send, hello_requests: HashMap::new(), log: log.clone(), }; @@ -81,12 +86,19 @@ impl MessageHandler { fn handle_message(&mut self, message: HandlerMessage) { match message { - HandlerMessage::PeerDialed(peer_id) => self.send_hello(peer_id), + HandlerMessage::PeerDialed(peer_id) => { + // register RPC request + self.hello_requests.insert(peer_id.clone(), Instant::now()); + self.send_hello(peer_id); + } //TODO: Handle all messages _ => {} } } /// Sends a HELLO RPC request to a newly connected peer. 
- fn send_hello(&self, peer_id: PeerId) {} + fn send_hello(&self, peer_id: PeerId) { + // send the hello request to the network + //sync.hello() + } } diff --git a/beacon_node/network/src/messages.rs b/beacon_node/network/src/messages.rs index 930c90b3e..6a69cbb87 100644 --- a/beacon_node/network/src/messages.rs +++ b/beacon_node/network/src/messages.rs @@ -2,7 +2,10 @@ use libp2p::PeerId; use libp2p::{HelloMessage, RPCEvent}; use types::{Hash256, Slot}; +//TODO: This module can be entirely replaced in the RPC rewrite + /// Messages between nodes across the network. +//TODO: Remove this in the RPC rewrite #[derive(Debug, Clone)] pub enum NodeMessage { RPC(RPCEvent), @@ -10,11 +13,3 @@ pub enum NodeMessage { // TODO: only for testing - remove Message(String), } - -/// Types of messages that the network service can receive. -#[derive(Debug, Clone)] -pub enum NetworkMessage { - /// Send a message to libp2p service. - //TODO: Define typing for messages across the wire - Send(PeerId, NodeMessage), -} diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 21f948a71..7ad3bdb3e 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -1,7 +1,7 @@ use crate::beacon_chain::BeaconChain; use crate::error; use crate::message_handler::{HandlerMessage, MessageHandler}; -use crate::messages::{NetworkMessage, NodeMessage}; +use crate::messages::NodeMessage; use crate::NetworkConfig; use crossbeam_channel::{unbounded as channel, Sender, TryRecvError}; use futures::prelude::*; @@ -29,18 +29,29 @@ impl Service { executor: &TaskExecutor, log: slog::Logger, ) -> error::Result<(Arc, Sender)> { + // build the network channel + let (network_send, network_recv) = channel::(); // launch message handler thread let message_handler_log = log.new(o!("Service" => "MessageHandler")); - let message_handler_send = - MessageHandler::new(beacon_chain, executor, message_handler_log)?; + let message_handler_send = MessageHandler::new( + beacon_chain, + network_send.clone(), + executor, + message_handler_log, + )?; // launch libp2p service let libp2p_log = log.new(o!("Service" => "Libp2p")); let libp2p_service = LibP2PService::new(config.clone(), libp2p_log)?; // TODO: Spawn thread to handle libp2p messages and pass to message handler thread. - let (network_send, libp2p_exit) = - spawn_service(libp2p_service, message_handler_send, executor, log)?; + let libp2p_exit = spawn_service( + libp2p_service, + network_recv, + message_handler_send, + executor, + log, + )?; let network = Service { libp2p_exit, network_send: network_send.clone(), @@ -59,15 +70,12 @@ impl Service { fn spawn_service( libp2p_service: LibP2PService, + network_recv: crossbeam_channel::Receiver, message_handler_send: crossbeam_channel::Sender, executor: &TaskExecutor, log: slog::Logger, -) -> error::Result<( - crossbeam_channel::Sender, - oneshot::Sender<()>, -)> { +) -> error::Result> { let (network_exit, exit_rx) = oneshot::channel(); - let (network_send, network_recv) = channel::(); // spawn on the current executor executor.spawn( @@ -85,7 +93,7 @@ fn spawn_service( }), ); - Ok((network_send, network_exit)) + Ok(network_exit) } fn network_service( @@ -148,3 +156,11 @@ fn network_service( Ok(Async::NotReady) }) } + +/// Types of messages that the network service can receive. +#[derive(Debug, Clone)] +pub enum NetworkMessage { + /// Send a message to libp2p service. 
+ //TODO: Define typing for messages across the wire + Send(PeerId, NodeMessage), +} From 8ec0688cb93c71cede5072cef007818aebc4df75 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 18 Mar 2019 23:34:44 +1100 Subject: [PATCH 118/154] Implements RPC call functionality --- beacon_node/libp2p/src/behaviour.rs | 8 +++--- beacon_node/libp2p/src/rpc/mod.rs | 11 +++----- beacon_node/network/src/service.rs | 41 ++++++++++++++++++----------- 3 files changed, 34 insertions(+), 26 deletions(-) diff --git a/beacon_node/libp2p/src/behaviour.rs b/beacon_node/libp2p/src/behaviour.rs index 604b84c8f..f0a89027b 100644 --- a/beacon_node/libp2p/src/behaviour.rs +++ b/beacon_node/libp2p/src/behaviour.rs @@ -72,14 +72,16 @@ impl Behaviour { } } +/// Implements the combined behaviour for the libp2p service. impl Behaviour { + /// Subscribes to a gossipsub topic. pub fn subscribe(&mut self, topic: Topic) -> bool { self.gossipsub.subscribe(topic) } - pub fn send_message(&self, message: String) { - // TODO: Encode and send via gossipsub - + /// Sends an RPC Request/Response via the RPC protocol. + pub fn send_rpc(&mut self, peer_id: PeerId, rpc_event: RPCEvent) { + self.serenity_rpc.send_rpc(peer_id, rpc_event); } } diff --git a/beacon_node/libp2p/src/rpc/mod.rs b/beacon_node/libp2p/src/rpc/mod.rs index d40e53935..907e95763 100644 --- a/beacon_node/libp2p/src/rpc/mod.rs +++ b/beacon_node/libp2p/src/rpc/mod.rs @@ -13,7 +13,7 @@ use libp2p::core::swarm::{ use libp2p::{Multiaddr, PeerId}; pub use methods::{HelloMessage, RPCMethod, RPCRequest, RPCResponse}; pub use protocol::{RPCEvent, RPCProtocol}; -use slog::{debug, o, Logger}; +use slog::{debug, o}; use std::marker::PhantomData; use tokio::io::{AsyncRead, AsyncWrite}; @@ -40,15 +40,10 @@ impl Rpc { } /// Submits and RPC request. 
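A compressed sketch of the crossbeam channel plumbing wired through the service in the previous patch: the message handler keeps a Sender clone while the service loop drains the Receiver with try_recv. Illustrative only — String stands in for the real PeerId/OutgoingMessage types, and the crossbeam-channel crate the network crate already depends on is assumed:

use crossbeam_channel::{unbounded as channel, TryRecvError};

#[derive(Debug)]
enum NetworkMessage {
    Send(String, String), // stand-ins for PeerId and OutgoingMessage
}

fn main() {
    let (network_send, network_recv) = channel::<NetworkMessage>();

    // A message handler owns a clone of the sender...
    let handler_send = network_send.clone();
    handler_send
        .send(NetworkMessage::Send("peer".into(), "hello rpc".into()))
        .expect("network service is alive");

    // ...while the service loop drains the receiver without blocking.
    loop {
        match network_recv.try_recv() {
            Ok(msg) => println!("outgoing: {:?}", msg),
            Err(TryRecvError::Empty) => break,
            Err(TryRecvError::Disconnected) => break,
        }
    }
}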
- pub fn send_request(&mut self, peer_id: PeerId, id: u64, method_id: u16, body: RPCRequest) { - let request = RPCEvent::Request { - id, - method_id, - body, - }; + pub fn send_rpc(&mut self, peer_id: PeerId, rpc_event: RPCEvent) { self.events.push(NetworkBehaviourAction::SendEvent { peer_id, - event: request, + event: rpc_event, }); } } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 7ad3bdb3e..4cb1038d1 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -7,9 +7,10 @@ use crossbeam_channel::{unbounded as channel, Sender, TryRecvError}; use futures::prelude::*; use futures::sync::oneshot; use futures::Stream; +use libp2p::RPCEvent; use libp2p::Service as LibP2PService; use libp2p::{Libp2pEvent, PeerId}; -use slog::{debug, info, o}; +use slog::{debug, info, o, trace}; use std::sync::Arc; use tokio::runtime::TaskExecutor; @@ -63,8 +64,10 @@ impl Service { // TODO: Testing only pub fn send_message(&self, message: String) { let node_message = NodeMessage::Message(message); - self.network_send - .send(NetworkMessage::Send(PeerId::random(), node_message)); + self.network_send.send(NetworkMessage::Send( + PeerId::random(), + OutgoingMessage::NotifierTest, + )); } } @@ -113,13 +116,13 @@ fn network_service( ); message_handler_send .send(HandlerMessage::RPC(rpc_event)) - .map_err(|_| "failed to send rpc to handler"); + .map_err(|_| "failed to send rpc to handler")?; } Ok(Async::Ready(Some(Libp2pEvent::PeerDialed(peer_id)))) => { debug!(libp2p_service.log, "Peer Dialed: {:?}", peer_id); message_handler_send .send(HandlerMessage::PeerDialed(peer_id)) - .map_err(|_| "failed to send rpc to handler"); + .map_err(|_| "failed to send rpc to handler")?; } Ok(Async::Ready(Some(Libp2pEvent::Message(m)))) => debug!( libp2p_service.log, @@ -133,24 +136,23 @@ fn network_service( loop { match network_recv.try_recv() { // TODO: Testing message - remove - Ok(NetworkMessage::Send(_peer_id, node_message)) => { - match node_message { - NodeMessage::Message(m) => { - debug!(log, "Message received via network channel: {:?}", m); + Ok(NetworkMessage::Send(peer_id, outgoing_message)) => { + match outgoing_message { + OutgoingMessage::RPC(rpc_event) => { + trace!(log, "Sending RPC Event: {:?}", rpc_event); //TODO: Make swarm private //TODO: Implement correct peer id topic message handling - libp2p_service.swarm.send_message(m); + libp2p_service.swarm.send_rpc(peer_id, rpc_event); + } + OutgoingMessage::NotifierTest => { + debug!(log, "Received message from notifier"); } - //TODO: Handle all NodeMessage types - _ => break, }; } Err(TryRecvError::Empty) => break, Err(TryRecvError::Disconnected) => { return Err(libp2p::error::Error::from("Network channel disconnected")); } - // TODO: Implement all NetworkMessage - _ => break, } } Ok(Async::NotReady) @@ -162,5 +164,14 @@ fn network_service( pub enum NetworkMessage { /// Send a message to libp2p service. //TODO: Define typing for messages across the wire - Send(PeerId, NodeMessage), + Send(PeerId, OutgoingMessage), +} + +/// Type of outgoing messages that can be sent through the network service. +#[derive(Debug, Clone)] +pub enum OutgoingMessage { + /// Send an RPC request/response. 
+ RPC(RPCEvent), + //TODO: Remove + NotifierTest, } From 41abdb7599168ba760d78e4c36b76ea8c991392b Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 19 Mar 2019 00:05:06 +1100 Subject: [PATCH 119/154] Remove sync crate, move into network crate --- Cargo.toml | 1 - beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/client/Cargo.toml | 1 - beacon_node/network/Cargo.toml | 1 - beacon_node/network/src/beacon_chain.rs | 8 +++++++- beacon_node/network/src/lib.rs | 1 + beacon_node/network/src/message_handler.rs | 11 ++++------- .../{sync/src/lib.rs => network/src/sync/mod.rs} | 0 .../src => network/src/sync}/simple_sync.rs | 16 +++++++++++----- beacon_node/sync/Cargo.toml | 9 --------- 10 files changed, 24 insertions(+), 25 deletions(-) rename beacon_node/{sync/src/lib.rs => network/src/sync/mod.rs} (100%) rename beacon_node/{sync/src => network/src/sync}/simple_sync.rs (54%) delete mode 100644 beacon_node/sync/Cargo.toml diff --git a/Cargo.toml b/Cargo.toml index 89158542e..d34f6fd30 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,7 +23,6 @@ members = [ "beacon_node/client", "beacon_node/network", "beacon_node/rpc", - "beacon_node/sync", "beacon_node/version", "beacon_node/beacon_chain", "beacon_node/beacon_chain/test_harness", diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 5acac6ff2..2137c0edf 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -11,3 +11,4 @@ pub use db; pub use fork_choice; pub use parking_lot; pub use slot_clock; +pub use types; diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 8914a9e7e..11453e4b8 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -7,7 +7,6 @@ edition = "2018" [dependencies] beacon_chain = { path = "../beacon_chain" } network = { path = "../network" } -sync = { path = "../sync" } db = { path = "../db" } fork_choice = { path = "../../eth2/fork_choice" } types = { path = "../../eth2/types" } diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index f1a7ed258..8b87a9d50 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -9,7 +9,6 @@ beacon_chain = { path = "../beacon_chain" } libp2p = { path = "../libp2p" } version = { path = "../version" } types = { path = "../../eth2/types" } -sync = { path = "../sync" } slog = "2.4.1" futures = "0.1.25" error-chain = "0.12.0" diff --git a/beacon_node/network/src/beacon_chain.rs b/beacon_node/network/src/beacon_chain.rs index 5e0857f47..5e9857c09 100644 --- a/beacon_node/network/src/beacon_chain.rs +++ b/beacon_node/network/src/beacon_chain.rs @@ -1,11 +1,13 @@ use beacon_chain::BeaconChain as RawBeaconChain; use beacon_chain::{ db::ClientDB, fork_choice::ForkChoice, parking_lot::RwLockReadGuard, slot_clock::SlotClock, - CheckPoint, + types::ChainSpec, CheckPoint, }; /// The network's API to the beacon chain. 
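Note that send_rpc above only queues a NetworkBehaviourAction; libp2p emits it later when it polls the behaviour. The real trait and types belong to libp2p, so the following is only a reduced sketch of that push-then-poll queue pattern:

struct EventQueue<E> {
    events: Vec<E>,
}

impl<E> EventQueue<E> {
    fn new() -> Self {
        EventQueue { events: Vec::new() }
    }

    /// Queue an event to be emitted next time the behaviour is polled.
    fn push(&mut self, event: E) {
        self.events.push(event);
    }

    /// Emit one queued event per poll, oldest first.
    fn poll(&mut self) -> Option<E> {
        if self.events.is_empty() {
            None
        } else {
            Some(self.events.remove(0))
        }
    }
}

fn main() {
    let mut queue = EventQueue::new();
    queue.push("send_rpc to peer_a");
    queue.push("send_rpc to peer_b");
    assert_eq!(queue.poll(), Some("send_rpc to peer_a"));
    assert_eq!(queue.poll(), Some("send_rpc to peer_b"));
    assert_eq!(queue.poll(), None);
}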
pub trait BeaconChain: Send + Sync { + fn get_spec(&self) -> &ChainSpec; + fn head(&self) -> RwLockReadGuard; fn finalized_head(&self) -> RwLockReadGuard; @@ -17,6 +19,10 @@ where U: SlotClock, F: ForkChoice, { + fn get_spec(&self) -> &ChainSpec { + &self.spec + } + fn head(&self) -> RwLockReadGuard { self.head() } diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index dca83bb77..c1840f592 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -4,6 +4,7 @@ pub mod error; mod message_handler; mod messages; mod service; +pub mod sync; pub use libp2p::NetworkConfig; pub use messages::NodeMessage; diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 02234f326..7e5a74a10 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -2,16 +2,15 @@ use crate::beacon_chain::BeaconChain; use crate::error; use crate::messages::NodeMessage; use crate::service::NetworkMessage; +use crate::sync::SimpleSync; use crossbeam_channel::{unbounded as channel, Sender}; use futures::future; use futures::prelude::*; -use libp2p::rpc; use libp2p::{PeerId, RPCEvent}; use slog::debug; use std::collections::HashMap; use std::sync::Arc; use std::time::{Duration, Instant}; -use sync::SimpleSync; use types::Hash256; /// Timeout for establishing a HELLO handshake. @@ -57,13 +56,11 @@ impl MessageHandler { let (handler_send, handler_recv) = channel(); // Initialise sync and begin processing in thread - //TODO: Load genesis from BeaconChain - //TODO: Initialise beacon chain - let temp_genesis = Hash256::zero(); - // generate the Message handler - let sync = SimpleSync::new(temp_genesis); + let sync = SimpleSync::new(beacon_chain.clone()); + let mut handler = MessageHandler { + // TODO: The handler may not need a chain, perhaps only sync? chain: beacon_chain.clone(), sync, network_send, diff --git a/beacon_node/sync/src/lib.rs b/beacon_node/network/src/sync/mod.rs similarity index 100% rename from beacon_node/sync/src/lib.rs rename to beacon_node/network/src/sync/mod.rs diff --git a/beacon_node/sync/src/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs similarity index 54% rename from beacon_node/sync/src/simple_sync.rs rename to beacon_node/network/src/sync/simple_sync.rs index 01a6a1adf..4034c63c9 100644 --- a/beacon_node/sync/src/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -1,11 +1,15 @@ +use crate::beacon_chain::BeaconChain; use libp2p::PeerId; use std::collections::HashMap; -use types::{Hash256, Slot}; +use std::sync::Arc; +use types::{Epoch, Hash256, Slot}; /// Keeps track of syncing information for known connected peers. pub struct PeerSyncInfo { + latest_finalized_root: Hash256, + latest_finalized_epoch: Epoch, + best_root: Hash256, best_slot: Slot, - best_slot_hash: Hash256, } /// The current syncing state. @@ -16,18 +20,20 @@ pub enum SyncState { } /// Simple Syncing protocol. +//TODO: Decide for HELLO messages whether its better to keep current in RAM or build on the fly +//when asked. 
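The network crate's BeaconChain trait above deliberately exposes only the read access the network and sync code needs (spec, state, heads), which keeps that code testable against a stub chain. A reduced illustration of the same indirection — not the project's actual trait:

use std::sync::Arc;

trait ChainInfo: Send + Sync {
    fn network_id(&self) -> u8;
}

struct StubChain;

impl ChainInfo for StubChain {
    fn network_id(&self) -> u8 {
        2 // e.g. the lighthouse testnet id set earlier in this series
    }
}

// SimpleSync::new performs the equivalent lookup via get_spec().network_id.
fn network_id_of(chain: &Arc<dyn ChainInfo>) -> u8 {
    chain.network_id()
}

fn main() {
    let chain: Arc<dyn ChainInfo> = Arc::new(StubChain);
    assert_eq!(network_id_of(&chain), 2);
}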
pub struct SimpleSync { - genesis_hash: Hash256, known_peers: HashMap, state: SyncState, + network_id: u8, } impl SimpleSync { - pub fn new(genesis_hash: Hash256) -> Self { + pub fn new(beacon_chain: Arc) -> Self { SimpleSync { - genesis_hash, known_peers: HashMap::new(), state: SyncState::Idle, + network_id: beacon_chain.get_spec().network_id, } } } diff --git a/beacon_node/sync/Cargo.toml b/beacon_node/sync/Cargo.toml deleted file mode 100644 index a4ebe3eed..000000000 --- a/beacon_node/sync/Cargo.toml +++ /dev/null @@ -1,9 +0,0 @@ -[package] -name = "sync" -version = "0.1.0" -authors = ["Age Manning "] -edition = "2018" - -[dependencies] -types = { path = "../../eth2/types" } -libp2p = { path = "../libp2p" } From dfdec78a7a32177e42c10255e8ba5d3efb1fa55d Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 19 Mar 2019 00:26:15 +1100 Subject: [PATCH 120/154] Implements hello generation in sync module --- beacon_node/network/src/beacon_chain.rs | 14 ++++++++++++-- beacon_node/network/src/sync/simple_sync.rs | 20 ++++++++++++++++++++ 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/beacon_node/network/src/beacon_chain.rs b/beacon_node/network/src/beacon_chain.rs index 5e9857c09..91628cc7e 100644 --- a/beacon_node/network/src/beacon_chain.rs +++ b/beacon_node/network/src/beacon_chain.rs @@ -1,13 +1,19 @@ use beacon_chain::BeaconChain as RawBeaconChain; use beacon_chain::{ - db::ClientDB, fork_choice::ForkChoice, parking_lot::RwLockReadGuard, slot_clock::SlotClock, - types::ChainSpec, CheckPoint, + db::ClientDB, + fork_choice::ForkChoice, + parking_lot::RwLockReadGuard, + slot_clock::SlotClock, + types::{BeaconState, ChainSpec}, + CheckPoint, }; /// The network's API to the beacon chain. pub trait BeaconChain: Send + Sync { fn get_spec(&self) -> &ChainSpec; + fn get_state(&self) -> RwLockReadGuard; + fn head(&self) -> RwLockReadGuard; fn finalized_head(&self) -> RwLockReadGuard; @@ -23,6 +29,10 @@ where &self.spec } + fn get_state(&self) -> RwLockReadGuard { + self.state.read() + } + fn head(&self) -> RwLockReadGuard { self.head() } diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 4034c63c9..336f225b2 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -1,4 +1,5 @@ use crate::beacon_chain::BeaconChain; +use libp2p::rpc::HelloMessage; use libp2p::PeerId; use std::collections::HashMap; use std::sync::Arc; @@ -23,8 +24,13 @@ pub enum SyncState { //TODO: Decide for HELLO messages whether its better to keep current in RAM or build on the fly //when asked. pub struct SimpleSync { + /// A reference to the underlying beacon chain. + chain: Arc, + /// A mapping of Peers to their respective PeerSyncInfo. known_peers: HashMap, + /// The current state of the syncing protocol. state: SyncState, + /// The network id, for quick HELLO RPC message lookup. network_id: u8, } @@ -34,6 +40,20 @@ impl SimpleSync { known_peers: HashMap::new(), state: SyncState::Idle, network_id: beacon_chain.get_spec().network_id, + chain: beacon_chain, + } + } + + /// Generates our current state in the form of a HELLO RPC message. + pub fn generate_hello(&self) -> HelloMessage { + let state = &self.chain.get_state(); + //TODO: Paul to verify the logic of these fields. + HelloMessage { + network_id: self.network_id, + latest_finalized_root: state.finalized_root.clone(), + latest_finalized_epoch: state.finalized_epoch, + best_root: state.latest_block_roots[0], // 0 or len of vec? 
+ best_slot: state.slot, } } } From 37b8e9f39a50737bd7d74f984fc614d6bf6e5f49 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 19 Mar 2019 09:09:57 +1100 Subject: [PATCH 121/154] Move `get_active_validator_indices` to state --- eth2/fork_choice/src/bitwise_lmd_ghost.rs | 11 +- eth2/fork_choice/src/optimized_lmd_ghost.rs | 11 +- eth2/fork_choice/src/slow_lmd_ghost.rs | 10 +- eth2/fork_choice/tests/tests.rs | 3 +- .../state_processing/src/get_genesis_state.rs | 2 +- .../src/per_epoch_processing.rs | 21 +-- .../per_epoch_processing/process_ejections.rs | 2 +- .../per_epoch_processing/process_slashings.rs | 3 +- .../process_validator_registry.rs | 4 +- .../update_validator_registry.rs | 3 +- eth2/types/src/beacon_state.rs | 27 +-- .../src/beacon_state/epoch_cache/tests.rs | 9 +- eth2/types/src/beacon_state/pubkey_cache.rs | 11 +- eth2/types/src/chain_spec.rs | 2 +- eth2/types/src/lib.rs | 1 - eth2/types/src/transfer.rs | 2 +- eth2/types/src/validator_registry.rs | 174 ------------------ 17 files changed, 57 insertions(+), 239 deletions(-) delete mode 100644 eth2/types/src/validator_registry.rs diff --git a/eth2/fork_choice/src/bitwise_lmd_ghost.rs b/eth2/fork_choice/src/bitwise_lmd_ghost.rs index 9410fd203..8ae0251d2 100644 --- a/eth2/fork_choice/src/bitwise_lmd_ghost.rs +++ b/eth2/fork_choice/src/bitwise_lmd_ghost.rs @@ -10,10 +10,7 @@ use db::{ use log::{debug, trace}; use std::collections::HashMap; use std::sync::Arc; -use types::{ - validator_registry::get_active_validator_indices, BeaconBlock, ChainSpec, Hash256, Slot, - SlotHeight, -}; +use types::{BeaconBlock, ChainSpec, Hash256, Slot, SlotHeight}; //TODO: Pruning - Children //TODO: Handle Syncing @@ -93,10 +90,8 @@ where .get_deserialized(&state_root)? .ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?; - let active_validator_indices = get_active_validator_indices( - ¤t_state.validator_registry[..], - block_slot.epoch(spec.slots_per_epoch), - ); + let active_validator_indices = + current_state.get_active_validator_indices(block_slot.epoch(spec.slots_per_epoch)); for index in active_validator_indices { let balance = std::cmp::min( diff --git a/eth2/fork_choice/src/optimized_lmd_ghost.rs b/eth2/fork_choice/src/optimized_lmd_ghost.rs index e1b8914a6..ee2919e85 100644 --- a/eth2/fork_choice/src/optimized_lmd_ghost.rs +++ b/eth2/fork_choice/src/optimized_lmd_ghost.rs @@ -10,10 +10,7 @@ use log::{debug, trace}; use std::cmp::Ordering; use std::collections::HashMap; use std::sync::Arc; -use types::{ - validator_registry::get_active_validator_indices, BeaconBlock, ChainSpec, Hash256, Slot, - SlotHeight, -}; +use types::{BeaconBlock, ChainSpec, Hash256, Slot, SlotHeight}; //TODO: Pruning - Children //TODO: Handle Syncing @@ -93,10 +90,8 @@ where .get_deserialized(&state_root)? 
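The HelloMessage built in generate_hello above is what peers will eventually compare during the handshake; the patches leave that validation as a TODO, so the policy below is purely an assumption for illustration: reject peers on a different network id, and treat a peer with a higher finalized epoch as a sync target. The struct is trimmed to the two fields the decision needs:

struct Hello {
    network_id: u8,
    latest_finalized_epoch: u64,
}

#[derive(Debug, PartialEq)]
enum PeerDecision {
    Drop,     // wrong network
    Sync,     // peer has finalized further than us
    KeepIdle, // peer is at or behind our finalized epoch
}

fn assess_peer(local: &Hello, remote: &Hello) -> PeerDecision {
    if remote.network_id != local.network_id {
        PeerDecision::Drop
    } else if remote.latest_finalized_epoch > local.latest_finalized_epoch {
        PeerDecision::Sync
    } else {
        PeerDecision::KeepIdle
    }
}

fn main() {
    let local = Hello { network_id: 2, latest_finalized_epoch: 10 };
    let ahead = Hello { network_id: 2, latest_finalized_epoch: 12 };
    let wrong = Hello { network_id: 1, latest_finalized_epoch: 12 };
    assert_eq!(assess_peer(&local, &ahead), PeerDecision::Sync);
    assert_eq!(assess_peer(&local, &wrong), PeerDecision::Drop);
}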
.ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?; - let active_validator_indices = get_active_validator_indices( - ¤t_state.validator_registry[..], - block_slot.epoch(spec.slots_per_epoch), - ); + let active_validator_indices = + current_state.get_active_validator_indices(block_slot.epoch(spec.slots_per_epoch)); for index in active_validator_indices { let balance = std::cmp::min( diff --git a/eth2/fork_choice/src/slow_lmd_ghost.rs b/eth2/fork_choice/src/slow_lmd_ghost.rs index af58aa7b8..25d137089 100644 --- a/eth2/fork_choice/src/slow_lmd_ghost.rs +++ b/eth2/fork_choice/src/slow_lmd_ghost.rs @@ -8,9 +8,7 @@ use db::{ use log::{debug, trace}; use std::collections::HashMap; use std::sync::Arc; -use types::{ - validator_registry::get_active_validator_indices, BeaconBlock, ChainSpec, Hash256, Slot, -}; +use types::{BeaconBlock, ChainSpec, Hash256, Slot}; //TODO: Pruning and syncing @@ -61,10 +59,8 @@ where .get_deserialized(&state_root)? .ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?; - let active_validator_indices = get_active_validator_indices( - ¤t_state.validator_registry[..], - block_slot.epoch(spec.slots_per_epoch), - ); + let active_validator_indices = + current_state.get_active_validator_indices(block_slot.epoch(spec.slots_per_epoch)); for index in active_validator_indices { let balance = std::cmp::min( diff --git a/eth2/fork_choice/tests/tests.rs b/eth2/fork_choice/tests/tests.rs index 80fbbbe20..3ce63eeb7 100644 --- a/eth2/fork_choice/tests/tests.rs +++ b/eth2/fork_choice/tests/tests.rs @@ -242,8 +242,9 @@ fn setup_inital_state( let spec = ChainSpec::foundation(); - let state_builder = + let mut state_builder = TestingBeaconStateBuilder::from_single_keypair(num_validators, &Keypair::random(), &spec); + state_builder.build_caches(&spec).unwrap(); let (state, _keypairs) = state_builder.build(); let state_root = state.canonical_root(); diff --git a/eth2/state_processing/src/get_genesis_state.rs b/eth2/state_processing/src/get_genesis_state.rs index bfcf82216..7c4d4cafd 100644 --- a/eth2/state_processing/src/get_genesis_state.rs +++ b/eth2/state_processing/src/get_genesis_state.rs @@ -34,7 +34,7 @@ pub fn get_genesis_state( // Set all the active index roots to be the genesis active index root. let active_validator_indices = state - .get_active_validator_indices(spec.genesis_epoch, spec)? + .get_cached_active_validator_indices(RelativeEpoch::Current, spec)? .to_vec(); let genesis_active_index_root = Hash256::from_slice(&active_validator_indices.hash_tree_root()); state.fill_active_index_roots_with(genesis_active_index_root, spec); diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index 97a0e9987..8e03457d3 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -7,7 +7,7 @@ use process_validator_registry::process_validator_registry; use rayon::prelude::*; use ssz::TreeHash; use std::collections::HashMap; -use types::{validator_registry::get_active_validator_indices, *}; +use types::*; use validator_statuses::{TotalBalances, ValidatorStatuses}; use winning_root::{winning_root, WinningRoot}; @@ -70,16 +70,6 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result Ok(()) } -/// Returns a list of active validator indices for the state's current epoch. 
-/// -/// Spec v0.5.0 -pub fn calculate_active_validator_indices(state: &BeaconState, spec: &ChainSpec) -> Vec { - get_active_validator_indices( - &state.validator_registry, - state.slot.epoch(spec.slots_per_epoch), - ) -} - /// Calculates various sets of attesters, including: /// /// - current epoch attesters @@ -454,11 +444,10 @@ pub fn update_active_tree_index_roots( ) -> Result<(), Error> { let next_epoch = state.next_epoch(spec); - let active_tree_root = get_active_validator_indices( - &state.validator_registry, - next_epoch + Epoch::from(spec.activation_exit_delay), - ) - .hash_tree_root(); + let active_tree_root = state + .get_active_validator_indices(next_epoch + Epoch::from(spec.activation_exit_delay)) + .to_vec() + .hash_tree_root(); state.set_active_index_root(next_epoch, Hash256::from_slice(&active_tree_root[..]), spec)?; diff --git a/eth2/state_processing/src/per_epoch_processing/process_ejections.rs b/eth2/state_processing/src/per_epoch_processing/process_ejections.rs index 27dd37479..a60d92187 100644 --- a/eth2/state_processing/src/per_epoch_processing/process_ejections.rs +++ b/eth2/state_processing/src/per_epoch_processing/process_ejections.rs @@ -9,7 +9,7 @@ pub fn process_ejections(state: &mut BeaconState, spec: &ChainSpec) -> Result<() // There is an awkward double (triple?) loop here because we can't loop across the borrowed // active validator indices and mutate state in the one loop. let exitable: Vec = state - .get_active_validator_indices(state.current_epoch(spec), spec)? + .get_cached_active_validator_indices(RelativeEpoch::Current, spec)? .iter() .filter_map(|&i| { if state.validator_balances[i as usize] < spec.ejection_balance { diff --git a/eth2/state_processing/src/per_epoch_processing/process_slashings.rs b/eth2/state_processing/src/per_epoch_processing/process_slashings.rs index b14a9ee37..19c1e519b 100644 --- a/eth2/state_processing/src/per_epoch_processing/process_slashings.rs +++ b/eth2/state_processing/src/per_epoch_processing/process_slashings.rs @@ -7,7 +7,8 @@ use types::{BeaconStateError as Error, *}; /// Spec v0.4.0 pub fn process_slashings(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { let current_epoch = state.current_epoch(spec); - let active_validator_indices = state.get_active_validator_indices(current_epoch, spec)?; + let active_validator_indices = + state.get_cached_active_validator_indices(RelativeEpoch::Current, spec)?; let total_balance = state.get_total_balance(&active_validator_indices[..], spec)?; for (index, validator) in state.validator_registry.iter().enumerate() { diff --git a/eth2/state_processing/src/per_epoch_processing/process_validator_registry.rs b/eth2/state_processing/src/per_epoch_processing/process_validator_registry.rs index 2eb39711d..85d6c37f6 100644 --- a/eth2/state_processing/src/per_epoch_processing/process_validator_registry.rs +++ b/eth2/state_processing/src/per_epoch_processing/process_validator_registry.rs @@ -21,7 +21,7 @@ pub fn process_validator_registry(state: &mut BeaconState, spec: &ChainSpec) -> state.current_shuffling_start_shard = (state.current_shuffling_start_shard + spec.get_epoch_committee_count( state - .get_active_validator_indices(current_epoch, spec)? + .get_cached_active_validator_indices(RelativeEpoch::Current, spec)? .len(), ) as u64) % spec.shard_count; @@ -53,7 +53,7 @@ pub fn should_update_validator_registry( } let num_active_validators = state - .get_active_validator_indices(state.current_epoch(spec), spec)? 
+ .get_cached_active_validator_indices(RelativeEpoch::Current, spec)? .len(); let current_epoch_committee_count = spec.get_epoch_committee_count(num_active_validators); diff --git a/eth2/state_processing/src/per_epoch_processing/update_validator_registry.rs b/eth2/state_processing/src/per_epoch_processing/update_validator_registry.rs index 8b612c346..ecf05ce6f 100644 --- a/eth2/state_processing/src/per_epoch_processing/update_validator_registry.rs +++ b/eth2/state_processing/src/per_epoch_processing/update_validator_registry.rs @@ -8,7 +8,8 @@ use types::{BeaconStateError as Error, *}; /// Spec v0.4.0 pub fn update_validator_registry(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { let current_epoch = state.current_epoch(spec); - let active_validator_indices = state.get_active_validator_indices(current_epoch, spec)?; + let active_validator_indices = + state.get_cached_active_validator_indices(RelativeEpoch::Current, spec)?; let total_balance = state.get_total_balance(&active_validator_indices[..], spec)?; let max_balance_churn = std::cmp::max( diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 30f95c02c..22e7c6ecf 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -1,4 +1,4 @@ -use self::epoch_cache::{EpochCache, Error as EpochCacheError}; +use self::epoch_cache::{get_active_validator_indices, EpochCache, Error as EpochCacheError}; use crate::test_utils::TestRandom; use crate::*; use int_to_bytes::int_to_bytes32; @@ -234,28 +234,31 @@ impl BeaconState { /// Returns the active validator indices for the given epoch, assuming there is no validator /// registry update in the next epoch. /// + /// This uses the cache, so it saves an iteration over the validator registry, however it can + /// not return a result for any epoch before the previous epoch. + /// /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. /// /// Spec v0.5.0 - pub fn get_active_validator_indices( + pub fn get_cached_active_validator_indices( &self, - epoch: Epoch, + relative_epoch: RelativeEpoch, spec: &ChainSpec, ) -> Result<&[usize], Error> { - // If the slot is in the next epoch, assume there was no validator registry update. - let relative_epoch = - match RelativeEpoch::from_epoch(self.slot.epoch(spec.slots_per_epoch), epoch) { - Err(RelativeEpochError::AmbiguiousNextEpoch) => { - Ok(RelativeEpoch::NextWithoutRegistryChange) - } - e => e, - }?; - let cache = self.cache(relative_epoch, spec)?; Ok(&cache.active_validator_indices) } + /// Returns the active validator indices for the given epoch. + /// + /// Does not utilize the cache, performs a full iteration over the validator registry. + /// + /// Spec v0.5.0 + pub fn get_active_validator_indices(&self, epoch: Epoch) -> Vec { + get_active_validator_indices(&self.validator_registry, epoch) + } + /// Returns the crosslink committees for some slot. /// /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. 
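The uncached get_active_validator_indices referenced in the beacon_state.rs hunk above is just a linear scan of the registry, applying the spec's activation_epoch <= epoch < exit_epoch rule. A reduced stand-alone sketch, with Validator and Epoch trimmed to the fields the filter needs:

#[derive(Clone, Copy, PartialEq, PartialOrd)]
struct Epoch(u64);

struct Validator {
    activation_epoch: Epoch,
    exit_epoch: Epoch,
}

impl Validator {
    fn is_active_at(&self, epoch: Epoch) -> bool {
        self.activation_epoch <= epoch && epoch < self.exit_epoch
    }
}

/// Full iteration over the registry: O(n), no cache required.
fn get_active_validator_indices(validators: &[Validator], epoch: Epoch) -> Vec<usize> {
    validators
        .iter()
        .enumerate()
        .filter(|(_, v)| v.is_active_at(epoch))
        .map(|(i, _)| i)
        .collect()
}

fn main() {
    let validators = vec![
        Validator { activation_epoch: Epoch(0), exit_epoch: Epoch(u64::max_value()) },
        Validator { activation_epoch: Epoch(5), exit_epoch: Epoch(u64::max_value()) },
    ];
    assert_eq!(get_active_validator_indices(&validators, Epoch(3)), vec![0]);
}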
diff --git a/eth2/types/src/beacon_state/epoch_cache/tests.rs b/eth2/types/src/beacon_state/epoch_cache/tests.rs index 10df635f2..5643776e2 100644 --- a/eth2/types/src/beacon_state/epoch_cache/tests.rs +++ b/eth2/types/src/beacon_state/epoch_cache/tests.rs @@ -7,15 +7,19 @@ use swap_or_not_shuffle::shuffle_list; fn do_sane_cache_test( state: BeaconState, epoch: Epoch, + relative_epoch: RelativeEpoch, validator_count: usize, expected_seed: Hash256, expected_shuffling_start: u64, spec: &ChainSpec, ) { let active_indices: Vec = (0..validator_count).collect(); + assert_eq!( &active_indices[..], - state.get_active_validator_indices(epoch, &spec).unwrap(), + state + .get_cached_active_validator_indices(relative_epoch, &spec) + .unwrap(), "Validator indices mismatch" ); @@ -101,6 +105,7 @@ fn builds_sane_current_epoch_cache() { do_sane_cache_test( state.clone(), state.current_epoch(&spec), + RelativeEpoch::Current, validator_count as usize, state.current_shuffling_seed, state.current_shuffling_start_shard, @@ -117,6 +122,7 @@ fn builds_sane_previous_epoch_cache() { do_sane_cache_test( state.clone(), state.previous_epoch(&spec), + RelativeEpoch::Previous, validator_count as usize, state.previous_shuffling_seed, state.previous_shuffling_start_shard, @@ -134,6 +140,7 @@ fn builds_sane_next_without_update_epoch_cache() { do_sane_cache_test( state.clone(), state.next_epoch(&spec), + RelativeEpoch::NextWithoutRegistryChange, validator_count as usize, state.current_shuffling_seed, state.current_shuffling_start_shard, diff --git a/eth2/types/src/beacon_state/pubkey_cache.rs b/eth2/types/src/beacon_state/pubkey_cache.rs index 340bdb311..4632a2d9c 100644 --- a/eth2/types/src/beacon_state/pubkey_cache.rs +++ b/eth2/types/src/beacon_state/pubkey_cache.rs @@ -6,13 +6,17 @@ type ValidatorIndex = usize; #[derive(Debug, PartialEq, Clone, Default, Serialize, Deserialize)] pub struct PubkeyCache { + /// Maintain the number of keys added to the map. It is not sufficient to just use the HashMap + /// len, as it does not increase when duplicate keys are added. Duplicate keys are used during + /// testing. + len: usize, map: HashMap, } impl PubkeyCache { - /// Returns the number of validator indices already in the map. + /// Returns the number of validator indices added to the map so far. pub fn len(&self) -> ValidatorIndex { - self.map.len() + self.len } /// Inserts a validator index into the map. @@ -20,8 +24,9 @@ impl PubkeyCache { /// The added index must equal the number of validators already added to the map. This ensures /// that an index is never skipped. pub fn insert(&mut self, pubkey: PublicKey, index: ValidatorIndex) -> bool { - if index == self.map.len() { + if index == self.len { self.map.insert(pubkey, index); + self.len += 1; true } else { false diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index e9ade2c91..cfb88bcb8 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -117,7 +117,7 @@ pub struct ChainSpec { impl ChainSpec { /// Return the number of committees in one epoch. 
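On the pubkey_cache.rs change above, a usage sketch showing why `len` must be tracked separately from `map.len()` when duplicate keys are inserted during testing — a bare u64 stands in for PublicKey:

use std::collections::HashMap;

#[derive(Default)]
struct PubkeyCache {
    len: usize,
    map: HashMap<u64, usize>,
}

impl PubkeyCache {
    /// Accepts only the next sequential index, mirroring the cache above.
    fn insert(&mut self, pubkey: u64, index: usize) -> bool {
        if index == self.len {
            self.map.insert(pubkey, index);
            self.len += 1;
            true
        } else {
            false
        }
    }
}

fn main() {
    let mut cache = PubkeyCache::default();
    assert!(cache.insert(42, 0));
    assert!(cache.insert(42, 1)); // duplicate key: the map does not grow...
    assert_eq!(cache.map.len(), 1);
    assert_eq!(cache.len, 2);      // ...but the tracked length still advances.
    assert!(!cache.insert(99, 5)); // skipping an index is rejected.
}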
/// - /// Spec v0.4.0 + /// Spec v0.5.0 pub fn get_epoch_committee_count(&self, active_validator_count: usize) -> u64 { std::cmp::max( 1, diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index 05f8254d5..30b0e4a13 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -34,7 +34,6 @@ pub mod relative_epoch; pub mod slot_epoch; pub mod slot_height; pub mod validator; -pub mod validator_registry; use ethereum_types::{H160, H256, U256}; use std::collections::HashMap; diff --git a/eth2/types/src/transfer.rs b/eth2/types/src/transfer.rs index 1c9968702..2570d7b3f 100644 --- a/eth2/types/src/transfer.rs +++ b/eth2/types/src/transfer.rs @@ -9,7 +9,7 @@ use test_random_derive::TestRandom; /// The data submitted to the deposit contract. /// -/// Spec v0.4.0 +/// Spec v0.5.0 #[derive( Debug, PartialEq, diff --git a/eth2/types/src/validator_registry.rs b/eth2/types/src/validator_registry.rs deleted file mode 100644 index db35ae993..000000000 --- a/eth2/types/src/validator_registry.rs +++ /dev/null @@ -1,174 +0,0 @@ -/// Contains logic to manipulate a `&[Validator]`. -/// For now, we avoid defining a newtype and just have flat functions here. -use super::validator::*; -use crate::Epoch; - -/// Given an indexed sequence of `validators`, return the indices corresponding to validators that are active at `epoch`. -/// -/// Spec v0.4.0 -pub fn get_active_validator_indices(validators: &[Validator], epoch: Epoch) -> Vec { - let mut active = Vec::with_capacity(validators.len()); - - for (index, validator) in validators.iter().enumerate() { - if validator.is_active_at(epoch) { - active.push(index) - } - } - - active.shrink_to_fit(); - - active -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - - #[test] - fn can_get_empty_active_validator_indices() { - let mut rng = XorShiftRng::from_seed([42; 16]); - - let validators = vec![]; - let some_epoch = Epoch::random_for_test(&mut rng); - let indices = get_active_validator_indices(&validators, some_epoch); - assert_eq!(indices, vec![]); - } - - #[test] - fn can_get_no_active_validator_indices() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let mut validators = vec![]; - let count_validators = 10; - for _ in 0..count_validators { - validators.push(Validator::default()) - } - - let some_epoch = Epoch::random_for_test(&mut rng); - let indices = get_active_validator_indices(&validators, some_epoch); - assert_eq!(indices, vec![]); - } - - #[test] - fn can_get_all_active_validator_indices() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let count_validators = 10; - let some_epoch = Epoch::random_for_test(&mut rng); - - let mut validators = (0..count_validators) - .into_iter() - .map(|_| { - let mut validator = Validator::default(); - - let activation_offset = u64::random_for_test(&mut rng); - let exit_offset = u64::random_for_test(&mut rng); - - validator.activation_epoch = some_epoch - activation_offset; - validator.exit_epoch = some_epoch + exit_offset; - - validator - }) - .collect::>(); - - // test boundary condition by ensuring that at least one validator in the list just activated - if let Some(validator) = validators.get_mut(0) { - validator.activation_epoch = some_epoch; - } - - let indices = get_active_validator_indices(&validators, some_epoch); - assert_eq!( - indices, - (0..count_validators).into_iter().collect::>() - ); - } - - fn set_validators_to_default_entry_exit(validators: &mut [Validator]) { - for validator in validators.iter_mut() { - 
validator.activation_epoch = Epoch::max_value(); - validator.exit_epoch = Epoch::max_value(); - } - } - - // sets all `validators` to be active as of some epoch prior to `epoch`. returns the activation epoch. - fn set_validators_to_activated(validators: &mut [Validator], epoch: Epoch) -> Epoch { - let activation_epoch = epoch - 10; - for validator in validators.iter_mut() { - validator.activation_epoch = activation_epoch; - } - activation_epoch - } - - // sets all `validators` to be exited as of some epoch before `epoch`. - fn set_validators_to_exited( - validators: &mut [Validator], - epoch: Epoch, - activation_epoch: Epoch, - ) { - assert!(activation_epoch < epoch); - let mut exit_epoch = activation_epoch + 10; - while exit_epoch >= epoch { - exit_epoch -= 1; - } - assert!(activation_epoch < exit_epoch && exit_epoch < epoch); - - for validator in validators.iter_mut() { - validator.exit_epoch = exit_epoch; - } - } - - #[test] - fn can_get_some_active_validator_indices() { - let mut rng = XorShiftRng::from_seed([42; 16]); - const COUNT_PARTITIONS: usize = 3; - const COUNT_VALIDATORS: usize = 3 * COUNT_PARTITIONS; - let some_epoch: Epoch = Epoch::random_for_test(&mut rng); - - let mut validators = (0..COUNT_VALIDATORS) - .into_iter() - .map(|_| { - let mut validator = Validator::default(); - - let activation_offset = Epoch::random_for_test(&mut rng); - let exit_offset = Epoch::random_for_test(&mut rng); - - validator.activation_epoch = some_epoch - activation_offset; - validator.exit_epoch = some_epoch + exit_offset; - - validator - }) - .collect::>(); - - // we partition the set into partitions based on lifecycle: - for (i, chunk) in validators.chunks_exact_mut(COUNT_PARTITIONS).enumerate() { - match i { - 0 => { - // 1. not activated (Default::default()) - set_validators_to_default_entry_exit(chunk); - } - 1 => { - // 2. activated, but not exited - set_validators_to_activated(chunk, some_epoch); - // test boundary condition by ensuring that at least one validator in the list just activated - if let Some(validator) = chunk.get_mut(0) { - validator.activation_epoch = some_epoch; - } - } - 2 => { - // 3. 
exited - let activation_epoch = set_validators_to_activated(chunk, some_epoch); - set_validators_to_exited(chunk, some_epoch, activation_epoch); - // test boundary condition by ensuring that at least one validator in the list just exited - if let Some(validator) = chunk.get_mut(0) { - validator.exit_epoch = some_epoch; - } - } - _ => unreachable!( - "constants local to this test not in sync with generation of test case" - ), - } - } - - let indices = get_active_validator_indices(&validators, some_epoch); - assert_eq!(indices, vec![3, 4, 5]); - } -} From 495348f934895a2e0057e47889da1010ecd41ced Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 19 Mar 2019 11:25:42 +1100 Subject: [PATCH 122/154] Adds RPC request send framework in message handler --- beacon_node/network/src/message_handler.rs | 51 +++++++++++++++++----- 1 file changed, 41 insertions(+), 10 deletions(-) diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 7e5a74a10..2d4d47b86 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -6,15 +6,18 @@ use crate::sync::SimpleSync; use crossbeam_channel::{unbounded as channel, Sender}; use futures::future; use futures::prelude::*; -use libp2p::{PeerId, RPCEvent}; +use libp2p::{ + rpc::{RPCRequest, RPCResponse}, + PeerId, RPCEvent, +}; use slog::debug; use std::collections::HashMap; use std::sync::Arc; use std::time::{Duration, Instant}; use types::Hash256; -/// Timeout for establishing a HELLO handshake. -const HELLO_TIMEOUT: Duration = Duration::from_secs(30); +/// Timeout for RPC requests. +const REQUEST_TIMEOUT: Duration = Duration::from_secs(30); /// Handles messages received from the network and client and organises syncing. pub struct MessageHandler { @@ -24,12 +27,22 @@ pub struct MessageHandler { sync: SimpleSync, /// The network channel to relay messages to the Network service. network_send: crossbeam_channel::Sender, - /// A mapping of peers we have sent a HELLO rpc request to. - hello_requests: HashMap, + /// A mapping of peers we have sent an RPC request to. + requests: HashMap>, + /// A counter of request id for each peer. + request_ids: HashMap, /// The `MessageHandler` logger. log: slog::Logger, } +/// RPC request information +pub struct RPCRequestInfo { + /// The id of the request + id: u16, + /// The time the request was sent, to check ttl. + request_time: Instant, +} + /// Types of messages the handler can receive. #[derive(Debug, Clone)] pub enum HandlerMessage { @@ -64,7 +77,8 @@ impl MessageHandler { chain: beacon_chain.clone(), sync, network_send, - hello_requests: HashMap::new(), + requests: HashMap::new(), + request_ids: HashMap::new(), log: log.clone(), }; @@ -84,8 +98,6 @@ impl MessageHandler { fn handle_message(&mut self, message: HandlerMessage) { match message { HandlerMessage::PeerDialed(peer_id) => { - // register RPC request - self.hello_requests.insert(peer_id.clone(), Instant::now()); self.send_hello(peer_id); } //TODO: Handle all messages @@ -94,8 +106,27 @@ impl MessageHandler { } /// Sends a HELLO RPC request to a newly connected peer. 
- fn send_hello(&self, peer_id: PeerId) { + fn send_hello(&mut self, peer_id: PeerId) { + // generate a unique id for the peer + let id = { + let borrowed_id = self.request_ids.entry(peer_id.clone()).or_insert_with(|| 0); + let id = borrowed_id.clone(); + //increment the counter + *borrowed_id += 1; + id + }; + // register RPC request + { + let requests = self.requests.entry(peer_id).or_insert_with(|| vec![]); + requests.push(RPCRequestInfo { + id: id.clone(), + request_time: Instant::now(), + }); + } // send the hello request to the network - //sync.hello() + self.send_rpc_request(id, RPCResponse::Hello(self.sync.generate_hello())); } + + /// Sends and RPC response + fn send_rpc_request(&self, request_id: u16, response: RPCResponse) {} } From 31333e8f8eec5fa59240ebae93435d596e1320c9 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 19 Mar 2019 12:19:07 +1100 Subject: [PATCH 123/154] Add send rpc in message handler --- beacon_node/libp2p/src/rpc/methods.rs | 9 +++++ beacon_node/network/src/message_handler.rs | 42 +++++++++++++++++----- 2 files changed, 42 insertions(+), 9 deletions(-) diff --git a/beacon_node/libp2p/src/rpc/methods.rs b/beacon_node/libp2p/src/rpc/methods.rs index ea9932806..c99994b7c 100644 --- a/beacon_node/libp2p/src/rpc/methods.rs +++ b/beacon_node/libp2p/src/rpc/methods.rs @@ -17,6 +17,15 @@ impl From for RPCMethod { } } +impl Into for RPCMethod { + fn into(self) -> u16 { + match self { + RPCMethod::Hello => 0, + _ => 0, + } + } +} + #[derive(Debug, Clone)] pub enum RPCRequest { Hello(HelloMessage), diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 2d4d47b86..dcc145294 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -1,16 +1,17 @@ use crate::beacon_chain::BeaconChain; use crate::error; use crate::messages::NodeMessage; -use crate::service::NetworkMessage; +use crate::service::{NetworkMessage, OutgoingMessage}; use crate::sync::SimpleSync; use crossbeam_channel::{unbounded as channel, Sender}; use futures::future; use futures::prelude::*; use libp2p::{ - rpc::{RPCRequest, RPCResponse}, + rpc::{RPCMethod, RPCRequest, RPCResponse}, PeerId, RPCEvent, }; use slog::debug; +use slog::warn; use std::collections::HashMap; use std::sync::Arc; use std::time::{Duration, Instant}; @@ -30,7 +31,7 @@ pub struct MessageHandler { /// A mapping of peers we have sent an RPC request to. requests: HashMap>, /// A counter of request id for each peer. - request_ids: HashMap, + request_ids: HashMap, /// The `MessageHandler` logger. log: slog::Logger, } @@ -38,7 +39,7 @@ pub struct MessageHandler { /// RPC request information pub struct RPCRequestInfo { /// The id of the request - id: u16, + id: u64, /// The time the request was sent, to check ttl. request_time: Instant, } @@ -98,7 +99,7 @@ impl MessageHandler { fn handle_message(&mut self, message: HandlerMessage) { match message { HandlerMessage::PeerDialed(peer_id) => { - self.send_hello(peer_id); + self.send_hello_request(peer_id); } //TODO: Handle all messages _ => {} @@ -106,7 +107,7 @@ impl MessageHandler { } /// Sends a HELLO RPC request to a newly connected peer. 
- fn send_hello(&mut self, peer_id: PeerId) { + fn send_hello_request(&mut self, peer_id: PeerId) { // generate a unique id for the peer let id = { let borrowed_id = self.request_ids.entry(peer_id.clone()).or_insert_with(|| 0); @@ -117,16 +118,39 @@ impl MessageHandler { }; // register RPC request { - let requests = self.requests.entry(peer_id).or_insert_with(|| vec![]); + let requests = self + .requests + .entry(peer_id.clone()) + .or_insert_with(|| vec![]); requests.push(RPCRequestInfo { id: id.clone(), request_time: Instant::now(), }); } + + // build the rpc request + let rpc_event = RPCEvent::Request { + id, + method_id: RPCMethod::Hello.into(), + body: RPCRequest::Hello(self.sync.generate_hello()), + }; + // send the hello request to the network - self.send_rpc_request(id, RPCResponse::Hello(self.sync.generate_hello())); + self.send_rpc(peer_id, rpc_event); } /// Sends and RPC response - fn send_rpc_request(&self, request_id: u16, response: RPCResponse) {} + fn send_rpc(&self, peer_id: PeerId, rpc_event: RPCEvent) { + self.network_send + .send(NetworkMessage::Send( + peer_id, + OutgoingMessage::RPC(rpc_event), + )) + .unwrap_or_else(|_| { + warn!( + self.log, + "Could not send RPC message to the network service" + ) + }); + } } From 2657dc1465d7ac746c5c92394a68f0d76eef5a23 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 19 Mar 2019 12:47:36 +1100 Subject: [PATCH 124/154] Builds RPC infrastructure to handle RPC responses --- beacon_node/libp2p/src/behaviour.rs | 6 ++-- beacon_node/libp2p/src/rpc/mod.rs | 6 ++-- beacon_node/libp2p/src/service.rs | 6 ++-- beacon_node/network/src/message_handler.rs | 32 ++++++++++++++++++++-- beacon_node/network/src/service.rs | 6 ++-- 5 files changed, 42 insertions(+), 14 deletions(-) diff --git a/beacon_node/libp2p/src/behaviour.rs b/beacon_node/libp2p/src/behaviour.rs index f0a89027b..78d013002 100644 --- a/beacon_node/libp2p/src/behaviour.rs +++ b/beacon_node/libp2p/src/behaviour.rs @@ -46,7 +46,9 @@ impl NetworkBehaviourEventProcess { self.events.push(BehaviourEvent::PeerDialed(peer_id)) } - RPCMessage::RPC(rpc_event) => self.events.push(BehaviourEvent::RPC(rpc_event)), + RPCMessage::RPC(peer_id, rpc_event) => { + self.events.push(BehaviourEvent::RPC(peer_id, rpc_event)) + } } } } @@ -87,7 +89,7 @@ impl Behaviour { /// The types of events than can be obtained from polling the behaviour. pub enum BehaviourEvent { - RPC(RPCEvent), + RPC(PeerId, RPCEvent), PeerDialed(PeerId), // TODO: This is a stub at the moment Message(String), diff --git a/beacon_node/libp2p/src/rpc/mod.rs b/beacon_node/libp2p/src/rpc/mod.rs index 907e95763..e06f4effc 100644 --- a/beacon_node/libp2p/src/rpc/mod.rs +++ b/beacon_node/libp2p/src/rpc/mod.rs @@ -76,7 +76,7 @@ where fn inject_node_event( &mut self, - _source: PeerId, + source: PeerId, event: ::OutEvent, ) { // ignore successful send events @@ -88,7 +88,7 @@ where // send the event to the user self.events .push(NetworkBehaviourAction::GenerateEvent(RPCMessage::RPC( - event, + source, event, ))); } @@ -110,7 +110,7 @@ where /// Messages sent to the user from the RPC protocol. 
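The request-id scheme in the message handler above gives each peer its own monotonically increasing counter via entry().or_insert_with(|| 0). A reduced sketch of that allocation, with String standing in for PeerId:

use std::collections::HashMap;

/// Hand out the next request id for a peer, starting at 0 per peer.
fn next_request_id(ids: &mut HashMap<String, u64>, peer: &str) -> u64 {
    let counter = ids.entry(peer.to_string()).or_insert(0);
    let id = *counter;
    *counter += 1;
    id
}

fn main() {
    let mut ids = HashMap::new();
    assert_eq!(next_request_id(&mut ids, "peer_a"), 0);
    assert_eq!(next_request_id(&mut ids, "peer_a"), 1);
    assert_eq!(next_request_id(&mut ids, "peer_b"), 0); // counters are independent
}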
pub enum RPCMessage { - RPC(RPCEvent), + RPC(PeerId, RPCEvent), PeerDialed(PeerId), } diff --git a/beacon_node/libp2p/src/service.rs b/beacon_node/libp2p/src/service.rs index dd6deabad..92e6e8897 100644 --- a/beacon_node/libp2p/src/service.rs +++ b/beacon_node/libp2p/src/service.rs @@ -105,8 +105,8 @@ impl Stream for Service { debug!(self.log, "Message received: {}", m); return Ok(Async::Ready(Some(Libp2pEvent::Message(m)))); } - Ok(Async::Ready(Some(BehaviourEvent::RPC(event)))) => { - return Ok(Async::Ready(Some(Libp2pEvent::RPC(event)))); + Ok(Async::Ready(Some(BehaviourEvent::RPC(peer_id, event)))) => { + return Ok(Async::Ready(Some(Libp2pEvent::RPC(peer_id, event)))); } Ok(Async::Ready(Some(BehaviourEvent::PeerDialed(peer_id)))) => { return Ok(Async::Ready(Some(Libp2pEvent::PeerDialed(peer_id)))); @@ -158,7 +158,7 @@ fn build_transport( /// Events that can be obtained from polling the Libp2p Service. pub enum Libp2pEvent { // We have received an RPC event on the swarm - RPC(RPCEvent), + RPC(PeerId, RPCEvent), PeerDialed(PeerId), Message(String), } diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index dcc145294..11ce3d4c0 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -10,8 +10,8 @@ use libp2p::{ rpc::{RPCMethod, RPCRequest, RPCResponse}, PeerId, RPCEvent, }; -use slog::debug; use slog::warn; +use slog::{debug, trace}; use std::collections::HashMap; use std::sync::Arc; use std::time::{Duration, Instant}; @@ -54,7 +54,7 @@ pub enum HandlerMessage { /// A Node message has been received. Message(PeerId, NodeMessage), /// An RPC response/request has been received. - RPC(RPCEvent), + RPC(PeerId, RPCEvent), } impl MessageHandler { @@ -98,14 +98,39 @@ impl MessageHandler { fn handle_message(&mut self, message: HandlerMessage) { match message { + // we have initiated a connection to a peer HandlerMessage::PeerDialed(peer_id) => { self.send_hello_request(peer_id); } + // we have received an RPC message request/response + HandlerMessage::RPC(peer_id, rpc_event) => { + self.handle_rpc_message(peer_id, rpc_event); + } //TODO: Handle all messages _ => {} } } + fn handle_rpc_message(&mut self, peer_id: PeerId, rpc_message: RPCEvent) { + match rpc_message { + RPCEvent::Request { + id, + method_id: _, + body, + } => self.handle_rpc_request(peer_id, id, body), + RPCEvent::Response { + id, + method_id: _, + result, + } => self.handle_rpc_response(peer_id, id, result), + } + } + + fn handle_rpc_request(&mut self, peer_id: PeerId, id: u64, request: RPCRequest) {} + + // we match on id and ignore responses past the timeout. + fn handle_rpc_response(&mut self, peer_id: PeerId, id: u64, response: RPCResponse) {} + /// Sends a HELLO RPC request to a newly connected peer. fn send_hello_request(&mut self, peer_id: PeerId) { // generate a unique id for the peer @@ -136,10 +161,11 @@ impl MessageHandler { }; // send the hello request to the network + trace!(self.log, "Sending HELLO message to peer {:?}", peer_id); self.send_rpc(peer_id, rpc_event); } - /// Sends and RPC response + /// Sends an RPC request/response to the network server. 
fn send_rpc(&self, peer_id: PeerId, rpc_event: RPCEvent) { self.network_send .send(NetworkMessage::Send( diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 4cb1038d1..84e46e707 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -109,13 +109,13 @@ fn network_service( // poll the swarm loop { match libp2p_service.poll() { - Ok(Async::Ready(Some(Libp2pEvent::RPC(rpc_event)))) => { + Ok(Async::Ready(Some(Libp2pEvent::RPC(peer_id, rpc_event)))) => { debug!( libp2p_service.log, - "RPC Event: Rpc message received: {:?}", rpc_event + "RPC Event: RPC message received: {:?}", rpc_event ); message_handler_send - .send(HandlerMessage::RPC(rpc_event)) + .send(HandlerMessage::RPC(peer_id, rpc_event)) .map_err(|_| "failed to send rpc to handler")?; } Ok(Async::Ready(Some(Libp2pEvent::PeerDialed(peer_id)))) => { From 67c09021f01cf9bba0b5e990c7a21405425e5a28 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 19 Mar 2019 13:03:12 +1100 Subject: [PATCH 125/154] Initial handling RPC responses --- beacon_node/network/src/message_handler.rs | 26 +++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 11ce3d4c0..14be9acdc 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -8,7 +8,7 @@ use futures::future; use futures::prelude::*; use libp2p::{ rpc::{RPCMethod, RPCRequest, RPCResponse}, - PeerId, RPCEvent, + HelloMessage, PeerId, RPCEvent, }; use slog::warn; use slog::{debug, trace}; @@ -115,7 +115,7 @@ impl MessageHandler { match rpc_message { RPCEvent::Request { id, - method_id: _, + method_id: _, // TODO: Clean up RPC Message types, have a cleaner type by this point. body, } => self.handle_rpc_request(peer_id, id, body), RPCEvent::Response { @@ -126,11 +126,31 @@ impl MessageHandler { } } - fn handle_rpc_request(&mut self, peer_id: PeerId, id: u64, request: RPCRequest) {} + /// A new RPC request has been received from the network. + fn handle_rpc_request(&mut self, peer_id: PeerId, id: u64, request: RPCRequest) { + match request { + RPCRequest::Hello(hello_message) => { + self.handle_hello_response(peer_id, id, hello_message) + } + } + } + /// An RPC response has been received from the network. // we match on id and ignore responses past the timeout. fn handle_rpc_response(&mut self, peer_id: PeerId, id: u64, response: RPCResponse) {} + fn handle_hello_response(&mut self, peer_id: PeerId, id: u64, response: HelloMessage) { + /* + // if response id is not in our list, ignore (likely RPC timeout) + match self.requests.get(peer_id) { + None => return; + Some(rpc_info) => { + if rpc_info.con + + */ + + } + /// Sends a HELLO RPC request to a newly connected peer. fn send_hello_request(&mut self, peer_id: PeerId) { // generate a unique id for the peer From 5ae8079b446791819d0ddc448554b52ebf8909e0 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 19 Mar 2019 13:25:25 +1100 Subject: [PATCH 126/154] Basic node handshake --- beacon_node/network/src/message_handler.rs | 50 +++++++++------------- 1 file changed, 21 insertions(+), 29 deletions(-) diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 14be9acdc..a685e3324 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -19,6 +19,8 @@ use types::Hash256; /// Timeout for RPC requests. 
const REQUEST_TIMEOUT: Duration = Duration::from_secs(30); +/// Timeout before banning a peer for non-identification. +const HELLO_TIMEOUT: Duration = Duration::from_secs(30); /// Handles messages received from the network and client and organises syncing. pub struct MessageHandler { @@ -28,22 +30,17 @@ pub struct MessageHandler { sync: SimpleSync, /// The network channel to relay messages to the Network service. network_send: crossbeam_channel::Sender, - /// A mapping of peers we have sent an RPC request to. - requests: HashMap>, + /// A mapping of peers and the RPC id we have sent an RPC request to. + requests: HashMap<(PeerId, u64), Instant>, + /// A mapping of HELLO requests we have sent. We drop/ban peers if they do not response + /// within the timeout + hello_requests: HashMap, /// A counter of request id for each peer. request_ids: HashMap, /// The `MessageHandler` logger. log: slog::Logger, } -/// RPC request information -pub struct RPCRequestInfo { - /// The id of the request - id: u64, - /// The time the request was sent, to check ttl. - request_time: Instant, -} - /// Types of messages the handler can receive. #[derive(Debug, Clone)] pub enum HandlerMessage { @@ -79,7 +76,9 @@ impl MessageHandler { sync, network_send, requests: HashMap::new(), + hello_requests: HashMap::new(), request_ids: HashMap::new(), + log: log.clone(), }; @@ -140,15 +139,13 @@ impl MessageHandler { fn handle_rpc_response(&mut self, peer_id: PeerId, id: u64, response: RPCResponse) {} fn handle_hello_response(&mut self, peer_id: PeerId, id: u64, response: HelloMessage) { - /* - // if response id is not in our list, ignore (likely RPC timeout) - match self.requests.get(peer_id) { - None => return; - Some(rpc_info) => { - if rpc_info.con - - */ + if self.hello_requests.remove(&peer_id).is_none() { + // if response id is not in our list, ignore (likely RPC timeout) + return; + } + debug!(self.log, "Hello response received from peer: {:?}", peer_id); + // validate peer - decide whether to drop/ban or add to sync } /// Sends a HELLO RPC request to a newly connected peer. 
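The (PeerId, request id) → Instant maps above exist so that stale requests can be expired against REQUEST_TIMEOUT / HELLO_TIMEOUT. The patches do not yet include the pruning pass itself, so the sketch below is only an assumption of how one might look, with String standing in for PeerId:

use std::collections::HashMap;
use std::time::{Duration, Instant};

/// Drop tracked requests older than `timeout`, returning the offending peers
/// so the caller can decide whether to ban or re-request.
fn prune_expired(
    requests: &mut HashMap<(String, u64), Instant>,
    timeout: Duration,
) -> Vec<String> {
    let now = Instant::now();
    let mut expired = Vec::new();
    requests.retain(|(peer, _id), sent| {
        if now.duration_since(*sent) > timeout {
            expired.push(peer.clone());
            false
        } else {
            true
        }
    });
    expired
}

fn main() {
    let mut requests = HashMap::new();
    requests.insert(("peer_a".to_string(), 0), Instant::now());
    // Nothing is older than 30 seconds yet, so nothing is pruned.
    assert!(prune_expired(&mut requests, Duration::from_secs(30)).is_empty());
    assert_eq!(requests.len(), 1);
}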
@@ -161,17 +158,12 @@ impl MessageHandler { *borrowed_id += 1; id }; - // register RPC request - { - let requests = self - .requests - .entry(peer_id.clone()) - .or_insert_with(|| vec![]); - requests.push(RPCRequestInfo { - id: id.clone(), - request_time: Instant::now(), - }); - } + // register RPC Hello request + self.requests.insert((peer_id.clone(), id), Instant::now()); + debug!( + self.log, + "Hello request registered with peer: {:?}", peer_id + ); // build the rpc request let rpc_event = RPCEvent::Request { From 752c784534f83cbb6dbb8f6d29d0f50fe3fdf8f7 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 19 Mar 2019 14:40:08 +1100 Subject: [PATCH 127/154] Initial handling of RPC HELLO requests --- beacon_node/network/src/message_handler.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index a685e3324..f1a114db1 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -129,7 +129,7 @@ impl MessageHandler { fn handle_rpc_request(&mut self, peer_id: PeerId, id: u64, request: RPCRequest) { match request { RPCRequest::Hello(hello_message) => { - self.handle_hello_response(peer_id, id, hello_message) + // self.handle_hello_request(peer_id, id, hello_message) } } } @@ -146,6 +146,7 @@ impl MessageHandler { debug!(self.log, "Hello response received from peer: {:?}", peer_id); // validate peer - decide whether to drop/ban or add to sync + // TODO: Peer validation } /// Sends a HELLO RPC request to a newly connected peer. From d20fb93f0cd927526881dbaad439c3d827e2d644 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 19 Mar 2019 17:16:33 +1100 Subject: [PATCH 128/154] Update rewards processing to v0.5.0 --- .../src/per_epoch_processing.rs | 309 +++-------------- .../src/per_epoch_processing/apply_rewards.rs | 317 ++++++++++++++++++ .../src/per_epoch_processing/errors.rs | 1 + .../validator_statuses.rs | 41 ++- 4 files changed, 393 insertions(+), 275 deletions(-) create mode 100644 eth2/state_processing/src/per_epoch_processing/apply_rewards.rs diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index 8e03457d3..24f4a1e1f 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -1,16 +1,16 @@ +use apply_rewards::apply_rewards; use errors::EpochProcessingError as Error; -use integer_sqrt::IntegerSquareRoot; use process_ejections::process_ejections; use process_exit_queue::process_exit_queue; use process_slashings::process_slashings; use process_validator_registry::process_validator_registry; -use rayon::prelude::*; use ssz::TreeHash; use std::collections::HashMap; use types::*; use validator_statuses::{TotalBalances, ValidatorStatuses}; use winning_root::{winning_root, WinningRoot}; +pub mod apply_rewards; pub mod errors; pub mod get_attestation_participants; pub mod inclusion_distance; @@ -43,13 +43,13 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result process_eth1_data(state, spec); - process_justification(state, &statuses.total_balances, spec); + update_justification_and_finalization(state, &statuses.total_balances, spec)?; // Crosslinks let winning_root_for_shards = process_crosslinks(state, spec)?; // Rewards and Penalities - process_rewards_and_penalities(state, &mut statuses, &winning_root_for_shards, spec)?; + apply_rewards(state, &mut statuses, 
&winning_root_for_shards, spec)?; // Ejections process_ejections(state, spec)?; @@ -62,7 +62,7 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result // Final updates update_active_tree_index_roots(state, spec)?; update_latest_slashed_balances(state, spec)?; - clean_attestations(state); + state.previous_epoch_attestations = vec![]; // Rotate the epoch caches to suit the epoch transition. state.advance_caches(); @@ -113,83 +113,68 @@ pub fn process_eth1_data(state: &mut BeaconState, spec: &ChainSpec) { /// - `justified_epoch` /// - `previous_justified_epoch` /// -/// Spec v0.4.0 -pub fn process_justification( +/// Spec v0.5.0 +pub fn update_justification_and_finalization( state: &mut BeaconState, total_balances: &TotalBalances, spec: &ChainSpec, -) { +) -> Result<(), Error> { let previous_epoch = state.previous_epoch(spec); let current_epoch = state.current_epoch(spec); let mut new_justified_epoch = state.current_justified_epoch; + let mut new_finalized_epoch = state.finalized_epoch; + + // Rotate the justification bitfield up one epoch to make room for the current epoch. state.justification_bitfield <<= 1; - // If > 2/3 of the total balance attested to the previous epoch boundary - // - // - Set the 2nd bit of the bitfield. - // - Set the previous epoch to be justified. - if (3 * total_balances.previous_epoch_boundary_attesters) >= (2 * total_balances.previous_epoch) + // If the previous epoch gets justified, full the second last bit. + if (total_balances.previous_epoch_boundary_attesters * 3) >= (total_balances.previous_epoch * 2) { - state.justification_bitfield |= 2; new_justified_epoch = previous_epoch; + state.justification_bitfield |= 2; } - // If > 2/3 of the total balance attested to the previous epoch boundary - // - // - Set the 1st bit of the bitfield. - // - Set the current epoch to be justified. - if (3 * total_balances.current_epoch_boundary_attesters) >= (2 * total_balances.current_epoch) { - state.justification_bitfield |= 1; + // If the current epoch gets justified, fill the last bit. + if (total_balances.current_epoch_boundary_attesters * 3) >= (total_balances.current_epoch * 2) { new_justified_epoch = current_epoch; + state.justification_bitfield |= 1; } - // If: - // - // - All three epochs prior to this epoch have been justified. - // - The previous justified justified epoch was three epochs ago. - // - // Then, set the finalized epoch to be three epochs ago. - if ((state.justification_bitfield >> 1) % 8 == 0b111) - & (state.previous_justified_epoch == previous_epoch - 2) - { - state.finalized_epoch = state.previous_justified_epoch; + let bitfield = state.justification_bitfield; + + // The 2nd/3rd/4th most recent epochs are all justified, the 2nd using the 4th as source. + if ((bitfield >> 1) % 8 == 0b111) & (state.previous_justified_epoch == current_epoch - 3) { + new_finalized_epoch = state.previous_justified_epoch; } - // If: - // - // - Both two epochs prior to this epoch have been justified. - // - The previous justified epoch was two epochs ago. - // - // Then, set the finalized epoch to two epochs ago. - if ((state.justification_bitfield >> 1) % 4 == 0b11) - & (state.previous_justified_epoch == previous_epoch - 1) - { - state.finalized_epoch = state.previous_justified_epoch; + // The 2nd/3rd most recent epochs are both justified, the 2nd using the 3rd as source. 
+ if ((bitfield >> 1) % 4 == 0b11) & (state.previous_justified_epoch == current_epoch - 2) { + new_finalized_epoch = state.previous_justified_epoch; } - // If: - // - // - This epoch and the two prior have been justified. - // - The presently justified epoch was two epochs ago. - // - // Then, set the finalized epoch to two epochs ago. - if (state.justification_bitfield % 8 == 0b111) - & (state.current_justified_epoch == previous_epoch - 1) - { - state.finalized_epoch = state.current_justified_epoch; + // The 1st/2nd/3rd most recent epochs are all justified, the 1st using the 2nd as source. + if (bitfield % 8 == 0b111) & (state.current_justified_epoch == current_epoch - 2) { + new_finalized_epoch = state.current_justified_epoch; } - // If: - // - // - This epoch and the epoch prior to it have been justified. - // - Set the previous epoch to be justified. - // - // Then, set the finalized epoch to be the previous epoch. - if (state.justification_bitfield % 4 == 0b11) - & (state.current_justified_epoch == previous_epoch) - { - state.finalized_epoch = state.current_justified_epoch; + // The 1st/2nd most recent epochs are both justified, the 1st using the 2nd as source. + if (bitfield % 4 == 0b11) & (state.current_justified_epoch == current_epoch - 1) { + new_finalized_epoch = state.current_justified_epoch; } state.previous_justified_epoch = state.current_justified_epoch; - state.current_justified_epoch = new_justified_epoch; + state.previous_justified_root = state.current_justified_root; + + if new_justified_epoch != state.current_justified_epoch { + state.current_justified_epoch = new_justified_epoch; + state.current_justified_root = + *state.get_block_root(new_justified_epoch.start_slot(spec.slots_per_epoch), spec)?; + } + + if new_finalized_epoch != state.finalized_epoch { + state.finalized_epoch = new_finalized_epoch; + state.finalized_root = + *state.get_block_root(new_finalized_epoch.start_slot(spec.slots_per_epoch), spec)?; + } + + Ok(()) } /// Updates the following fields on the `BeaconState`: @@ -239,201 +224,6 @@ pub fn process_crosslinks( Ok(winning_root_for_shards) } -/// Updates the following fields on the BeaconState: -/// -/// - `validator_balances` -/// -/// Spec v0.4.0 -pub fn process_rewards_and_penalities( - state: &mut BeaconState, - statuses: &mut ValidatorStatuses, - winning_root_for_shards: &WinningRootHashSet, - spec: &ChainSpec, -) -> Result<(), Error> { - let next_epoch = state.next_epoch(spec); - - statuses.process_winning_roots(state, winning_root_for_shards, spec)?; - - let total_balances = &statuses.total_balances; - - let base_reward_quotient = - total_balances.previous_epoch.integer_sqrt() / spec.base_reward_quotient; - - // Guard against a divide-by-zero during the validator balance update. - if base_reward_quotient == 0 { - return Err(Error::BaseRewardQuotientIsZero); - } - // Guard against a divide-by-zero during the validator balance update. - if total_balances.previous_epoch == 0 { - return Err(Error::PreviousTotalBalanceIsZero); - } - // Guard against an out-of-bounds during the validator balance update. 
- if statuses.statuses.len() != state.validator_balances.len() { - return Err(Error::ValidatorStatusesInconsistent); - } - - // Justification and finalization - - let epochs_since_finality = next_epoch - state.finalized_epoch; - - state.validator_balances = state - .validator_balances - .par_iter() - .enumerate() - .map(|(index, &balance)| { - let mut balance = balance; - let status = &statuses.statuses[index]; - let base_reward = get_base_reward(state, index, total_balances.previous_epoch, spec) - .expect( - "Cannot fail to access a validator balance when iterating validator balances.", - ); - - if epochs_since_finality <= 4 { - // Expected FFG source - if status.is_previous_epoch_attester { - safe_add_assign!( - balance, - base_reward * total_balances.previous_epoch_attesters - / total_balances.previous_epoch - ); - } else if status.is_active_in_previous_epoch { - safe_sub_assign!(balance, base_reward); - } - - // Expected FFG target - if status.is_previous_epoch_boundary_attester { - safe_add_assign!( - balance, - base_reward * total_balances.previous_epoch_boundary_attesters - / total_balances.previous_epoch - ); - } else if status.is_active_in_previous_epoch { - safe_sub_assign!(balance, base_reward); - } - - // Expected beacon chain head - if status.is_previous_epoch_head_attester { - safe_add_assign!( - balance, - base_reward * total_balances.previous_epoch_head_attesters - / total_balances.previous_epoch - ); - } else if status.is_active_in_previous_epoch { - safe_sub_assign!(balance, base_reward); - }; - } else { - let inactivity_penalty = get_inactivity_penalty( - state, - index, - epochs_since_finality.as_u64(), - total_balances.previous_epoch, - spec, - ) - .expect( - "Cannot fail to access a validator balance when iterating validator balances.", - ); - - if status.is_active_in_previous_epoch { - if !status.is_previous_epoch_attester { - safe_sub_assign!(balance, inactivity_penalty); - } - if !status.is_previous_epoch_boundary_attester { - safe_sub_assign!(balance, inactivity_penalty); - } - if !status.is_previous_epoch_head_attester { - safe_sub_assign!(balance, inactivity_penalty); - } - - if state.validator_registry[index].slashed { - let base_reward = - get_base_reward(state, index, total_balances.previous_epoch, spec).expect( - "Cannot fail to access a validator balance when iterating validator balances.", - ); - safe_sub_assign!(balance, 2 * inactivity_penalty + base_reward); - } - } - } - - // Crosslinks - - if let Some(ref info) = status.winning_root_info { - safe_add_assign!( - balance, - base_reward * info.total_attesting_balance / info.total_committee_balance - ); - } else { - safe_sub_assign!(balance, base_reward); - } - - balance - }) - .collect(); - - // Attestation inclusion - - // Guard against an out-of-bounds during the attester inclusion balance update. 
- if statuses.statuses.len() != state.validator_registry.len() { - return Err(Error::ValidatorStatusesInconsistent); - } - - for (index, _validator) in state.validator_registry.iter().enumerate() { - let status = &statuses.statuses[index]; - - if status.is_previous_epoch_attester { - let proposer_index = status.inclusion_info.proposer_index; - let inclusion_distance = status.inclusion_info.distance; - - let base_reward = - get_base_reward(state, proposer_index, total_balances.previous_epoch, spec).expect( - "Cannot fail to access a validator balance when iterating validator balances.", - ); - - if inclusion_distance > 0 && inclusion_distance < Slot::max_value() { - safe_add_assign!( - state.validator_balances[proposer_index], - base_reward * spec.min_attestation_inclusion_delay - / inclusion_distance.as_u64() - ) - } - } - } - - Ok(()) -} - -/// Returns the base reward for some validator. -/// -/// Spec v0.5.0 -pub fn get_base_reward( - state: &BeaconState, - index: usize, - previous_total_balance: u64, - spec: &ChainSpec, -) -> Result { - if previous_total_balance == 0 { - Ok(0) - } else { - let adjusted_quotient = previous_total_balance.integer_sqrt() / spec.base_reward_quotient; - Ok(state.get_effective_balance(index, spec)? / adjusted_quotient / 5) - } -} - -/// Returns the inactivity penalty for some validator. -/// -/// Spec v0.5.0 -pub fn get_inactivity_penalty( - state: &BeaconState, - index: usize, - epochs_since_finality: u64, - previous_total_balance: u64, - spec: &ChainSpec, -) -> Result { - Ok(get_base_reward(state, index, previous_total_balance, spec)? - + state.get_effective_balance(index, spec)? * epochs_since_finality - / spec.inactivity_penalty_quotient - / 2) -} - /// Updates the state's `latest_active_index_roots` field with a tree hash the active validator /// indices for the next epoch. /// @@ -472,10 +262,3 @@ pub fn update_latest_slashed_balances( Ok(()) } - -/// Removes all pending attestations from the previous epoch. -/// -/// Spec v0.4.0 -pub fn clean_attestations(state: &mut BeaconState) { - state.previous_epoch_attestations = vec![]; -} diff --git a/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs b/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs new file mode 100644 index 000000000..5254e0710 --- /dev/null +++ b/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs @@ -0,0 +1,317 @@ +use super::validator_statuses::{TotalBalances, ValidatorStatus, ValidatorStatuses}; +use super::{Error, WinningRootHashSet}; +use integer_sqrt::IntegerSquareRoot; +use types::*; + +#[derive(Default, Clone)] +pub struct Delta { + pub rewards: u64, + pub penalties: u64, +} + +impl std::ops::AddAssign for Delta { + /// Use wrapping addition as that is how it's defined in the spec. + fn add_assign(&mut self, other: Delta) { + self.rewards += other.rewards; + self.penalties += other.penalties; + } +} + +/// Apply attester and proposer rewards. +/// +/// Spec v0.5.0 +pub fn apply_rewards( + state: &mut BeaconState, + validator_statuses: &mut ValidatorStatuses, + winning_root_for_shards: &WinningRootHashSet, + spec: &ChainSpec, +) -> Result<(), Error> { + // Guard against an out-of-bounds during the validator balance update. + if validator_statuses.statuses.len() != state.validator_balances.len() { + return Err(Error::ValidatorStatusesInconsistent); + } + // Guard against an out-of-bounds during the attester inclusion balance update. 
+ if validator_statuses.statuses.len() != state.validator_registry.len() { + return Err(Error::ValidatorStatusesInconsistent); + } + + let mut deltas = vec![Delta::default(); state.validator_balances.len()]; + + get_justification_and_finalization_deltas(&mut deltas, state, &validator_statuses, spec)?; + get_crosslink_deltas(&mut deltas, state, &validator_statuses, spec)?; + + // Apply the proposer deltas if we are finalizing normally. + // + // This is executed slightly differently to the spec because of the way our functions are + // structured. It should be functionally equivalent. + if epochs_since_finality(state, spec) <= 4 { + get_proposer_deltas( + &mut deltas, + state, + validator_statuses, + winning_root_for_shards, + spec, + )?; + } + + // Apply the deltas, over-flowing but not under-flowing (saturating at 0 instead). + for (i, delta) in deltas.iter().enumerate() { + state.validator_balances[i] += delta.rewards; + state.validator_balances[i] = state.validator_balances[i].saturating_sub(delta.penalties); + } + + Ok(()) +} + +/// Applies the attestation inclusion reward to each proposer for every validator who included an +/// attestation in the previous epoch. +/// +/// Spec v0.5.0 +fn get_proposer_deltas( + deltas: &mut Vec, + state: &mut BeaconState, + validator_statuses: &mut ValidatorStatuses, + winning_root_for_shards: &WinningRootHashSet, + spec: &ChainSpec, +) -> Result<(), Error> { + // Update statuses with the information from winning roots. + validator_statuses.process_winning_roots(state, winning_root_for_shards, spec)?; + + for (index, validator) in validator_statuses.statuses.iter().enumerate() { + let mut delta = Delta::default(); + + if validator.is_previous_epoch_attester { + let inclusion = validator + .inclusion_info + .expect("It is a logic error for an attester not to have an inclusion distance."); + + let base_reward = get_base_reward( + state, + inclusion.proposer_index, + validator_statuses.total_balances.previous_epoch, + spec, + )?; + + if inclusion.proposer_index >= deltas.len() { + return Err(Error::ValidatorStatusesInconsistent); + } + + delta.rewards += base_reward / spec.attestation_inclusion_reward_quotient; + } + + deltas[index] += delta; + } + + Ok(()) +} + +/// Apply rewards for participation in attestations during the previous epoch. +/// +/// Spec v0.5.0 +fn get_justification_and_finalization_deltas( + deltas: &mut Vec, + state: &BeaconState, + validator_statuses: &ValidatorStatuses, + spec: &ChainSpec, +) -> Result<(), Error> { + let epochs_since_finality = epochs_since_finality(state, spec); + + for (index, validator) in validator_statuses.statuses.iter().enumerate() { + let base_reward = get_base_reward( + state, + index, + validator_statuses.total_balances.previous_epoch, + spec, + )?; + let inactivity_penalty = get_inactivity_penalty( + state, + index, + epochs_since_finality.as_u64(), + validator_statuses.total_balances.previous_epoch, + spec, + )?; + + let delta = if epochs_since_finality <= 4 { + compute_normal_justification_and_finalization_delta( + &validator, + &validator_statuses.total_balances, + base_reward, + spec, + ) + } else { + compute_inactivity_leak_delta(&validator, base_reward, inactivity_penalty, spec) + }; + + deltas[index] += delta; + } + + Ok(()) +} + +/// Determine the delta for a single validator, if the chain is finalizing normally. 
+/// +/// Spec v0.5.0 +fn compute_normal_justification_and_finalization_delta( + validator: &ValidatorStatus, + total_balances: &TotalBalances, + base_reward: u64, + spec: &ChainSpec, +) -> Delta { + let mut delta = Delta::default(); + + let boundary_attesting_balance = total_balances.previous_epoch_boundary_attesters; + let total_balance = total_balances.previous_epoch; + let total_attesting_balance = total_balances.previous_epoch_attesters; + let matching_head_balance = total_balances.previous_epoch_boundary_attesters; + + // Expected FFG source. + if validator.is_previous_epoch_attester { + delta.rewards += base_reward * total_attesting_balance / total_balance; + // Inclusion speed bonus + let inclusion = validator + .inclusion_info + .expect("It is a logic error for an attester not to have an inclusion distance."); + delta.rewards += + base_reward * spec.min_attestation_inclusion_delay / inclusion.distance.as_u64(); + } else if validator.is_active_in_previous_epoch { + delta.penalties += base_reward; + } + + // Expected FFG target. + if validator.is_previous_epoch_boundary_attester { + delta.rewards += base_reward / boundary_attesting_balance / total_balance; + } else if validator.is_active_in_previous_epoch { + delta.penalties += base_reward; + } + + // Expected head. + if validator.is_previous_epoch_head_attester { + delta.rewards += base_reward * matching_head_balance / total_balance; + } else if validator.is_active_in_previous_epoch { + delta.penalties += base_reward; + }; + + // Proposer bonus is handled in `apply_proposer_deltas`. + // + // This function only computes the delta for a single validator, so it cannot also return a + // delta for a validator. + + delta +} + +/// Determine the delta for a single delta, assuming the chain is _not_ finalizing normally. +/// +/// Spec v0.5.0 +fn compute_inactivity_leak_delta( + validator: &ValidatorStatus, + base_reward: u64, + inactivity_penalty: u64, + spec: &ChainSpec, +) -> Delta { + let mut delta = Delta::default(); + + if validator.is_active_in_previous_epoch { + if !validator.is_previous_epoch_attester { + delta.penalties += inactivity_penalty; + } else { + // If a validator did attest, apply a small penalty for getting attestations included + // late. + let inclusion = validator + .inclusion_info + .expect("It is a logic error for an attester not to have an inclusion distance."); + delta.rewards += + base_reward * spec.min_attestation_inclusion_delay / inclusion.distance.as_u64(); + delta.penalties += base_reward; + } + + if !validator.is_previous_epoch_boundary_attester { + delta.penalties += inactivity_penalty; + } + + if !validator.is_previous_epoch_head_attester { + delta.penalties += inactivity_penalty; + } + } + + // Penalize slashed-but-inactive validators as though they were active but offline. + if !validator.is_active_in_previous_epoch + & validator.is_slashed + & !validator.is_withdrawable_in_current_epoch + { + delta.penalties += 2 * inactivity_penalty + base_reward; + } + + delta +} + +/// Calculate the deltas based upon the winning roots for attestations during the previous epoch. 
+/// +/// Spec v0.5.0 +fn get_crosslink_deltas( + deltas: &mut Vec, + state: &BeaconState, + validator_statuses: &ValidatorStatuses, + spec: &ChainSpec, +) -> Result<(), Error> { + for (index, validator) in validator_statuses.statuses.iter().enumerate() { + let mut delta = Delta::default(); + + let base_reward = get_base_reward( + state, + index, + validator_statuses.total_balances.previous_epoch, + spec, + )?; + + if let Some(ref winning_root) = validator.winning_root_info { + delta.rewards += base_reward * winning_root.total_attesting_balance + / winning_root.total_committee_balance + } else { + delta.penalties += base_reward; + } + + deltas[index] += delta; + } + + Ok(()) +} + +/// Returns the base reward for some validator. +/// +/// Spec v0.5.0 +fn get_base_reward( + state: &BeaconState, + index: usize, + previous_total_balance: u64, + spec: &ChainSpec, +) -> Result { + if previous_total_balance == 0 { + Ok(0) + } else { + let adjusted_quotient = previous_total_balance.integer_sqrt() / spec.base_reward_quotient; + Ok(state.get_effective_balance(index, spec)? / adjusted_quotient / 5) + } +} + +/// Returns the inactivity penalty for some validator. +/// +/// Spec v0.5.0 +fn get_inactivity_penalty( + state: &BeaconState, + index: usize, + epochs_since_finality: u64, + previous_total_balance: u64, + spec: &ChainSpec, +) -> Result { + Ok(get_base_reward(state, index, previous_total_balance, spec)? + + state.get_effective_balance(index, spec)? * epochs_since_finality + / spec.inactivity_penalty_quotient + / 2) +} + +/// Returns the epochs since the last finalized epoch. +/// +/// Spec v0.5.0 +fn epochs_since_finality(state: &BeaconState, spec: &ChainSpec) -> Epoch { + state.current_epoch(spec) + 1 - state.finalized_epoch +} diff --git a/eth2/state_processing/src/per_epoch_processing/errors.rs b/eth2/state_processing/src/per_epoch_processing/errors.rs index 94fc0cca5..4632e83bb 100644 --- a/eth2/state_processing/src/per_epoch_processing/errors.rs +++ b/eth2/state_processing/src/per_epoch_processing/errors.rs @@ -9,6 +9,7 @@ pub enum EpochProcessingError { PreviousTotalBalanceIsZero, InclusionDistanceZero, ValidatorStatusesInconsistent, + DeltasInconsistent, /// Unable to get the inclusion distance for a validator that should have an inclusion /// distance. This indicates an internal inconsistency. /// diff --git a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs index bcbca8244..50f3ec372 100644 --- a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs +++ b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs @@ -23,7 +23,7 @@ pub struct WinningRootInfo { } /// The information required to reward a block producer for including an attestation in a block. -#[derive(Clone)] +#[derive(Clone, Copy)] pub struct InclusionInfo { /// The earliest slot a validator had an attestation included in the previous epoch. pub slot: Slot, @@ -59,7 +59,11 @@ impl InclusionInfo { /// Information required to reward some validator during the current and previous epoch. #[derive(Default, Clone)] -pub struct AttesterStatus { +pub struct ValidatorStatus { + /// True if the validator has been slashed, ever. + pub is_slashed: bool, + /// True if the validator can withdraw in the current epoch. + pub is_withdrawable_in_current_epoch: bool, /// True if the validator was active in the state's _current_ epoch. 
pub is_active_in_current_epoch: bool, /// True if the validator was active in the state's _previous_ epoch. @@ -81,14 +85,14 @@ pub struct AttesterStatus { /// Information used to reward the block producer of this validators earliest-included /// attestation. - pub inclusion_info: InclusionInfo, + pub inclusion_info: Option, /// Information used to reward/penalize the validator if they voted in the super-majority for /// some shard block. pub winning_root_info: Option, } -impl AttesterStatus { - /// Accepts some `other` `AttesterStatus` and updates `self` if required. +impl ValidatorStatus { + /// Accepts some `other` `ValidatorStatus` and updates `self` if required. /// /// Will never set one of the `bool` fields to `false`, it will only set it to `true` if other /// contains a `true` field. @@ -97,6 +101,8 @@ impl AttesterStatus { pub fn update(&mut self, other: &Self) { // Update all the bool fields, only updating `self` if `other` is true (never setting // `self` to false). + set_self_if_other_is_true!(self, other, is_slashed); + set_self_if_other_is_true!(self, other, is_withdrawable_in_current_epoch); set_self_if_other_is_true!(self, other, is_active_in_current_epoch); set_self_if_other_is_true!(self, other, is_active_in_previous_epoch); set_self_if_other_is_true!(self, other, is_current_epoch_attester); @@ -105,7 +111,13 @@ impl AttesterStatus { set_self_if_other_is_true!(self, other, is_previous_epoch_boundary_attester); set_self_if_other_is_true!(self, other, is_previous_epoch_head_attester); - self.inclusion_info.update(&other.inclusion_info); + if let Some(other_info) = other.inclusion_info { + if let Some(self_info) = self.inclusion_info.as_mut() { + self_info.update(&other_info); + } else { + self.inclusion_info = other.inclusion_info; + } + } } } @@ -137,7 +149,7 @@ pub struct TotalBalances { #[derive(Clone)] pub struct ValidatorStatuses { /// Information about each individual validator from the state's validator registy. - pub statuses: Vec, + pub statuses: Vec, /// Summed balances for various sets of validators. pub total_balances: TotalBalances, } @@ -154,7 +166,12 @@ impl ValidatorStatuses { let mut total_balances = TotalBalances::default(); for (i, validator) in state.validator_registry.iter().enumerate() { - let mut status = AttesterStatus::default(); + let mut status = ValidatorStatus { + is_slashed: validator.slashed, + is_withdrawable_in_current_epoch: validator + .is_withdrawable_at(state.current_epoch(spec)), + ..ValidatorStatus::default() + }; if validator.is_active_at(state.current_epoch(spec)) { status.is_active_in_current_epoch = true; @@ -193,10 +210,10 @@ impl ValidatorStatuses { get_attestation_participants(state, &a.data, &a.aggregation_bitfield, spec)?; let attesting_balance = state.get_total_balance(&attesting_indices, spec)?; - let mut status = AttesterStatus::default(); + let mut status = ValidatorStatus::default(); // Profile this attestation, updating the total balances and generating an - // `AttesterStatus` object that applies to all participants in the attestation. + // `ValidatorStatus` object that applies to all participants in the attestation. if is_from_epoch(a, state.current_epoch(spec), spec) { self.total_balances.current_epoch_attesters += attesting_balance; status.is_current_epoch_attester = true; @@ -211,7 +228,7 @@ impl ValidatorStatuses { // The inclusion slot and distance are only required for previous epoch attesters. 
let relative_epoch = RelativeEpoch::from_slot(state.slot, a.data.slot, spec)?; - status.inclusion_info = InclusionInfo { + status.inclusion_info = Some(InclusionInfo { slot: a.inclusion_slot, distance: inclusion_distance(a), proposer_index: state.get_beacon_proposer_index( @@ -219,7 +236,7 @@ impl ValidatorStatuses { relative_epoch, spec, )?, - }; + }); if has_common_epoch_boundary_root(a, state, state.previous_epoch(spec), spec)? { self.total_balances.previous_epoch_boundary_attesters += attesting_balance; From 61f6fe25e7760ef60d1ee58207211a7493096a66 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 19 Mar 2019 17:26:20 +1100 Subject: [PATCH 129/154] Tidy reward processing --- .../src/per_epoch_processing/apply_rewards.rs | 59 ++++++++++++------- 1 file changed, 38 insertions(+), 21 deletions(-) diff --git a/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs b/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs index 5254e0710..ce5fccb21 100644 --- a/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs +++ b/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs @@ -3,10 +3,23 @@ use super::{Error, WinningRootHashSet}; use integer_sqrt::IntegerSquareRoot; use types::*; +/// Use to track the changes to a validators balance. #[derive(Default, Clone)] pub struct Delta { - pub rewards: u64, - pub penalties: u64, + rewards: u64, + penalties: u64, +} + +impl Delta { + /// Reward the validator with the `reward`. + pub fn reward(&mut self, reward: u64) { + self.rewards += reward; + } + + /// Penalize the validator with the `penalty`. + pub fn penalize(&mut self, penalty: u64) { + self.penalties += penalty; + } } impl std::ops::AddAssign for Delta { @@ -96,7 +109,7 @@ fn get_proposer_deltas( return Err(Error::ValidatorStatusesInconsistent); } - delta.rewards += base_reward / spec.attestation_inclusion_reward_quotient; + delta.reward(base_reward / spec.attestation_inclusion_reward_quotient); } deltas[index] += delta; @@ -166,29 +179,30 @@ fn compute_normal_justification_and_finalization_delta( // Expected FFG source. if validator.is_previous_epoch_attester { - delta.rewards += base_reward * total_attesting_balance / total_balance; + delta.reward(base_reward * total_attesting_balance / total_balance); // Inclusion speed bonus let inclusion = validator .inclusion_info .expect("It is a logic error for an attester not to have an inclusion distance."); - delta.rewards += - base_reward * spec.min_attestation_inclusion_delay / inclusion.distance.as_u64(); + delta.reward( + base_reward * spec.min_attestation_inclusion_delay / inclusion.distance.as_u64(), + ); } else if validator.is_active_in_previous_epoch { - delta.penalties += base_reward; + delta.penalize(base_reward); } // Expected FFG target. if validator.is_previous_epoch_boundary_attester { - delta.rewards += base_reward / boundary_attesting_balance / total_balance; + delta.reward(base_reward / boundary_attesting_balance / total_balance); } else if validator.is_active_in_previous_epoch { - delta.penalties += base_reward; + delta.penalize(base_reward); } // Expected head. if validator.is_previous_epoch_head_attester { - delta.rewards += base_reward * matching_head_balance / total_balance; + delta.reward(base_reward * matching_head_balance / total_balance); } else if validator.is_active_in_previous_epoch { - delta.penalties += base_reward; + delta.penalize(base_reward); }; // Proposer bonus is handled in `apply_proposer_deltas`. 
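The accumulate-then-apply pattern behind these `Delta` changes is easy to lose among the hunks: every reward and penalty is first collected into a per-validator `Delta`, and only at the end folded into the balances, with penalties saturating at zero rather than underflowing. A minimal, self-contained sketch of that pattern, using simplified types rather than the crate's own, is:

    /// Simplified stand-in for the per-validator balance change.
    #[derive(Default, Clone, Copy)]
    struct Delta {
        rewards: u64,
        penalties: u64,
    }

    impl Delta {
        fn reward(&mut self, amount: u64) {
            self.rewards += amount;
        }

        fn penalize(&mut self, amount: u64) {
            self.penalties += amount;
        }
    }

    /// Fold a set of deltas into validator balances: rewards are added,
    /// penalties are subtracted but never take a balance below zero.
    fn apply_deltas(balances: &mut [u64], deltas: &[Delta]) {
        for (balance, delta) in balances.iter_mut().zip(deltas) {
            *balance += delta.rewards;
            *balance = balance.saturating_sub(delta.penalties);
        }
    }

    fn main() {
        let mut balances = vec![32, 32, 1];
        let mut deltas = vec![Delta::default(); balances.len()];

        deltas[0].reward(3);   // attester reward
        deltas[1].penalize(2); // missed attestation
        deltas[2].penalize(5); // penalty larger than the remaining balance

        apply_deltas(&mut balances, &deltas);
        assert_eq!(balances, vec![35, 30, 0]);
    }

In the small example above, the third validator's penalty exceeds its balance, which is exactly the case the saturating subtraction in `apply_rewards` guards against.
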
@@ -212,24 +226,25 @@ fn compute_inactivity_leak_delta( if validator.is_active_in_previous_epoch { if !validator.is_previous_epoch_attester { - delta.penalties += inactivity_penalty; + delta.penalize(inactivity_penalty); } else { // If a validator did attest, apply a small penalty for getting attestations included // late. let inclusion = validator .inclusion_info .expect("It is a logic error for an attester not to have an inclusion distance."); - delta.rewards += - base_reward * spec.min_attestation_inclusion_delay / inclusion.distance.as_u64(); - delta.penalties += base_reward; + delta.reward( + base_reward * spec.min_attestation_inclusion_delay / inclusion.distance.as_u64(), + ); + delta.penalize(base_reward); } if !validator.is_previous_epoch_boundary_attester { - delta.penalties += inactivity_penalty; + delta.reward(inactivity_penalty); } if !validator.is_previous_epoch_head_attester { - delta.penalties += inactivity_penalty; + delta.penalize(inactivity_penalty); } } @@ -238,7 +253,7 @@ fn compute_inactivity_leak_delta( & validator.is_slashed & !validator.is_withdrawable_in_current_epoch { - delta.penalties += 2 * inactivity_penalty + base_reward; + delta.penalize(2 * inactivity_penalty + base_reward); } delta @@ -264,10 +279,12 @@ fn get_crosslink_deltas( )?; if let Some(ref winning_root) = validator.winning_root_info { - delta.rewards += base_reward * winning_root.total_attesting_balance - / winning_root.total_committee_balance + delta.reward( + base_reward * winning_root.total_attesting_balance + / winning_root.total_committee_balance, + ); } else { - delta.penalties += base_reward; + delta.penalize(base_reward); } deltas[index] += delta; From c0bc45f1f3f35ce66cfa6383745ffb322f7b615b Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 19 Mar 2019 18:28:42 +1100 Subject: [PATCH 130/154] Implement node connection validation structure --- beacon_node/network/src/message_handler.rs | 59 ++++++++++++++------- beacon_node/network/src/sync/simple_sync.rs | 42 +++++++++++++-- 2 files changed, 79 insertions(+), 22 deletions(-) diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index f1a114db1..17e74fda2 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -32,9 +32,6 @@ pub struct MessageHandler { network_send: crossbeam_channel::Sender, /// A mapping of peers and the RPC id we have sent an RPC request to. requests: HashMap<(PeerId, u64), Instant>, - /// A mapping of HELLO requests we have sent. We drop/ban peers if they do not response - /// within the timeout - hello_requests: HashMap, /// A counter of request id for each peer. request_ids: HashMap, /// The `MessageHandler` logger. 
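The patch that follows turns the HELLO exchange into a peer-validation step, with the checks spread across `message_handler.rs` and `simple_sync.rs`. Here is a condensed, hedged sketch of the decision being implemented; the type and field names are simplified stand-ins, and the finalized-root comparison is left as the TODO it still is in the patch:

    /// Simplified view of the fields a HELLO message carries.
    struct Hello {
        network_id: u8,
        latest_finalized_epoch: u64,
        latest_finalized_root: [u8; 32],
    }

    /// Local view of our own chain, as the sync module tracks it.
    struct LocalChain {
        network_id: u8,
        latest_finalized_epoch: u64,
    }

    impl LocalChain {
        /// Decide whether a peer that sent `hello` is worth syncing with.
        fn validate_peer(&self, hello: &Hello) -> bool {
            // Peers on a different network are rejected outright.
            if hello.network_id != self.network_id {
                return false;
            }
            // If their finalized epoch is not ahead of ours, their finalized
            // root should already be on our chain; the patch leaves this
            // lookup as a TODO, so it is only named here.
            if hello.latest_finalized_epoch <= self.latest_finalized_epoch {
                let _root_to_check = &hello.latest_finalized_root;
                // TODO (as in the patch): compare against our block root at
                // that epoch and reject on mismatch.
            }
            // Otherwise the peer is recorded and syncing may begin.
            true
        }
    }

    fn main() {
        let local = LocalChain { network_id: 2, latest_finalized_epoch: 10 };
        let peer = Hello {
            network_id: 2,
            latest_finalized_epoch: 12,
            latest_finalized_root: [0u8; 32],
        };
        assert!(local.validate_peer(&peer));
    }
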
@@ -76,7 +73,6 @@ impl MessageHandler { sync, network_send, requests: HashMap::new(), - hello_requests: HashMap::new(), request_ids: HashMap::new(), log: log.clone(), @@ -99,7 +95,8 @@ impl MessageHandler { match message { // we have initiated a connection to a peer HandlerMessage::PeerDialed(peer_id) => { - self.send_hello_request(peer_id); + let id = self.generate_request_id(&peer_id); + self.send_hello(peer_id, id, true); } // we have received an RPC message request/response HandlerMessage::RPC(peer_id, rpc_event) => { @@ -129,28 +126,41 @@ impl MessageHandler { fn handle_rpc_request(&mut self, peer_id: PeerId, id: u64, request: RPCRequest) { match request { RPCRequest::Hello(hello_message) => { - // self.handle_hello_request(peer_id, id, hello_message) + self.handle_hello_request(peer_id, id, hello_message) } } } /// An RPC response has been received from the network. // we match on id and ignore responses past the timeout. - fn handle_rpc_response(&mut self, peer_id: PeerId, id: u64, response: RPCResponse) {} - - fn handle_hello_response(&mut self, peer_id: PeerId, id: u64, response: HelloMessage) { - if self.hello_requests.remove(&peer_id).is_none() { - // if response id is not in our list, ignore (likely RPC timeout) + fn handle_rpc_response(&mut self, peer_id: PeerId, id: u64, response: RPCResponse) { + // if response id is related to a request, ignore (likely RPC timeout) + if self.requests.remove(&(peer_id, id)).is_none() { return; } + } + fn handle_hello_request(&mut self, peer_id: PeerId, id: u64, hello_message: HelloMessage) { + // send back a HELLO message + self.send_hello(peer_id.clone(), id, false); + // validate the peer + if !self.sync.validate_peer(peer_id.clone(), hello_message) { + debug!( + self.log, + "Peer dropped due to mismatching HELLO messages: {:?}", peer_id + ); + //TODO: block/ban the peer + } + } + + fn handle_hello_response(&mut self, peer_id: PeerId, id: u64, response: HelloMessage) { debug!(self.log, "Hello response received from peer: {:?}", peer_id); // validate peer - decide whether to drop/ban or add to sync // TODO: Peer validation } - /// Sends a HELLO RPC request to a newly connected peer. - fn send_hello_request(&mut self, peer_id: PeerId) { + /// Generates a new request id for a peer. + fn generate_request_id(&mut self, peer_id: &PeerId) -> u64 { // generate a unique id for the peer let id = { let borrowed_id = self.request_ids.entry(peer_id.clone()).or_insert_with(|| 0); @@ -159,18 +169,29 @@ impl MessageHandler { *borrowed_id += 1; id }; - // register RPC Hello request + // register RPC request self.requests.insert((peer_id.clone(), id), Instant::now()); debug!( self.log, "Hello request registered with peer: {:?}", peer_id ); + id + } - // build the rpc request - let rpc_event = RPCEvent::Request { - id, - method_id: RPCMethod::Hello.into(), - body: RPCRequest::Hello(self.sync.generate_hello()), + /// Sends a HELLO RPC request or response to a newly connected peer. 
+ //TODO: The boolean determines if sending request/respond, will be cleaner in the RPC re-write + fn send_hello(&mut self, peer_id: PeerId, id: u64, request: bool) { + let rpc_event = match request { + true => RPCEvent::Request { + id, + method_id: RPCMethod::Hello.into(), + body: RPCRequest::Hello(self.sync.generate_hello()), + }, + false => RPCEvent::Response { + id, + method_id: RPCMethod::Hello.into(), + result: RPCResponse::Hello(self.sync.generate_hello()), + }, }; // send the hello request to the network diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 336f225b2..2a3cc7089 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -32,15 +32,22 @@ pub struct SimpleSync { state: SyncState, /// The network id, for quick HELLO RPC message lookup. network_id: u8, + /// The latest epoch of the syncing chain. + latest_finalized_epoch: Epoch, + /// The latest block of the syncing chain. + latest_block: Hash256, } impl SimpleSync { pub fn new(beacon_chain: Arc) -> Self { + let state = beacon_chain.get_state(); SimpleSync { + chain: beacon_chain.clone(), known_peers: HashMap::new(), state: SyncState::Idle, network_id: beacon_chain.get_spec().network_id, - chain: beacon_chain, + latest_finalized_epoch: state.finalized_epoch, + latest_block: state.finalized_root, //TODO: Build latest block function into Beacon chain and correct this } } @@ -52,8 +59,37 @@ impl SimpleSync { network_id: self.network_id, latest_finalized_root: state.finalized_root.clone(), latest_finalized_epoch: state.finalized_epoch, - best_root: state.latest_block_roots[0], // 0 or len of vec? - best_slot: state.slot, + best_root: state.latest_block_roots[0], //TODO: build correct value as a beacon chain function + best_slot: state.slot - 1, } } + + pub fn validate_peer(&mut self, peer_id: PeerId, hello_message: HelloMessage) -> bool { + // network id must match + if hello_message.network_id != self.network_id { + return false; + } + // compare latest epoch and finalized root to see if they exist in our chain + if hello_message.latest_finalized_epoch <= self.latest_finalized_epoch { + // ensure their finalized root is in our chain + // TODO: Get the finalized root at hello_message.latest_epoch and ensure they match + //if (hello_message.latest_finalized_root == self.chain.get_state() { + // return false; + // } + } + + // the client is valid, add it to our list of known_peers and request sync if required + // update peer list if peer already exists + let peer_info = PeerSyncInfo { + latest_finalized_root: hello_message.latest_finalized_root, + latest_finalized_epoch: hello_message.latest_finalized_epoch, + best_root: hello_message.best_root, + best_slot: hello_message.best_slot, + }; + + self.known_peers.insert(peer_id, peer_info); + //TODO: Start syncing + + true + } } From 35b90728c72dfc043ed4aacba8919014f01a4c08 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 19 Mar 2019 19:27:10 +1100 Subject: [PATCH 131/154] Push more epoch processing fns to 0.5.0 --- .../src/common/verify_bitfield.rs | 2 +- .../src/per_epoch_processing.rs | 122 +++++++------- .../process_validator_registry.rs | 70 -------- .../update_registry_and_shuffling_data.rs | 151 ++++++++++++++++++ .../update_validator_registry.rs | 52 ------ eth2/types/src/beacon_state.rs | 74 +++++---- 6 files changed, 252 insertions(+), 219 deletions(-) delete mode 100644 eth2/state_processing/src/per_epoch_processing/process_validator_registry.rs create mode 
100644 eth2/state_processing/src/per_epoch_processing/update_registry_and_shuffling_data.rs delete mode 100644 eth2/state_processing/src/per_epoch_processing/update_validator_registry.rs diff --git a/eth2/state_processing/src/common/verify_bitfield.rs b/eth2/state_processing/src/common/verify_bitfield.rs index 8ff5c96ca..03fcdbb67 100644 --- a/eth2/state_processing/src/common/verify_bitfield.rs +++ b/eth2/state_processing/src/common/verify_bitfield.rs @@ -4,7 +4,7 @@ use types::*; /// /// Is title `verify_bitfield` in spec. /// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn verify_bitfield_length(bitfield: &Bitfield, committee_size: usize) -> bool { if bitfield.num_bytes() != ((committee_size + 7) / 8) { return false; diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index 24f4a1e1f..b917510f2 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -3,10 +3,10 @@ use errors::EpochProcessingError as Error; use process_ejections::process_ejections; use process_exit_queue::process_exit_queue; use process_slashings::process_slashings; -use process_validator_registry::process_validator_registry; use ssz::TreeHash; use std::collections::HashMap; use types::*; +use update_registry_and_shuffling_data::update_registry_and_shuffling_data; use validator_statuses::{TotalBalances, ValidatorStatuses}; use winning_root::{winning_root, WinningRoot}; @@ -17,9 +17,8 @@ pub mod inclusion_distance; pub mod process_ejections; pub mod process_exit_queue; pub mod process_slashings; -pub mod process_validator_registry; pub mod tests; -pub mod update_validator_registry; +pub mod update_registry_and_shuffling_data; pub mod validator_statuses; pub mod winning_root; @@ -39,30 +38,34 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result state.build_epoch_cache(RelativeEpoch::Previous, spec)?; state.build_epoch_cache(RelativeEpoch::Current, spec)?; - let mut statuses = initialize_validator_statuses(&state, spec)?; + // Load the struct we use to assign validators into sets based on their participation. + // + // E.g., attestation in the previous epoch, attested to the head, etc. + let mut statuses = ValidatorStatuses::new(state, spec)?; + statuses.process_attestations(&state, spec)?; process_eth1_data(state, spec); update_justification_and_finalization(state, &statuses.total_balances, spec)?; - // Crosslinks + // Crosslinks. let winning_root_for_shards = process_crosslinks(state, spec)?; - // Rewards and Penalities + // Rewards and Penalities. apply_rewards(state, &mut statuses, &winning_root_for_shards, spec)?; - // Ejections + // Ejections. process_ejections(state, spec)?; - // Validator Registry - process_validator_registry(state, spec)?; + // Validator Registry. + update_registry_and_shuffling_data(state, statuses.total_balances.current_epoch, spec)?; + + // Slashings and exit queue. process_slashings(state, spec)?; process_exit_queue(state, spec); - // Final updates - update_active_tree_index_roots(state, spec)?; - update_latest_slashed_balances(state, spec)?; - state.previous_epoch_attestations = vec![]; + // Final updates. + finish_epoch_update(state, spec)?; // Rotate the epoch caches to suit the epoch transition. 
state.advance_caches(); @@ -70,25 +73,6 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result Ok(()) } -/// Calculates various sets of attesters, including: -/// -/// - current epoch attesters -/// - current epoch boundary attesters -/// - previous epoch attesters -/// - etc. -/// -/// Spec v0.5.0 -pub fn initialize_validator_statuses( - state: &BeaconState, - spec: &ChainSpec, -) -> Result { - let mut statuses = ValidatorStatuses::new(state, spec)?; - - statuses.process_attestations(&state, spec)?; - - Ok(statuses) -} - /// Maybe resets the eth1 period. /// /// Spec v0.5.0 @@ -224,41 +208,53 @@ pub fn process_crosslinks( Ok(winning_root_for_shards) } -/// Updates the state's `latest_active_index_roots` field with a tree hash the active validator -/// indices for the next epoch. +/// Finish up an epoch update. /// -/// Spec v0.4.0 -pub fn update_active_tree_index_roots( - state: &mut BeaconState, - spec: &ChainSpec, -) -> Result<(), Error> { - let next_epoch = state.next_epoch(spec); - - let active_tree_root = state - .get_active_validator_indices(next_epoch + Epoch::from(spec.activation_exit_delay)) - .to_vec() - .hash_tree_root(); - - state.set_active_index_root(next_epoch, Hash256::from_slice(&active_tree_root[..]), spec)?; - - Ok(()) -} - -/// Advances the state's `latest_slashed_balances` field. -/// -/// Spec v0.4.0 -pub fn update_latest_slashed_balances( - state: &mut BeaconState, - spec: &ChainSpec, -) -> Result<(), Error> { +/// Spec v0.5.0 +pub fn finish_epoch_update(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { let current_epoch = state.current_epoch(spec); let next_epoch = state.next_epoch(spec); - state.set_slashed_balance( - next_epoch, - state.get_slashed_balance(current_epoch, spec)?, - spec, - )?; + // This is a hack to allow us to update index roots and slashed balances for the next epoch. + // + // The indentation here is to make it obvious where the weird stuff happens. + { + state.slot += 1; + + // Set active index root + let active_index_root = Hash256::from_slice( + &state + .get_active_validator_indices(next_epoch + spec.activation_exit_delay) + .hash_tree_root()[..], + ); + state.set_active_index_root(next_epoch, active_index_root, spec)?; + + // Set total slashed balances + state.set_slashed_balance( + next_epoch, + state.get_slashed_balance(current_epoch, spec)?, + spec, + )?; + + // Set randao mix + state.set_randao_mix( + next_epoch, + *state.get_randao_mix(current_epoch, spec)?, + spec, + )?; + + state.slot -= 1; + } + + if next_epoch.as_u64() % (spec.slots_per_historical_root as u64 / spec.slots_per_epoch) == 0 { + let historical_batch: HistoricalBatch = state.historical_batch(); + state + .historical_roots + .push(Hash256::from_slice(&historical_batch.hash_tree_root()[..])); + } + + state.previous_epoch_attestations = state.current_epoch_attestations.clone(); + state.current_epoch_attestations = vec![]; Ok(()) } diff --git a/eth2/state_processing/src/per_epoch_processing/process_validator_registry.rs b/eth2/state_processing/src/per_epoch_processing/process_validator_registry.rs deleted file mode 100644 index 85d6c37f6..000000000 --- a/eth2/state_processing/src/per_epoch_processing/process_validator_registry.rs +++ /dev/null @@ -1,70 +0,0 @@ -use super::update_validator_registry::update_validator_registry; -use super::Error; -use types::*; - -/// Peforms a validator registry update, if required. 
-/// -/// Spec v0.4.0 -pub fn process_validator_registry(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { - let current_epoch = state.current_epoch(spec); - let next_epoch = state.next_epoch(spec); - - state.previous_shuffling_epoch = state.current_shuffling_epoch; - state.previous_shuffling_start_shard = state.current_shuffling_start_shard; - - state.previous_shuffling_seed = state.current_shuffling_seed; - - if should_update_validator_registry(state, spec)? { - update_validator_registry(state, spec)?; - - state.current_shuffling_epoch = next_epoch; - state.current_shuffling_start_shard = (state.current_shuffling_start_shard - + spec.get_epoch_committee_count( - state - .get_cached_active_validator_indices(RelativeEpoch::Current, spec)? - .len(), - ) as u64) - % spec.shard_count; - state.current_shuffling_seed = state.generate_seed(state.current_shuffling_epoch, spec)? - } else { - let epochs_since_last_registry_update = - current_epoch - state.validator_registry_update_epoch; - if (epochs_since_last_registry_update > 1) - & epochs_since_last_registry_update.is_power_of_two() - { - state.current_shuffling_epoch = next_epoch; - state.current_shuffling_seed = - state.generate_seed(state.current_shuffling_epoch, spec)? - } - } - - Ok(()) -} - -/// Returns `true` if the validator registry should be updated during an epoch processing. -/// -/// Spec v0.5.0 -pub fn should_update_validator_registry( - state: &BeaconState, - spec: &ChainSpec, -) -> Result { - if state.finalized_epoch <= state.validator_registry_update_epoch { - return Ok(false); - } - - let num_active_validators = state - .get_cached_active_validator_indices(RelativeEpoch::Current, spec)? - .len(); - let current_epoch_committee_count = spec.get_epoch_committee_count(num_active_validators); - - for shard in (0..current_epoch_committee_count) - .into_iter() - .map(|i| (state.current_shuffling_start_shard + i as u64) % spec.shard_count) - { - if state.latest_crosslinks[shard as usize].epoch <= state.validator_registry_update_epoch { - return Ok(false); - } - } - - Ok(true) -} diff --git a/eth2/state_processing/src/per_epoch_processing/update_registry_and_shuffling_data.rs b/eth2/state_processing/src/per_epoch_processing/update_registry_and_shuffling_data.rs new file mode 100644 index 000000000..286ad8140 --- /dev/null +++ b/eth2/state_processing/src/per_epoch_processing/update_registry_and_shuffling_data.rs @@ -0,0 +1,151 @@ +use super::super::common::exit_validator; +use super::Error; +use types::*; + +/// Peforms a validator registry update, if required. +/// +/// Spec v0.5.0 +pub fn update_registry_and_shuffling_data( + state: &mut BeaconState, + current_total_balance: u64, + spec: &ChainSpec, +) -> Result<(), Error> { + // First set previous shuffling data to current shuffling data. + state.previous_shuffling_epoch = state.current_shuffling_epoch; + state.previous_shuffling_start_shard = state.previous_shuffling_start_shard; + state.previous_shuffling_seed = state.previous_shuffling_seed; + + let current_epoch = state.current_epoch(spec); + let next_epoch = current_epoch + 1; + + // Check we should update, and if so, update. + if should_update_validator_registry(state, spec)? { + update_validator_registry(state, current_total_balance, spec)?; + + // If we update the registry, update the shuffling data and shards as well. 
+ state.current_shuffling_epoch = next_epoch; + state.current_shuffling_start_shard = { + let active_validators = + state.get_cached_active_validator_indices(RelativeEpoch::Current, spec)?; + let epoch_committee_count = spec.get_epoch_committee_count(active_validators.len()); + + (state.current_shuffling_start_shard + epoch_committee_count) % spec.shard_count + }; + state.current_shuffling_seed = state.generate_seed(state.current_shuffling_epoch, spec)?; + } else { + // If processing at least on crosslink keeps failing, the reshuffle every power of two, but + // don't update the current_shuffling_start_shard. + let epochs_since_last_update = current_epoch - state.validator_registry_update_epoch; + + if epochs_since_last_update > 1 && epochs_since_last_update.is_power_of_two() { + state.current_shuffling_epoch = next_epoch; + state.current_shuffling_seed = + state.generate_seed(state.current_shuffling_epoch, spec)?; + } + } + + Ok(()) +} + +/// Returns `true` if the validator registry should be updated during an epoch processing. +/// +/// Spec v0.5.0 +pub fn should_update_validator_registry( + state: &BeaconState, + spec: &ChainSpec, +) -> Result { + if state.finalized_epoch <= state.validator_registry_update_epoch { + return Ok(false); + } + + let num_active_validators = state + .get_cached_active_validator_indices(RelativeEpoch::Current, spec)? + .len(); + let current_epoch_committee_count = spec.get_epoch_committee_count(num_active_validators); + + for shard in (0..current_epoch_committee_count) + .into_iter() + .map(|i| (state.current_shuffling_start_shard + i as u64) % spec.shard_count) + { + if state.latest_crosslinks[shard as usize].epoch <= state.validator_registry_update_epoch { + return Ok(false); + } + } + + Ok(true) +} + +/// Update validator registry, activating/exiting validators if possible. +/// +/// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. +/// +/// Spec v0.5.0 +pub fn update_validator_registry( + state: &mut BeaconState, + current_total_balance: u64, + spec: &ChainSpec, +) -> Result<(), Error> { + let current_epoch = state.current_epoch(spec); + + let max_balance_churn = std::cmp::max( + spec.max_deposit_amount, + current_total_balance / (2 * spec.max_balance_churn_quotient), + ); + + // Activate validators within the allowable balance churn. + let mut balance_churn = 0; + for index in 0..state.validator_registry.len() { + let not_activated = + state.validator_registry[index].activation_epoch == spec.far_future_epoch; + let has_enough_balance = state.validator_balances[index] >= spec.max_deposit_amount; + + if not_activated && has_enough_balance { + // Check the balance churn would be within the allowance. + balance_churn += state.get_effective_balance(index, spec)?; + if balance_churn > max_balance_churn { + break; + } + + activate_validator(state, index, false, spec); + } + } + + // Exit validators within the allowable balance churn. + let mut balance_churn = 0; + for index in 0..state.validator_registry.len() { + let not_exited = state.validator_registry[index].exit_epoch == spec.far_future_epoch; + let has_initiated_exit = state.validator_registry[index].initiated_exit; + + if not_exited && has_initiated_exit { + // Check the balance churn would be within the allowance. 
+ balance_churn += state.get_effective_balance(index, spec)?; + if balance_churn > max_balance_churn { + break; + } + + exit_validator(state, index, spec)?; + } + } + + state.validator_registry_update_epoch = current_epoch; + + Ok(()) +} + +/// Activate the validator of the given ``index``. +/// +/// Spec v0.5.0 +pub fn activate_validator( + state: &mut BeaconState, + validator_index: usize, + is_genesis: bool, + spec: &ChainSpec, +) { + let current_epoch = state.current_epoch(spec); + + state.validator_registry[validator_index].activation_epoch = if is_genesis { + spec.genesis_epoch + } else { + state.get_delayed_activation_exit_epoch(current_epoch, spec) + } +} diff --git a/eth2/state_processing/src/per_epoch_processing/update_validator_registry.rs b/eth2/state_processing/src/per_epoch_processing/update_validator_registry.rs deleted file mode 100644 index ecf05ce6f..000000000 --- a/eth2/state_processing/src/per_epoch_processing/update_validator_registry.rs +++ /dev/null @@ -1,52 +0,0 @@ -use crate::common::exit_validator; -use types::{BeaconStateError as Error, *}; - -/// Update validator registry, activating/exiting validators if possible. -/// -/// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. -/// -/// Spec v0.4.0 -pub fn update_validator_registry(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { - let current_epoch = state.current_epoch(spec); - let active_validator_indices = - state.get_cached_active_validator_indices(RelativeEpoch::Current, spec)?; - let total_balance = state.get_total_balance(&active_validator_indices[..], spec)?; - - let max_balance_churn = std::cmp::max( - spec.max_deposit_amount, - total_balance / (2 * spec.max_balance_churn_quotient), - ); - - let mut balance_churn = 0; - for index in 0..state.validator_registry.len() { - let validator = &state.validator_registry[index]; - - if (validator.activation_epoch == spec.far_future_epoch) - & (state.validator_balances[index] == spec.max_deposit_amount) - { - balance_churn += state.get_effective_balance(index, spec)?; - if balance_churn > max_balance_churn { - break; - } - state.activate_validator(index, false, spec); - } - } - - let mut balance_churn = 0; - for index in 0..state.validator_registry.len() { - let validator = &state.validator_registry[index]; - - if (validator.exit_epoch == spec.far_future_epoch) & (validator.initiated_exit) { - balance_churn += state.get_effective_balance(index, spec)?; - if balance_churn > max_balance_churn { - break; - } - - exit_validator(state, index, spec)?; - } - } - - state.validator_registry_update_epoch = current_epoch; - - Ok(()) -} diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 22e7c6ecf..1a165c9a9 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -193,6 +193,13 @@ impl BeaconState { Hash256::from_slice(&self.hash_tree_root()[..]) } + pub fn historical_batch(&self) -> HistoricalBatch { + HistoricalBatch { + block_roots: self.latest_block_roots.clone(), + state_roots: self.latest_state_roots.clone(), + } + } + /// If a validator pubkey exists in the validator registry, returns `Some(i)`, otherwise /// returns `None`. 
/// @@ -382,6 +389,26 @@ impl BeaconState { Ok(self.latest_block_roots[i] = block_root) } + /// Safely obtains the index for `latest_randao_mixes` + /// + /// Spec v0.5.0 + fn get_randao_mix_index(&self, epoch: Epoch, spec: &ChainSpec) -> Result { + let current_epoch = self.current_epoch(spec); + + if (current_epoch - (spec.latest_randao_mixes_length as u64) < epoch) + & (epoch <= current_epoch) + { + let i = epoch.as_usize() % spec.latest_randao_mixes_length; + if i < self.latest_randao_mixes.len() { + Ok(i) + } else { + Err(Error::InsufficientRandaoMixes) + } + } else { + Err(Error::EpochOutOfBounds) + } + } + /// XOR-assigns the existing `epoch` randao mix with the hash of the `signature`. /// /// # Errors: @@ -406,24 +433,23 @@ impl BeaconState { /// Return the randao mix at a recent ``epoch``. /// - /// # Errors: - /// - `InsufficientRandaoMixes` if `self.latest_randao_mixes` is shorter than - /// `spec.latest_randao_mixes_length`. - /// - `EpochOutOfBounds` if the state no longer stores randao mixes for the given `epoch`. - /// /// Spec v0.5.0 pub fn get_randao_mix(&self, epoch: Epoch, spec: &ChainSpec) -> Result<&Hash256, Error> { - let current_epoch = self.current_epoch(spec); + let i = self.get_randao_mix_index(epoch, spec)?; + Ok(&self.latest_randao_mixes[i]) + } - if (current_epoch - (spec.latest_randao_mixes_length as u64) < epoch) - & (epoch <= current_epoch) - { - self.latest_randao_mixes - .get(epoch.as_usize() % spec.latest_randao_mixes_length) - .ok_or_else(|| Error::InsufficientRandaoMixes) - } else { - Err(Error::EpochOutOfBounds) - } + /// Set the randao mix at a recent ``epoch``. + /// + /// Spec v0.5.0 + pub fn set_randao_mix( + &mut self, + epoch: Epoch, + mix: Hash256, + spec: &ChainSpec, + ) -> Result<(), Error> { + let i = self.get_randao_mix_index(epoch, spec)?; + Ok(self.latest_randao_mixes[i] = mix) } /// Safely obtains the index for `latest_active_index_roots`, given some `epoch`. @@ -588,24 +614,6 @@ impl BeaconState { epoch + 1 + spec.activation_exit_delay } - /// Activate the validator of the given ``index``. - /// - /// Spec v0.5.0 - pub fn activate_validator( - &mut self, - validator_index: usize, - is_genesis: bool, - spec: &ChainSpec, - ) { - let current_epoch = self.current_epoch(spec); - - self.validator_registry[validator_index].activation_epoch = if is_genesis { - spec.genesis_epoch - } else { - self.get_delayed_activation_exit_epoch(current_epoch, spec) - } - } - /// Initiate an exit for the validator of the given `index`. /// /// Spec v0.5.0 From baca2c90abacfc8fafcaf3bfa590242a0ca8f084 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 19 Mar 2019 19:43:31 +1100 Subject: [PATCH 132/154] Add last of 0.5.0 upgrades. Woo! --- .../src/per_epoch_processing.rs | 31 ++++++++++------ .../process_exit_queue.rs | 4 +-- .../per_epoch_processing/process_slashings.rs | 36 +++++++++---------- 3 files changed, 40 insertions(+), 31 deletions(-) diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index b917510f2..fcdc668f4 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -32,7 +32,7 @@ pub type WinningRootHashSet = HashMap; /// Mutates the given `BeaconState`, returning early if an error is encountered. If an error is /// returned, a state might be "half-processed" and therefore in an invalid state. 
/// -/// Spec v0.4.0 +/// Spec v0.5.0 pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { // Ensure the previous and next epoch caches are built. state.build_epoch_cache(RelativeEpoch::Previous, spec)?; @@ -41,27 +41,38 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result // Load the struct we use to assign validators into sets based on their participation. // // E.g., attestation in the previous epoch, attested to the head, etc. - let mut statuses = ValidatorStatuses::new(state, spec)?; - statuses.process_attestations(&state, spec)?; + let mut validator_statuses = ValidatorStatuses::new(state, spec)?; + validator_statuses.process_attestations(&state, spec)?; - process_eth1_data(state, spec); - - update_justification_and_finalization(state, &statuses.total_balances, spec)?; + // Justification. + update_justification_and_finalization(state, &validator_statuses.total_balances, spec)?; // Crosslinks. let winning_root_for_shards = process_crosslinks(state, spec)?; + // Eth1 data. + maybe_reset_eth1_period(state, spec); + // Rewards and Penalities. - apply_rewards(state, &mut statuses, &winning_root_for_shards, spec)?; + apply_rewards( + state, + &mut validator_statuses, + &winning_root_for_shards, + spec, + )?; // Ejections. process_ejections(state, spec)?; // Validator Registry. - update_registry_and_shuffling_data(state, statuses.total_balances.current_epoch, spec)?; + update_registry_and_shuffling_data( + state, + validator_statuses.total_balances.current_epoch, + spec, + )?; // Slashings and exit queue. - process_slashings(state, spec)?; + process_slashings(state, validator_statuses.total_balances.current_epoch, spec)?; process_exit_queue(state, spec); // Final updates. @@ -76,7 +87,7 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result /// Maybe resets the eth1 period. /// /// Spec v0.5.0 -pub fn process_eth1_data(state: &mut BeaconState, spec: &ChainSpec) { +pub fn maybe_reset_eth1_period(state: &mut BeaconState, spec: &ChainSpec) { let next_epoch = state.next_epoch(spec); let voting_period = spec.epochs_per_eth1_voting_period; diff --git a/eth2/state_processing/src/per_epoch_processing/process_exit_queue.rs b/eth2/state_processing/src/per_epoch_processing/process_exit_queue.rs index f672c97be..074db1d08 100644 --- a/eth2/state_processing/src/per_epoch_processing/process_exit_queue.rs +++ b/eth2/state_processing/src/per_epoch_processing/process_exit_queue.rs @@ -21,8 +21,8 @@ pub fn process_exit_queue(state: &mut BeaconState, spec: &ChainSpec) { .collect(); eligable_indices.sort_by_key(|i| state.validator_registry[*i].exit_epoch); - for (withdrawn_so_far, index) in eligable_indices.iter().enumerate() { - if withdrawn_so_far as u64 >= spec.max_exit_dequeues_per_epoch { + for (dequeues, index) in eligable_indices.iter().enumerate() { + if dequeues as u64 >= spec.max_exit_dequeues_per_epoch { break; } prepare_validator_for_withdrawal(state, *index, spec); diff --git a/eth2/state_processing/src/per_epoch_processing/process_slashings.rs b/eth2/state_processing/src/per_epoch_processing/process_slashings.rs index 19c1e519b..88777472c 100644 --- a/eth2/state_processing/src/per_epoch_processing/process_slashings.rs +++ b/eth2/state_processing/src/per_epoch_processing/process_slashings.rs @@ -2,34 +2,32 @@ use types::{BeaconStateError as Error, *}; /// Process slashings. /// -/// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. 
-/// -/// Spec v0.4.0 -pub fn process_slashings(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { +/// Spec v0.5.0 +pub fn process_slashings( + state: &mut BeaconState, + current_total_balance: u64, + spec: &ChainSpec, +) -> Result<(), Error> { let current_epoch = state.current_epoch(spec); - let active_validator_indices = - state.get_cached_active_validator_indices(RelativeEpoch::Current, spec)?; - let total_balance = state.get_total_balance(&active_validator_indices[..], spec)?; + + let total_at_start = state.get_slashed_balance(current_epoch + 1, spec)?; + let total_at_end = state.get_slashed_balance(current_epoch, spec)?; + let total_penalities = total_at_end - total_at_start; for (index, validator) in state.validator_registry.iter().enumerate() { - if validator.slashed - && (current_epoch - == validator.withdrawable_epoch - Epoch::from(spec.latest_slashed_exit_length / 2)) - { - // TODO: check the following two lines are correct. - let total_at_start = state.get_slashed_balance(current_epoch + 1, spec)?; - let total_at_end = state.get_slashed_balance(current_epoch, spec)?; - - let total_penalities = total_at_end.saturating_sub(total_at_start); + let should_penalize = current_epoch.as_usize() + == validator.withdrawable_epoch.as_usize() - spec.latest_slashed_exit_length / 2; + if validator.slashed && should_penalize { let effective_balance = state.get_effective_balance(index, spec)?; + let penalty = std::cmp::max( - effective_balance * std::cmp::min(total_penalities * 3, total_balance) - / total_balance, + effective_balance * std::cmp::min(total_penalities * 3, current_total_balance) + / current_total_balance, effective_balance / spec.min_penalty_quotient, ); - safe_sub_assign!(state.validator_balances[index], penalty); + state.validator_balances[index] -= penalty; } } From 8f23aefb29f997e46de59ce8b571196b203c3ef6 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 19 Mar 2019 19:55:17 +1100 Subject: [PATCH 133/154] Adds comments to new epoch cache fns. --- eth2/types/src/beacon_state/epoch_cache.rs | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/eth2/types/src/beacon_state/epoch_cache.rs b/eth2/types/src/beacon_state/epoch_cache.rs index ca8bcc70e..6eebf1da3 100644 --- a/eth2/types/src/beacon_state/epoch_cache.rs +++ b/eth2/types/src/beacon_state/epoch_cache.rs @@ -107,6 +107,7 @@ impl EpochCache { }) } + /// Return a vec of `CrosslinkCommittee` for a given slot. pub fn get_crosslink_committees_at_slot( &self, slot: Slot, @@ -116,6 +117,8 @@ impl EpochCache { .get_crosslink_committees_at_slot(slot, spec) } + /// Return `Some(CrosslinkCommittee)` if the given shard has a committee during the given + /// `epoch`. pub fn get_crosslink_committee_for_shard( &self, shard: Shard, @@ -131,6 +134,10 @@ impl EpochCache { } } +/// Returns a list of all `validator_registry` indices where the validator is active at the given +/// `epoch`. +/// +/// Spec v0.5.0 pub fn get_active_validator_indices(validators: &[Validator], epoch: Epoch) -> Vec { let mut active = Vec::with_capacity(validators.len()); @@ -145,13 +152,17 @@ pub fn get_active_validator_indices(validators: &[Validator], epoch: Epoch) -> V active } +/// Contains all `CrosslinkCommittees` for an epoch. #[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] pub struct EpochCrosslinkCommittees { + /// The epoch the committees are present in. epoch: Epoch, + /// Each commitee for each slot of the epoch. 
pub crosslink_committees: Vec>, } impl EpochCrosslinkCommittees { + /// Return a new instances where all slots have zero committees. fn new(epoch: Epoch, spec: &ChainSpec) -> Self { Self { epoch, @@ -159,6 +170,7 @@ impl EpochCrosslinkCommittees { } } + /// Return a vec of `CrosslinkCommittee` for a given slot. fn get_crosslink_committees_at_slot( &self, slot: Slot, @@ -176,6 +188,7 @@ impl EpochCrosslinkCommittees { } } +/// Builds an `EpochCrosslinkCommittees` object. pub struct EpochCrosslinkCommitteesBuilder { epoch: Epoch, shuffling_start_shard: Shard, @@ -185,6 +198,7 @@ pub struct EpochCrosslinkCommitteesBuilder { } impl EpochCrosslinkCommitteesBuilder { + /// Instantiates a builder that will build for the `state`'s previous epoch. pub fn for_previous_epoch( state: &BeaconState, active_validator_indices: Vec, @@ -199,6 +213,7 @@ impl EpochCrosslinkCommitteesBuilder { } } + /// Instantiates a builder that will build for the `state`'s next epoch. pub fn for_current_epoch( state: &BeaconState, active_validator_indices: Vec, @@ -213,6 +228,10 @@ impl EpochCrosslinkCommitteesBuilder { } } + /// Instantiates a builder that will build for the `state`'s next epoch. + /// + /// Note: there are two possible epoch builds for the next epoch, one where there is a registry + /// change and one where there is not. pub fn for_next_epoch( state: &BeaconState, active_validator_indices: Vec, @@ -257,6 +276,7 @@ impl EpochCrosslinkCommitteesBuilder { }) } + /// Consumes the builder, returning a fully-build `EpochCrosslinkCommittee`. pub fn build(self, spec: &ChainSpec) -> Result { // The shuffler fails on a empty list, so if there are no active validator indices, simply // return an empty list. From 6e10ce93d4e3a69cf61e6128f654a2b85ee28818 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 19 Mar 2019 21:44:52 +1100 Subject: [PATCH 134/154] Tidy message handler --- beacon_node/network/src/message_handler.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 17e74fda2..3b95c1263 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -91,6 +91,7 @@ impl MessageHandler { Ok(handler_send) } + /// Handle all messages incoming from the network service. fn handle_message(&mut self, message: HandlerMessage) { match message { // we have initiated a connection to a peer @@ -107,6 +108,9 @@ impl MessageHandler { } } + /* RPC - Related functionality */ + + /// Handle RPC messages fn handle_rpc_message(&mut self, peer_id: PeerId, rpc_message: RPCEvent) { match rpc_message { RPCEvent::Request { @@ -140,6 +144,7 @@ impl MessageHandler { } } + /// Handle a HELLO RPC request message. fn handle_hello_request(&mut self, peer_id: PeerId, id: u64, hello_message: HelloMessage) { // send back a HELLO message self.send_hello(peer_id.clone(), id, false); @@ -153,12 +158,15 @@ impl MessageHandler { } } + /// Handle a HELLO RPC response message. fn handle_hello_response(&mut self, peer_id: PeerId, id: u64, response: HelloMessage) { debug!(self.log, "Hello response received from peer: {:?}", peer_id); // validate peer - decide whether to drop/ban or add to sync // TODO: Peer validation } + /* General RPC helper functions */ + /// Generates a new request id for a peer. 
fn generate_request_id(&mut self, peer_id: &PeerId) -> u64 { // generate a unique id for the peer From b30d72501cfb57f4937b72ff4a8c1826a3e27a79 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 19 Mar 2019 21:55:57 +1100 Subject: [PATCH 135/154] Add logger to sync module --- beacon_node/network/src/message_handler.rs | 2 +- beacon_node/network/src/sync/simple_sync.rs | 9 ++++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 3b95c1263..bf0df1aa6 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -65,7 +65,7 @@ impl MessageHandler { // Initialise sync and begin processing in thread // generate the Message handler - let sync = SimpleSync::new(beacon_chain.clone()); + let sync = SimpleSync::new(beacon_chain.clone(), &log); let mut handler = MessageHandler { // TODO: The handler may not need a chain, perhaps only sync? diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 2a3cc7089..50f6f4a50 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -1,6 +1,7 @@ use crate::beacon_chain::BeaconChain; use libp2p::rpc::HelloMessage; use libp2p::PeerId; +use slog::{debug, o}; use std::collections::HashMap; use std::sync::Arc; use types::{Epoch, Hash256, Slot}; @@ -36,11 +37,14 @@ pub struct SimpleSync { latest_finalized_epoch: Epoch, /// The latest block of the syncing chain. latest_block: Hash256, + /// Sync logger. + log: slog::Logger, } impl SimpleSync { - pub fn new(beacon_chain: Arc) -> Self { + pub fn new(beacon_chain: Arc, log: &slog::Logger) -> Self { let state = beacon_chain.get_state(); + let sync_logger = log.new(o!("Service"=> "Sync")); SimpleSync { chain: beacon_chain.clone(), known_peers: HashMap::new(), @@ -48,6 +52,7 @@ impl SimpleSync { network_id: beacon_chain.get_spec().network_id, latest_finalized_epoch: state.finalized_epoch, latest_block: state.finalized_root, //TODO: Build latest block function into Beacon chain and correct this + log: sync_logger, } } @@ -87,7 +92,9 @@ impl SimpleSync { best_slot: hello_message.best_slot, }; + debug!(self.log, "Handshake successful. Peer: {:?}", peer_id); self.known_peers.insert(peer_id, peer_info); + //TODO: Start syncing true From 0a8b0069dc3ef7efad6dd542fd1b706305a45087 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 19 Mar 2019 22:18:01 +1100 Subject: [PATCH 136/154] Add peer validation and successful handshake --- beacon_node/network/src/message_handler.rs | 28 +++++++++++++++------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index bf0df1aa6..1b9dc3369 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -139,9 +139,20 @@ impl MessageHandler { // we match on id and ignore responses past the timeout. 
fn handle_rpc_response(&mut self, peer_id: PeerId, id: u64, response: RPCResponse) { // if response id is related to a request, ignore (likely RPC timeout) - if self.requests.remove(&(peer_id, id)).is_none() { + if self + .requests + .remove(&(peer_id.clone(), id.clone())) + .is_none() + { + debug!(self.log, "Unrecognized response from peer: {:?}", peer_id); return; } + match response { + RPCResponse::Hello(hello_message) => { + debug!(self.log, "Hello response received from peer: {:?}", peer_id); + self.validate_hello(peer_id, hello_message); + } + } } /// Handle a HELLO RPC request message. @@ -149,7 +160,13 @@ impl MessageHandler { // send back a HELLO message self.send_hello(peer_id.clone(), id, false); // validate the peer - if !self.sync.validate_peer(peer_id.clone(), hello_message) { + self.validate_hello(peer_id, hello_message); + } + + /// Validate a HELLO RPC message. + fn validate_hello(&mut self, peer_id: PeerId, message: HelloMessage) { + // validate the peer + if !self.sync.validate_peer(peer_id.clone(), message) { debug!( self.log, "Peer dropped due to mismatching HELLO messages: {:?}", peer_id @@ -158,13 +175,6 @@ impl MessageHandler { } } - /// Handle a HELLO RPC response message. - fn handle_hello_response(&mut self, peer_id: PeerId, id: u64, response: HelloMessage) { - debug!(self.log, "Hello response received from peer: {:?}", peer_id); - // validate peer - decide whether to drop/ban or add to sync - // TODO: Peer validation - } - /* General RPC helper functions */ /// Generates a new request id for a peer. From dc014d07bc03953b5426bf3eb94809d1d4f0a1b4 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 19 Mar 2019 22:32:56 +1100 Subject: [PATCH 137/154] Enable syncing state when new peer connects --- beacon_node/network/src/service.rs | 5 +++-- beacon_node/network/src/sync/simple_sync.rs | 16 +++++++++++++--- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 84e46e707..a62408c0a 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -110,9 +110,10 @@ fn network_service( loop { match libp2p_service.poll() { Ok(Async::Ready(Some(Libp2pEvent::RPC(peer_id, rpc_event)))) => { - debug!( + trace!( libp2p_service.log, - "RPC Event: RPC message received: {:?}", rpc_event + "RPC Event: RPC message received: {:?}", + rpc_event ); message_handler_send .send(HandlerMessage::RPC(peer_id, rpc_event)) diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 50f6f4a50..a3cd9044c 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -6,6 +6,9 @@ use std::collections::HashMap; use std::sync::Arc; use types::{Epoch, Hash256, Slot}; +/// The number of slots that we can import blocks ahead of us, before going into full Sync mode. +const SLOT_IMPORT_TOLERANCE: u64 = 100; + /// Keeps track of syncing information for known connected peers. pub struct PeerSyncInfo { latest_finalized_root: Hash256, @@ -15,6 +18,7 @@ pub struct PeerSyncInfo { } /// The current syncing state. +#[derive(PartialEq)] pub enum SyncState { Idle, Downloading, @@ -36,7 +40,7 @@ pub struct SimpleSync { /// The latest epoch of the syncing chain. latest_finalized_epoch: Epoch, /// The latest block of the syncing chain. - latest_block: Hash256, + latest_slot: Slot, /// Sync logger. 
log: slog::Logger, } @@ -51,7 +55,7 @@ impl SimpleSync { state: SyncState::Idle, network_id: beacon_chain.get_spec().network_id, latest_finalized_epoch: state.finalized_epoch, - latest_block: state.finalized_root, //TODO: Build latest block function into Beacon chain and correct this + latest_slot: state.slot - 1, //TODO: Build latest block function into Beacon chain and correct this log: sync_logger, } } @@ -95,7 +99,13 @@ impl SimpleSync { debug!(self.log, "Handshake successful. Peer: {:?}", peer_id); self.known_peers.insert(peer_id, peer_info); - //TODO: Start syncing + // set state to sync + if self.state == SyncState::Idle + && hello_message.best_slot > self.latest_slot + SLOT_IMPORT_TOLERANCE + { + self.state = SyncState::Downloading; + //TODO: Start requesting blocks from known peers. Ideally in batches + } true } From e7f87112fb27b84f4850b4586686a9d492d73a7a Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 19 Mar 2019 22:53:51 +1100 Subject: [PATCH 138/154] Tidy networking crates --- beacon_node/client/src/client_types.rs | 2 +- beacon_node/client/src/lib.rs | 4 ++-- beacon_node/client/src/notifier.rs | 7 ++----- beacon_node/network/src/lib.rs | 2 -- beacon_node/network/src/message_handler.rs | 5 ----- beacon_node/network/src/messages.rs | 15 --------------- beacon_node/network/src/service.rs | 18 +++++++++--------- beacon_node/src/main.rs | 7 +++++-- beacon_node/src/run.rs | 11 +++++++---- 9 files changed, 26 insertions(+), 45 deletions(-) delete mode 100644 beacon_node/network/src/messages.rs diff --git a/beacon_node/client/src/client_types.rs b/beacon_node/client/src/client_types.rs index de0678fe7..f5abc77ce 100644 --- a/beacon_node/client/src/client_types.rs +++ b/beacon_node/client/src/client_types.rs @@ -3,7 +3,7 @@ use beacon_chain::{ db::{ClientDB, DiskDB, MemoryDB}, fork_choice::BitwiseLMDGhost, initialise, - slot_clock::{SlotClock, SystemTimeSlotClock, TestingSlotClock}, + slot_clock::{SlotClock, SystemTimeSlotClock}, BeaconChain, }; use fork_choice::ForkChoice; diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index f3178eaa6..327e433af 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -11,7 +11,7 @@ pub use client_types::ClientTypes; //use beacon_chain::BeaconChain; use beacon_chain::BeaconChain; -use exit_future::{Exit, Signal}; +use exit_future::Signal; use network::Service as NetworkService; use slog::o; use std::marker::PhantomData; @@ -55,7 +55,7 @@ impl Client { // TODO: Add beacon_chain reference to network parameters let network_config = &config.net_conf; let network_logger = log.new(o!("Service" => "Network")); - let (network, network_send) = NetworkService::new( + let (network, _network_send) = NetworkService::new( beacon_chain.clone(), network_config, executor, diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 6b52e670a..335183c7d 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -1,12 +1,8 @@ use crate::Client; use crate::ClientTypes; -use db::ClientDB; use exit_future::Exit; -use fork_choice::ForkChoice; use futures::{Future, Stream}; -use network::NodeMessage; use slog::{debug, info, o}; -use slot_clock::SlotClock; use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; use tokio::runtime::TaskExecutor; @@ -27,12 +23,13 @@ pub fn run(client: &Client, executor: TaskExecutor, exit: Exi // build heartbeat logic here let heartbeat = move |_| { info!(log, "Temp heartbeat output"); + //TODO: Remove this logic. 
Testing only let mut count = counter.lock().unwrap(); *count += 1; if *count % 5 == 0 { debug!(log, "Sending Message"); - network.send_message(String::from("Testing network channel")) + network.send_message(); } Ok(()) diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index c1840f592..1e47b9a73 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -2,10 +2,8 @@ pub mod beacon_chain; pub mod error; mod message_handler; -mod messages; mod service; pub mod sync; pub use libp2p::NetworkConfig; -pub use messages::NodeMessage; pub use service::Service; diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 1b9dc3369..bcea28ff8 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -1,11 +1,9 @@ use crate::beacon_chain::BeaconChain; use crate::error; -use crate::messages::NodeMessage; use crate::service::{NetworkMessage, OutgoingMessage}; use crate::sync::SimpleSync; use crossbeam_channel::{unbounded as channel, Sender}; use futures::future; -use futures::prelude::*; use libp2p::{ rpc::{RPCMethod, RPCRequest, RPCResponse}, HelloMessage, PeerId, RPCEvent, @@ -15,7 +13,6 @@ use slog::{debug, trace}; use std::collections::HashMap; use std::sync::Arc; use std::time::{Duration, Instant}; -use types::Hash256; /// Timeout for RPC requests. const REQUEST_TIMEOUT: Duration = Duration::from_secs(30); @@ -45,8 +42,6 @@ pub enum HandlerMessage { PeerDialed(PeerId), /// Peer has disconnected, PeerDisconnected(PeerId), - /// A Node message has been received. - Message(PeerId, NodeMessage), /// An RPC response/request has been received. RPC(PeerId, RPCEvent), } diff --git a/beacon_node/network/src/messages.rs b/beacon_node/network/src/messages.rs deleted file mode 100644 index 6a69cbb87..000000000 --- a/beacon_node/network/src/messages.rs +++ /dev/null @@ -1,15 +0,0 @@ -use libp2p::PeerId; -use libp2p::{HelloMessage, RPCEvent}; -use types::{Hash256, Slot}; - -//TODO: This module can be entirely replaced in the RPC rewrite - -/// Messages between nodes across the network. 
-//TODO: Remove this in the RPC rewrite -#[derive(Debug, Clone)] -pub enum NodeMessage { - RPC(RPCEvent), - BlockRequest, - // TODO: only for testing - remove - Message(String), -} diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index a62408c0a..4e79a92fe 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -1,7 +1,6 @@ use crate::beacon_chain::BeaconChain; use crate::error; use crate::message_handler::{HandlerMessage, MessageHandler}; -use crate::messages::NodeMessage; use crate::NetworkConfig; use crossbeam_channel::{unbounded as channel, Sender, TryRecvError}; use futures::prelude::*; @@ -53,21 +52,22 @@ impl Service { executor, log, )?; - let network = Service { + let network_service = Service { libp2p_exit, network_send: network_send.clone(), }; - Ok((Arc::new(network), network_send)) + Ok((Arc::new(network_service), network_send)) } // TODO: Testing only - pub fn send_message(&self, message: String) { - let node_message = NodeMessage::Message(message); - self.network_send.send(NetworkMessage::Send( - PeerId::random(), - OutgoingMessage::NotifierTest, - )); + pub fn send_message(&self) { + self.network_send + .send(NetworkMessage::Send( + PeerId::random(), + OutgoingMessage::NotifierTest, + )) + .unwrap(); } } diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 09cac99b4..9be6136c5 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -4,7 +4,7 @@ mod run; use clap::{App, Arg}; use client::ClientConfig; -use slog::{o, Drain}; +use slog::{error, o, Drain}; fn main() { let decorator = slog_term::TermDecorator::new().build(); @@ -42,5 +42,8 @@ fn main() { // invalid arguments, panic let config = ClientConfig::parse_args(matches, &logger).unwrap(); - run::run_beacon_node(config, logger); + match run::run_beacon_node(config, &logger) { + Ok(_) => {} + Err(e) => error!(logger, "Beacon node failed because {:?}", e), + } } diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index b7cbf5421..b3b284452 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -1,4 +1,4 @@ -use client::client_types::{StandardClientType, TestingClientType}; +use client::client_types::TestingClientType; use client::error; use client::{notifier, Client, ClientConfig}; use futures::sync::oneshot; @@ -7,7 +7,7 @@ use slog::info; use std::cell::RefCell; use tokio::runtime::Builder; -pub fn run_beacon_node(config: ClientConfig, log: slog::Logger) -> error::Result<()> { +pub fn run_beacon_node(config: ClientConfig, log: &slog::Logger) -> error::Result<()> { let mut runtime = Builder::new() .name_prefix("main-") .build() @@ -25,7 +25,8 @@ pub fn run_beacon_node(config: ClientConfig, log: slog::Logger) -> error::Result if let Some(ctrlc_send) = ctrlc_send_c.try_borrow_mut().unwrap().take() { ctrlc_send.send(()).expect("Error sending ctrl-c message"); } - }); + }) + .map_err(|e| format!("Could not set ctrlc hander: {:?}", e))?; let (exit_signal, exit) = exit_future::signal(); @@ -35,7 +36,9 @@ pub fn run_beacon_node(config: ClientConfig, log: slog::Logger) -> error::Result let client: Client = Client::new(config, log.clone(), &executor)?; notifier::run(&client, executor, exit); - runtime.block_on(ctrlc); + runtime + .block_on(ctrlc) + .map_err(|e| format!("Ctrlc oneshot failed: {:?}", e))?; // perform global shutdown operations. 
info!(log, "Shutting down.."); From 4b57d32b60b6a3e3e4648e54bf8f57697c4dd737 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 19 Mar 2019 23:20:39 +1100 Subject: [PATCH 139/154] Apply clippy suggestions --- beacon_node/client/src/lib.rs | 4 +-- beacon_node/libp2p/src/rpc/mod.rs | 4 +-- beacon_node/libp2p/src/rpc/protocol.rs | 8 +++--- beacon_node/libp2p/src/service.rs | 13 ++++----- beacon_node/network/src/message_handler.rs | 32 +++++++-------------- beacon_node/network/src/service.rs | 2 +- beacon_node/network/src/sync/simple_sync.rs | 2 +- beacon_node/version/src/lib.rs | 2 +- eth2/fork_choice/src/slow_lmd_ghost.rs | 6 ++-- 9 files changed, 30 insertions(+), 43 deletions(-) diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 327e433af..beba6f4de 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -66,9 +66,9 @@ impl Client { config, beacon_chain, exit, - exit_signal: exit_signal, + exit_signal, log, - network: network, + network, phantom: PhantomData, }) } diff --git a/beacon_node/libp2p/src/rpc/mod.rs b/beacon_node/libp2p/src/rpc/mod.rs index e06f4effc..a1cfadafe 100644 --- a/beacon_node/libp2p/src/rpc/mod.rs +++ b/beacon_node/libp2p/src/rpc/mod.rs @@ -13,7 +13,7 @@ use libp2p::core::swarm::{ use libp2p::{Multiaddr, PeerId}; pub use methods::{HelloMessage, RPCMethod, RPCRequest, RPCResponse}; pub use protocol::{RPCEvent, RPCProtocol}; -use slog::{debug, o}; +use slog::o; use std::marker::PhantomData; use tokio::io::{AsyncRead, AsyncWrite}; @@ -65,7 +65,7 @@ where fn inject_connected(&mut self, peer_id: PeerId, connected_point: ConnectedPoint) { // if initialised the connection, report this upwards to send the HELLO request - if let ConnectedPoint::Dialer { address } = connected_point { + if let ConnectedPoint::Dialer { address: _ } = connected_point { self.events.push(NetworkBehaviourAction::GenerateEvent( RPCMessage::PeerDialed(peer_id), )); diff --git a/beacon_node/libp2p/src/rpc/protocol.rs b/beacon_node/libp2p/src/rpc/protocol.rs index dce714429..6cebb7fd2 100644 --- a/beacon_node/libp2p/src/rpc/protocol.rs +++ b/beacon_node/libp2p/src/rpc/protocol.rs @@ -84,11 +84,11 @@ fn decode(packet: Vec) -> Result { RPCMethod::Unknown => return Err(DecodeError::UnknownRPCMethod), }; - return Ok(RPCEvent::Request { + Ok(RPCEvent::Request { id, method_id, body, - }); + }) } // we have received a response else { @@ -99,11 +99,11 @@ fn decode(packet: Vec) -> Result { } RPCMethod::Unknown => return Err(DecodeError::UnknownRPCMethod), }; - return Ok(RPCEvent::Response { + Ok(RPCEvent::Response { id, method_id, result, - }); + }) } } diff --git a/beacon_node/libp2p/src/service.rs b/beacon_node/libp2p/src/service.rs index 92e6e8897..e378cd634 100644 --- a/beacon_node/libp2p/src/service.rs +++ b/beacon_node/libp2p/src/service.rs @@ -73,13 +73,12 @@ impl Service { let mut subscribed_topics = vec![]; for topic in config.topics { let t = TopicBuilder::new(topic.to_string()).build(); - match swarm.subscribe(t) { - true => { - trace!(log, "Subscribed to topic: {:?}", topic); - subscribed_topics.push(topic); - } - false => warn!(log, "Could not subscribe to topic: {:?}", topic), - }; + if swarm.subscribe(t) { + trace!(log, "Subscribed to topic: {:?}", topic); + subscribed_topics.push(topic); + } else { + warn!(log, "Could not subscribe to topic: {:?}", topic) + } } info!(log, "Subscribed to topics: {:?}", subscribed_topics); diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 
bcea28ff8..2a3f38bc1 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -48,7 +48,7 @@ pub enum HandlerMessage { impl MessageHandler { /// Initializes and runs the MessageHandler. - pub fn new( + pub fn spawn( beacon_chain: Arc, network_send: crossbeam_channel::Sender, executor: &tokio::runtime::TaskExecutor, @@ -108,16 +108,9 @@ impl MessageHandler { /// Handle RPC messages fn handle_rpc_message(&mut self, peer_id: PeerId, rpc_message: RPCEvent) { match rpc_message { - RPCEvent::Request { - id, - method_id: _, // TODO: Clean up RPC Message types, have a cleaner type by this point. - body, + RPCEvent::Request { id, body, .. // TODO: Clean up RPC Message types, have a cleaner type by this point. } => self.handle_rpc_request(peer_id, id, body), - RPCEvent::Response { - id, - method_id: _, - result, - } => self.handle_rpc_response(peer_id, id, result), + RPCEvent::Response { id, result, .. } => self.handle_rpc_response(peer_id, id, result), } } @@ -134,11 +127,7 @@ impl MessageHandler { // we match on id and ignore responses past the timeout. fn handle_rpc_response(&mut self, peer_id: PeerId, id: u64, response: RPCResponse) { // if response id is related to a request, ignore (likely RPC timeout) - if self - .requests - .remove(&(peer_id.clone(), id.clone())) - .is_none() - { + if self.requests.remove(&(peer_id.clone(), id)).is_none() { debug!(self.log, "Unrecognized response from peer: {:?}", peer_id); return; } @@ -193,18 +182,19 @@ impl MessageHandler { /// Sends a HELLO RPC request or response to a newly connected peer. //TODO: The boolean determines if sending request/respond, will be cleaner in the RPC re-write - fn send_hello(&mut self, peer_id: PeerId, id: u64, request: bool) { - let rpc_event = match request { - true => RPCEvent::Request { + fn send_hello(&mut self, peer_id: PeerId, id: u64, is_request: bool) { + let rpc_event = if is_request { + RPCEvent::Request { id, method_id: RPCMethod::Hello.into(), body: RPCRequest::Hello(self.sync.generate_hello()), - }, - false => RPCEvent::Response { + } + } else { + RPCEvent::Response { id, method_id: RPCMethod::Hello.into(), result: RPCResponse::Hello(self.sync.generate_hello()), - }, + } }; // send the hello request to the network diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 4e79a92fe..c3045d280 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -33,7 +33,7 @@ impl Service { let (network_send, network_recv) = channel::(); // launch message handler thread let message_handler_log = log.new(o!("Service" => "MessageHandler")); - let message_handler_send = MessageHandler::new( + let message_handler_send = MessageHandler::spawn( beacon_chain, network_send.clone(), executor, diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index a3cd9044c..95c7092c3 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -66,7 +66,7 @@ impl SimpleSync { //TODO: Paul to verify the logic of these fields. 
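    // A sketch of what the placeholder fields below are standing in for (an assumption,
    // in line with the TODOs above and below, not part of this patch): once the beacon
    // chain exposes a head accessor, these could become something like
    //     best_root: self.chain.head().beacon_block_root,
    //     best_slot: self.chain.head().beacon_block.slot,
    // rather than reading `latest_block_roots[0]` and `state.slot - 1` straight off the state.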
HelloMessage { network_id: self.network_id, - latest_finalized_root: state.finalized_root.clone(), + latest_finalized_root: state.finalized_root, latest_finalized_epoch: state.finalized_epoch, best_root: state.latest_block_roots[0], //TODO: build correct value as a beacon chain function best_slot: state.slot - 1, diff --git a/beacon_node/version/src/lib.rs b/beacon_node/version/src/lib.rs index 628186aa0..3dcd57bef 100644 --- a/beacon_node/version/src/lib.rs +++ b/beacon_node/version/src/lib.rs @@ -6,7 +6,7 @@ extern crate target_info; use target_info::Target; -const TRACK: &'static str = "unstable"; +const TRACK: &str = "unstable"; /// Provides the current platform pub fn platform() -> String { diff --git a/eth2/fork_choice/src/slow_lmd_ghost.rs b/eth2/fork_choice/src/slow_lmd_ghost.rs index af58aa7b8..0788ac171 100644 --- a/eth2/fork_choice/src/slow_lmd_ghost.rs +++ b/eth2/fork_choice/src/slow_lmd_ghost.rs @@ -219,10 +219,8 @@ impl ForkChoice for SlowLMDGhost { head_vote_count = vote_count; } // resolve ties - choose smaller hash - else if vote_count == head_vote_count { - if *child_hash < head_hash { - head_hash = *child_hash; - } + else if vote_count == head_vote_count && *child_hash < head_hash { + head_hash = *child_hash; } } } From d2f12b7c1805c52b5e3850174638efc603a568b9 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 19 Mar 2019 23:47:58 +1100 Subject: [PATCH 140/154] Add standard RPC service --- beacon_node/client/Cargo.toml | 1 + beacon_node/client/src/client_config.rs | 3 ++- beacon_node/client/src/lib.rs | 7 ++++--- beacon_node/rpc/src/lib.rs | 12 +++++++----- 4 files changed, 14 insertions(+), 9 deletions(-) diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 11453e4b8..12c1b5c80 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -8,6 +8,7 @@ edition = "2018" beacon_chain = { path = "../beacon_chain" } network = { path = "../network" } db = { path = "../db" } +rpc = { path = "../rpc" } fork_choice = { path = "../../eth2/fork_choice" } types = { path = "../../eth2/types" } slot_clock = { path = "../../eth2/utils/slot_clock" } diff --git a/beacon_node/client/src/client_config.rs b/beacon_node/client/src/client_config.rs index 4fe390cb1..570bd30e4 100644 --- a/beacon_node/client/src/client_config.rs +++ b/beacon_node/client/src/client_config.rs @@ -20,7 +20,7 @@ pub struct ClientConfig { pub fork_choice: ForkChoiceAlgorithm, pub db_type: DBType, pub db_name: PathBuf, - //pub rpc_conf: + pub rpc_conf: rpc::RPCConfig, //pub ipc_conf: } @@ -48,6 +48,7 @@ impl Default for ClientConfig { db_type: DBType::Memory, // default db name for disk-based dbs db_name: data_dir.join("chain.db"), + rpc_conf: rpc::RPCConfig::default(), } } } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index beba6f4de..e6d08ac54 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -6,11 +6,9 @@ pub mod client_types; pub mod error; pub mod notifier; +use beacon_chain::BeaconChain; pub use client_config::ClientConfig; pub use client_types::ClientTypes; - -//use beacon_chain::BeaconChain; -use beacon_chain::BeaconChain; use exit_future::Signal; use network::Service as NetworkService; use slog::o; @@ -62,6 +60,9 @@ impl Client { network_logger, )?; + // spawn the RPC server + rpc::start_server(&config.rpc_conf, &log); + Ok(Client { config, beacon_chain, diff --git a/beacon_node/rpc/src/lib.rs b/beacon_node/rpc/src/lib.rs index 6a18a4aa8..7f776d7d8 100644 --- a/beacon_node/rpc/src/lib.rs +++ 
b/beacon_node/rpc/src/lib.rs @@ -1,16 +1,18 @@ mod beacon_block; +pub mod config; mod validator; use self::beacon_block::BeaconBlockServiceInstance; use self::validator::ValidatorServiceInstance; +pub use config::Config as RPCConfig; use grpcio::{Environment, Server, ServerBuilder}; use protos::services_grpc::{create_beacon_block_service, create_validator_service}; use std::sync::Arc; -use slog::{info, Logger}; +use slog::{info, o}; -pub fn start_server(log: Logger) -> Server { - let log_clone = log.clone(); +pub fn start_server(config: &RPCConfig, log: &slog::Logger) -> Server { + let log = log.new(o!("Service"=>"RPC")); let env = Arc::new(Environment::new(1)); let beacon_block_service = { @@ -25,12 +27,12 @@ pub fn start_server(log: Logger) -> Server { let mut server = ServerBuilder::new(env) .register_service(beacon_block_service) .register_service(validator_service) - .bind("127.0.0.1", 50_051) + .bind(config.listen_address.to_string(), config.port) .build() .unwrap(); server.start(); for &(ref host, port) in server.bind_addrs() { - info!(log_clone, "gRPC listening on {}:{}", host, port); + info!(log, "gRPC listening on {}:{}", host, port); } server } From 037c3b830766d7bcb165b9311fd56829e3bf8048 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 20 Mar 2019 00:01:00 +1100 Subject: [PATCH 141/154] Update config and cli for rpc --- beacon_node/client/src/client_config.rs | 26 ++++++++++++++++++++++--- beacon_node/rpc/src/config.rs | 22 +++++++++++++++++++++ beacon_node/src/main.rs | 21 ++++++++++++++++++++ 3 files changed, 66 insertions(+), 3 deletions(-) create mode 100644 beacon_node/rpc/src/config.rs diff --git a/beacon_node/client/src/client_config.rs b/beacon_node/client/src/client_config.rs index 570bd30e4..4a4774282 100644 --- a/beacon_node/client/src/client_config.rs +++ b/beacon_node/client/src/client_config.rs @@ -4,8 +4,8 @@ use fork_choice::ForkChoiceAlgorithm; use network::NetworkConfig; use slog::error; use std::fs; -use std::net::IpAddr; use std::net::SocketAddr; +use std::net::{IpAddr, Ipv4Addr}; use std::path::PathBuf; use types::multiaddr::Protocol; use types::multiaddr::ToMultiaddr; @@ -58,7 +58,7 @@ impl ClientConfig { pub fn parse_args(args: ArgMatches, log: &slog::Logger) -> Result { let mut config = ClientConfig::default(); - // Network related args + /* Network related arguments */ // Custom p2p listen port if let Some(port_str) = args.value_of("port") { @@ -88,13 +88,33 @@ impl ClientConfig { } } - // filesystem args + /* Filesystem related arguments */ // Custom datadir if let Some(dir) = args.value_of("datadir") { config.data_dir = PathBuf::from(dir.to_string()); }; + /* RPC related arguments */ + + if let Some(rpc_address) = args.value_of("rpc-address") { + if let Ok(listen_address) = rpc_address.parse::() { + config.rpc_conf.listen_address = listen_address; + } else { + error!(log, "Invalid RPC listen address"; "Address" => rpc_address); + return Err("Invalid RPC listen address"); + } + } + + if let Some(rpc_port) = args.value_of("rpc-port") { + if let Ok(port) = rpc_port.parse::() { + config.rpc_conf.port = port; + } else { + error!(log, "Invalid RPC port"; "port" => rpc_port); + return Err("Invalid RPC port"); + } + } + Ok(config) } } diff --git a/beacon_node/rpc/src/config.rs b/beacon_node/rpc/src/config.rs new file mode 100644 index 000000000..e21c2f7a8 --- /dev/null +++ b/beacon_node/rpc/src/config.rs @@ -0,0 +1,22 @@ +use std::net::Ipv4Addr; + +/// RPC Configuration +#[derive(Debug, Clone)] +pub struct Config { + /// Enable the RPC server. 
+ pub enabled: bool, + /// The IPv4 address the RPC will listen on. + pub listen_address: Ipv4Addr, + /// The port the RPC will listen on. + pub port: u16, +} + +impl Default for Config { + fn default() -> Self { + Config { + enabled: false, // rpc disabled by default + listen_address: Ipv4Addr::new(127, 0, 0, 1), + port: 5051, + } + } +} diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 9be6136c5..130353d77 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -37,6 +37,27 @@ fn main() { .help("Network listen port for p2p connections.") .takes_value(true), ) + .arg( + Arg::with_name("rpc") + .long("Enable RPC") + .value_name("RPC") + .help("Enable the RPC server.") + .takes_value(false), + ) + .arg( + Arg::with_name("rpc-address") + .long("rpc address") + .value_name("RPCADDRESS") + .help("Listen address for RPC endpoint.") + .takes_value(true), + ) + .arg( + Arg::with_name("rpc-port") + .long("rpc port") + .value_name("RPCPORT") + .help("Listen port for RPC endpoint.") + .takes_value(true), + ) .get_matches(); // invalid arguments, panic From 4be2eeb7929ed48b31fc49e507f2c68cba796095 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 20 Mar 2019 09:58:31 +1100 Subject: [PATCH 142/154] Correct cli rpc parameters --- beacon_node/client/src/client_config.rs | 4 ++++ beacon_node/client/src/lib.rs | 5 +++-- beacon_node/src/main.rs | 8 ++++---- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/beacon_node/client/src/client_config.rs b/beacon_node/client/src/client_config.rs index 4a4774282..cad287f2c 100644 --- a/beacon_node/client/src/client_config.rs +++ b/beacon_node/client/src/client_config.rs @@ -97,6 +97,10 @@ impl ClientConfig { /* RPC related arguments */ + if args.is_present("rpc") { + config.rpc_conf.enabled = true; + } + if let Some(rpc_address) = args.value_of("rpc-address") { if let Ok(listen_address) = rpc_address.parse::() { config.rpc_conf.listen_address = listen_address; diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index e6d08ac54..914e47fcf 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -1,7 +1,6 @@ extern crate slog; mod client_config; - pub mod client_types; pub mod error; pub mod notifier; @@ -61,7 +60,9 @@ impl Client { )?; // spawn the RPC server - rpc::start_server(&config.rpc_conf, &log); + if config.rpc_conf.enabled { + rpc::start_server(&config.rpc_conf, &log); + } Ok(Client { config, diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 130353d77..ea74c7376 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -25,7 +25,7 @@ fn main() { ) .arg( Arg::with_name("listen_address") - .long("listen_address") + .long("listen-address") .value_name("Listen Address") .help("The Network address to listen for p2p connections.") .takes_value(true), @@ -39,21 +39,21 @@ fn main() { ) .arg( Arg::with_name("rpc") - .long("Enable RPC") + .long("rpc") .value_name("RPC") .help("Enable the RPC server.") .takes_value(false), ) .arg( Arg::with_name("rpc-address") - .long("rpc address") + .long("rpc-address") .value_name("RPCADDRESS") .help("Listen address for RPC endpoint.") .takes_value(true), ) .arg( Arg::with_name("rpc-port") - .long("rpc port") + .long("rpc-port") .value_name("RPCPORT") .help("Listen port for RPC endpoint.") .takes_value(true), From d229bc9ccb890836b428f86bba1b91c203cade70 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 20 Mar 2019 10:02:26 +1100 Subject: [PATCH 143/154] Stub possible fields in HandlerMessage --- 
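Note on the stubbed `BlockImported()` variant added below: its payload is still to be
decided (see the in-code TODO), so any handling is speculative. A minimal sketch of how
`handle_message` might consume it once pub-sub delivers blocks — the payload-free match
arm and the log line are assumptions, not part of this diff:

    fn handle_message(&mut self, message: HandlerMessage) {
        match message {
            // ... existing arms: PeerDialed, PeerDisconnected, RPC ...
            // Hypothetical: a block arrived over gossip and was imported locally.
            HandlerMessage::BlockImported() => {
                debug!(self.log, "Block imported; sync state may need updating");
            }
            _ => {}
        }
    }
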
beacon_node/network/src/message_handler.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 2a3f38bc1..a7d6e3d07 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -44,6 +44,8 @@ pub enum HandlerMessage { PeerDisconnected(PeerId), /// An RPC response/request has been received. RPC(PeerId, RPCEvent), + /// A block has been imported. + BlockImported(), //TODO: This comes from pub-sub - decide its contents } impl MessageHandler { From 0e8b17477070678dd1f770039faab1232cca87d6 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 20 Mar 2019 10:05:17 +1100 Subject: [PATCH 144/154] Implement Goodbye libp2p rpc request --- beacon_node/libp2p/src/rpc/methods.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/beacon_node/libp2p/src/rpc/methods.rs b/beacon_node/libp2p/src/rpc/methods.rs index c99994b7c..4ba19befd 100644 --- a/beacon_node/libp2p/src/rpc/methods.rs +++ b/beacon_node/libp2p/src/rpc/methods.rs @@ -5,6 +5,7 @@ use types::{Epoch, Hash256, Slot}; #[derive(Debug)] pub enum RPCMethod { Hello, + Goodbye, Unknown, } @@ -12,6 +13,7 @@ impl From for RPCMethod { fn from(method_id: u16) -> Self { match method_id { 0 => RPCMethod::Hello, + 1 => RPCMethod::Goodbye, _ => RPCMethod::Unknown, } } @@ -21,6 +23,7 @@ impl Into for RPCMethod { fn into(self) -> u16 { match self { RPCMethod::Hello => 0, + RPCMethod::Goodbye => 1, _ => 0, } } @@ -29,6 +32,7 @@ impl Into for RPCMethod { #[derive(Debug, Clone)] pub enum RPCRequest { Hello(HelloMessage), + Goodbye(u64), } #[derive(Debug, Clone)] From 8acfb260d158ac24bfed0b3251618ee56ecc1328 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 20 Mar 2019 10:18:04 +1100 Subject: [PATCH 145/154] Implement RequestBeaconBlockRoots RPC method --- beacon_node/libp2p/src/rpc/methods.rs | 39 ++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/beacon_node/libp2p/src/rpc/methods.rs b/beacon_node/libp2p/src/rpc/methods.rs index 4ba19befd..ddfbf80b5 100644 --- a/beacon_node/libp2p/src/rpc/methods.rs +++ b/beacon_node/libp2p/src/rpc/methods.rs @@ -6,6 +6,7 @@ use types::{Epoch, Hash256, Slot}; pub enum RPCMethod { Hello, Goodbye, + RequestBeaconBlockRoots, Unknown, } @@ -14,6 +15,7 @@ impl From for RPCMethod { match method_id { 0 => RPCMethod::Hello, 1 => RPCMethod::Goodbye, + 10 => RPCMethod::RequestBeaconBlockRoots, _ => RPCMethod::Unknown, } } @@ -24,6 +26,7 @@ impl Into for RPCMethod { match self { RPCMethod::Hello => 0, RPCMethod::Goodbye => 1, + RPCMethod::RequestBeaconBlockRoots => 10, _ => 0, } } @@ -33,19 +36,53 @@ impl Into for RPCMethod { pub enum RPCRequest { Hello(HelloMessage), Goodbye(u64), + RequestBeaconBlockRoots(BeaconBlockRootsRequest), } #[derive(Debug, Clone)] pub enum RPCResponse { Hello(HelloMessage), + RequestBeaconBlockRoots(BeaconBlockRootsResponse), } -// request/response structs for RPC methods +/* Request/Response data structures for RPC methods */ + +/// The HELLO request/response handshake message. #[derive(Encode, Decode, Clone, Debug)] pub struct HelloMessage { + /// The network ID of the peer. pub network_id: u8, + /// The peers last finalized root. pub latest_finalized_root: Hash256, + /// The peers last finalized epoch. pub latest_finalized_epoch: Epoch, + /// The peers last block root. pub best_root: Hash256, + /// The peers last slot. pub best_slot: Slot, } + +/// Request a number of beacon block roots from a peer. 
+#[derive(Encode, Decode, Clone, Debug)] +pub struct BeaconBlockRootsRequest { + /// The starting slot of the requested blocks. + start_slot: Slot, + /// The number of blocks from the start slot. + count: u64, // this must be less than 32768. //TODO: Enforce this in the lower layers +} + +/// Response a number of beacon block roots from a peer. +#[derive(Encode, Decode, Clone, Debug)] +pub struct BeaconBlockRootsResponse { + /// List of requested blocks and associated slots. + roots: Vec, +} + +/// Contains a block root and associated slot. +#[derive(Encode, Decode, Clone, Debug)] +pub struct BlockRootSlot { + /// The block root. + block_root: Hash256, + /// The block slot. + slot: Slot, +} From 450b2cfb81912c833488aab3cbd0fe8b22b98df1 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 20 Mar 2019 10:19:45 +1100 Subject: [PATCH 146/154] Rename RequestBeaconBlockRoots to BeaconBlockRoots for consistency --- beacon_node/libp2p/src/rpc/methods.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/beacon_node/libp2p/src/rpc/methods.rs b/beacon_node/libp2p/src/rpc/methods.rs index ddfbf80b5..1a35763ff 100644 --- a/beacon_node/libp2p/src/rpc/methods.rs +++ b/beacon_node/libp2p/src/rpc/methods.rs @@ -6,7 +6,7 @@ use types::{Epoch, Hash256, Slot}; pub enum RPCMethod { Hello, Goodbye, - RequestBeaconBlockRoots, + BeaconBlockRoots, Unknown, } @@ -15,7 +15,7 @@ impl From for RPCMethod { match method_id { 0 => RPCMethod::Hello, 1 => RPCMethod::Goodbye, - 10 => RPCMethod::RequestBeaconBlockRoots, + 10 => RPCMethod::BeaconBlockRoots, _ => RPCMethod::Unknown, } } @@ -26,7 +26,7 @@ impl Into for RPCMethod { match self { RPCMethod::Hello => 0, RPCMethod::Goodbye => 1, - RPCMethod::RequestBeaconBlockRoots => 10, + RPCMethod::BeaconBlockRoots => 10, _ => 0, } } @@ -36,13 +36,13 @@ impl Into for RPCMethod { pub enum RPCRequest { Hello(HelloMessage), Goodbye(u64), - RequestBeaconBlockRoots(BeaconBlockRootsRequest), + BeaconBlockRoots(BeaconBlockRootsRequest), } #[derive(Debug, Clone)] pub enum RPCResponse { Hello(HelloMessage), - RequestBeaconBlockRoots(BeaconBlockRootsResponse), + BeaconBlockRoots(BeaconBlockRootsResponse), } /* Request/Response data structures for RPC methods */ From 8fa70f64ecc3a234b6f8733c377aab79bf90f1de Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 20 Mar 2019 10:28:05 +1100 Subject: [PATCH 147/154] Implement BeaconBlockHeaders RPC method --- beacon_node/libp2p/src/rpc/methods.rs | 29 +++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/beacon_node/libp2p/src/rpc/methods.rs b/beacon_node/libp2p/src/rpc/methods.rs index 1a35763ff..4ee73761d 100644 --- a/beacon_node/libp2p/src/rpc/methods.rs +++ b/beacon_node/libp2p/src/rpc/methods.rs @@ -1,12 +1,13 @@ /// Available RPC methods types and ids. 
use ssz_derive::{Decode, Encode}; -use types::{Epoch, Hash256, Slot}; +use types::{BeaconBlockHeader, Epoch, Hash256, Slot}; #[derive(Debug)] pub enum RPCMethod { Hello, Goodbye, BeaconBlockRoots, + BeaconBlockHeaders, Unknown, } @@ -16,6 +17,7 @@ impl From for RPCMethod { 0 => RPCMethod::Hello, 1 => RPCMethod::Goodbye, 10 => RPCMethod::BeaconBlockRoots, + 11 => RPCMethod::BeaconBlockHeaders, _ => RPCMethod::Unknown, } } @@ -27,6 +29,7 @@ impl Into for RPCMethod { RPCMethod::Hello => 0, RPCMethod::Goodbye => 1, RPCMethod::BeaconBlockRoots => 10, + RPCMethod::BeaconBlockHeaders => 11, _ => 0, } } @@ -37,12 +40,14 @@ pub enum RPCRequest { Hello(HelloMessage), Goodbye(u64), BeaconBlockRoots(BeaconBlockRootsRequest), + BeaconBlockHeaders(BeaconBlockHeadersRequest), } #[derive(Debug, Clone)] pub enum RPCResponse { Hello(HelloMessage), BeaconBlockRoots(BeaconBlockRootsResponse), + BeaconBlockHeaders(BeaconBlockHeadersResponse), } /* Request/Response data structures for RPC methods */ @@ -71,7 +76,7 @@ pub struct BeaconBlockRootsRequest { count: u64, // this must be less than 32768. //TODO: Enforce this in the lower layers } -/// Response a number of beacon block roots from a peer. +/// Response containing a number of beacon block roots from a peer. #[derive(Encode, Decode, Clone, Debug)] pub struct BeaconBlockRootsResponse { /// List of requested blocks and associated slots. @@ -86,3 +91,23 @@ pub struct BlockRootSlot { /// The block slot. slot: Slot, } + +/// Request a number of beacon block headers from a peer. +#[derive(Encode, Decode, Clone, Debug)] +pub struct BeaconBlockHeadersRequest { + /// The starting header hash of the requested headers. + start_root: Hash256, + /// The starting slot of the requested headers. + start_slot: Slot, + /// The maximum number of headers than can be returned. + max_headers: u64, + /// The maximum number of slots to skip between blocks. + skip_slots: u64, +} + +/// Response containing requested block headers. +#[derive(Encode, Decode, Clone, Debug)] +pub struct BeaconBlockHeadersResponse { + /// The list of requested beacon block headers. + headers: Vec, +} From fd04431d548002258e3da84053a8bcef6bad12fe Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 20 Mar 2019 10:36:37 +1100 Subject: [PATCH 148/154] Implement BeaconBlockBody RPC method --- beacon_node/libp2p/src/rpc/methods.rs | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/beacon_node/libp2p/src/rpc/methods.rs b/beacon_node/libp2p/src/rpc/methods.rs index 4ee73761d..7ae136eaf 100644 --- a/beacon_node/libp2p/src/rpc/methods.rs +++ b/beacon_node/libp2p/src/rpc/methods.rs @@ -1,6 +1,6 @@ /// Available RPC methods types and ids. 
use ssz_derive::{Decode, Encode}; -use types::{BeaconBlockHeader, Epoch, Hash256, Slot}; +use types::{BeaconBlockBody, BeaconBlockHeader, Epoch, Hash256, Slot}; #[derive(Debug)] pub enum RPCMethod { @@ -8,6 +8,7 @@ pub enum RPCMethod { Goodbye, BeaconBlockRoots, BeaconBlockHeaders, + BeaconBlockBodies, Unknown, } @@ -18,6 +19,7 @@ impl From for RPCMethod { 1 => RPCMethod::Goodbye, 10 => RPCMethod::BeaconBlockRoots, 11 => RPCMethod::BeaconBlockHeaders, + 12 => RPCMethod::BeaconBlockBodies, _ => RPCMethod::Unknown, } } @@ -30,6 +32,7 @@ impl Into for RPCMethod { RPCMethod::Goodbye => 1, RPCMethod::BeaconBlockRoots => 10, RPCMethod::BeaconBlockHeaders => 11, + RPCMethod::BeaconBlockBodies => 12, _ => 0, } } @@ -41,6 +44,7 @@ pub enum RPCRequest { Goodbye(u64), BeaconBlockRoots(BeaconBlockRootsRequest), BeaconBlockHeaders(BeaconBlockHeadersRequest), + BeaconBlockBodies(BeaconBlockBodiesRequest), } #[derive(Debug, Clone)] @@ -48,6 +52,7 @@ pub enum RPCResponse { Hello(HelloMessage), BeaconBlockRoots(BeaconBlockRootsResponse), BeaconBlockHeaders(BeaconBlockHeadersResponse), + BeaconBlockBodies(BeaconBlockBodiesResponse), } /* Request/Response data structures for RPC methods */ @@ -111,3 +116,17 @@ pub struct BeaconBlockHeadersResponse { /// The list of requested beacon block headers. headers: Vec, } + +/// Request a number of beacon block bodies from a peer. +#[derive(Encode, Decode, Clone, Debug)] +pub struct BeaconBlockBodiesRequest { + /// The list of beacon block bodies being requested. + block_roots: Hash256, +} + +/// Response containing the list of requested beacon block bodies. +#[derive(Encode, Decode, Clone, Debug)] +pub struct BeaconBlockBodiesResponse { + /// The list of beacon block bodies being requested. + block_bodies: Vec, +} From ae1a7a2a25df50e5c7a5bf9d3a46049e8ff6503e Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 20 Mar 2019 10:43:40 +1100 Subject: [PATCH 149/154] Implement BeaconChainState RPC method --- beacon_node/libp2p/src/rpc/methods.rs | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/beacon_node/libp2p/src/rpc/methods.rs b/beacon_node/libp2p/src/rpc/methods.rs index 7ae136eaf..45e9f35ab 100644 --- a/beacon_node/libp2p/src/rpc/methods.rs +++ b/beacon_node/libp2p/src/rpc/methods.rs @@ -9,6 +9,7 @@ pub enum RPCMethod { BeaconBlockRoots, BeaconBlockHeaders, BeaconBlockBodies, + BeaconChainState, // Note: experimental, not complete. Unknown, } @@ -20,6 +21,8 @@ impl From for RPCMethod { 10 => RPCMethod::BeaconBlockRoots, 11 => RPCMethod::BeaconBlockHeaders, 12 => RPCMethod::BeaconBlockBodies, + 13 => RPCMethod::BeaconChainState, + _ => RPCMethod::Unknown, } } @@ -33,6 +36,7 @@ impl Into for RPCMethod { RPCMethod::BeaconBlockRoots => 10, RPCMethod::BeaconBlockHeaders => 11, RPCMethod::BeaconBlockBodies => 12, + RPCMethod::BeaconChainState => 13, _ => 0, } } @@ -45,6 +49,7 @@ pub enum RPCRequest { BeaconBlockRoots(BeaconBlockRootsRequest), BeaconBlockHeaders(BeaconBlockHeadersRequest), BeaconBlockBodies(BeaconBlockBodiesRequest), + BeaconChainState(BeaconChainStateRequest), } #[derive(Debug, Clone)] @@ -53,6 +58,7 @@ pub enum RPCResponse { BeaconBlockRoots(BeaconBlockRootsResponse), BeaconBlockHeaders(BeaconBlockHeadersResponse), BeaconBlockBodies(BeaconBlockBodiesResponse), + BeaconChainState(BeaconChainStateResponse), } /* Request/Response data structures for RPC methods */ @@ -130,3 +136,18 @@ pub struct BeaconBlockBodiesResponse { /// The list of beacon block bodies being requested. 
block_bodies: Vec, } + +/// Request values for tree hashes which yield a blocks `state_root`. +#[derive(Encode, Decode, Clone, Debug)] +pub struct BeaconChainStateRequest { + /// The tree hashes that a value is requested for. + hashes: Vec, +} + +/// Request values for tree hashes which yield a blocks `state_root`. +// Note: TBD +#[derive(Encode, Decode, Clone, Debug)] +pub struct BeaconChainStateResponse { + /// The values corresponding the to the requested tree hashes. + values: bool, //TBD - stubbed with encodeable bool +} From 9db36f15bf4dee0d92478a043f4ec4061def24a1 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 20 Mar 2019 10:47:05 +1100 Subject: [PATCH 150/154] Tidy RPC Methods --- beacon_node/libp2p/src/rpc/methods.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/beacon_node/libp2p/src/rpc/methods.rs b/beacon_node/libp2p/src/rpc/methods.rs index 45e9f35ab..3014afd0f 100644 --- a/beacon_node/libp2p/src/rpc/methods.rs +++ b/beacon_node/libp2p/src/rpc/methods.rs @@ -3,13 +3,21 @@ use ssz_derive::{Decode, Encode}; use types::{BeaconBlockBody, BeaconBlockHeader, Epoch, Hash256, Slot}; #[derive(Debug)] +/// Available Serenity Libp2p RPC methods pub enum RPCMethod { + /// Initialise handshake between connecting peers. Hello, + /// Terminate a connection providing a reason. Goodbye, + /// Requests a number of beacon block roots. BeaconBlockRoots, + /// Requests a number of beacon block headers. BeaconBlockHeaders, + /// Requests a number of beacon block bodies. BeaconBlockBodies, + /// Requests values for a merkle proof for the current blocks state root. BeaconChainState, // Note: experimental, not complete. + /// Unknown method received. Unknown, } From 84f373fcc2ca1619dc0e3c708dbaa7a04bfa03b8 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 20 Mar 2019 10:51:53 +1100 Subject: [PATCH 151/154] Fix clippy lints --- beacon_node/beacon_chain/src/beacon_chain.rs | 8 ++--- eth2/fork_choice/src/slow_lmd_ghost.rs | 6 ++-- .../src/per_block_processing.rs | 4 +-- .../validate_attestation.rs | 34 ++++++------------- .../per_block_processing/verify_deposit.rs | 4 +-- .../get_attestation_participants.rs | 2 +- .../update_registry_and_shuffling_data.rs | 1 - eth2/types/src/beacon_block.rs | 6 ++-- eth2/types/src/beacon_state.rs | 17 ++++++---- eth2/types/src/beacon_state/epoch_cache.rs | 1 - eth2/types/src/relative_epoch.rs | 2 +- .../testing_beacon_state_builder.rs | 2 +- .../src/test_utils/testing_deposit_builder.rs | 2 +- 13 files changed, 38 insertions(+), 51 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 01787f95b..816a570c0 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -82,7 +82,7 @@ where let state_root = genesis_state.canonical_root(); state_store.put(&state_root, &ssz_encode(&genesis_state)[..])?; - let block_root = genesis_block.into_header().canonical_root(); + let block_root = genesis_block.block_header().canonical_root(); block_store.put(&block_root, &ssz_encode(&genesis_block)[..])?; let finalized_head = RwLock::new(CheckPoint::new( @@ -189,7 +189,7 @@ where pub fn advance_state(&self, slot: Slot) -> Result<(), SlotProcessingError> { let state_slot = self.state.read().slot; - let latest_block_header = self.head().beacon_block.into_header(); + let latest_block_header = self.head().beacon_block.block_header(); for _ in state_slot.as_u64()..slot.as_u64() { per_slot_processing(&mut *self.state.write(), &latest_block_header, 
&self.spec)?; @@ -561,7 +561,7 @@ where pub fn process_block(&self, block: BeaconBlock) -> Result { debug!("Processing block with slot {}...", block.slot); - let block_root = block.into_header().canonical_root(); + let block_root = block.block_header().canonical_root(); let present_slot = self.present_slot(); @@ -596,7 +596,7 @@ where // Transition the parent state to the present slot. let mut state = parent_state; - let previous_block_header = parent_block.into_header(); + let previous_block_header = parent_block.block_header(); for _ in state.slot.as_u64()..present_slot.as_u64() { if let Err(e) = per_slot_processing(&mut state, &previous_block_header, &self.spec) { return Ok(BlockProcessingOutcome::InvalidBlock( diff --git a/eth2/fork_choice/src/slow_lmd_ghost.rs b/eth2/fork_choice/src/slow_lmd_ghost.rs index 25d137089..4b236cba4 100644 --- a/eth2/fork_choice/src/slow_lmd_ghost.rs +++ b/eth2/fork_choice/src/slow_lmd_ghost.rs @@ -215,10 +215,8 @@ impl ForkChoice for SlowLMDGhost { head_vote_count = vote_count; } // resolve ties - choose smaller hash - else if vote_count == head_vote_count { - if *child_hash < head_hash { - head_hash = *child_hash; - } + else if vote_count == head_vote_count && *child_hash < head_hash { + head_hash = *child_hash; } } } diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index 33f953b71..dc83abb3f 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -109,7 +109,7 @@ pub fn process_block_header( Invalid::ParentBlockRootMismatch ); - state.latest_block_header = block.into_temporary_header(spec); + state.latest_block_header = block.temporary_block_header(spec); Ok(()) } @@ -388,7 +388,7 @@ pub fn process_deposits( // Create a new validator. let validator = Validator { pubkey: deposit_input.pubkey.clone(), - withdrawal_credentials: deposit_input.withdrawal_credentials.clone(), + withdrawal_credentials: deposit_input.withdrawal_credentials, activation_epoch: spec.far_future_epoch, exit_epoch: spec.far_future_epoch, withdrawable_epoch: spec.far_future_epoch, diff --git a/eth2/state_processing/src/per_block_processing/validate_attestation.rs b/eth2/state_processing/src/per_block_processing/validate_attestation.rs index 113dbc4ce..2143988a4 100644 --- a/eth2/state_processing/src/per_block_processing/validate_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/validate_attestation.rs @@ -176,17 +176,7 @@ fn validate_attestation_signature_optional( ); if verify_signature { - let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch); - verify_attestation_signature( - state, - committee, - attestation_epoch, - &attestation.aggregation_bitfield, - &attestation.custody_bitfield, - &attestation.data, - &attestation.aggregate_signature, - spec, - )?; + verify_attestation_signature(state, committee, attestation, spec)?; } // Crosslink data root is zero (to be removed in phase 1). 
@@ -210,32 +200,29 @@ fn validate_attestation_signature_optional( fn verify_attestation_signature( state: &BeaconState, committee: &[usize], - attestation_epoch: Epoch, - aggregation_bitfield: &Bitfield, - custody_bitfield: &Bitfield, - attestation_data: &AttestationData, - aggregate_signature: &AggregateSignature, + a: &Attestation, spec: &ChainSpec, ) -> Result<(), Error> { let mut aggregate_pubs = vec![AggregatePublicKey::new(); 2]; let mut message_exists = vec![false; 2]; + let attestation_epoch = a.data.slot.epoch(spec.slots_per_epoch); for (i, v) in committee.iter().enumerate() { - let validator_signed = aggregation_bitfield.get(i).map_err(|_| { + let validator_signed = a.aggregation_bitfield.get(i).map_err(|_| { Error::Invalid(Invalid::BadAggregationBitfieldLength { committee_len: committee.len(), - bitfield_len: aggregation_bitfield.len(), + bitfield_len: a.aggregation_bitfield.len(), }) })?; if validator_signed { - let custody_bit: bool = match custody_bitfield.get(i) { + let custody_bit: bool = match a.custody_bitfield.get(i) { Ok(bit) => bit, // Invalidate signature if custody_bitfield.len() < committee Err(_) => { return Err(Error::Invalid(Invalid::BadCustodyBitfieldLength { committee_len: committee.len(), - bitfield_len: aggregation_bitfield.len(), + bitfield_len: a.aggregation_bitfield.len(), })); } }; @@ -254,14 +241,14 @@ fn verify_attestation_signature( // Message when custody bitfield is `false` let message_0 = AttestationDataAndCustodyBit { - data: attestation_data.clone(), + data: a.data.clone(), custody_bit: false, } .hash_tree_root(); // Message when custody bitfield is `true` let message_1 = AttestationDataAndCustodyBit { - data: attestation_data.clone(), + data: a.data.clone(), custody_bit: true, } .hash_tree_root(); @@ -283,7 +270,8 @@ fn verify_attestation_signature( let domain = spec.get_domain(attestation_epoch, Domain::Attestation, &state.fork); verify!( - aggregate_signature.verify_multiple(&messages[..], domain, &keys[..]), + a.aggregate_signature + .verify_multiple(&messages[..], domain, &keys[..]), Invalid::BadSignature ); diff --git a/eth2/state_processing/src/per_block_processing/verify_deposit.rs b/eth2/state_processing/src/per_block_processing/verify_deposit.rs index 80d8bc24f..a3a0f5734 100644 --- a/eth2/state_processing/src/per_block_processing/verify_deposit.rs +++ b/eth2/state_processing/src/per_block_processing/verify_deposit.rs @@ -71,9 +71,7 @@ pub fn get_existing_validator_index( ) -> Result, Error> { let deposit_input = &deposit.deposit_data.deposit_input; - let validator_index = state - .get_validator_index(&deposit_input.pubkey)? 
- .and_then(|i| Some(i)); + let validator_index = state.get_validator_index(&deposit_input.pubkey)?; match validator_index { None => Ok(None), diff --git a/eth2/state_processing/src/per_epoch_processing/get_attestation_participants.rs b/eth2/state_processing/src/per_epoch_processing/get_attestation_participants.rs index 3e52776b1..52ba0274b 100644 --- a/eth2/state_processing/src/per_epoch_processing/get_attestation_participants.rs +++ b/eth2/state_processing/src/per_epoch_processing/get_attestation_participants.rs @@ -28,7 +28,7 @@ pub fn get_attestation_participants( let mut participants = Vec::with_capacity(committee.len()); for (i, validator_index) in committee.iter().enumerate() { match bitfield.get(i) { - Ok(bit) if bit == true => participants.push(*validator_index), + Ok(bit) if bit => participants.push(*validator_index), _ => {} } } diff --git a/eth2/state_processing/src/per_epoch_processing/update_registry_and_shuffling_data.rs b/eth2/state_processing/src/per_epoch_processing/update_registry_and_shuffling_data.rs index 286ad8140..0b18c2571 100644 --- a/eth2/state_processing/src/per_epoch_processing/update_registry_and_shuffling_data.rs +++ b/eth2/state_processing/src/per_epoch_processing/update_registry_and_shuffling_data.rs @@ -64,7 +64,6 @@ pub fn should_update_validator_registry( let current_epoch_committee_count = spec.get_epoch_committee_count(num_active_validators); for shard in (0..current_epoch_committee_count) - .into_iter() .map(|i| (state.current_shuffling_start_shard + i as u64) % spec.shard_count) { if state.latest_crosslinks[shard as usize].epoch <= state.validator_registry_update_epoch { diff --git a/eth2/types/src/beacon_block.rs b/eth2/types/src/beacon_block.rs index b966751ed..6a3f1a354 100644 --- a/eth2/types/src/beacon_block.rs +++ b/eth2/types/src/beacon_block.rs @@ -71,7 +71,7 @@ impl BeaconBlock { /// Note: performs a full tree-hash of `self.body`. /// /// Spec v0.5.0 - pub fn into_header(&self) -> BeaconBlockHeader { + pub fn block_header(&self) -> BeaconBlockHeader { BeaconBlockHeader { slot: self.slot, previous_block_root: self.previous_block_root, @@ -84,11 +84,11 @@ impl BeaconBlock { /// Returns a "temporary" header, where the `state_root` is `spec.zero_hash`. 
/// /// Spec v0.5.0 - pub fn into_temporary_header(&self, spec: &ChainSpec) -> BeaconBlockHeader { + pub fn temporary_block_header(&self, spec: &ChainSpec) -> BeaconBlockHeader { BeaconBlockHeader { state_root: spec.zero_hash, signature: spec.empty_signature.clone(), - ..self.into_header() + ..self.block_header() } } } diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 1a165c9a9..1e5278124 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -162,7 +162,7 @@ impl BeaconState { latest_state_roots: vec![spec.zero_hash; spec.slots_per_historical_root], latest_active_index_roots: vec![spec.zero_hash; spec.latest_active_index_roots_length], latest_slashed_balances: vec![0; spec.latest_slashed_exit_length], - latest_block_header: BeaconBlock::empty(spec).into_temporary_header(spec), + latest_block_header: BeaconBlock::empty(spec).temporary_block_header(spec), historical_roots: vec![], /* @@ -386,7 +386,8 @@ impl BeaconState { spec: &ChainSpec, ) -> Result<(), BeaconStateError> { let i = self.get_latest_block_roots_index(slot, spec)?; - Ok(self.latest_block_roots[i] = block_root) + self.latest_block_roots[i] = block_root; + Ok(()) } /// Safely obtains the index for `latest_randao_mixes` @@ -449,7 +450,8 @@ impl BeaconState { spec: &ChainSpec, ) -> Result<(), Error> { let i = self.get_randao_mix_index(epoch, spec)?; - Ok(self.latest_randao_mixes[i] = mix) + self.latest_randao_mixes[i] = mix; + Ok(()) } /// Safely obtains the index for `latest_active_index_roots`, given some `epoch`. @@ -492,7 +494,8 @@ impl BeaconState { spec: &ChainSpec, ) -> Result<(), Error> { let i = self.get_active_index_root_index(epoch, spec)?; - Ok(self.latest_active_index_roots[i] = index_root) + self.latest_active_index_roots[i] = index_root; + Ok(()) } /// Replace `active_index_roots` with clones of `index_root`. @@ -537,7 +540,8 @@ impl BeaconState { spec: &ChainSpec, ) -> Result<(), Error> { let i = self.get_latest_state_roots_index(slot, spec)?; - Ok(self.latest_state_roots[i] = state_root) + self.latest_state_roots[i] = state_root; + Ok(()) } /// Safely obtains the index for `latest_slashed_balances`, given some `epoch`. @@ -573,7 +577,8 @@ impl BeaconState { spec: &ChainSpec, ) -> Result<(), Error> { let i = self.get_slashed_balance_index(epoch, spec)?; - Ok(self.latest_slashed_balances[i] = balance) + self.latest_slashed_balances[i] = balance; + Ok(()) } /// Generate a seed for the given `epoch`. diff --git a/eth2/types/src/beacon_state/epoch_cache.rs b/eth2/types/src/beacon_state/epoch_cache.rs index 6eebf1da3..32d9a643e 100644 --- a/eth2/types/src/beacon_state/epoch_cache.rs +++ b/eth2/types/src/beacon_state/epoch_cache.rs @@ -304,7 +304,6 @@ impl EpochCrosslinkCommitteesBuilder { for (i, slot) in self.epoch.slot_iter(spec.slots_per_epoch).enumerate() { for j in (0..committees.len()) - .into_iter() .skip(i * committees_per_slot) .take(committees_per_slot) { diff --git a/eth2/types/src/relative_epoch.rs b/eth2/types/src/relative_epoch.rs index 6c135b1a6..8f895e97a 100644 --- a/eth2/types/src/relative_epoch.rs +++ b/eth2/types/src/relative_epoch.rs @@ -33,7 +33,7 @@ impl RelativeEpoch { /// Returns the `epoch` that `self` refers to, with respect to the `base` epoch. 
/// /// Spec v0.5.0 - pub fn into_epoch(&self, base: Epoch) -> Epoch { + pub fn into_epoch(self, base: Epoch) -> Epoch { match self { RelativeEpoch::Previous => base - 1, RelativeEpoch::Current => base, diff --git a/eth2/types/src/test_utils/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/testing_beacon_state_builder.rs index e76a01e49..6945769aa 100644 --- a/eth2/types/src/test_utils/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_state_builder.rs @@ -214,7 +214,7 @@ impl TestingBeaconStateBuilder { - spec.min_attestation_inclusion_delay; let last_slot = std::cmp::min(state.slot.as_u64(), last_slot); - for slot in first_slot..last_slot + 1 { + for slot in first_slot..=last_slot { let slot = Slot::from(slot); let committees = state diff --git a/eth2/types/src/test_utils/testing_deposit_builder.rs b/eth2/types/src/test_utils/testing_deposit_builder.rs index ee258e7fe..326858c31 100644 --- a/eth2/types/src/test_utils/testing_deposit_builder.rs +++ b/eth2/types/src/test_utils/testing_deposit_builder.rs @@ -47,7 +47,7 @@ impl TestingDepositBuilder { self.deposit .deposit_data .deposit_input - .withdrawal_credentials = withdrawal_credentials.clone(); + .withdrawal_credentials = withdrawal_credentials; self.deposit.deposit_data.deposit_input.proof_of_possession = self .deposit From 4105b869e1f239cb5fbbcc5a4dce17184c389e58 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 20 Mar 2019 10:54:19 +1100 Subject: [PATCH 152/154] Fix all matches relating to new RPC methods --- beacon_node/libp2p/src/rpc/protocol.rs | 12 ++++++++---- beacon_node/network/src/message_handler.rs | 4 ++++ 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/beacon_node/libp2p/src/rpc/protocol.rs b/beacon_node/libp2p/src/rpc/protocol.rs index 6cebb7fd2..c19aca8ff 100644 --- a/beacon_node/libp2p/src/rpc/protocol.rs +++ b/beacon_node/libp2p/src/rpc/protocol.rs @@ -81,7 +81,7 @@ fn decode(packet: Vec) -> Result { let (hello_body, _index) = HelloMessage::ssz_decode(&packet, index)?; RPCRequest::Hello(hello_body) } - RPCMethod::Unknown => return Err(DecodeError::UnknownRPCMethod), + RPCMethod::Unknown | _ => return Err(DecodeError::UnknownRPCMethod), }; Ok(RPCEvent::Request { @@ -97,7 +97,7 @@ fn decode(packet: Vec) -> Result { let (body, _index) = HelloMessage::ssz_decode(&packet, index)?; RPCResponse::Hello(body) } - RPCMethod::Unknown => return Err(DecodeError::UnknownRPCMethod), + RPCMethod::Unknown | _ => return Err(DecodeError::UnknownRPCMethod), }; Ok(RPCEvent::Response { id, @@ -134,8 +134,11 @@ impl Encodable for RPCEvent { s.append(id); s.append(method_id); match body { - RPCRequest::Hello(body) => s.append(body), - }; + RPCRequest::Hello(body) => { + s.append(body); + } + _ => {} + } } RPCEvent::Response { id, @@ -149,6 +152,7 @@ impl Encodable for RPCEvent { RPCResponse::Hello(response) => { s.append(response); } + _ => {} } } } diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index a7d6e3d07..4cd0ab951 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -122,6 +122,8 @@ impl MessageHandler { RPCRequest::Hello(hello_message) => { self.handle_hello_request(peer_id, id, hello_message) } + // TODO: Handle all requests + _ => {} } } @@ -138,6 +140,8 @@ impl MessageHandler { debug!(self.log, "Hello response received from peer: {:?}", peer_id); self.validate_hello(peer_id, hello_message); } + // TODO: Handle all responses + _ => {} } } From 
7c7f81d188ce29aac083f8ba1c9bd0762bfa1631 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 20 Mar 2019 14:36:09 +1100 Subject: [PATCH 153/154] Fix issue with merging v0.5.0 --- beacon_node/network/src/sync/simple_sync.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 95c7092c3..ea09f9c0c 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -68,7 +68,7 @@ impl SimpleSync { network_id: self.network_id, latest_finalized_root: state.finalized_root, latest_finalized_epoch: state.finalized_epoch, - best_root: state.latest_block_roots[0], //TODO: build correct value as a beacon chain function + best_root: Hash256::zero(), //TODO: build correct value as a beacon chain function best_slot: state.slot - 1, } } From e080f6381128320df9cf3a475a5f971415df49df Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 20 Mar 2019 15:09:24 +1100 Subject: [PATCH 154/154] Rename libp2p to eth2-libp2p --- Cargo.toml | 1 + .../{libp2p => eth2-libp2p}/Cargo.toml | 2 +- .../{libp2p => eth2-libp2p}/src/behaviour.rs | 0 .../{libp2p => eth2-libp2p}/src/error.rs | 0 .../{libp2p => eth2-libp2p}/src/lib.rs | 0 .../src/network_config.rs | 0 .../src/rpc/methods.rs | 0 .../{libp2p => eth2-libp2p}/src/rpc/mod.rs | 0 .../src/rpc/protocol.rs | 0 .../{libp2p => eth2-libp2p}/src/service.rs | 0 beacon_node/network/Cargo.toml | 2 +- beacon_node/network/src/error.rs | 4 +- beacon_node/network/src/lib.rs | 2 +- beacon_node/network/src/message_handler.rs | 4 +- beacon_node/network/src/service.rs | 52 ++++++++++--------- beacon_node/network/src/sync/simple_sync.rs | 4 +- 16 files changed, 37 insertions(+), 34 deletions(-) rename beacon_node/{libp2p => eth2-libp2p}/Cargo.toml (95%) rename beacon_node/{libp2p => eth2-libp2p}/src/behaviour.rs (100%) rename beacon_node/{libp2p => eth2-libp2p}/src/error.rs (100%) rename beacon_node/{libp2p => eth2-libp2p}/src/lib.rs (100%) rename beacon_node/{libp2p => eth2-libp2p}/src/network_config.rs (100%) rename beacon_node/{libp2p => eth2-libp2p}/src/rpc/methods.rs (100%) rename beacon_node/{libp2p => eth2-libp2p}/src/rpc/mod.rs (100%) rename beacon_node/{libp2p => eth2-libp2p}/src/rpc/protocol.rs (100%) rename beacon_node/{libp2p => eth2-libp2p}/src/service.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index d34f6fd30..cb070cc2d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,6 +22,7 @@ members = [ "beacon_node/db", "beacon_node/client", "beacon_node/network", + "beacon_node/eth2-libp2p", "beacon_node/rpc", "beacon_node/version", "beacon_node/beacon_chain", diff --git a/beacon_node/libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml similarity index 95% rename from beacon_node/libp2p/Cargo.toml rename to beacon_node/eth2-libp2p/Cargo.toml index dcbc04d0b..4dd2e9c7b 100644 --- a/beacon_node/libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "libp2p" +name = "eth2-libp2p" version = "0.1.0" authors = ["Age Manning "] edition = "2018" diff --git a/beacon_node/libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs similarity index 100% rename from beacon_node/libp2p/src/behaviour.rs rename to beacon_node/eth2-libp2p/src/behaviour.rs diff --git a/beacon_node/libp2p/src/error.rs b/beacon_node/eth2-libp2p/src/error.rs similarity index 100% rename from beacon_node/libp2p/src/error.rs rename to beacon_node/eth2-libp2p/src/error.rs diff --git a/beacon_node/libp2p/src/lib.rs 
b/beacon_node/eth2-libp2p/src/lib.rs similarity index 100% rename from beacon_node/libp2p/src/lib.rs rename to beacon_node/eth2-libp2p/src/lib.rs diff --git a/beacon_node/libp2p/src/network_config.rs b/beacon_node/eth2-libp2p/src/network_config.rs similarity index 100% rename from beacon_node/libp2p/src/network_config.rs rename to beacon_node/eth2-libp2p/src/network_config.rs diff --git a/beacon_node/libp2p/src/rpc/methods.rs b/beacon_node/eth2-libp2p/src/rpc/methods.rs similarity index 100% rename from beacon_node/libp2p/src/rpc/methods.rs rename to beacon_node/eth2-libp2p/src/rpc/methods.rs diff --git a/beacon_node/libp2p/src/rpc/mod.rs b/beacon_node/eth2-libp2p/src/rpc/mod.rs similarity index 100% rename from beacon_node/libp2p/src/rpc/mod.rs rename to beacon_node/eth2-libp2p/src/rpc/mod.rs diff --git a/beacon_node/libp2p/src/rpc/protocol.rs b/beacon_node/eth2-libp2p/src/rpc/protocol.rs similarity index 100% rename from beacon_node/libp2p/src/rpc/protocol.rs rename to beacon_node/eth2-libp2p/src/rpc/protocol.rs diff --git a/beacon_node/libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs similarity index 100% rename from beacon_node/libp2p/src/service.rs rename to beacon_node/eth2-libp2p/src/service.rs diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 8b87a9d50..5275ed82f 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -6,7 +6,7 @@ edition = "2018" [dependencies] beacon_chain = { path = "../beacon_chain" } -libp2p = { path = "../libp2p" } +eth2-libp2p = { path = "../eth2-libp2p" } version = { path = "../version" } types = { path = "../../eth2/types" } slog = "2.4.1" diff --git a/beacon_node/network/src/error.rs b/beacon_node/network/src/error.rs index 2005f76ae..cdd6b6209 100644 --- a/beacon_node/network/src/error.rs +++ b/beacon_node/network/src/error.rs @@ -1,5 +1,5 @@ // generates error types -use libp2p; +use eth2_libp2p; use error_chain::{ error_chain, error_chain_processing, impl_error_chain_kind, impl_error_chain_processed, @@ -8,6 +8,6 @@ use error_chain::{ error_chain! 
{ links { - Libp2p(libp2p::error::Error, libp2p::error::ErrorKind); + Libp2p(eth2_libp2p::error::Error, eth2_libp2p::error::ErrorKind); } } diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index 1e47b9a73..61a29ed35 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -5,5 +5,5 @@ mod message_handler; mod service; pub mod sync; -pub use libp2p::NetworkConfig; +pub use eth2_libp2p::NetworkConfig; pub use service::Service; diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 4cd0ab951..dbf8c7d9d 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -3,11 +3,11 @@ use crate::error; use crate::service::{NetworkMessage, OutgoingMessage}; use crate::sync::SimpleSync; use crossbeam_channel::{unbounded as channel, Sender}; -use futures::future; -use libp2p::{ +use eth2_libp2p::{ rpc::{RPCMethod, RPCRequest, RPCResponse}, HelloMessage, PeerId, RPCEvent, }; +use futures::future; use slog::warn; use slog::{debug, trace}; use std::collections::HashMap; diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index c3045d280..14f994e4a 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -3,20 +3,20 @@ use crate::error; use crate::message_handler::{HandlerMessage, MessageHandler}; use crate::NetworkConfig; use crossbeam_channel::{unbounded as channel, Sender, TryRecvError}; +use eth2_libp2p::RPCEvent; +use eth2_libp2p::Service as LibP2PService; +use eth2_libp2p::{Libp2pEvent, PeerId}; use futures::prelude::*; use futures::sync::oneshot; use futures::Stream; -use libp2p::RPCEvent; -use libp2p::Service as LibP2PService; -use libp2p::{Libp2pEvent, PeerId}; use slog::{debug, info, o, trace}; use std::sync::Arc; use tokio::runtime::TaskExecutor; -/// Service that handles communication between internal services and the libp2p network service. +/// Service that handles communication between internal services and the eth2_libp2p network service. pub struct Service { - //libp2p_service: Arc>, - libp2p_exit: oneshot::Sender<()>, + //eth2_libp2p_service: Arc>, + eth2_libp2p_exit: oneshot::Sender<()>, network_send: crossbeam_channel::Sender, //message_handler: MessageHandler, //message_handler_send: Sender, @@ -40,20 +40,20 @@ impl Service { message_handler_log, )?; - // launch libp2p service - let libp2p_log = log.new(o!("Service" => "Libp2p")); - let libp2p_service = LibP2PService::new(config.clone(), libp2p_log)?; + // launch eth2_libp2p service + let eth2_libp2p_log = log.new(o!("Service" => "Libp2p")); + let eth2_libp2p_service = LibP2PService::new(config.clone(), eth2_libp2p_log)?; - // TODO: Spawn thread to handle libp2p messages and pass to message handler thread. - let libp2p_exit = spawn_service( - libp2p_service, + // TODO: Spawn thread to handle eth2_libp2p messages and pass to message handler thread. 
+ let eth2_libp2p_exit = spawn_service( + eth2_libp2p_service, network_recv, message_handler_send, executor, log, )?; let network_service = Service { - libp2p_exit, + eth2_libp2p_exit, network_send: network_send.clone(), }; @@ -72,7 +72,7 @@ impl Service { } fn spawn_service( - libp2p_service: LibP2PService, + eth2_libp2p_service: LibP2PService, network_recv: crossbeam_channel::Receiver, message_handler_send: crossbeam_channel::Sender, executor: &TaskExecutor, @@ -83,7 +83,7 @@ fn spawn_service( // spawn on the current executor executor.spawn( network_service( - libp2p_service, + eth2_libp2p_service, network_recv, message_handler_send, log.clone(), @@ -100,18 +100,18 @@ fn spawn_service( } fn network_service( - mut libp2p_service: LibP2PService, + mut eth2_libp2p_service: LibP2PService, network_recv: crossbeam_channel::Receiver, message_handler_send: crossbeam_channel::Sender, log: slog::Logger, -) -> impl futures::Future { - futures::future::poll_fn(move || -> Result<_, libp2p::error::Error> { +) -> impl futures::Future { + futures::future::poll_fn(move || -> Result<_, eth2_libp2p::error::Error> { // poll the swarm loop { - match libp2p_service.poll() { + match eth2_libp2p_service.poll() { Ok(Async::Ready(Some(Libp2pEvent::RPC(peer_id, rpc_event)))) => { trace!( - libp2p_service.log, + eth2_libp2p_service.log, "RPC Event: RPC message received: {:?}", rpc_event ); @@ -120,13 +120,13 @@ fn network_service( .map_err(|_| "failed to send rpc to handler")?; } Ok(Async::Ready(Some(Libp2pEvent::PeerDialed(peer_id)))) => { - debug!(libp2p_service.log, "Peer Dialed: {:?}", peer_id); + debug!(eth2_libp2p_service.log, "Peer Dialed: {:?}", peer_id); message_handler_send .send(HandlerMessage::PeerDialed(peer_id)) .map_err(|_| "failed to send rpc to handler")?; } Ok(Async::Ready(Some(Libp2pEvent::Message(m)))) => debug!( - libp2p_service.log, + eth2_libp2p_service.log, "Network Service: Message received: {}", m ), _ => break, @@ -143,7 +143,7 @@ fn network_service( trace!(log, "Sending RPC Event: {:?}", rpc_event); //TODO: Make swarm private //TODO: Implement correct peer id topic message handling - libp2p_service.swarm.send_rpc(peer_id, rpc_event); + eth2_libp2p_service.swarm.send_rpc(peer_id, rpc_event); } OutgoingMessage::NotifierTest => { debug!(log, "Received message from notifier"); @@ -152,7 +152,9 @@ fn network_service( } Err(TryRecvError::Empty) => break, Err(TryRecvError::Disconnected) => { - return Err(libp2p::error::Error::from("Network channel disconnected")); + return Err(eth2_libp2p::error::Error::from( + "Network channel disconnected", + )); } } } @@ -163,7 +165,7 @@ fn network_service( /// Types of messages that the network service can receive. #[derive(Debug, Clone)] pub enum NetworkMessage { - /// Send a message to libp2p service. + /// Send a message to eth2_libp2p service. //TODO: Define typing for messages across the wire Send(PeerId, OutgoingMessage), } diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index ea09f9c0c..0f7de6ab9 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -1,6 +1,6 @@ use crate::beacon_chain::BeaconChain; -use libp2p::rpc::HelloMessage; -use libp2p::PeerId; +use eth2_libp2p::rpc::HelloMessage; +use eth2_libp2p::PeerId; use slog::{debug, o}; use std::collections::HashMap; use std::sync::Arc;
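
Patches 148 to 150 above grow the RPC method table so that Hello and Goodbye keep ids 0 and 1 while the block-related methods count up from 10, with BeaconBlockBodies at 12 and BeaconChainState at 13. A minimal standalone sketch of the same id round-trip follows; the enum name and the absence of request/response payloads are simplifications for illustration, and it uses `impl From<RpcMethod> for u16` rather than the `Into` impl in the patch, since implementing `From` gives `Into` for free and is the form clippy generally prefers.

#[derive(Debug, Clone, Copy, PartialEq)]
enum RpcMethod {
    Hello,
    Goodbye,
    BeaconBlockRoots,
    BeaconBlockHeaders,
    BeaconBlockBodies,
    BeaconChainState,
    Unknown,
}

impl From<u16> for RpcMethod {
    fn from(id: u16) -> Self {
        // Ids mirror the mapping added in the patches; anything else is Unknown.
        match id {
            0 => RpcMethod::Hello,
            1 => RpcMethod::Goodbye,
            10 => RpcMethod::BeaconBlockRoots,
            11 => RpcMethod::BeaconBlockHeaders,
            12 => RpcMethod::BeaconBlockBodies,
            13 => RpcMethod::BeaconChainState,
            _ => RpcMethod::Unknown,
        }
    }
}

impl From<RpcMethod> for u16 {
    fn from(method: RpcMethod) -> u16 {
        match method {
            RpcMethod::Hello => 0,
            RpcMethod::Goodbye => 1,
            RpcMethod::BeaconBlockRoots => 10,
            RpcMethod::BeaconBlockHeaders => 11,
            RpcMethod::BeaconBlockBodies => 12,
            RpcMethod::BeaconChainState => 13,
            // Unknown methods fall back to 0, matching the `_ => 0` arm in the patch.
            RpcMethod::Unknown => 0,
        }
    }
}

fn main() {
    // Known ids round-trip; unrecognised ids collapse to Unknown.
    assert_eq!(RpcMethod::from(12), RpcMethod::BeaconBlockBodies);
    assert_eq!(u16::from(RpcMethod::BeaconChainState), 13);
    assert_eq!(RpcMethod::from(99), RpcMethod::Unknown);
}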
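One recurring clippy fix in patch 151 replaces expressions of the form `Ok(self.latest_block_roots[i] = block_root)` with an explicit assignment followed by `Ok(())`. The original only type-checks because an assignment expression evaluates to `()`, which happens to be the success type of the function; clippy's `unit_arg` lint flags wrapping that unit value directly. A small sketch of the preferred shape, using illustrative names (`Roots`, `set`) rather than the actual `BeaconState` API:

struct Roots {
    latest: Vec<u64>,
}

#[derive(Debug, PartialEq)]
enum Error {
    OutOfBounds,
}

impl Roots {
    fn set(&mut self, i: usize, root: u64) -> Result<(), Error> {
        if i >= self.latest.len() {
            return Err(Error::OutOfBounds);
        }
        // `Ok(self.latest[i] = root)` would compile, because the assignment yields `()`,
        // but it hides the side effect inside the return value. Do the assignment first,
        // then return success explicitly, as the patch does.
        self.latest[i] = root;
        Ok(())
    }
}

fn main() {
    let mut roots = Roots { latest: vec![0; 4] };
    assert_eq!(roots.set(2, 42), Ok(()));
    assert_eq!(roots.latest[2], 42);
    assert_eq!(roots.set(9, 7), Err(Error::OutOfBounds));
}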
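The same patch also cleans up three smaller idioms: `first_slot..last_slot + 1` becomes the inclusive range `first_slot..=last_slot` (clippy's `range_plus_one`), `Ok(bit) if bit == true` becomes `Ok(bit) if bit` (clippy's `bool_comparison`), and `.into_iter()` is dropped from ranges, which are already iterators. A self-contained sketch of all three, with made-up data rather than the real attestation committees:

fn main() {
    // Stand-in for a bitfield lookup that can fail past the end of the field.
    let bits: [Result<bool, ()>; 4] = [Ok(true), Ok(false), Err(()), Ok(true)];

    let mut participants = Vec::new();
    for (i, bit) in bits.iter().enumerate() {
        // Match guard `if *b` instead of the needless `if *b == true` comparison.
        match bit {
            Ok(b) if *b => participants.push(i),
            _ => {}
        }
    }
    assert_eq!(participants, vec![0, 3]);

    // Inclusive range `first..=last` instead of `first..last + 1`; no `.into_iter()`
    // is needed because a range already implements Iterator.
    let first_slot = 5u64;
    let last_slot = 8u64;
    let slots: Vec<u64> = (first_slot..=last_slot).collect();
    assert_eq!(slots, vec![5, 6, 7, 8]);
}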
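In `slow_lmd_ghost.rs` the nested `if` used to break ties is collapsed into a single condition: a child replaces the current head either when it has strictly more votes, or when the votes are equal and its hash is smaller. A compact, self-contained sketch of that tie-breaking rule, with 32-byte arrays standing in for `Hash256` and plain integers for vote counts (not the crate's fork-choice types):

fn choose_head(children: &[([u8; 32], u64)]) -> Option<[u8; 32]> {
    let mut best: Option<([u8; 32], u64)> = None;
    for &(hash, votes) in children {
        best = match best {
            None => Some((hash, votes)),
            Some((best_hash, best_votes)) => {
                if votes > best_votes {
                    Some((hash, votes))
                // The collapsed condition from the patch: a tie on votes is resolved
                // by preferring the lexicographically smaller hash.
                } else if votes == best_votes && hash < best_hash {
                    Some((hash, votes))
                } else {
                    Some((best_hash, best_votes))
                }
            }
        };
    }
    best.map(|(hash, _)| hash)
}

fn main() {
    let a = [0xaa; 32];
    let b = [0xbb; 32];
    let c = [0x01; 32];
    // `b` has the most votes and wins outright.
    assert_eq!(choose_head(&[(a, 3), (b, 5), (c, 3)]), Some(b));
    // With equal votes, the smaller hash (`c`) wins the tie-break.
    assert_eq!(choose_head(&[(a, 4), (c, 4)]), Some(c));
}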
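Patch 151 also renames `into_header` to `block_header` and `into_temporary_header` to `temporary_block_header` on `BeaconBlock`. Rust naming conventions (enforced by clippy's `wrong_self_convention`) reserve the `into_` prefix for conversions that consume `self` by value; these methods borrow `&self` and construct a fresh header, so a noun-style getter name is the better fit, and that is most likely what motivated the rename. A sketch under assumed, heavily simplified block and header types:

#[derive(Debug, Clone, PartialEq)]
struct Header {
    slot: u64,
    state_root: [u8; 32],
}

struct Block {
    slot: u64,
    state_root: [u8; 32],
}

impl Block {
    // Borrows `self` and builds a new value, so the name avoids the `into_` prefix.
    fn block_header(&self) -> Header {
        Header {
            slot: self.slot,
            state_root: self.state_root,
        }
    }

    // A "temporary" header zeroes the state root, mirroring the pattern in the patch
    // where `state_root` is set to `spec.zero_hash`.
    fn temporary_block_header(&self) -> Header {
        Header {
            state_root: [0u8; 32],
            ..self.block_header()
        }
    }
}

fn main() {
    let block = Block { slot: 7, state_root: [1u8; 32] };
    assert_eq!(block.block_header().slot, 7);
    assert_eq!(block.temporary_block_header().state_root, [0u8; 32]);
}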