Merge branch 'master' into shuffling

Age Manning 2018-10-03 15:05:18 +10:00
commit 9fac739df1
10 changed files with 57 additions and 56 deletions

View File

@@ -37,6 +37,7 @@ members = [
"beacon_chain/utils/bls",
"beacon_chain/utils/boolean-bitfield",
"beacon_chain/utils/hashing",
"beacon_chain/utils/shuffling",
"beacon_chain/utils/ssz",
"beacon_chain/utils/ssz_helpers",
"lighthouse/db",

View File

@@ -1,2 +0,0 @@
This module includes the fundamental shuffling function. It does not do the
full validator delegation amongst slots.

View File

@@ -0,0 +1,7 @@
[package]
name = "shuffling"
version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
[dependencies]
hashing = { path = "../hashing" }

View File

@@ -1,4 +1,8 @@
extern crate blake2_rfc;
/// A library for performing deterministic, pseudo-random shuffling on a vector.
///
/// This library is designed to conform to the Ethereum 2.0 specification.
extern crate hashing;
mod rng;
@@ -9,13 +13,16 @@ pub enum ShuffleErr {
ExceedsListLength,
}
/// Performs a deterministic, in-place shuffle of a vector of bytes.
/// Performs a deterministic, in-place shuffle of a vector.
///
/// The final order of the shuffle is determined by successive hashes
/// of the supplied `seed`.
pub fn shuffle(
///
/// This is a Fisher-Yates-Durstenfeld shuffle.
pub fn shuffle<T>(
seed: &[u8],
mut list: Vec<usize>)
-> Result<Vec<usize>, ShuffleErr>
mut list: Vec<T>)
-> Result<Vec<T>, ShuffleErr>
{
let mut rng = ShuffleRng::new(seed);
if list.len() > rng.rand_max as usize {
@@ -33,20 +40,16 @@ pub fn shuffle(
#[cfg(test)]
mod tests {
use super::*;
use super::blake2_rfc::blake2s::{ blake2s, Blake2sResult };
fn hash(seed: &[u8]) -> Blake2sResult {
blake2s(32, &[], seed)
}
use super::hashing::canonical_hash;
#[test]
fn test_shuffling() {
let seed = hash(b"4kn4driuctg8");
let seed = canonical_hash(b"4kn4driuctg8");
let list: Vec<usize> = (0..12).collect();
let s = shuffle(seed.as_bytes(), list).unwrap();
let s = shuffle(&seed, list).unwrap();
assert_eq!(
s,
vec![7, 4, 8, 6, 5, 3, 0, 11, 1, 2, 10, 9],
vec![7, 3, 2, 5, 11, 9, 1, 0, 4, 6, 10, 8],
)
}
}
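
The hunk above only shows the shuffle signature going generic over T; the loop body itself is unchanged and not part of the diff. For orientation, below is a minimal sketch of a Fisher-Yates-Durstenfeld pass over a Vec<T>, with a caller-supplied closure standing in for the crate's ShuffleRng; the closure, its constants, and the helper name shuffle_with are illustrative only and not taken from this commit.

// Sketch only: the real `shuffle` draws indices from `ShuffleRng` and returns
// `ShuffleErr::ExceedsListLength` when the list is longer than `rand_max`.
fn shuffle_with<T, F>(mut list: Vec<T>, mut rand_below: F) -> Vec<T>
where
    F: FnMut(usize) -> usize, // must return a uniform index in 0..n
{
    // Walk backwards, swapping each element with one chosen from the
    // not-yet-fixed prefix (including itself).
    for i in (1..list.len()).rev() {
        let j = rand_below(i + 1);
        list.swap(i, j);
    }
    list
}

fn main() {
    // Deterministic stand-in "RNG", purely for demonstration.
    let mut state: u64 = 0x9e3779b97f4a7c15;
    let shuffled = shuffle_with((0..12).collect::<Vec<usize>>(), |n| {
        state = state
            .wrapping_mul(6364136223846793005)
            .wrapping_add(1442695040888963407);
        (state >> 33) as usize % n
    });
    println!("{:?}", shuffled);
}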

View File

@@ -1,4 +1,4 @@
use super::blake2_rfc::blake2s::{ Blake2s, Blake2sResult };
use super::hashing::canonical_hash;
const SEED_SIZE_BYTES: usize = 32;
const RAND_BYTES: usize = 3; // 24 / 8
@@ -7,7 +7,7 @@ const RAND_MAX: u32 = 16_777_216; // 2**24
/// A pseudo-random number generator which given a seed
/// uses successive blake2s hashing to generate "entropy".
pub struct ShuffleRng {
seed: Blake2sResult,
seed: Vec<u8>,
idx: usize,
pub rand_max: u32,
}
@@ -16,7 +16,7 @@ impl ShuffleRng {
/// Create a new instance given some "seed" bytes.
pub fn new(initial_seed: &[u8]) -> Self {
Self {
seed: hash(initial_seed),
seed: canonical_hash(initial_seed),
idx: 0,
rand_max: RAND_MAX,
}
@@ -24,7 +24,7 @@ impl ShuffleRng {
/// "Regenerates" the seed by hashing it.
fn rehash_seed(&mut self) {
self.seed = hash(self.seed.as_bytes());
self.seed = canonical_hash(&self.seed);
self.idx = 0;
}
@@ -36,7 +36,7 @@ impl ShuffleRng {
self.rand()
} else {
int_from_byte_slice(
self.seed.as_bytes(),
&self.seed,
self.idx - RAND_BYTES,
)
}
@@ -68,13 +68,6 @@ fn int_from_byte_slice(source: &[u8], offset: usize) -> u32 {
)
}
/// Perform a blake2s hash on the given bytes.
fn hash(bytes: &[u8]) -> Blake2sResult {
let mut hasher = Blake2s::new(SEED_SIZE_BYTES);
hasher.update(bytes);
hasher.finalize()
}
#[cfg(test)]
mod tests {
@@ -115,15 +108,12 @@ mod tests {
#[test]
fn test_shuffling_hash_fn() {
let digest = hash(hash(b"4kn4driuctg8").as_bytes()); // double-hash is intentional
let digest_bytes = digest.as_bytes();
let digest = canonical_hash(&canonical_hash(&"4kn4driuctg8".as_bytes())); // double-hash is intentional
let expected = [
0xff, 0xff, 0xff, 0x8f, 0xbb, 0xc7, 0xab, 0x64, 0x43, 0x9a,
0xe5, 0x12, 0x44, 0xd8, 0x70, 0xcf, 0xe5, 0x79, 0xf6, 0x55,
0x6b, 0xbd, 0x81, 0x43, 0xc5, 0xcd, 0x70, 0x2b, 0xbe, 0xe3,
0x87, 0xc7,
103, 21, 99, 143, 60, 75, 116, 81, 248, 175, 190, 114, 54, 65, 23, 8, 3, 116,
160, 178, 7, 75, 63, 47, 180, 239, 191, 247, 57, 194, 144, 88
];
assert_eq!(digest_bytes.len(), expected.len());
assert_eq!(digest_bytes, expected)
assert_eq!(digest.len(), expected.len());
assert_eq!(digest, expected)
}
}
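
The hunk above swaps blake2_rfc for the crate-local canonical_hash but leaves the byte-extraction helper untouched, so only its signature and closing parenthesis appear in the diff. As a reading aid, here is a plausible body for int_from_byte_slice, assembling a u32 from RAND_BYTES = 3 consecutive digest bytes; the byte order shown is an assumption for illustration, not taken from this commit.

/// Sketch: interpret three consecutive digest bytes as a u32.
/// Byte order is assumed (most significant byte first); the real helper may differ.
fn int_from_byte_slice(source: &[u8], offset: usize) -> u32 {
    (u32::from(source[offset]) << 16)
        | (u32::from(source[offset + 1]) << 8)
        | u32::from(source[offset + 2])
}

fn main() {
    // 0x010203 == 66051, read starting at offset 1 of the slice below.
    assert_eq!(int_from_byte_slice(&[0xff, 0x01, 0x02, 0x03], 1), 66051);
    println!("ok");
}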

View File

@@ -38,7 +38,7 @@ impl Client {
// Start the network thread
let network_state = NetworkState::new(
&config.data_dir,
&config.p2p_listen_port,
config.p2p_listen_port,
&log).expect("Network setup failed");
let (network_thread, network_tx, network_rx) = {
let (message_sender, message_receiver) = unbounded();
let (event_sender, event_receiver) = unbounded();
@@ -46,9 +46,9 @@ impl Client {
let thread = thread::spawn(move || {
network_listen(
network_state,
event_sender,
&event_sender,
message_receiver,
network_log,
&network_log,
);
});
(thread, message_sender, event_receiver)
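
The changes in this hunk follow a common clippy pattern: p2p_listen_port is a u16, which is Copy and cheaper to pass by value than behind a reference, while the channel sender and logger are borrowed because the spawning code keeps using them. A trivial illustration of that calling convention, with names invented for the sketch:

// Hypothetical signatures, not from the commit: small Copy values by value,
// shared handles by reference.
fn start_listener(port: u16, label: &str) {
    println!("[{}] would listen on 0.0.0.0:{}", label, port);
}

fn main() {
    let label = String::from("network");
    let port: u16 = 9000;
    start_listener(port, &label); // `port` is copied, `label` is only borrowed
    // Both values remain usable afterwards.
    println!("still have port {} and label {}", port, label);
}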

View File

@@ -70,7 +70,6 @@ fn handle_network_message(
);
Ok(())
}
_ => Ok(())
}
}
Err(_) => {

View File

@@ -4,13 +4,16 @@ pub enum WireMessageDecodeError {
}
pub enum WireMessageHeader {
Blocks,
/*
// Leave out until used
Status,
NewBlockHashes,
GetBlockHashes,
BlockHashes,
GetBlocks,
Blocks,
NewBlock,
*/
}
pub struct WireMessage<'a> {

View File

@@ -37,9 +37,9 @@ use self::bytes::Bytes;
pub use self::libp2p_floodsub::Message;
pub fn listen(state: NetworkState,
events_to_app: UnboundedSender<NetworkEvent>,
events_to_app: &UnboundedSender<NetworkEvent>,
raw_rx: UnboundedReceiver<OutgoingMessage>,
log: Logger)
log: &Logger)
{
let peer_store = state.peer_store;
let peer_id = state.peer_id;
@@ -83,7 +83,7 @@ pub fn listen(state: NetworkState,
let kad_config = libp2p_kad::KademliaConfig {
parallelism: 3,
record_store: (),
peer_store: peer_store,
peer_store,
local_peer_id: peer_id.clone(),
timeout: Duration::from_secs(2)
};

View File

@@ -31,17 +31,17 @@ pub struct NetworkState {
}
impl NetworkState {
/// Create a new libp2p network state. Used to initialize
/// Create a new libp2p network state. Used to initialize
/// network service.
pub fn new(
// config: LighthouseConfig,
// config: LighthouseConfig,
base_dir: &Path,
listen_port: &u16,
log: &Logger)
listen_port: u16,
log: &Logger)
-> Result <Self, Box<Error>>
{
let curve = Secp256k1::new();
let seckey = match
let seckey = match
NetworkState::load_secret_key_from_pem_file(base_dir, &curve)
{
Ok(k) => k,
@@ -71,23 +71,23 @@ impl NetworkState {
/// Return a TCP multiaddress on 0.0.0.0 for a given port.
pub fn multiaddr_on_port(port: &str) -> Multiaddr {
return format!("/ip4/0.0.0.0/tcp/{}", port)
format!("/ip4/0.0.0.0/tcp/{}", port)
.parse::<Multiaddr>().unwrap()
}
pub fn add_peer(&mut self,
peer_id: PeerId,
peer_id: &PeerId,
multiaddr: Multiaddr,
duration_secs: u64) {
self.peer_store.peer_or_create(&peer_id)
.add_addr(multiaddr, Duration::from_secs(duration_secs));
}
/// Instantiate a SecretKey from a .pem file on disk.
/// Instantiate a SecretKey from a .pem file on disk.
pub fn load_secret_key_from_pem_file(
base_dir: &Path,
curve: &Secp256k1)
-> Result<SecretKey, Box<Error>>
-> Result<SecretKey, Box<Error>>
{
let path = base_dir.join(LOCAL_PEM_FILE);
let mut contents = String::new();
@@ -97,12 +97,12 @@ impl NetworkState {
let key = SecretKey::from_slice(curve, &pem_key.contents)?;
Ok(key)
}
/// Generate a new SecretKey and store it on disk as a .pem file.
/// Generate a new SecretKey and store it on disk as a .pem file.
pub fn generate_new_secret_key(
base_dir: &Path,
curve: &Secp256k1)
-> Result<SecretKey, Box<Error>>
-> Result<SecretKey, Box<Error>>
{
let mut rng = rand::thread_rng();
let sk = SecretKey::new(&curve, &mut rng);
@@ -113,7 +113,7 @@ impl NetworkState {
let s_string = pem::encode(&pem_key);
let path = base_dir.join(LOCAL_PEM_FILE);
let mut s_file = File::create(path)?;
s_file.write(s_string.as_bytes())?;
s_file.write_all(s_string.as_bytes())?;
Ok(sk)
}
}
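
The write to write_all change in the last hunk is a correctness fix: std::io::Write::write may write only part of the buffer (and its byte count was being discarded here), whereas write_all retries until the whole buffer has been written or an error occurs. A contrived demonstration with a writer that accepts at most four bytes per call; the ShortWriter type is made up for this sketch.

use std::io::{self, Write};

// Toy sink that performs short writes: at most 4 bytes per `write` call.
struct ShortWriter {
    buf: Vec<u8>,
}

impl Write for ShortWriter {
    fn write(&mut self, data: &[u8]) -> io::Result<usize> {
        let n = data.len().min(4);
        self.buf.extend_from_slice(&data[..n]);
        Ok(n) // `write` is allowed to report fewer bytes than requested
    }
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}

fn main() -> io::Result<()> {
    let payload = b"-----BEGIN EC PRIVATE KEY-----";

    let mut short = ShortWriter { buf: Vec::new() };
    let written = short.write(payload)?; // only the first 4 bytes land
    assert_eq!(written, 4);

    let mut full = ShortWriter { buf: Vec::new() };
    full.write_all(payload)?; // loops internally until everything is written
    assert_eq!(full.buf, payload.to_vec());
    Ok(())
}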