Fix some clippy lints
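
The lints involved (inferred from the diff below; names per clippy): needless_pass_by_value and trivially_copy_pass_by_ref (borrow or copy arguments instead of moving them), ptr_arg (&[T] over &Vec<T>), match_bool (if/else over match on a bool), expect_fun_call (unwrap_or_else + panic! over expect(&format!(..))), cast_lossless (u32::from / u64::from over `as` casts), unreadable_literal (16_777_216), redundant_field_names (db over db: db), needless_return, and let_and_return. The sync crate's unused block module is also removed, with a private process_unverified_blocks stub inlined into the network module.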

Paul Hauner 2018-09-22 08:17:31 +10:00
parent 091379f011
commit 616cc616db
GPG Key ID: 303E4494BB28068C
13 changed files with 85 additions and 111 deletions

View File

@@ -25,8 +25,8 @@ impl Client {
     ///
     /// Presently, this means starting network and sync threads
     /// and plumbing them together.
-    pub fn new(config: LighthouseConfig,
-               log: Logger)
+    pub fn new(config: &LighthouseConfig,
+               log: &Logger)
         -> Self
     {
         // Open the local db
@@ -65,8 +65,8 @@ impl Client {
                 sync_db,
                 network_tx.clone(),
                 network_rx,
-                sync_out_sender,
-                sync_in_receiver,
+                &sync_out_sender,
+                &sync_in_receiver,
                 sync_log,
             );
         });
@@ -75,7 +75,7 @@ impl Client {

         // Return the client struct
         Self {
-            db: db,
+            db,
             network_thread,
             sync_thread,
         }

View File

@@ -23,7 +23,7 @@ impl LighthouseConfig {
             home.join(DEFAULT_LIGHTHOUSE_DIR)
         };
         fs::create_dir_all(&data_dir)
-            .expect(&format!("Unable to create {:?}", &data_dir));
+            .unwrap_or_else(|_| panic!("Unable to create {:?}", &data_dir));
         let p2p_listen_port = 0;
         Self {
             data_dir,
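
The expect-to-unwrap_or_else rewrite here (and in DiskDB below) is what clippy's expect_fun_call lint asks for: expect takes its message by value, so the format! runs and allocates even when the call succeeds, whereas the closure given to unwrap_or_else only runs on the error path. A minimal standalone sketch of the two forms, using a hypothetical directory path:

    use std::fs;
    use std::path::PathBuf;

    fn main() {
        let data_dir = PathBuf::from("/tmp/lighthouse-example"); // hypothetical path

        // Before: the message is formatted eagerly, even on success.
        // fs::create_dir_all(&data_dir)
        //     .expect(&format!("Unable to create {:?}", &data_dir));

        // After: the panic message is only built if create_dir_all fails.
        fs::create_dir_all(&data_dir)
            .unwrap_or_else(|_| panic!("Unable to create {:?}", &data_dir));
    }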

View File

@@ -40,7 +40,7 @@ impl DiskDB {
         * Initialise the path
         */
        fs::create_dir_all(&path)
-            .expect(&format!("Unable to create {:?}", &path));
+            .unwrap_or_else(|_| panic!("Unable to create {:?}", &path));
        let db_path = path.join("database");

        /*

View File

@@ -53,12 +53,11 @@ impl ClientDB for MemoryDB {
         let db = self.db.read().unwrap();
         let known_columns = self.known_columns.read().unwrap();
-        match known_columns.contains(&col.to_string()) {
-            false => Err(DBError{ message: "Unknown column".to_string() }),
-            true => {
-                let column_key = MemoryDB::get_key_for_col(col, key);
-                Ok(db.get(&column_key).and_then(|val| Some(val.clone())))
-            }
+        if known_columns.contains(&col.to_string()) {
+            let column_key = MemoryDB::get_key_for_col(col, key);
+            Ok(db.get(&column_key).and_then(|val| Some(val.clone())))
+        } else {
+            Err(DBError{ message: "Unknown column".to_string() })
         }
     }
@@ -70,13 +69,12 @@ impl ClientDB for MemoryDB {
         let mut db = self.db.write().unwrap();
         let known_columns = self.known_columns.read().unwrap();
-        match known_columns.contains(&col.to_string()) {
-            false => Err(DBError{ message: "Unknown column".to_string() }),
-            true => {
-                let column_key = MemoryDB::get_key_for_col(col, key);
-                db.insert(column_key, val.to_vec());
-                Ok(())
-            }
+        if known_columns.contains(&col.to_string()) {
+            let column_key = MemoryDB::get_key_for_col(col, key);
+            db.insert(column_key, val.to_vec());
+            Ok(())
+        } else {
+            Err(DBError{ message: "Unknown column".to_string() })
         }
     }
 }
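
Both MemoryDB methods hit clippy's match_bool lint: matching a bool against true/false is just an if/else with extra ceremony, and the rewrite also puts the success branch first. A reduced sketch of the pattern, with a plain HashSet standing in for the column registry:

    use std::collections::HashSet;

    // `known_columns` stands in for the DB's real column registry.
    fn get(known_columns: &HashSet<String>, col: &str) -> Result<String, String> {
        // Before: match known_columns.contains(col) { true => .., false => .. }
        // After: plain if/else, success branch first.
        if known_columns.contains(col) {
            Ok(format!("{}:value", col))
        } else {
            Err("Unknown column".to_string())
        }
    }

    fn main() {
        let mut known = HashSet::new();
        known.insert("block".to_string());
        assert!(get(&known, "block").is_ok());
        assert!(get(&known, "missing").is_err());
    }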

View File

@@ -64,7 +64,7 @@ fn main() {
         "data_dir" => &config.data_dir.to_str(),
         "port" => &config.p2p_listen_port);

-    let client = Client::new(config, log.new(o!()));
+    let client = Client::new(&config, &log);

     client.sync_thread.join().unwrap();
     info!(log, "Exiting.");

View File

@@ -13,16 +13,16 @@ use super::TransitionError;
 /// See this slide for more information:
 /// https://tinyurl.com/ybzn2spw

 pub fn attestation_parent_hashes(
-    cycle_length: &u8,
-    block_slot: &u64,
-    attestation_slot: &u64,
-    current_hashes: &Vec<Hash256>,
-    oblique_hashes: &Vec<Hash256>)
+    cycle_length: u8,
+    block_slot: u64,
+    attestation_slot: u64,
+    current_hashes: &[Hash256],
+    oblique_hashes: &[Hash256])
     -> Result<Vec<Hash256>, TransitionError>
 {
     // This cast places a limit on cycle_length. If you change it, check math
     // for overflow.
-    let cycle_length: u64 = *cycle_length as u64;
+    let cycle_length: u64 = u64::from(cycle_length);
     if current_hashes.len() as u64 != (cycle_length * 2) {
         return Err(TransitionError::InvalidInput(String::from(
@@ -69,7 +69,7 @@ pub fn attestation_parent_hashes(
     let mut hashes = Vec::new();
     hashes.extend_from_slice(
         &current_hashes[(start as usize)..(end as usize)]);
-    hashes.append(&mut oblique_hashes.clone());
+    hashes.extend_from_slice(oblique_hashes);
     Ok(hashes)
 }
@@ -98,9 +98,9 @@ mod tests {
         let current_hashes = get_range_of_hashes(3, 19);
         let oblique_hashes = get_range_of_hashes(100, 102);
         let result = attestation_parent_hashes(
-            &cycle_length,
-            &block_slot,
-            &attestation_slot,
+            cycle_length,
+            block_slot,
+            attestation_slot,
             &current_hashes,
             &oblique_hashes);
         assert!(result.is_ok());
@@ -123,9 +123,9 @@ mod tests {
         let current_hashes = get_range_of_hashes(3, 19);
         let oblique_hashes = get_range_of_hashes(100, 108);
         let result = attestation_parent_hashes(
-            &cycle_length,
-            &block_slot,
-            &attestation_slot,
+            cycle_length,
+            block_slot,
+            attestation_slot,
             &current_hashes,
             &oblique_hashes);
         assert!(result.is_ok());
@@ -148,9 +148,9 @@ mod tests {
         let current_hashes = get_range_of_hashes(3, 19);
         let oblique_hashes = vec![];
         let result = attestation_parent_hashes(
-            &cycle_length,
-            &block_slot,
-            &attestation_slot,
+            cycle_length,
+            block_slot,
+            attestation_slot,
             &current_hashes,
             &oblique_hashes);
         assert!(result.is_ok());
@@ -171,9 +171,9 @@ mod tests {
         let current_hashes = get_range_of_hashes(0, 16);
         let oblique_hashes = vec![];
         let result = attestation_parent_hashes(
-            &cycle_length,
-            &block_slot,
-            &attestation_slot,
+            cycle_length,
+            block_slot,
+            attestation_slot,
             &current_hashes,
             &oblique_hashes);
         assert!(result.is_ok());
@@ -194,9 +194,9 @@ mod tests {
         let current_hashes = get_range_of_hashes(0, 16);
         let oblique_hashes = vec![];
         let result = attestation_parent_hashes(
-            &cycle_length,
-            &block_slot,
-            &attestation_slot,
+            cycle_length,
+            block_slot,
+            attestation_slot,
             &current_hashes,
             &oblique_hashes);
         assert!(result.is_err());
@@ -213,9 +213,9 @@ mod tests {
         let current_hashes = get_range_of_hashes(0, 15);
         let oblique_hashes = vec![];
         let result = attestation_parent_hashes(
-            &cycle_length,
-            &block_slot,
-            &attestation_slot,
+            cycle_length,
+            block_slot,
+            attestation_slot,
             &current_hashes,
             &oblique_hashes);
         assert!(result.is_err());
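
This signature collects three lints at once: trivially_copy_pass_by_ref (cycle_length and the slot numbers are small Copy integers, cheaper by value than behind a reference), ptr_arg (&[Hash256] accepts anything sliceable, and &Vec coerces to it, so call sites barely change), and cast_lossless (u64::from can never truncate, unlike `as`). A cut-down sketch with u64 standing in for Hash256:

    // Sketch only: u64 stands in for the real Hash256 type.
    fn parent_hashes(cycle_length: u8, current_hashes: &[u64], oblique_hashes: &[u64]) -> Vec<u64> {
        // u64::from is a lossless alternative to an `as` cast (cast_lossless).
        let cycle_length = u64::from(cycle_length);
        let take = (cycle_length as usize).min(current_hashes.len());

        let mut hashes = Vec::new();
        hashes.extend_from_slice(&current_hashes[..take]);
        // extend_from_slice copies straight from the borrowed slice, avoiding
        // the temporary Vec that append(&mut oblique_hashes.clone()) built.
        hashes.extend_from_slice(oblique_hashes);
        hashes
    }

    fn main() {
        let current = vec![1, 2, 3, 4];
        let oblique = vec![100, 101];
        // &Vec<u64> coerces to &[u64], so call sites keep the same shape.
        assert_eq!(parent_hashes(2, &current, &oblique), vec![1, 2, 100, 101]);
    }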

View File

@@ -2,7 +2,7 @@ use super::blake2_rfc::blake2s::{ Blake2s, Blake2sResult };

 const SEED_SIZE_BYTES: usize = 32;
 const RAND_BYTES: usize = 3; // 24 / 8
-const RAND_MAX: u32 = 16777216; // 2**24
+const RAND_MAX: u32 = 16_777_216; // 2**24

 /// A pseudo-random number generator which given a seed
 /// uses successive blake2s hashing to generate "entropy".
@@ -31,17 +31,14 @@ impl ShuffleRng {
     /// Extracts 3 bytes from the `seed`. Rehashes seed if required.
     fn rand(&mut self) -> u32 {
         self.idx += RAND_BYTES;
-        match self.idx >= SEED_SIZE_BYTES {
-            true => {
-                self.rehash_seed();
-                self.rand()
-            }
-            false => {
-                int_from_byte_slice(
-                    self.seed.as_bytes(),
-                    self.idx - RAND_BYTES,
-                )
-            }
+        if self.idx >= SEED_SIZE_BYTES {
+            self.rehash_seed();
+            self.rand()
+        } else {
+            int_from_byte_slice(
+                self.seed.as_bytes(),
+                self.idx - RAND_BYTES,
+            )
         }
     }
@@ -65,9 +62,9 @@ impl ShuffleRng {
 /// Returns that integer.
 fn int_from_byte_slice(source: &[u8], offset: usize) -> u32 {
     (
-        source[offset + 2] as u32) |
-        ((source[offset + 1] as u32) << 8) |
-        ((source[offset    ] as u32) << 16
+        u32::from(source[offset + 2])) |
+        (u32::from(source[offset + 1]) << 8) |
+        (u32::from(source[offset    ]) << 16
     )
 }
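
Beyond the match_bool fix in rand, this file picks up unreadable_literal (group long literals with underscores) and cast_lossless (u32::from over `as` for widening). A standalone sketch of the 3-byte big-endian read:

    const RAND_MAX: u32 = 16_777_216; // 2**24, grouped per clippy's unreadable_literal

    /// Reads three bytes at `offset` as a big-endian 24-bit integer.
    fn int_from_byte_slice(source: &[u8], offset: usize) -> u32 {
        // u32::from is a lossless widening; an `as` cast could silently
        // truncate if the types ever changed (clippy's cast_lossless).
        u32::from(source[offset + 2])
            | (u32::from(source[offset + 1]) << 8)
            | (u32::from(source[offset]) << 16)
    }

    fn main() {
        let bytes = [0x01, 0x02, 0x03];
        let n = int_from_byte_slice(&bytes, 0);
        assert_eq!(n, 0x0001_0203);
        assert!(n < RAND_MAX);
    }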

View File

@@ -1,24 +0,0 @@
-use std::sync::Arc;
-
-use super::db::ClientDB;
-
-use slog::Logger;
-
-pub enum BlockStatus {
-    Valid,
-    AlreadyKnown,
-    TooOld,
-    TimeInvalid,
-    UnknownPoWHash,
-    NoAttestations,
-    InvalidAttestation,
-    NotProposerSigned,
-}
-
-pub fn process_unverified_blocks(
-    _serialized_block: &[u8],
-    _db: Arc<ClientDB>,
-    _log: Logger)
-{
-    //
-}

View File

@@ -3,7 +3,6 @@ extern crate slog;
 extern crate tokio;
 extern crate network_libp2p;

-pub mod block;
 pub mod network;
 pub mod sync_future;
 pub mod wire_protocol;

View File

@@ -8,8 +8,6 @@ use super::network_libp2p::message::{
     NetworkEventType,
 };

-use super::block::process_unverified_blocks;
-
 use super::wire_protocol::{
     WireMessage,
     WireMessageHeader,
@@ -25,9 +23,9 @@ use super::futures::sync::mpsc::{
 /// (e.g., libp2p) has an event to push up to the sync process.
 pub fn handle_network_event(
     event: NetworkEvent,
-    db: Arc<ClientDB>,
-    network_tx: UnboundedSender<OutgoingMessage>,
-    log: Logger)
+    db: &Arc<ClientDB>,
+    network_tx: &UnboundedSender<OutgoingMessage>,
+    log: &Logger)
     -> Result<(), ()>
 {
     debug!(&log, "";
@@ -38,10 +36,10 @@ pub fn handle_network_event(
         NetworkEventType::Message => {
             if let Some(data) = event.data {
                 handle_network_message(
-                    data,
-                    db,
-                    network_tx,
-                    log)
+                    &data,
+                    &db,
+                    &network_tx,
+                    &log)
             } else {
                 Ok(())
             }
@@ -55,10 +53,10 @@ pub fn handle_network_event(
 /// This function should be called whenever a peer from a network
 /// (e.g., libp2p) has sent a message to us.
 fn handle_network_message(
-    message: Vec<u8>,
-    db: Arc<ClientDB>,
-    _network_tx: UnboundedSender<OutgoingMessage>,
-    log: Logger)
+    message: &[u8],
+    db: &Arc<ClientDB>,
+    _network_tx: &UnboundedSender<OutgoingMessage>,
+    log: &Logger)
     -> Result<(), ()>
 {
     match WireMessage::decode(&message) {
@@ -67,8 +65,8 @@ fn handle_network_message(
             WireMessageHeader::Blocks => {
                 process_unverified_blocks(
                     msg.body,
-                    db,
-                    log
+                    &db,
+                    &log
                 );
                 Ok(())
             }
@@ -76,7 +74,14 @@ fn handle_network_message(
             }
         }
         Err(_) => {
-            return Ok(())  // No need to pass the error back
+            Ok(())  // No need to pass the error back
         }
     }
 }
+
+fn process_unverified_blocks(_message: &[u8],
+                             _db: &Arc<ClientDB>,
+                             _log: &Logger)
+{
+    //
+}
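
handle_network_event and handle_network_message previously took the Arc, the channel sender, and the Logger by value, forcing a clone per call; borrowing them is the fix for clippy's needless_pass_by_value (and the dropped `return` in the Err arm is needless_return). A minimal sketch of the borrow-instead-of-move shape, with stub types in place of ClientDB and Logger:

    use std::sync::Arc;

    // Stub stand-ins for the real ClientDB and Logger types.
    struct Db;
    struct Log;

    // Borrowed parameters: the caller keeps ownership and clones nothing.
    fn handle_message(message: &[u8], _db: &Arc<Db>, _log: &Log) -> Result<(), ()> {
        if message.is_empty() {
            return Err(());
        }
        Ok(()) // tail expression, no `return` needed (needless_return)
    }

    fn main() {
        let db = Arc::new(Db);
        let log = Log;
        // The same Arc and logger are reused across calls without clone().
        assert!(handle_message(b"block", &db, &log).is_ok());
        assert!(handle_message(b"", &db, &log).is_err());
    }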

View File

@@ -28,8 +28,8 @@ pub fn run_sync_future(
     db: Arc<ClientDB>,
     network_tx: NetworkSender,
     network_rx: NetworkReceiver,
-    _sync_tx: SyncSender,
-    _sync_rx: SyncReceiver,
+    _sync_tx: &SyncSender,
+    _sync_rx: &SyncReceiver,
     log: Logger)
 {
     let network_future = {
@@ -37,9 +37,9 @@ pub fn run_sync_future(
         .for_each(move |event| {
             handle_network_event(
                 event,
-                db.clone(),
-                network_tx.clone(),
-                log.clone())
+                &db.clone(),
+                &network_tx.clone(),
+                &log.clone())
         })
         .map_err(|_| panic!("rx failed"))
     };

View File

@@ -19,7 +19,7 @@ pub struct WireMessage<'a> {
 }

 impl<'a> WireMessage<'a> {
-    pub fn decode(bytes: &'a Vec<u8>)
+    pub fn decode(bytes: &'a [u8])
         -> Result<Self, WireMessageDecodeError>
     {
         if let Some((header_byte, body)) = bytes.split_first() {

View File

@@ -7,11 +7,10 @@ pub use slog::Logger;

 pub fn test_logger() -> slog::Logger {
     let plain = slog_term::PlainSyncDecorator::new(slog_term::TestStdoutWriter);
-    let logger = Logger::root(
+    Logger::root(
         slog_term::FullFormat::new(plain)
             .build().fuse(), o!()
-    );
-    logger
+    )
 }

 pub fn get_logger() -> slog::Logger {
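
test_logger tripped let_and_return: it bound the Logger::root(..) expression to a local only to return it on the next line, when the expression itself can be the function's tail value. The same shape, sketched with a plain String in place of the slog Logger:

    // Before (clippy's let_and_return):
    //     let logger = build_logger();
    //     logger
    // After: the expression is the function's tail value.

    // String stands in for the slog Logger here.
    fn test_logger() -> String {
        format!("logger@{}", "stdout")
    }

    fn main() {
        assert_eq!(test_logger(), "logger@stdout");
    }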