rust 1.53.0 updates (#2411)
## Issue Addressed

`make lint` failing on Rust 1.53.0.

## Proposed Changes

Updates for the 1.53.0 toolchain.

## Additional Info

I haven't figured out why yet, but we are now hitting the recursion limit in a few crates, so I had to add `#![recursion_limit = "256"]` in a few places.

Co-authored-by: realbigsean <seananderson33@gmail.com>
Co-authored-by: Michael Sproul <michael@sigmaprime.io>
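As a reference for the `recursion_limit` change above: the attribute is a crate-level setting that must sit at the very top of the crate root (a `main.rs`, `lib.rs`, or the first file of an integration test), which is where the hunks below add it. A minimal sketch; the surrounding code is illustrative only, and the exact cause of the deeper macro recursion was not identified in this PR:

```rust
// Crate-level attribute: raises the compiler's recursion limit (default 128)
// for this crate only. It must appear before any items in the crate root.
#![recursion_limit = "256"]

fn main() {
    // ...crate contents unchanged...
}
```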
This commit is contained in:
parent 3dc1eb5eb6
commit b84ff9f793
3 .github/custom/config.toml (vendored, new file)
@@ -0,0 +1,3 @@
+# Custom Cargo config to be used for the udeps CI job
+[http]
+multiplexing = false
6 .github/workflows/test-suite.yml (vendored)
@@ -12,7 +12,7 @@ env:
   # Deny warnings in CI
   RUSTFLAGS: "-D warnings"
   # The Nightly version used for cargo-udeps, might need updating from time to time.
-  PINNED_NIGHTLY: nightly-2021-03-01
+  PINNED_NIGHTLY: nightly-2021-06-09
 jobs:
   target-branch-check:
     name: target-branch-check
@@ -198,6 +198,10 @@ jobs:
       run: rustup toolchain install $PINNED_NIGHTLY
     - name: Install cargo-udeps
       run: cargo install cargo-udeps --locked
+    - name: Create Cargo config dir
+      run: mkdir -p .cargo
+    - name: Install custom Cargo config
+      run: cp -f .github/custom/config.toml .cargo/config.toml
     - name: Run cargo udeps to identify unused crates in the dependency graph
       run: make udeps
       env:
@@ -236,7 +236,7 @@ pub fn cli_run<T: EthSpec>(
             .map_err(|e| {
                 format!(
                     "Error registering validator {}: {:?}",
-                    voting_pubkey.to_hex_string(),
+                    voting_pubkey.as_hex_string(),
                     e
                 )
             })?;
@@ -250,7 +250,7 @@ pub fn cli_run<T: EthSpec>(
             .build()
             .map_err(|e| format!("Unable to build validator directory: {:?}", e))?;
 
-        println!("{}/{}\t{}", i + 1, n, voting_pubkey.to_hex_string());
+        println!("{}/{}\t{}", i + 1, n, voting_pubkey.as_hex_string());
     }
 
     Ok(())
@@ -242,7 +242,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), String>
             .map_err(|e| {
                 format!(
                     "Error registering validator {}: {:?}",
-                    voting_pubkey.to_hex_string(),
+                    voting_pubkey.as_hex_string(),
                     e
                 )
             })?;
 
@@ -762,8 +762,8 @@ mod test {
 
         let eth1_chain = get_eth1_chain();
 
-        assert_eq!(
-            eth1_chain.use_dummy_backend, false,
+        assert!(
+            !eth1_chain.use_dummy_backend,
             "test should not use dummy backend"
         );
 
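The same rewrite repeats across the test hunks below: comparing a `bool` against a literal inside `assert_eq!` becomes a plain `assert!`, which is what newer clippy releases suggest (presumably the `bool_assert_comparison` lint; the PR does not name it). A minimal before/after sketch with an illustrative variable:

```rust
fn check_backend(use_dummy_backend: bool) {
    // Before: assert_eq!(use_dummy_backend, false, "test should not use dummy backend");
    // After: negate the condition and assert it directly.
    assert!(!use_dummy_backend, "test should not use dummy backend");
}
```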
@@ -795,8 +795,8 @@ mod test {
         let eth1_chain = get_eth1_chain();
         let max_deposits = <E as EthSpec>::MaxDeposits::to_u64();
 
-        assert_eq!(
-            eth1_chain.use_dummy_backend, false,
+        assert!(
+            !eth1_chain.use_dummy_backend,
             "test should not use dummy backend"
         );
 
@@ -877,8 +877,8 @@ mod test {
 
         let eth1_chain = get_eth1_chain();
 
-        assert_eq!(
-            eth1_chain.use_dummy_backend, false,
+        assert!(
+            !eth1_chain.use_dummy_backend,
             "test should not use dummy backend"
        );
 
@@ -901,8 +901,8 @@ mod test {
 
         let eth1_chain = get_eth1_chain();
 
-        assert_eq!(
-            eth1_chain.use_dummy_backend, false,
+        assert!(
+            !eth1_chain.use_dummy_backend,
             "test should not use dummy backend"
         );
 
@@ -353,11 +353,11 @@ mod tests {
     #[test]
     fn lock() {
         let lock = Lock::new();
-        assert_eq!(lock.lock(), false);
-        assert_eq!(lock.lock(), true);
-        assert_eq!(lock.lock(), true);
+        assert!(!lock.lock());
+        assert!(lock.lock());
+        assert!(lock.lock());
         lock.unlock();
-        assert_eq!(lock.lock(), false);
-        assert_eq!(lock.lock(), true);
+        assert!(!lock.lock());
+        assert!(lock.lock());
     }
 }
@@ -88,14 +88,14 @@ impl EnrExt for Enr {
             if let Some(udp) = self.udp() {
                 let mut multiaddr: Multiaddr = ip.into();
                 multiaddr.push(Protocol::Udp(udp));
-                multiaddr.push(Protocol::P2p(peer_id.clone().into()));
+                multiaddr.push(Protocol::P2p(peer_id.into()));
                 multiaddrs.push(multiaddr);
             }
 
             if let Some(tcp) = self.tcp() {
                 let mut multiaddr: Multiaddr = ip.into();
                 multiaddr.push(Protocol::Tcp(tcp));
-                multiaddr.push(Protocol::P2p(peer_id.clone().into()));
+                multiaddr.push(Protocol::P2p(peer_id.into()));
                 multiaddrs.push(multiaddr);
             }
         }
@@ -103,7 +103,7 @@ impl EnrExt for Enr {
             if let Some(udp6) = self.udp6() {
                 let mut multiaddr: Multiaddr = ip6.into();
                 multiaddr.push(Protocol::Udp(udp6));
-                multiaddr.push(Protocol::P2p(peer_id.clone().into()));
+                multiaddr.push(Protocol::P2p(peer_id.into()));
                 multiaddrs.push(multiaddr);
             }
 
@@ -128,7 +128,7 @@ impl EnrExt for Enr {
             if let Some(tcp) = self.tcp() {
                 let mut multiaddr: Multiaddr = ip.into();
                 multiaddr.push(Protocol::Tcp(tcp));
-                multiaddr.push(Protocol::P2p(peer_id.clone().into()));
+                multiaddr.push(Protocol::P2p(peer_id.into()));
                 multiaddrs.push(multiaddr);
             }
         }
@@ -154,7 +154,7 @@ impl EnrExt for Enr {
             if let Some(udp) = self.udp() {
                 let mut multiaddr: Multiaddr = ip.into();
                 multiaddr.push(Protocol::Udp(udp));
-                multiaddr.push(Protocol::P2p(peer_id.clone().into()));
+                multiaddr.push(Protocol::P2p(peer_id.into()));
                 multiaddrs.push(multiaddr);
             }
         }
@@ -556,20 +556,18 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
             if known_meta_data.seq_number < meta_data.seq_number {
                 debug!(self.log, "Updating peer's metadata";
                     "peer_id" => %peer_id, "known_seq_no" => known_meta_data.seq_number, "new_seq_no" => meta_data.seq_number);
-                peer_info.meta_data = Some(meta_data);
             } else {
                 debug!(self.log, "Received old metadata";
                     "peer_id" => %peer_id, "known_seq_no" => known_meta_data.seq_number, "new_seq_no" => meta_data.seq_number);
                 // Updating metadata even in this case to prevent storing
                 // incorrect `metadata.attnets` for a peer
-                peer_info.meta_data = Some(meta_data);
             }
         } else {
             // we have no meta-data for this peer, update
             debug!(self.log, "Obtained peer's metadata";
                 "peer_id" => %peer_id, "new_seq_no" => meta_data.seq_number);
-            peer_info.meta_data = Some(meta_data);
         }
+        peer_info.meta_data = Some(meta_data);
     } else {
         crit!(self.log, "Received METADATA from an unknown peer";
             "peer_id" => %peer_id);
@@ -583,11 +581,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
         // port is removed, which is assumed to be associated with the discv5 protocol (and
         // therefore irrelevant for other libp2p components).
         let mut out_list = enr.multiaddr();
-        out_list.retain(|addr| {
-            addr.iter()
-                .find(|v| matches!(v, MProtocol::Udp(_)))
-                .is_none()
-        });
+        out_list.retain(|addr| !addr.iter().any(|v| matches!(v, MProtocol::Udp(_))));
 
         out_list
     } else {
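The `.find(..).is_none()` chains here (and in the attestation service hunks further down) all collapse into `Iterator::any`, which is the shape clippy's `search_is_some` lint recommends (lint name assumed; the PR does not state it). A small self-contained sketch of the equivalence, with an illustrative protocol list:

```rust
// Keep only addresses that do not contain a UDP component.
fn retain_non_udp(addrs: &mut Vec<Vec<&str>>) {
    // Before: addrs.retain(|addr| addr.iter().find(|p| **p == "udp").is_none());
    addrs.retain(|addr| !addr.iter().any(|p| *p == "udp"));
}

fn main() {
    let mut addrs = vec![vec!["ip4", "udp"], vec!["ip4", "tcp"]];
    retain_non_udp(&mut addrs);
    assert_eq!(addrs, vec![vec!["ip4", "tcp"]]);
}
```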
@@ -62,14 +62,14 @@ impl PeerSyncStatus {
         matches!(self, PeerSyncStatus::Behind { .. })
     }
 
+    /// Updates the peer's sync status, returning whether the status transitioned.
+    ///
+    /// E.g. returns `true` if the state changed from `Synced` to `Advanced`, but not if
+    /// the status remained `Synced` with different `SyncInfo` within.
     pub fn update(&mut self, new_state: PeerSyncStatus) -> bool {
-        if *self == new_state {
-            *self = new_state;
-            false // state was not updated
-        } else {
-            *self = new_state;
-            true
-        }
+        let changed_status = *self != new_state;
+        *self = new_state;
+        changed_status
     }
 
     pub fn as_str(&self) -> &'static str {
@@ -587,7 +587,7 @@ where
                 match std::mem::replace(&mut info.state, InboundState::Poisoned) {
                     InboundState::Idle(substream) if !deactivated => {
                         if !info.pending_items.is_empty() {
-                            let to_send = std::mem::replace(&mut info.pending_items, vec![]);
+                            let to_send = std::mem::take(&mut info.pending_items);
                             let fut = process_inbound_substream(
                                 substream,
                                 info.remaining_chunks,

@@ -665,8 +665,7 @@ where
                             // elements
 
                             if !deactivated && !info.pending_items.is_empty() {
-                                let to_send =
-                                    std::mem::replace(&mut info.pending_items, vec![]);
+                                let to_send = std::mem::take(&mut info.pending_items);
                                 let fut = process_inbound_substream(
                                     substream,
                                     info.remaining_chunks,
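`std::mem::take` swaps a value with its `Default` and hands back the old one, so the `mem::replace(&mut x, vec![])` calls here (and in the `BlockQueue::dequeue` hunk later on) become a single `take`. A minimal sketch:

```rust
// Drain the pending items, leaving an empty Vec behind; equivalent to
// std::mem::replace(pending_items, Vec::new()) but without spelling out the default.
fn drain_pending(pending_items: &mut Vec<String>) -> Vec<String> {
    std::mem::take(pending_items)
}

fn main() {
    let mut pending = vec!["chunk-1".to_string(), "chunk-2".to_string()];
    let to_send = drain_pending(&mut pending);
    assert_eq!(to_send.len(), 2);
    assert!(pending.is_empty());
}
```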
@@ -1254,7 +1254,6 @@ pub fn serve<T: BeaconChainTypes>(
 
     // GET config/fork_schedule
     let get_config_fork_schedule = config_path
-        .clone()
         .and(warp::path("fork_schedule"))
         .and(warp::path::end())
         .and(chain_filter.clone())

@@ -1268,7 +1267,6 @@ pub fn serve<T: BeaconChainTypes>(
 
     // GET config/spec
     let get_config_spec = config_path
-        .clone()
         .and(warp::path("spec"))
         .and(warp::path::end())
         .and(chain_filter.clone())

@@ -1284,7 +1282,6 @@ pub fn serve<T: BeaconChainTypes>(
 
     // GET config/deposit_contract
     let get_config_deposit_contract = config_path
-        .clone()
         .and(warp::path("deposit_contract"))
         .and(warp::path::end())
         .and(chain_filter.clone())
@@ -1,4 +1,5 @@
 #![cfg(not(debug_assertions))] // Tests are too slow in debug.
+#![recursion_limit = "256"]
 
 use beacon_chain::{
     test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType},
@@ -2333,7 +2334,7 @@ async fn poll_events<S: Stream<Item = Result<EventKind<T>, eth2::Error>> + Unpin
     };
 
     tokio::select! {
-        _ = collect_stream_fut => {return events}
+        _ = collect_stream_fut => {events}
         _ = tokio::time::sleep(timeout) => { return events; }
     }
 }
@@ -559,11 +559,10 @@ impl<T: BeaconChainTypes> AttestationService<T> {
             return;
         }
         // If there are no unsubscription events for `subnet_id`, we unsubscribe immediately.
-        if self
+        if !self
             .unsubscriptions
             .keys()
-            .find(|s| s.subnet_id == subnet_id)
-            .is_none()
+            .any(|s| s.subnet_id == subnet_id)
         {
             // we are not at capacity, unsubscribe from the current subnet.
             debug!(self.log, "Unsubscribing from random subnet"; "subnet_id" => *subnet_id);
@@ -601,11 +600,10 @@ impl<T: BeaconChainTypes> AttestationService<T> {
 
         for subnet_id in to_remove_subnets {
             // If there are no unsubscription events for `subnet_id`, we unsubscribe immediately.
-            if self
+            if !self
                 .unsubscriptions
                 .keys()
-                .find(|s| s.subnet_id == *subnet_id)
-                .is_none()
+                .any(|s| s.subnet_id == *subnet_id)
             {
                 self.events
                     .push_back(AttServiceMessage::Unsubscribe(*subnet_id));
@@ -147,10 +147,10 @@ async fn get_events<S: Stream<Item = AttServiceMessage> + Unpin>(
     };
 
     tokio::select! {
-        _ = collect_stream_fut => {return events}
+        _ = collect_stream_fut => {events}
         _ = tokio::time::sleep(
             Duration::from_millis(SLOT_DURATION_MILLIS) * num_slots_before_timeout,
-        ) => { return events; }
+        ) => { events }
     }
 }
@@ -83,7 +83,7 @@ pub fn construct_upnp_mappings<T: EthSpec>(
                 "tcp",
                 &log,
             ).and_then(|_| {
-                let external_socket = external_ip.as_ref().map(|ip| SocketAddr::new(ip.clone().into(), config.tcp_port)).map_err(|_| ());
+                let external_socket = external_ip.as_ref().map(|ip| SocketAddr::new((*ip).into(), config.tcp_port)).map_err(|_| ());
                 info!(log, "UPnP TCP route established"; "external_socket" => format!("{}:{}", external_socket.as_ref().map(|ip| ip.to_string()).unwrap_or_else(|_| "".into()), config.tcp_port));
                 external_socket
             }).ok();
@@ -933,10 +933,10 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
         // check if we have the batch for our optimistic start. If not, request it first.
         // We wait for this batch before requesting any other batches.
         if let Some(epoch) = self.optimistic_start {
-            if !self.batches.contains_key(&epoch) {
+            if let Entry::Vacant(entry) = self.batches.entry(epoch) {
                 if let Some(peer) = idle_peers.pop() {
                     let optimistic_batch = BatchInfo::new(&epoch, EPOCHS_PER_BATCH);
-                    self.batches.insert(epoch, optimistic_batch);
+                    entry.insert(optimistic_batch);
                     self.send_batch(network, epoch, peer)?;
                 }
             }
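The `contains_key` check followed by an `insert` turns into a single `entry()` lookup, which only hashes the key once and makes the insert conditional on the slot being vacant. A self-contained sketch with illustrative types (the real code stores a `BatchInfo` keyed by epoch):

```rust
use std::collections::{hash_map::Entry, HashMap};

fn request_optimistic_batch(batches: &mut HashMap<u64, String>, epoch: u64) {
    // Before:
    // if !batches.contains_key(&epoch) { batches.insert(epoch, "batch".to_string()); }
    if let Entry::Vacant(entry) = batches.entry(epoch) {
        entry.insert("batch".to_string());
    }
}

fn main() {
    let mut batches = HashMap::new();
    request_optimistic_batch(&mut batches, 0);
    request_optimistic_batch(&mut batches, 0); // second call is a no-op
    assert_eq!(batches.len(), 1);
}
```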
@@ -285,12 +285,11 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
 
     /// Store a state in the store.
     pub fn put_state(&self, state_root: &Hash256, state: &BeaconState<E>) -> Result<(), Error> {
+        let mut ops: Vec<KeyValueStoreOp> = Vec::new();
         if state.slot < self.get_split_slot() {
-            let mut ops: Vec<KeyValueStoreOp> = Vec::new();
             self.store_cold_state(state_root, &state, &mut ops)?;
             self.cold_db.do_atomically(ops)
         } else {
-            let mut ops: Vec<KeyValueStoreOp> = Vec::new();
             self.store_hot_state(state_root, state, &mut ops)?;
             self.hot_db.do_atomically(ops)
         }
@@ -251,18 +251,18 @@ mod tests {
         let key = Hash256::random();
         let item = StorableThing { a: 1, b: 42 };
 
-        assert_eq!(store.exists::<StorableThing>(&key).unwrap(), false);
+        assert!(!store.exists::<StorableThing>(&key).unwrap());
 
         store.put(&key, &item).unwrap();
 
-        assert_eq!(store.exists::<StorableThing>(&key).unwrap(), true);
+        assert!(store.exists::<StorableThing>(&key).unwrap());
 
         let retrieved = store.get(&key).unwrap().unwrap();
         assert_eq!(item, retrieved);
 
         store.delete::<StorableThing>(&key).unwrap();
 
-        assert_eq!(store.exists::<StorableThing>(&key).unwrap(), false);
+        assert!(!store.exists::<StorableThing>(&key).unwrap());
 
         assert_eq!(store.get::<StorableThing>(&key).unwrap(), None);
     }

@@ -289,14 +289,14 @@ mod tests {
         let key = Hash256::random();
         let item = StorableThing { a: 1, b: 42 };
 
-        assert_eq!(store.exists::<StorableThing>(&key).unwrap(), false);
+        assert!(!store.exists::<StorableThing>(&key).unwrap());
 
         store.put(&key, &item).unwrap();
 
-        assert_eq!(store.exists::<StorableThing>(&key).unwrap(), true);
+        assert!(store.exists::<StorableThing>(&key).unwrap());
 
         store.delete::<StorableThing>(&key).unwrap();
 
-        assert_eq!(store.exists::<StorableThing>(&key).unwrap(), false);
+        assert!(!store.exists::<StorableThing>(&key).unwrap());
     }
 }
@@ -1,4 +1,5 @@
 #![cfg(test)]
+#![recursion_limit = "256"]
 
 use beacon_chain::StateSkipConfig;
 use node_test_rig::{
@@ -231,7 +231,7 @@ impl<'a> Builder<'a> {
         if self.store_withdrawal_keystore {
             // Write the withdrawal password to file.
             write_password_to_file(
-                password_dir.join(withdrawal_keypair.pk.to_hex_string()),
+                password_dir.join(withdrawal_keypair.pk.as_hex_string()),
                 withdrawal_password.as_bytes(),
             )?;
 
@@ -294,9 +294,8 @@ mod tests {
             "after first push sub should have len {}",
             len
         );
-        assert_eq!(
-            sub.is_empty(arena).expect("should exist"),
-            false,
+        assert!(
+            !sub.is_empty(arena).expect("should exist"),
             "new sub should not be empty"
         );
 

@@ -375,9 +374,8 @@ mod tests {
             0,
             "new sub should have len 0"
         );
-        assert_eq!(
+        assert!(
             sub.is_empty(arena).expect("should exist"),
-            true,
             "new sub should be empty"
         );
 

@@ -397,9 +395,8 @@ mod tests {
             0,
             "new sub should have len 0"
         );
-        assert_eq!(
+        assert!(
             sub_01.is_empty(arena).expect("should exist"),
-            true,
             "new sub should be empty"
         );
 

@@ -409,9 +406,8 @@ mod tests {
             0,
             "new sub should have len 0"
         );
-        assert_eq!(
+        assert!(
             sub_02.is_empty(arena).expect("should exist"),
-            true,
             "new sub should be empty"
         );
 
@@ -432,9 +428,8 @@ mod tests {
             0,
             "new sub should have len 0"
         );
-        assert_eq!(
+        assert!(
             sub_01.is_empty(arena).expect("should exist"),
-            true,
             "new sub should be empty"
         );
 

@@ -446,9 +441,8 @@ mod tests {
             0,
             "new sub should have len 0"
         );
-        assert_eq!(
+        assert!(
             sub_02.is_empty(arena).expect("should exist"),
-            true,
             "new sub should be empty"
         );
 

@@ -474,9 +468,8 @@ mod tests {
             0,
             "new sub should have len 0"
         );
-        assert_eq!(
+        assert!(
             sub.is_empty(arena).expect("should exist"),
-            true,
             "new sub should be empty"
         );
         subs.push(sub);

@@ -492,9 +485,8 @@ mod tests {
             0,
             "new sub should have len 0"
         );
-        assert_eq!(
+        assert!(
             sub.is_empty(arena).expect("should exist"),
-            true,
             "new sub should be empty"
         );
         subs.push(sub);
@@ -6,7 +6,7 @@ use crate::{test_utils::*, *};
 fn default_values() {
     let cache = CommitteeCache::default();
 
-    assert_eq!(cache.is_initialized_at(Epoch::new(0)), false);
+    assert!(!cache.is_initialized_at(Epoch::new(0)));
     assert!(&cache.active_validator_indices().is_empty());
     assert_eq!(cache.get_beacon_committee(Slot::new(0), 0), None);
     assert_eq!(cache.get_attestation_duties(0), None);
@@ -105,10 +105,7 @@ mod tests {
         let indexed_vote_first = create_indexed_attestation(3, 1);
         let indexed_vote_second = create_indexed_attestation(3, 2);
 
-        assert_eq!(
-            indexed_vote_first.is_double_vote(&indexed_vote_second),
-            true
-        )
+        assert!(indexed_vote_first.is_double_vote(&indexed_vote_second))
     }
 
     #[test]

@@ -116,10 +113,7 @@ mod tests {
         let indexed_vote_first = create_indexed_attestation(1, 1);
         let indexed_vote_second = create_indexed_attestation(2, 1);
 
-        assert_eq!(
-            indexed_vote_first.is_double_vote(&indexed_vote_second),
-            false
-        );
+        assert!(!indexed_vote_first.is_double_vote(&indexed_vote_second));
     }
 
     #[test]

@@ -127,10 +121,7 @@ mod tests {
         let indexed_vote_first = create_indexed_attestation(2, 1);
         let indexed_vote_second = create_indexed_attestation(1, 2);
 
-        assert_eq!(
-            indexed_vote_first.is_surround_vote(&indexed_vote_second),
-            true
-        );
+        assert!(indexed_vote_first.is_surround_vote(&indexed_vote_second));
     }
 
     #[test]

@@ -138,10 +129,7 @@ mod tests {
        let indexed_vote_first = create_indexed_attestation(4, 1);
        let indexed_vote_second = create_indexed_attestation(3, 2);
 
-        assert_eq!(
-            indexed_vote_first.is_surround_vote(&indexed_vote_second),
-            true
-        );
+        assert!(indexed_vote_first.is_surround_vote(&indexed_vote_second));
     }
 
     #[test]

@@ -149,10 +137,7 @@ mod tests {
         let indexed_vote_first = create_indexed_attestation(2, 2);
         let indexed_vote_second = create_indexed_attestation(1, 1);
 
-        assert_eq!(
-            indexed_vote_first.is_surround_vote(&indexed_vote_second),
-            false
-        );
+        assert!(!indexed_vote_first.is_surround_vote(&indexed_vote_second));
     }
 
     #[test]

@@ -160,10 +145,7 @@ mod tests {
         let indexed_vote_first = create_indexed_attestation(1, 1);
         let indexed_vote_second = create_indexed_attestation(2, 2);
 
-        assert_eq!(
-            indexed_vote_first.is_surround_vote(&indexed_vote_second),
-            false
-        );
+        assert!(!indexed_vote_first.is_surround_vote(&indexed_vote_second));
     }
 
     ssz_and_tree_hash_tests!(IndexedAttestation<MainnetEthSpec>);
@@ -93,10 +93,10 @@ mod tests {
 
         let epoch = Epoch::new(0);
 
-        assert_eq!(v.is_active_at(epoch), false);
-        assert_eq!(v.is_exited_at(epoch), false);
-        assert_eq!(v.is_withdrawable_at(epoch), false);
-        assert_eq!(v.slashed, false);
+        assert!(!v.is_active_at(epoch));
+        assert!(!v.is_exited_at(epoch));
+        assert!(!v.is_withdrawable_at(epoch));
+        assert!(!v.slashed);
     }
 
     #[test]

@@ -108,9 +108,9 @@ mod tests {
             ..Validator::default()
         };
 
-        assert_eq!(v.is_active_at(epoch - 1), false);
-        assert_eq!(v.is_active_at(epoch), true);
-        assert_eq!(v.is_active_at(epoch + 1), true);
+        assert!(!v.is_active_at(epoch - 1));
+        assert!(v.is_active_at(epoch));
+        assert!(v.is_active_at(epoch + 1));
     }
 
     #[test]

@@ -122,9 +122,9 @@ mod tests {
             ..Validator::default()
         };
 
-        assert_eq!(v.is_exited_at(epoch - 1), false);
-        assert_eq!(v.is_exited_at(epoch), true);
-        assert_eq!(v.is_exited_at(epoch + 1), true);
+        assert!(!v.is_exited_at(epoch - 1));
+        assert!(v.is_exited_at(epoch));
+        assert!(v.is_exited_at(epoch + 1));
     }
 
     #[test]

@@ -136,9 +136,9 @@ mod tests {
             ..Validator::default()
        };
 
-        assert_eq!(v.is_withdrawable_at(epoch - 1), false);
-        assert_eq!(v.is_withdrawable_at(epoch), true);
-        assert_eq!(v.is_withdrawable_at(epoch + 1), true);
+        assert!(!v.is_withdrawable_at(epoch - 1));
+        assert!(v.is_withdrawable_at(epoch));
+        assert!(v.is_withdrawable_at(epoch + 1));
     }
 
     ssz_and_tree_hash_tests!(Validator);
@@ -51,7 +51,7 @@ where
     }
 
     /// Returns `self.serialize()` as a `0x`-prefixed hex string.
-    pub fn to_hex_string(&self) -> String {
+    pub fn as_hex_string(&self) -> String {
         format!("{:?}", self)
     }
 

@@ -70,7 +70,7 @@ impl<Pub> GenericPublicKeyBytes<Pub> {
     }
 
     /// Returns `self.serialize()` as a `0x`-prefixed hex string.
-    pub fn to_hex_string(&self) -> String {
+    pub fn as_hex_string(&self) -> String {
         format!("{:?}", self)
     }
 
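The `to_hex_string` → `as_hex_string` rename defined here ripples through every call site below (keystores, the slashing database, graffiti files and the account manager). A stand-in sketch of the renamed helper; the real type is the generic BLS public key, so the struct and byte length used here are illustrative only:

```rust
/// Illustrative stand-in for the BLS public key wrapper.
struct PublicKeyBytes([u8; 4]);

impl PublicKeyBytes {
    /// Returns the key as a `0x`-prefixed hex string (previously `to_hex_string`).
    pub fn as_hex_string(&self) -> String {
        let mut out = String::from("0x");
        for byte in self.0.iter() {
            out.push_str(&format!("{:02x}", byte));
        }
        out
    }
}

fn main() {
    let pk = PublicKeyBytes([0xde, 0xad, 0xbe, 0xef]);
    assert_eq!(pk.as_hex_string(), "0xdeadbeef");
    // Call sites that strip the prefix, as the keystore hunks do:
    assert_eq!(&pk.as_hex_string()[2..], "deadbeef");
}
```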
@@ -230,7 +230,7 @@ impl Keystore {
             },
             uuid,
             path: Some(path),
-            pubkey: keypair.pk.to_hex_string()[2..].to_string(),
+            pubkey: keypair.pk.as_hex_string()[2..].to_string(),
             version: Version::four(),
             description: Some(description),
             name: None,

@@ -261,7 +261,7 @@ impl Keystore {
 
         let keypair = keypair_from_secret(plain_text.as_bytes())?;
         // Verify that the derived `PublicKey` matches `self`.
-        if keypair.pk.to_hex_string()[2..] != self.json.pubkey {
+        if keypair.pk.as_hex_string()[2..] != self.json.pubkey {
             return Err(Error::PublicKeyMismatch);
         }
 
@@ -1,3 +1,5 @@
+#![recursion_limit = "256"]
+
 mod metrics;
 
 use beacon_node::{get_eth2_network_config, ProductionBeaconNode};
@@ -13,7 +13,7 @@ impl BlockQueue {
 
     pub fn dequeue(&self) -> Vec<SignedBeaconBlockHeader> {
         let mut blocks = self.blocks.lock();
-        std::mem::replace(&mut *blocks, vec![])
+        std::mem::take(&mut *blocks)
     }
 
     pub fn len(&self) -> usize {
@@ -1,3 +1,5 @@
+#![recursion_limit = "256"]
+
 //! This crate provides a simluation that creates `n` beacon node and validator clients, each with
 //! `v` validators. A deposit contract is deployed at the start of the simulation using a local
 //! `ganache-cli` instance (you must have `ganache-cli` installed and avaliable on your path). All
@@ -160,7 +160,7 @@ impl SlashingDatabase {
         let mut stmt = txn.prepare("INSERT INTO validators (public_key) VALUES (?1)")?;
         for pubkey in public_keys {
             if self.get_validator_id_opt(&txn, pubkey)?.is_none() {
-                stmt.execute(&[pubkey.to_hex_string()])?;
+                stmt.execute(&[pubkey.as_hex_string()])?;
             }
         }
         Ok(())

@@ -205,7 +205,7 @@ impl SlashingDatabase {
         Ok(txn
             .query_row(
                 "SELECT id FROM validators WHERE public_key = ?1",
-                params![&public_key.to_hex_string()],
+                params![&public_key.as_hex_string()],
                 |row| row.get(0),
             )
             .optional()?)

@@ -990,11 +990,9 @@ mod tests {
         assert_eq!(db.conn_pool.max_size(), POOL_SIZE);
         assert_eq!(db.conn_pool.connection_timeout(), CONNECTION_TIMEOUT);
         let conn = db.conn_pool.get().unwrap();
-        assert_eq!(
-            conn.pragma_query_value(None, "foreign_keys", |row| { row.get::<_, bool>(0) })
-                .unwrap(),
-            true
-        );
+        assert!(conn
+            .pragma_query_value(None, "foreign_keys", |row| { row.get::<_, bool>(0) })
+            .unwrap());
         assert_eq!(
             conn.pragma_query_value(None, "locking_mode", |row| { row.get::<_, String>(0) })
                 .unwrap()
@@ -129,13 +129,13 @@ mod tests {
             .write_all(format!("default: {}\n", DEFAULT_GRAFFITI).as_bytes())
             .unwrap();
         graffiti_file
-            .write_all(format!("{}: {}\n", pk1.to_hex_string(), CUSTOM_GRAFFITI1).as_bytes())
+            .write_all(format!("{}: {}\n", pk1.as_hex_string(), CUSTOM_GRAFFITI1).as_bytes())
             .unwrap();
         graffiti_file
-            .write_all(format!("{}: {}\n", pk2.to_hex_string(), CUSTOM_GRAFFITI2).as_bytes())
+            .write_all(format!("{}: {}\n", pk2.as_hex_string(), CUSTOM_GRAFFITI2).as_bytes())
             .unwrap();
         graffiti_file
-            .write_all(format!("{}:{}\n", pk3.to_hex_string(), EMPTY_GRAFFITI).as_bytes())
+            .write_all(format!("{}:{}\n", pk3.as_hex_string(), EMPTY_GRAFFITI).as_bytes())
             .unwrap();
         graffiti_file.flush().unwrap();
         file_name