rust 1.53.0 updates (#2411)
## Issue Addressed

`make lint` fails on Rust 1.53.0.

## Proposed Changes

Updates for Rust 1.53.0.

## Additional Info

I haven't figured out why yet, but we are now hitting the recursion limit in a few crates, so I had to add `#![recursion_limit = "256"]` in a few places.

Co-authored-by: realbigsean <seananderson33@gmail.com>
Co-authored-by: Michael Sproul <michael@sigmaprime.io>
Parent: 3dc1eb5eb6
Commit: b84ff9f793
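For context, the recursion-limit fix mentioned above is a single crate-level attribute. A minimal sketch (the crate below is illustrative, not one of the crates touched in this PR):

```rust
// Placed at the top of lib.rs or main.rs of the affected crate. The default
// limit is 128; macro-heavy code (e.g. deeply nested `tokio::select!` or warp
// filter chains) can exceed it and fail with "recursion limit reached".
#![recursion_limit = "256"]

fn main() {
    println!("crate compiles with the raised recursion limit");
}
```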
.github/custom/config.toml (vendored, new file, 3 lines)
@@ -0,0 +1,3 @@
+# Custom Cargo config to be used for the udeps CI job
+[http]
+multiplexing = false
.github/workflows/test-suite.yml (vendored, 6 changed lines)
@@ -12,7 +12,7 @@ env:
   # Deny warnings in CI
   RUSTFLAGS: "-D warnings"
   # The Nightly version used for cargo-udeps, might need updating from time to time.
-  PINNED_NIGHTLY: nightly-2021-03-01
+  PINNED_NIGHTLY: nightly-2021-06-09
 jobs:
   target-branch-check:
     name: target-branch-check
@@ -198,6 +198,10 @@ jobs:
       run: rustup toolchain install $PINNED_NIGHTLY
     - name: Install cargo-udeps
      run: cargo install cargo-udeps --locked
+    - name: Create Cargo config dir
+      run: mkdir -p .cargo
+    - name: Install custom Cargo config
+      run: cp -f .github/custom/config.toml .cargo/config.toml
     - name: Run cargo udeps to identify unused crates in the dependency graph
       run: make udeps
       env:
@@ -236,7 +236,7 @@ pub fn cli_run<T: EthSpec>(
 .map_err(|e| {
 format!(
 "Error registering validator {}: {:?}",
-voting_pubkey.to_hex_string(),
+voting_pubkey.as_hex_string(),
 e
 )
 })?;
@@ -250,7 +250,7 @@ pub fn cli_run<T: EthSpec>(
 .build()
 .map_err(|e| format!("Unable to build validator directory: {:?}", e))?;

-println!("{}/{}\t{}", i + 1, n, voting_pubkey.to_hex_string());
+println!("{}/{}\t{}", i + 1, n, voting_pubkey.as_hex_string());
 }

 Ok(())
@@ -242,7 +242,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin
 .map_err(|e| {
 format!(
 "Error registering validator {}: {:?}",
-voting_pubkey.to_hex_string(),
+voting_pubkey.as_hex_string(),
 e
 )
 })?;
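The `to_hex_string` → `as_hex_string` renames above (and in the generic public-key definitions further down in this diff) appear to be driven by clippy's `wrong_self_convention` lint, which Rust 1.53 tightened: a method named `to_*` that takes `&self` on a non-`Copy` type is now flagged, while an `as_*` name is accepted. A minimal illustration of the convention, using a hypothetical wrapper type:

```rust
// Hypothetical example of the naming convention clippy enforces:
// methods that only borrow `self` are conventionally named `as_*`.
struct PublicKeyHex(Vec<u8>);

impl PublicKeyHex {
    // OK: borrows `self`, so the `as_` prefix is the conventional choice.
    pub fn as_hex_string(&self) -> String {
        self.0.iter().map(|b| format!("{:02x}", b)).collect()
    }
}

fn main() {
    let pk = PublicKeyHex(vec![0xde, 0xad, 0xbe, 0xef]);
    println!("0x{}", pk.as_hex_string());
}
```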
@@ -762,8 +762,8 @@ mod test {

 let eth1_chain = get_eth1_chain();

-assert_eq!(
-eth1_chain.use_dummy_backend, false,
+assert!(
+!eth1_chain.use_dummy_backend,
 "test should not use dummy backend"
 );

@@ -795,8 +795,8 @@ mod test {
 let eth1_chain = get_eth1_chain();
 let max_deposits = <E as EthSpec>::MaxDeposits::to_u64();

-assert_eq!(
-eth1_chain.use_dummy_backend, false,
+assert!(
+!eth1_chain.use_dummy_backend,
 "test should not use dummy backend"
 );

@@ -877,8 +877,8 @@ mod test {

 let eth1_chain = get_eth1_chain();

-assert_eq!(
-eth1_chain.use_dummy_backend, false,
+assert!(
+!eth1_chain.use_dummy_backend,
 "test should not use dummy backend"
 );

@@ -901,8 +901,8 @@ mod test {

 let eth1_chain = get_eth1_chain();

-assert_eq!(
-eth1_chain.use_dummy_backend, false,
+assert!(
+!eth1_chain.use_dummy_backend,
 "test should not use dummy backend"
 );

@@ -353,11 +353,11 @@ mod tests {
 #[test]
 fn lock() {
 let lock = Lock::new();
-assert_eq!(lock.lock(), false);
-assert_eq!(lock.lock(), true);
-assert_eq!(lock.lock(), true);
+assert!(!lock.lock());
+assert!(lock.lock());
+assert!(lock.lock());
 lock.unlock();
-assert_eq!(lock.lock(), false);
-assert_eq!(lock.lock(), true);
+assert!(!lock.lock());
+assert!(lock.lock());
 }
 }
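The assertion rewrites in the hunks above (and in many test hunks later in the diff) line up with clippy's `bool_assert_comparison` lint, introduced around Rust 1.53, which flags `assert_eq!(expr, true)` / `assert_eq!(expr, false)` in favour of `assert!(expr)` / `assert!(!expr)`. A small before/after sketch with a hypothetical flag:

```rust
fn main() {
    let use_dummy_backend = false;

    // Old style, now linted by clippy::bool_assert_comparison:
    // assert_eq!(use_dummy_backend, false, "test should not use dummy backend");

    // Preferred style; the optional failure message stays as the last argument.
    assert!(!use_dummy_backend, "test should not use dummy backend");
    println!("assertion passed");
}
```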
@@ -88,14 +88,14 @@ impl EnrExt for Enr {
 if let Some(udp) = self.udp() {
 let mut multiaddr: Multiaddr = ip.into();
 multiaddr.push(Protocol::Udp(udp));
-multiaddr.push(Protocol::P2p(peer_id.clone().into()));
+multiaddr.push(Protocol::P2p(peer_id.into()));
 multiaddrs.push(multiaddr);
 }

 if let Some(tcp) = self.tcp() {
 let mut multiaddr: Multiaddr = ip.into();
 multiaddr.push(Protocol::Tcp(tcp));
-multiaddr.push(Protocol::P2p(peer_id.clone().into()));
+multiaddr.push(Protocol::P2p(peer_id.into()));
 multiaddrs.push(multiaddr);
 }
 }
@@ -103,7 +103,7 @@ impl EnrExt for Enr {
 if let Some(udp6) = self.udp6() {
 let mut multiaddr: Multiaddr = ip6.into();
 multiaddr.push(Protocol::Udp(udp6));
-multiaddr.push(Protocol::P2p(peer_id.clone().into()));
+multiaddr.push(Protocol::P2p(peer_id.into()));
 multiaddrs.push(multiaddr);
 }

@@ -128,7 +128,7 @@ impl EnrExt for Enr {
 if let Some(tcp) = self.tcp() {
 let mut multiaddr: Multiaddr = ip.into();
 multiaddr.push(Protocol::Tcp(tcp));
-multiaddr.push(Protocol::P2p(peer_id.clone().into()));
+multiaddr.push(Protocol::P2p(peer_id.into()));
 multiaddrs.push(multiaddr);
 }
 }
@@ -154,7 +154,7 @@ impl EnrExt for Enr {
 if let Some(udp) = self.udp() {
 let mut multiaddr: Multiaddr = ip.into();
 multiaddr.push(Protocol::Udp(udp));
-multiaddr.push(Protocol::P2p(peer_id.clone().into()));
+multiaddr.push(Protocol::P2p(peer_id.into()));
 multiaddrs.push(multiaddr);
 }
 }
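Dropping `.clone()` before `.into()` in the ENR hunks above, and on `ip` and the warp `config_path` filters elsewhere in this diff, looks like clippy's `clone_on_copy` (or, for owned values, `redundant_clone`) at work: once a value can be copied or moved directly, an explicit `.clone()` is just noise. A self-contained sketch with a hypothetical `Copy` id type standing in for things like `IpAddr`:

```rust
// Hypothetical Copy newtype; whether libp2p's PeerId is Copy depends on the
// version in use, so this is only an illustration of the lint, not its API.
#[derive(Clone, Copy, Debug)]
struct Id(u64);

impl From<Id> for u64 {
    fn from(id: Id) -> u64 {
        id.0
    }
}

fn main() {
    let id = Id(7);
    // clippy::clone_on_copy: `id.clone()` would be redundant for a Copy type;
    // the value is copied implicitly when converted.
    let _n: u64 = id.into();
    // Through a reference, the suggested fix is an explicit deref instead,
    // mirroring the `(*ip).into()` change in the UPnP hunk below.
    let id_ref = &id;
    let _m: u64 = (*id_ref).into();
    println!("{:?}", id);
}
```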
@@ -556,20 +556,18 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
 if known_meta_data.seq_number < meta_data.seq_number {
 debug!(self.log, "Updating peer's metadata";
 "peer_id" => %peer_id, "known_seq_no" => known_meta_data.seq_number, "new_seq_no" => meta_data.seq_number);
-peer_info.meta_data = Some(meta_data);
 } else {
 debug!(self.log, "Received old metadata";
 "peer_id" => %peer_id, "known_seq_no" => known_meta_data.seq_number, "new_seq_no" => meta_data.seq_number);
 // Updating metadata even in this case to prevent storing
 // incorrect `metadata.attnets` for a peer
-peer_info.meta_data = Some(meta_data);
 }
 } else {
 // we have no meta-data for this peer, update
 debug!(self.log, "Obtained peer's metadata";
 "peer_id" => %peer_id, "new_seq_no" => meta_data.seq_number);
-peer_info.meta_data = Some(meta_data);
 }
+peer_info.meta_data = Some(meta_data);
 } else {
 crit!(self.log, "Received METADATA from an unknown peer";
 "peer_id" => %peer_id);
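The peer-manager change above is a small dedup rather than a lint fix: the `peer_info.meta_data = Some(meta_data)` assignment that appeared in each metadata branch is hoisted out so it runs once after the logging. A generic sketch of the pattern (names here are illustrative, not Lighthouse APIs):

```rust
fn log_and_store(known: Option<u64>, new_seq: u64, slot: &mut Option<u64>) {
    // Branches now only decide what to log; the store happens once below.
    match known {
        Some(known_seq) if known_seq < new_seq => println!("updating metadata"),
        Some(_) => println!("received old metadata, storing anyway"),
        None => println!("obtained first metadata"),
    }
    *slot = Some(new_seq);
}

fn main() {
    let mut stored = None;
    log_and_store(None, 1, &mut stored);
    log_and_store(Some(1), 2, &mut stored);
    assert_eq!(stored, Some(2));
}
```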
@@ -583,11 +581,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
 // port is removed, which is assumed to be associated with the discv5 protocol (and
 // therefore irrelevant for other libp2p components).
 let mut out_list = enr.multiaddr();
-out_list.retain(|addr| {
-addr.iter()
-.find(|v| matches!(v, MProtocol::Udp(_)))
-.is_none()
-});
+out_list.retain(|addr| !addr.iter().any(|v| matches!(v, MProtocol::Udp(_))));

 out_list
 } else {
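Collapsing `.find(..).is_none()` into `!..any(..)`, here and in the attestation service further down, matches clippy's `search_is_some` lint, which suggests `any` when only the existence of a match matters. For example:

```rust
fn main() {
    let ports = [9000u16, 9001, 9100];

    // Old: ports.iter().find(|p| **p == 9001).is_none()
    // New: the intent (a pure existence check) reads more directly with `any`.
    let missing = !ports.iter().any(|p| *p == 9001);
    assert!(!missing);

    // The same rewrite applies inside `retain` predicates, as in the hunk above.
    let mut addrs = vec![("udp", 9000u16), ("tcp", 9000)];
    addrs.retain(|(proto, _)| !matches!(*proto, "udp"));
    assert_eq!(addrs, vec![("tcp", 9000)]);
}
```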
@@ -62,14 +62,14 @@ impl PeerSyncStatus {
 matches!(self, PeerSyncStatus::Behind { .. })
 }

+/// Updates the peer's sync status, returning whether the status transitioned.
+///
+/// E.g. returns `true` if the state changed from `Synced` to `Advanced`, but not if
+/// the status remained `Synced` with different `SyncInfo` within.
 pub fn update(&mut self, new_state: PeerSyncStatus) -> bool {
-if *self == new_state {
+let changed_status = *self != new_state;
 *self = new_state;
-false // state was not updated
-} else {
-*self = new_state;
-true
-}
+changed_status
 }

 pub fn as_str(&self) -> &'static str {
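The `update` rewrite above changes readability more than behaviour: instead of branching on equality and returning a literal from each arm, it records whether the incoming state differs, always performs the assignment, and returns that flag (now documented as "whether the status transitioned"). A reduced sketch of the same equivalence with a stand-in enum:

```rust
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Status {
    Synced,
    Behind,
}

// Equivalent to the old if/else form: assign unconditionally, report the change.
fn update(current: &mut Status, new_state: Status) -> bool {
    let changed_status = *current != new_state;
    *current = new_state;
    changed_status
}

fn main() {
    let mut status = Status::Synced;
    assert!(update(&mut status, Status::Behind));
    assert!(!update(&mut status, Status::Behind));
}
```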
@@ -587,7 +587,7 @@ where
 match std::mem::replace(&mut info.state, InboundState::Poisoned) {
 InboundState::Idle(substream) if !deactivated => {
 if !info.pending_items.is_empty() {
-let to_send = std::mem::replace(&mut info.pending_items, vec![]);
+let to_send = std::mem::take(&mut info.pending_items);
 let fut = process_inbound_substream(
 substream,
 info.remaining_chunks,
@@ -665,8 +665,7 @@ where
 // elements

 if !deactivated && !info.pending_items.is_empty() {
-let to_send =
-std::mem::replace(&mut info.pending_items, vec![]);
+let to_send = std::mem::take(&mut info.pending_items);
 let fut = process_inbound_substream(
 substream,
 info.remaining_chunks,
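Swapping `std::mem::replace(&mut v, vec![])` for `std::mem::take(&mut v)`, also used for `BlockQueue::dequeue` later in the diff, matches clippy's `mem_replace_with_default` lint: `take` moves the value out and leaves `Default::default()` behind, which for a `Vec` is exactly the empty vector. For example:

```rust
fn main() {
    let mut pending_items = vec![1, 2, 3];

    // Old: let to_send = std::mem::replace(&mut pending_items, vec![]);
    let to_send = std::mem::take(&mut pending_items);

    assert_eq!(to_send, vec![1, 2, 3]);
    assert!(pending_items.is_empty()); // the original now holds Vec::default()
}
```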
@@ -1254,7 +1254,6 @@ pub fn serve<T: BeaconChainTypes>(

 // GET config/fork_schedule
 let get_config_fork_schedule = config_path
-.clone()
 .and(warp::path("fork_schedule"))
 .and(warp::path::end())
 .and(chain_filter.clone())
@@ -1268,7 +1267,6 @@ pub fn serve<T: BeaconChainTypes>(

 // GET config/spec
 let get_config_spec = config_path
-.clone()
 .and(warp::path("spec"))
 .and(warp::path::end())
 .and(chain_filter.clone())
@@ -1284,7 +1282,6 @@ pub fn serve<T: BeaconChainTypes>(

 // GET config/deposit_contract
 let get_config_deposit_contract = config_path
-.clone()
 .and(warp::path("deposit_contract"))
 .and(warp::path::end())
 .and(chain_filter.clone())
@@ -1,4 +1,5 @@
 #![cfg(not(debug_assertions))] // Tests are too slow in debug.
+#![recursion_limit = "256"]

 use beacon_chain::{
 test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType},
@@ -2333,7 +2334,7 @@ async fn poll_events<S: Stream<Item = Result<EventKind<T>, eth2::Error>> + Unpin
 };

 tokio::select! {
-_ = collect_stream_fut => {return events}
+_ = collect_stream_fut => {events}
 _ = tokio::time::sleep(timeout) => { return events; }
 }
 }
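In the `tokio::select!` rewrites here and in the `get_events` test helper further down, the arm bodies drop the explicit `return`: the `select!` block is the function's tail expression, so each arm can simply evaluate to the value being returned. A small sketch, assuming the `tokio` crate with its timer and macro features enabled:

```rust
use std::time::Duration;

async fn events_or_timeout() -> Vec<u32> {
    let events = vec![1, 2, 3];
    // The select! block is the tail expression, so each arm just yields a
    // value instead of using `return`.
    tokio::select! {
        _ = std::future::ready(()) => { events }
        _ = tokio::time::sleep(Duration::from_secs(1)) => { Vec::new() }
    }
}

#[tokio::main]
async fn main() {
    assert_eq!(events_or_timeout().await, vec![1, 2, 3]);
}
```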
@@ -559,11 +559,10 @@ impl<T: BeaconChainTypes> AttestationService<T> {
 return;
 }
 // If there are no unsubscription events for `subnet_id`, we unsubscribe immediately.
-if self
+if !self
 .unsubscriptions
 .keys()
-.find(|s| s.subnet_id == subnet_id)
-.is_none()
+.any(|s| s.subnet_id == subnet_id)
 {
 // we are not at capacity, unsubscribe from the current subnet.
 debug!(self.log, "Unsubscribing from random subnet"; "subnet_id" => *subnet_id);
@@ -601,11 +600,10 @@ impl<T: BeaconChainTypes> AttestationService<T> {

 for subnet_id in to_remove_subnets {
 // If there are no unsubscription events for `subnet_id`, we unsubscribe immediately.
-if self
+if !self
 .unsubscriptions
 .keys()
-.find(|s| s.subnet_id == *subnet_id)
-.is_none()
+.any(|s| s.subnet_id == *subnet_id)
 {
 self.events
 .push_back(AttServiceMessage::Unsubscribe(*subnet_id));
@@ -147,10 +147,10 @@ async fn get_events<S: Stream<Item = AttServiceMessage> + Unpin>(
 };

 tokio::select! {
-_ = collect_stream_fut => {return events}
+_ = collect_stream_fut => {events}
 _ = tokio::time::sleep(
 Duration::from_millis(SLOT_DURATION_MILLIS) * num_slots_before_timeout,
-) => { return events; }
+) => { events }
 }
 }

@@ -83,7 +83,7 @@ pub fn construct_upnp_mappings<T: EthSpec>(
 "tcp",
 &log,
 ).and_then(|_| {
-let external_socket = external_ip.as_ref().map(|ip| SocketAddr::new(ip.clone().into(), config.tcp_port)).map_err(|_| ());
+let external_socket = external_ip.as_ref().map(|ip| SocketAddr::new((*ip).into(), config.tcp_port)).map_err(|_| ());
 info!(log, "UPnP TCP route established"; "external_socket" => format!("{}:{}", external_socket.as_ref().map(|ip| ip.to_string()).unwrap_or_else(|_| "".into()), config.tcp_port));
 external_socket
 }).ok();
@@ -933,10 +933,10 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
 // check if we have the batch for our optimistic start. If not, request it first.
 // We wait for this batch before requesting any other batches.
 if let Some(epoch) = self.optimistic_start {
-if !self.batches.contains_key(&epoch) {
+if let Entry::Vacant(entry) = self.batches.entry(epoch) {
 if let Some(peer) = idle_peers.pop() {
 let optimistic_batch = BatchInfo::new(&epoch, EPOCHS_PER_BATCH);
-self.batches.insert(epoch, optimistic_batch);
+entry.insert(optimistic_batch);
 self.send_batch(network, epoch, peer)?;
 }
 }
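Replacing the `contains_key` check plus later `insert` with the `Entry` API, as in the syncing-chain hunk above, is what clippy's `map_entry` lint suggests: it avoids looking up the key twice and makes the insert-if-vacant intent explicit. A standalone example (the map type and values here are illustrative):

```rust
use std::collections::btree_map::Entry;
use std::collections::BTreeMap;

fn main() {
    let mut batches: BTreeMap<u64, &str> = BTreeMap::new();
    let epoch = 42;

    // Old shape: if !batches.contains_key(&epoch) { batches.insert(epoch, "batch"); }
    if let Entry::Vacant(entry) = batches.entry(epoch) {
        entry.insert("batch");
    }

    assert_eq!(batches.get(&epoch), Some(&"batch"));
}
```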
@@ -285,12 +285,11 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>

 /// Store a state in the store.
 pub fn put_state(&self, state_root: &Hash256, state: &BeaconState<E>) -> Result<(), Error> {
-if state.slot < self.get_split_slot() {
 let mut ops: Vec<KeyValueStoreOp> = Vec::new();
+if state.slot < self.get_split_slot() {
 self.store_cold_state(state_root, &state, &mut ops)?;
 self.cold_db.do_atomically(ops)
 } else {
-let mut ops: Vec<KeyValueStoreOp> = Vec::new();
 self.store_hot_state(state_root, state, &mut ops)?;
 self.hot_db.do_atomically(ops)
 }
@@ -251,18 +251,18 @@ mod tests {
 let key = Hash256::random();
 let item = StorableThing { a: 1, b: 42 };

-assert_eq!(store.exists::<StorableThing>(&key).unwrap(), false);
+assert!(!store.exists::<StorableThing>(&key).unwrap());

 store.put(&key, &item).unwrap();

-assert_eq!(store.exists::<StorableThing>(&key).unwrap(), true);
+assert!(store.exists::<StorableThing>(&key).unwrap());

 let retrieved = store.get(&key).unwrap().unwrap();
 assert_eq!(item, retrieved);

 store.delete::<StorableThing>(&key).unwrap();

-assert_eq!(store.exists::<StorableThing>(&key).unwrap(), false);
+assert!(!store.exists::<StorableThing>(&key).unwrap());

 assert_eq!(store.get::<StorableThing>(&key).unwrap(), None);
 }
@@ -289,14 +289,14 @@ mod tests {
 let key = Hash256::random();
 let item = StorableThing { a: 1, b: 42 };

-assert_eq!(store.exists::<StorableThing>(&key).unwrap(), false);
+assert!(!store.exists::<StorableThing>(&key).unwrap());

 store.put(&key, &item).unwrap();

-assert_eq!(store.exists::<StorableThing>(&key).unwrap(), true);
+assert!(store.exists::<StorableThing>(&key).unwrap());

 store.delete::<StorableThing>(&key).unwrap();

-assert_eq!(store.exists::<StorableThing>(&key).unwrap(), false);
+assert!(!store.exists::<StorableThing>(&key).unwrap());
 }
 }
@@ -1,4 +1,5 @@
 #![cfg(test)]
+#![recursion_limit = "256"]

 use beacon_chain::StateSkipConfig;
 use node_test_rig::{
@@ -231,7 +231,7 @@ impl<'a> Builder<'a> {
 if self.store_withdrawal_keystore {
 // Write the withdrawal password to file.
 write_password_to_file(
-password_dir.join(withdrawal_keypair.pk.to_hex_string()),
+password_dir.join(withdrawal_keypair.pk.as_hex_string()),
 withdrawal_password.as_bytes(),
 )?;

@@ -294,9 +294,8 @@ mod tests {
 "after first push sub should have len {}",
 len
 );
-assert_eq!(
-sub.is_empty(arena).expect("should exist"),
-false,
+assert!(
+!sub.is_empty(arena).expect("should exist"),
 "new sub should not be empty"
 );

@@ -375,9 +374,8 @@ mod tests {
 0,
 "new sub should have len 0"
 );
-assert_eq!(
+assert!(
 sub.is_empty(arena).expect("should exist"),
-true,
 "new sub should be empty"
 );

@@ -397,9 +395,8 @@ mod tests {
 0,
 "new sub should have len 0"
 );
-assert_eq!(
+assert!(
 sub_01.is_empty(arena).expect("should exist"),
-true,
 "new sub should be empty"
 );

@@ -409,9 +406,8 @@ mod tests {
 0,
 "new sub should have len 0"
 );
-assert_eq!(
+assert!(
 sub_02.is_empty(arena).expect("should exist"),
-true,
 "new sub should be empty"
 );

@@ -432,9 +428,8 @@ mod tests {
 0,
 "new sub should have len 0"
 );
-assert_eq!(
+assert!(
 sub_01.is_empty(arena).expect("should exist"),
-true,
 "new sub should be empty"
 );

@@ -446,9 +441,8 @@ mod tests {
 0,
 "new sub should have len 0"
 );
-assert_eq!(
+assert!(
 sub_02.is_empty(arena).expect("should exist"),
-true,
 "new sub should be empty"
 );

@@ -474,9 +468,8 @@ mod tests {
 0,
 "new sub should have len 0"
 );
-assert_eq!(
+assert!(
 sub.is_empty(arena).expect("should exist"),
-true,
 "new sub should be empty"
 );
 subs.push(sub);
@@ -492,9 +485,8 @@ mod tests {
 0,
 "new sub should have len 0"
 );
-assert_eq!(
+assert!(
 sub.is_empty(arena).expect("should exist"),
-true,
 "new sub should be empty"
 );
 subs.push(sub);
@@ -6,7 +6,7 @@ use crate::{test_utils::*, *};
 fn default_values() {
 let cache = CommitteeCache::default();

-assert_eq!(cache.is_initialized_at(Epoch::new(0)), false);
+assert!(!cache.is_initialized_at(Epoch::new(0)));
 assert!(&cache.active_validator_indices().is_empty());
 assert_eq!(cache.get_beacon_committee(Slot::new(0), 0), None);
 assert_eq!(cache.get_attestation_duties(0), None);
@@ -105,10 +105,7 @@ mod tests {
 let indexed_vote_first = create_indexed_attestation(3, 1);
 let indexed_vote_second = create_indexed_attestation(3, 2);

-assert_eq!(
-indexed_vote_first.is_double_vote(&indexed_vote_second),
-true
-)
+assert!(indexed_vote_first.is_double_vote(&indexed_vote_second))
 }

 #[test]
@@ -116,10 +113,7 @@ mod tests {
 let indexed_vote_first = create_indexed_attestation(1, 1);
 let indexed_vote_second = create_indexed_attestation(2, 1);

-assert_eq!(
-indexed_vote_first.is_double_vote(&indexed_vote_second),
-false
-);
+assert!(!indexed_vote_first.is_double_vote(&indexed_vote_second));
 }

 #[test]
@@ -127,10 +121,7 @@ mod tests {
 let indexed_vote_first = create_indexed_attestation(2, 1);
 let indexed_vote_second = create_indexed_attestation(1, 2);

-assert_eq!(
-indexed_vote_first.is_surround_vote(&indexed_vote_second),
-true
-);
+assert!(indexed_vote_first.is_surround_vote(&indexed_vote_second));
 }

 #[test]
@@ -138,10 +129,7 @@ mod tests {
 let indexed_vote_first = create_indexed_attestation(4, 1);
 let indexed_vote_second = create_indexed_attestation(3, 2);

-assert_eq!(
-indexed_vote_first.is_surround_vote(&indexed_vote_second),
-true
-);
+assert!(indexed_vote_first.is_surround_vote(&indexed_vote_second));
 }

 #[test]
@@ -149,10 +137,7 @@ mod tests {
 let indexed_vote_first = create_indexed_attestation(2, 2);
 let indexed_vote_second = create_indexed_attestation(1, 1);

-assert_eq!(
-indexed_vote_first.is_surround_vote(&indexed_vote_second),
-false
-);
+assert!(!indexed_vote_first.is_surround_vote(&indexed_vote_second));
 }

 #[test]
@@ -160,10 +145,7 @@ mod tests {
 let indexed_vote_first = create_indexed_attestation(1, 1);
 let indexed_vote_second = create_indexed_attestation(2, 2);

-assert_eq!(
-indexed_vote_first.is_surround_vote(&indexed_vote_second),
-false
-);
+assert!(!indexed_vote_first.is_surround_vote(&indexed_vote_second));
 }

 ssz_and_tree_hash_tests!(IndexedAttestation<MainnetEthSpec>);
@@ -93,10 +93,10 @@ mod tests {

 let epoch = Epoch::new(0);

-assert_eq!(v.is_active_at(epoch), false);
-assert_eq!(v.is_exited_at(epoch), false);
-assert_eq!(v.is_withdrawable_at(epoch), false);
-assert_eq!(v.slashed, false);
+assert!(!v.is_active_at(epoch));
+assert!(!v.is_exited_at(epoch));
+assert!(!v.is_withdrawable_at(epoch));
+assert!(!v.slashed);
 }

 #[test]
@@ -108,9 +108,9 @@ mod tests {
 ..Validator::default()
 };

-assert_eq!(v.is_active_at(epoch - 1), false);
-assert_eq!(v.is_active_at(epoch), true);
-assert_eq!(v.is_active_at(epoch + 1), true);
+assert!(!v.is_active_at(epoch - 1));
+assert!(v.is_active_at(epoch));
+assert!(v.is_active_at(epoch + 1));
 }

 #[test]
@@ -122,9 +122,9 @@ mod tests {
 ..Validator::default()
 };

-assert_eq!(v.is_exited_at(epoch - 1), false);
-assert_eq!(v.is_exited_at(epoch), true);
-assert_eq!(v.is_exited_at(epoch + 1), true);
+assert!(!v.is_exited_at(epoch - 1));
+assert!(v.is_exited_at(epoch));
+assert!(v.is_exited_at(epoch + 1));
 }

 #[test]
@@ -136,9 +136,9 @@ mod tests {
 ..Validator::default()
 };

-assert_eq!(v.is_withdrawable_at(epoch - 1), false);
-assert_eq!(v.is_withdrawable_at(epoch), true);
-assert_eq!(v.is_withdrawable_at(epoch + 1), true);
+assert!(!v.is_withdrawable_at(epoch - 1));
+assert!(v.is_withdrawable_at(epoch));
+assert!(v.is_withdrawable_at(epoch + 1));
 }

 ssz_and_tree_hash_tests!(Validator);
@@ -51,7 +51,7 @@ where
 }

 /// Returns `self.serialize()` as a `0x`-prefixed hex string.
-pub fn to_hex_string(&self) -> String {
+pub fn as_hex_string(&self) -> String {
 format!("{:?}", self)
 }

@@ -70,7 +70,7 @@ impl<Pub> GenericPublicKeyBytes<Pub> {
 }

 /// Returns `self.serialize()` as a `0x`-prefixed hex string.
-pub fn to_hex_string(&self) -> String {
+pub fn as_hex_string(&self) -> String {
 format!("{:?}", self)
 }

@@ -230,7 +230,7 @@ impl Keystore {
 },
 uuid,
 path: Some(path),
-pubkey: keypair.pk.to_hex_string()[2..].to_string(),
+pubkey: keypair.pk.as_hex_string()[2..].to_string(),
 version: Version::four(),
 description: Some(description),
 name: None,
@@ -261,7 +261,7 @@ impl Keystore {

 let keypair = keypair_from_secret(plain_text.as_bytes())?;
 // Verify that the derived `PublicKey` matches `self`.
-if keypair.pk.to_hex_string()[2..] != self.json.pubkey {
+if keypair.pk.as_hex_string()[2..] != self.json.pubkey {
 return Err(Error::PublicKeyMismatch);
 }

@@ -1,3 +1,5 @@
+#![recursion_limit = "256"]
+
 mod metrics;

 use beacon_node::{get_eth2_network_config, ProductionBeaconNode};
@@ -13,7 +13,7 @@ impl BlockQueue {

 pub fn dequeue(&self) -> Vec<SignedBeaconBlockHeader> {
 let mut blocks = self.blocks.lock();
-std::mem::replace(&mut *blocks, vec![])
+std::mem::take(&mut *blocks)
 }

 pub fn len(&self) -> usize {
@@ -1,3 +1,5 @@
+#![recursion_limit = "256"]
+
 //! This crate provides a simluation that creates `n` beacon node and validator clients, each with
 //! `v` validators. A deposit contract is deployed at the start of the simulation using a local
 //! `ganache-cli` instance (you must have `ganache-cli` installed and avaliable on your path). All
@@ -160,7 +160,7 @@ impl SlashingDatabase {
 let mut stmt = txn.prepare("INSERT INTO validators (public_key) VALUES (?1)")?;
 for pubkey in public_keys {
 if self.get_validator_id_opt(&txn, pubkey)?.is_none() {
-stmt.execute(&[pubkey.to_hex_string()])?;
+stmt.execute(&[pubkey.as_hex_string()])?;
 }
 }
 Ok(())
@@ -205,7 +205,7 @@ impl SlashingDatabase {
 Ok(txn
 .query_row(
 "SELECT id FROM validators WHERE public_key = ?1",
-params![&public_key.to_hex_string()],
+params![&public_key.as_hex_string()],
 |row| row.get(0),
 )
 .optional()?)
@@ -990,11 +990,9 @@ mod tests {
 assert_eq!(db.conn_pool.max_size(), POOL_SIZE);
 assert_eq!(db.conn_pool.connection_timeout(), CONNECTION_TIMEOUT);
 let conn = db.conn_pool.get().unwrap();
-assert_eq!(
-conn.pragma_query_value(None, "foreign_keys", |row| { row.get::<_, bool>(0) })
-.unwrap(),
-true
-);
+assert!(conn
+.pragma_query_value(None, "foreign_keys", |row| { row.get::<_, bool>(0) })
+.unwrap());
 assert_eq!(
 conn.pragma_query_value(None, "locking_mode", |row| { row.get::<_, String>(0) })
 .unwrap()
@@ -129,13 +129,13 @@ mod tests {
 .write_all(format!("default: {}\n", DEFAULT_GRAFFITI).as_bytes())
 .unwrap();
 graffiti_file
-.write_all(format!("{}: {}\n", pk1.to_hex_string(), CUSTOM_GRAFFITI1).as_bytes())
+.write_all(format!("{}: {}\n", pk1.as_hex_string(), CUSTOM_GRAFFITI1).as_bytes())
 .unwrap();
 graffiti_file
-.write_all(format!("{}: {}\n", pk2.to_hex_string(), CUSTOM_GRAFFITI2).as_bytes())
+.write_all(format!("{}: {}\n", pk2.as_hex_string(), CUSTOM_GRAFFITI2).as_bytes())
 .unwrap();
 graffiti_file
-.write_all(format!("{}:{}\n", pk3.to_hex_string(), EMPTY_GRAFFITI).as_bytes())
+.write_all(format!("{}:{}\n", pk3.as_hex_string(), EMPTY_GRAFFITI).as_bytes())
 .unwrap();
 graffiti_file.flush().unwrap();
 file_name