Remove old benches (#465)
* Remove cached_tree_hash benches
* Remove state_processing benches
parent eb669ab40f
commit 81a089aa8b
@@ -4,12 +4,7 @@ version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"

[[bench]]
name = "benches"
harness = false

[dev-dependencies]
criterion = "0.2"
env_logger = "0.6.0"
serde = "1.0"
serde_derive = "1.0"
@@ -1,270 +0,0 @@
use criterion::Criterion;
use criterion::{black_box, Benchmark};
use state_processing::{
    per_block_processing,
    per_block_processing::{
        process_attestations, process_attester_slashings, process_deposits, process_eth1_data,
        process_exits, process_proposer_slashings, process_randao, process_transfers,
        verify_block_signature,
    },
};
use tree_hash::TreeHash;
use types::*;

/// Run the detailed benchmarking suite on the given `BeaconState`.
///
/// `desc` will be added to the title of each bench.
pub fn bench_block_processing(
    c: &mut Criterion,
    initial_block: &BeaconBlock,
    initial_state: &BeaconState,
    initial_spec: &ChainSpec,
    desc: &str,
) {
    let state = initial_state.clone();
    let block = initial_block.clone();
    let spec = initial_spec.clone();
    c.bench(
        &format!("{}/block_processing", desc),
        Benchmark::new("verify_block_signature", move |b| {
            b.iter_batched(
                || state.clone(),
                |mut state| {
                    verify_block_signature(&mut state, &block, &spec).unwrap();
                    state
                },
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(10),
    );

    let state = initial_state.clone();
    let block = initial_block.clone();
    let spec = initial_spec.clone();
    c.bench(
        &format!("{}/block_processing", desc),
        Benchmark::new("process_randao", move |b| {
            b.iter_batched(
                || state.clone(),
                |mut state| {
                    process_randao(&mut state, &block, &spec).unwrap();
                    state
                },
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(10),
    );

    let state = initial_state.clone();
    let block = initial_block.clone();
    c.bench(
        &format!("{}/block_processing", desc),
        Benchmark::new("process_eth1_data", move |b| {
            b.iter_batched(
                || state.clone(),
                |mut state| {
                    process_eth1_data(&mut state, &block.eth1_data).unwrap();
                    state
                },
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(10),
    );

    let state = initial_state.clone();
    let block = initial_block.clone();
    let spec = initial_spec.clone();
    c.bench(
        &format!("{}/block_processing", desc),
        Benchmark::new("process_proposer_slashings", move |b| {
            b.iter_batched(
                || state.clone(),
                |mut state| {
                    process_proposer_slashings(&mut state, &block.body.proposer_slashings, &spec)
                        .unwrap();
                    state
                },
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(10),
    );

    let state = initial_state.clone();
    let block = initial_block.clone();
    let spec = initial_spec.clone();
    c.bench(
        &format!("{}/block_processing", desc),
        Benchmark::new("process_attester_slashings", move |b| {
            b.iter_batched(
                || state.clone(),
                |mut state| {
                    process_attester_slashings(&mut state, &block.body.attester_slashings, &spec)
                        .unwrap();
                    state
                },
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(10),
    );

    let state = initial_state.clone();
    let block = initial_block.clone();
    let spec = initial_spec.clone();
    c.bench(
        &format!("{}/block_processing", desc),
        Benchmark::new("process_attestations", move |b| {
            b.iter_batched(
                || state.clone(),
                |mut state| {
                    process_attestations(&mut state, &block.body.attestations, &spec).unwrap();
                    state
                },
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(10),
    );

    let state = initial_state.clone();
    let block = initial_block.clone();
    let spec = initial_spec.clone();
    c.bench(
        &format!("{}/block_processing", desc),
        Benchmark::new("process_deposits", move |b| {
            b.iter_batched(
                || state.clone(),
                |mut state| {
                    process_deposits(&mut state, &block.body.deposits, &spec).unwrap();
                    state
                },
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(10),
    );

    let state = initial_state.clone();
    let block = initial_block.clone();
    let spec = initial_spec.clone();
    c.bench(
        &format!("{}/block_processing", desc),
        Benchmark::new("process_exits", move |b| {
            b.iter_batched(
                || state.clone(),
                |mut state| {
                    process_exits(&mut state, &block.body.voluntary_exits, &spec).unwrap();
                    state
                },
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(10),
    );

    let state = initial_state.clone();
    let block = initial_block.clone();
    let spec = initial_spec.clone();
    c.bench(
        &format!("{}/block_processing", desc),
        Benchmark::new("process_transfers", move |b| {
            b.iter_batched(
                || state.clone(),
                |mut state| {
                    process_transfers(&mut state, &block.body.transfers, &spec).unwrap();
                    state
                },
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(10),
    );

    let state = initial_state.clone();
    let block = initial_block.clone();
    let spec = initial_spec.clone();
    c.bench(
        &format!("{}/block_processing", desc),
        Benchmark::new("per_block_processing", move |b| {
            b.iter_batched(
                || state.clone(),
                |mut state| {
                    per_block_processing(&mut state, &block, &spec).unwrap();
                    state
                },
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(10),
    );

    let mut state = initial_state.clone();
    state.drop_cache(RelativeEpoch::Previous);
    let spec = initial_spec.clone();
    c.bench(
        &format!("{}/block_processing", desc),
        Benchmark::new("build_previous_state_committee_cache", move |b| {
            b.iter_batched(
                || state.clone(),
                |mut state| {
                    state
                        .build_committee_cache(RelativeEpoch::Previous, &spec)
                        .unwrap();
                    state
                },
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(10),
    );

    let mut state = initial_state.clone();
    state.drop_cache(RelativeEpoch::Current);
    let spec = initial_spec.clone();
    c.bench(
        &format!("{}/block_processing", desc),
        Benchmark::new("build_current_state_committee_cache", move |b| {
            b.iter_batched(
                || state.clone(),
                |mut state| {
                    state
                        .build_committee_cache(RelativeEpoch::Current, &spec)
                        .unwrap();
                    state
                },
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(10),
    );

    let mut state = initial_state.clone();
    state.drop_pubkey_cache();
    c.bench(
        &format!("{}/block_processing", desc),
        Benchmark::new("build_pubkey_cache", move |b| {
            b.iter_batched(
                || state.clone(),
                |mut state| {
                    state.update_pubkey_cache().unwrap();
                    state
                },
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(10),
    );

    let block = initial_block.clone();
    c.bench(
        &format!("{}/block_processing", desc),
        Benchmark::new("tree_hash_block", move |b| {
            b.iter(|| black_box(block.tree_hash_root()))
        })
        .sample_size(10),
    );
}
@@ -1,263 +0,0 @@
use criterion::Criterion;
use criterion::{black_box, Benchmark};
use state_processing::{
    per_epoch_processing,
    per_epoch_processing::{
        clean_attestations, initialize_validator_statuses, process_crosslinks, process_eth1_data,
        process_justification, process_rewards_and_penalities, process_validator_registry,
        update_active_tree_index_roots, update_latest_slashed_balances,
    },
};
use tree_hash::TreeHash;
use types::test_utils::TestingBeaconStateBuilder;
use types::*;

pub const BENCHING_SAMPLE_SIZE: usize = 10;
pub const SMALL_BENCHING_SAMPLE_SIZE: usize = 10;

/// Run the benchmarking suite on a foundation spec with 16,384 validators.
pub fn bench_epoch_processing_n_validators(c: &mut Criterion, validator_count: usize) {
    let spec = ChainSpec::mainnet();

    let mut builder =
        TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec);

    // Set the state to be just before an epoch transition.
    let target_slot = (T::genesis_epoch() + 4).end_slot(T::slots_per_epoch());
    builder.teleport_to_slot(target_slot, &spec);

    // Builds all caches; benches will not contain shuffling/committee building times.
    builder.build_caches(&spec).unwrap();

    // Inserts one attestation with full participation for each committee able to include an
    // attestation in this state.
    builder.insert_attestations(&spec);

    let (state, _keypairs) = builder.build();

    // Assert that the state has an attestation for each committee that is able to include an
    // attestation in the state.
    let committees_per_epoch = spec.get_committee_count(validator_count);
    let committees_per_slot = committees_per_epoch / T::slots_per_epoch();
    let previous_epoch_attestations = committees_per_epoch;
    let current_epoch_attestations =
        committees_per_slot * (T::slots_per_epoch() - spec.min_attestation_inclusion_delay);
    assert_eq!(
        state.latest_attestations.len() as u64,
        previous_epoch_attestations + current_epoch_attestations,
        "The state should have an attestation for each committee."
    );

    // Assert that we will run the first arm of process_rewards_and_penalties
    let epochs_since_finality = state.next_epoch(&spec) - state.finalized_epoch;
    assert_eq!(
        epochs_since_finality, 4,
        "Epochs since finality should be 4"
    );

    bench_epoch_processing(c, &state, &spec, &format!("{}_validators", validator_count));
}

/// Run the detailed benchmarking suite on the given `BeaconState`.
///
/// `desc` will be added to the title of each bench.
fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSpec, desc: &str) {
    let state_clone = state.clone();
    let spec_clone = spec.clone();
    c.bench(
        &format!("{}/epoch_processing", desc),
        Benchmark::new("process_eth1_data", move |b| {
            b.iter_batched(
                || state_clone.clone(),
                |mut state| {
                    process_eth1_data(&mut state, &spec_clone);
                    state
                },
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(BENCHING_SAMPLE_SIZE),
    );

    let state_clone = state.clone();
    let spec_clone = spec.clone();
    c.bench(
        &format!("{}/epoch_processing", desc),
        Benchmark::new("initialize_validator_statuses", move |b| {
            b.iter_batched(
                || state_clone.clone(),
                |mut state| {
                    initialize_validator_statuses(&mut state, &spec_clone).unwrap();
                    state
                },
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(BENCHING_SAMPLE_SIZE),
    );

    let state_clone = state.clone();
    let spec_clone = spec.clone();
    let attesters = initialize_validator_statuses(&state, &spec).unwrap();
    c.bench(
        &format!("{}/epoch_processing", desc),
        Benchmark::new("process_justification", move |b| {
            b.iter_batched(
                || state_clone.clone(),
                |mut state| {
                    process_justification(&mut state, &attesters.total_balances, &spec_clone);
                    state
                },
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(10),
    );

    let state_clone = state.clone();
    let spec_clone = spec.clone();
    c.bench(
        &format!("{}/epoch_processing", desc),
        Benchmark::new("process_crosslinks", move |b| {
            b.iter_batched(
                || state_clone.clone(),
                |mut state| black_box(process_crosslinks(&mut state, &spec_clone).unwrap()),
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(BENCHING_SAMPLE_SIZE),
    );

    let mut state_clone = state.clone();
    let spec_clone = spec.clone();
    let attesters = initialize_validator_statuses(&state, &spec).unwrap();
    let winning_root_for_shards = process_crosslinks(&mut state_clone, &spec).unwrap();
    c.bench(
        &format!("{}/epoch_processing", desc),
        Benchmark::new("process_rewards_and_penalties", move |b| {
            b.iter_batched(
                || (state_clone.clone(), attesters.clone()),
                |(mut state, mut attesters)| {
                    process_rewards_and_penalities(
                        &mut state,
                        &mut attesters,
                        &winning_root_for_shards,
                        &spec_clone,
                    )
                    .unwrap();
                    state
                },
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(SMALL_BENCHING_SAMPLE_SIZE),
    );

    let state_clone = state.clone();
    let spec_clone = spec.clone();
    c.bench(
        &format!("{}/epoch_processing", desc),
        Benchmark::new("process_ejections", move |b| {
            b.iter_batched(
                || state_clone.clone(),
                |mut state| {
                    state.process_ejections(&spec_clone);
                    state
                },
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(BENCHING_SAMPLE_SIZE),
    );

    let state_clone = state.clone();
    let spec_clone = spec.clone();
    c.bench(
        &format!("{}/epoch_processing", desc),
        Benchmark::new("process_validator_registry", move |b| {
            b.iter_batched(
                || state_clone.clone(),
                |mut state| {
                    process_validator_registry(&mut state, &spec_clone).unwrap();
                    state
                },
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(BENCHING_SAMPLE_SIZE),
    );

    let state_clone = state.clone();
    let spec_clone = spec.clone();
    c.bench(
        &format!("{}/epoch_processing", desc),
        Benchmark::new("update_active_tree_index_roots", move |b| {
            b.iter_batched(
                || state_clone.clone(),
                |mut state| {
                    update_active_tree_index_roots(&mut state, &spec_clone).unwrap();
                    state
                },
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(BENCHING_SAMPLE_SIZE),
    );

    let state_clone = state.clone();
    let spec_clone = spec.clone();
    c.bench(
        &format!("{}/epoch_processing", desc),
        Benchmark::new("update_latest_slashed_balances", move |b| {
            b.iter_batched(
                || state_clone.clone(),
                |mut state| {
                    update_latest_slashed_balances(&mut state, &spec_clone);
                    state
                },
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(BENCHING_SAMPLE_SIZE),
    );

    let state_clone = state.clone();
    let spec_clone = spec.clone();
    c.bench(
        &format!("{}/epoch_processing", desc),
        Benchmark::new("clean_attestations", move |b| {
            b.iter_batched(
                || state_clone.clone(),
                |mut state| {
                    clean_attestations(&mut state, &spec_clone);
                    state
                },
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(BENCHING_SAMPLE_SIZE),
    );

    let state_clone = state.clone();
    let spec_clone = spec.clone();
    c.bench(
        &format!("{}/epoch_processing", desc),
        Benchmark::new("per_epoch_processing", move |b| {
            b.iter_batched(
                || state_clone.clone(),
                |mut state| black_box(per_epoch_processing(&mut state, &spec_clone).unwrap()),
                criterion::BatchSize::SmallInput,
            )
        })
        .sample_size(SMALL_BENCHING_SAMPLE_SIZE),
    );

    let state_clone = state.clone();
    c.bench(
        &format!("{}/epoch_processing", desc),
        Benchmark::new("tree_hash_state", move |b| {
            b.iter(|| black_box(state_clone.tree_hash_root()))
        })
        .sample_size(SMALL_BENCHING_SAMPLE_SIZE),
    );
}
@@ -1,103 +0,0 @@
use block_benching_builder::BlockBenchingBuilder;
use criterion::Criterion;
use criterion::{criterion_group, criterion_main};
use env_logger::{Builder, Env};
use log::info;
use types::*;

mod bench_block_processing;
mod bench_epoch_processing;
mod block_benching_builder;

pub const VALIDATOR_COUNT: usize = 16_384;

// `LOG_LEVEL == "info"` gives handy messages.
pub const LOG_LEVEL: &str = "info";

/// Build a worst-case block and benchmark processing it.
pub fn block_processing_worst_case(c: &mut Criterion) {
    if LOG_LEVEL != "" {
        Builder::from_env(Env::default().default_filter_or(LOG_LEVEL)).init();
    }
    info!(
        "Building worst case block bench with {} validators",
        VALIDATOR_COUNT
    );

    // Use the specifications from the Eth2.0 spec.
    let spec = ChainSpec::mainnet();

    // Create a builder for configuring the block and state for benching.
    let mut bench_builder = BlockBenchingBuilder::new(VALIDATOR_COUNT, &spec);

    // Set the number of included operations to be maximum (e.g., `MAX_ATTESTATIONS`, etc.)
    bench_builder.maximize_block_operations(&spec);

    // Set the state and block to be in the last slot of the 4th epoch.
    let last_slot_of_epoch = (T::genesis_epoch() + 4).end_slot(T::slots_per_epoch());
    bench_builder.set_slot(last_slot_of_epoch, &spec);

    // Build all the state caches so the build times aren't included in the benches.
    bench_builder.build_caches(&spec);

    // Generate the block and state then run benches.
    let (block, state) = bench_builder.build(&spec);
    bench_block_processing::bench_block_processing(
        c,
        &block,
        &state,
        &spec,
        &format!("{}_validators/worst_case", VALIDATOR_COUNT),
    );
}

/// Build a reasonable-case block and benchmark processing it.
pub fn block_processing_reasonable_case(c: &mut Criterion) {
    info!(
        "Building reasonable case block bench with {} validators",
        VALIDATOR_COUNT
    );

    // Use the specifications from the Eth2.0 spec.
    let spec = ChainSpec::mainnet();

    // Create a builder for configuring the block and state for benching.
    let mut bench_builder = BlockBenchingBuilder::new(VALIDATOR_COUNT, &spec);

    // Set the number of included operations to what we might expect normally.
    bench_builder.num_proposer_slashings = 0;
    bench_builder.num_attester_slashings = 0;
    bench_builder.num_attestations = (spec.shard_count / T::slots_per_epoch()) as usize;
    bench_builder.num_deposits = 2;
    bench_builder.num_exits = 2;
    bench_builder.num_transfers = 2;

    // Set the state and block to be in the last slot of the 4th epoch.
    let last_slot_of_epoch = (T::genesis_epoch() + 4).end_slot(T::slots_per_epoch());
    bench_builder.set_slot(last_slot_of_epoch, &spec);

    // Build all the state caches so the build times aren't included in the benches.
    bench_builder.build_caches(&spec);

    // Generate the block and state then run benches.
    let (block, state) = bench_builder.build(&spec);
    bench_block_processing::bench_block_processing(
        c,
        &block,
        &state,
        &spec,
        &format!("{}_validators/reasonable_case", VALIDATOR_COUNT),
    );
}

pub fn state_processing(c: &mut Criterion) {
    bench_epoch_processing::bench_epoch_processing_n_validators(c, VALIDATOR_COUNT);
}

criterion_group!(
    benches,
    block_processing_reasonable_case,
    block_processing_worst_case,
    state_processing
);
criterion_main!(benches);
@@ -1,175 +0,0 @@
use log::info;
use types::test_utils::{TestingBeaconBlockBuilder, TestingBeaconStateBuilder};
use types::*;

pub struct BlockBenchingBuilder {
    pub state_builder: TestingBeaconStateBuilder,
    pub block_builder: TestingBeaconBlockBuilder,

    pub num_validators: usize,
    pub num_proposer_slashings: usize,
    pub num_attester_slashings: usize,
    pub num_indices_per_slashable_vote: usize,
    pub num_attestations: usize,
    pub num_deposits: usize,
    pub num_exits: usize,
    pub num_transfers: usize,
}

impl BlockBenchingBuilder {
    pub fn new(num_validators: usize, spec: &ChainSpec) -> Self {
        let state_builder =
            TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(num_validators, &spec);
        let block_builder = TestingBeaconBlockBuilder::new(spec);

        Self {
            state_builder,
            block_builder,
            num_validators: 0,
            num_proposer_slashings: 0,
            num_attester_slashings: 0,
            num_indices_per_slashable_vote: spec.max_indices_per_slashable_vote as usize,
            num_attestations: 0,
            num_deposits: 0,
            num_exits: 0,
            num_transfers: 0,
        }
    }

    pub fn maximize_block_operations(&mut self, spec: &ChainSpec) {
        self.num_proposer_slashings = spec.max_proposer_slashings as usize;
        self.num_attester_slashings = spec.max_attester_slashings as usize;
        self.num_indices_per_slashable_vote = spec.max_indices_per_slashable_vote as usize;
        self.num_attestations = spec.max_attestations as usize;
        self.num_deposits = spec.max_deposits as usize;
        self.num_exits = spec.max_voluntary_exits as usize;
        self.num_transfers = spec.max_transfers as usize;
    }

    pub fn set_slot(&mut self, slot: Slot, spec: &ChainSpec) {
        self.state_builder.teleport_to_slot(slot, &spec);
    }

    pub fn build_caches(&mut self, spec: &ChainSpec) {
        // Builds all caches; benches will not contain shuffling/committee building times.
        self.state_builder.build_caches(&spec).unwrap();
    }

    pub fn build(mut self, spec: &ChainSpec) -> (BeaconBlock, BeaconState) {
        let (mut state, keypairs) = self.state_builder.build();
        let builder = &mut self.block_builder;

        builder.set_slot(state.slot);

        let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap();
        let keypair = &keypairs[proposer_index];

        builder.set_randao_reveal(&keypair.sk, &state.fork, spec);

        // Used as a stream of validator indices for use in slashings, exits, etc.
        let mut validators_iter = (0..keypairs.len() as u64).into_iter();

        // Insert `ProposerSlashing` objects.
        for _ in 0..self.num_proposer_slashings {
            let validator_index = validators_iter.next().expect("Insufficient validators.");

            builder.insert_proposer_slashing(
                validator_index,
                &keypairs[validator_index as usize].sk,
                &state.fork,
                spec,
            );
        }
        info!(
            "Inserted {} proposer slashings.",
            builder.block.body.proposer_slashings.len()
        );

        // Insert `AttesterSlashing` objects
        for _ in 0..self.num_attester_slashings {
            let mut attesters: Vec<u64> = vec![];
            let mut secret_keys: Vec<&SecretKey> = vec![];

            for _ in 0..self.num_indices_per_slashable_vote {
                let validator_index = validators_iter.next().expect("Insufficient validators.");

                attesters.push(validator_index);
                secret_keys.push(&keypairs[validator_index as usize].sk);
            }

            builder.insert_attester_slashing(&attesters, &secret_keys, &state.fork, spec);
        }
        info!(
            "Inserted {} attester slashings.",
            builder.block.body.attester_slashings.len()
        );

        // Insert `Attestation` objects.
        let all_secret_keys: Vec<&SecretKey> = keypairs.iter().map(|keypair| &keypair.sk).collect();
        builder
            .insert_attestations(
                &state,
                &all_secret_keys,
                self.num_attestations as usize,
                spec,
            )
            .unwrap();
        info!(
            "Inserted {} attestations.",
            builder.block.body.attestations.len()
        );

        // Insert `Deposit` objects.
        for i in 0..self.num_deposits {
            builder.insert_deposit(
                32_000_000_000,
                state.deposit_index + (i as u64),
                &state,
                spec,
            );
        }
        info!("Inserted {} deposits.", builder.block.body.deposits.len());

        // Insert the maximum possible number of `Exit` objects.
        for _ in 0..self.num_exits {
            let validator_index = validators_iter.next().expect("Insufficient validators.");

            builder.insert_exit(
                &state,
                validator_index,
                &keypairs[validator_index as usize].sk,
                spec,
            );
        }
        info!(
            "Inserted {} exits.",
            builder.block.body.voluntary_exits.len()
        );

        // Insert the maximum possible number of `Transfer` objects.
        for _ in 0..self.num_transfers {
            let validator_index = validators_iter.next().expect("Insufficient validators.");

            // Manually set the validator to be withdrawn.
            state.validator_registry[validator_index as usize].withdrawable_epoch =
                state.previous_epoch(spec);

            builder.insert_transfer(
                &state,
                validator_index,
                validator_index,
                1,
                keypairs[validator_index as usize].clone(),
                spec,
            );
        }
        info!("Inserted {} transfers.", builder.block.body.transfers.len());

        let mut block = self.block_builder.build(&keypair.sk, &state.fork, spec);

        // Set the eth1 data to be different from the state.
        block.eth1_data.block_hash = Hash256::from_slice(&vec![42; 32]);

        (block, state)
    }
}
@@ -4,12 +4,7 @@ version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"

[[bench]]
name = "benches"
harness = false

[dev-dependencies]
criterion = "0.2"
tree_hash_derive = { path = "../tree_hash_derive" }

[dependencies]
@@ -1,73 +0,0 @@
#[macro_use]
extern crate criterion;

use cached_tree_hash::TreeHashCache;
use criterion::black_box;
use criterion::{Benchmark, Criterion};
use ethereum_types::H256 as Hash256;
use hashing::hash;
use tree_hash::TreeHash;

fn criterion_benchmark(c: &mut Criterion) {
    let n = 1024;

    let source_vec: Vec<Hash256> = (0..n).map(|_| Hash256::random()).collect();

    let mut source_modified_vec = source_vec.clone();
    source_modified_vec[n - 1] = Hash256::random();

    let modified_vec = source_modified_vec.clone();
    c.bench(
        &format!("vec_of_{}_hashes", n),
        Benchmark::new("standard", move |b| {
            b.iter_with_setup(
                || modified_vec.clone(),
                |modified_vec| black_box(modified_vec.tree_hash_root()),
            )
        })
        .sample_size(100),
    );

    let modified_vec = source_modified_vec.clone();
    c.bench(
        &format!("vec_of_{}_hashes", n),
        Benchmark::new("build_cache", move |b| {
            b.iter_with_setup(
                || modified_vec.clone(),
                |vec| black_box(TreeHashCache::new(&vec, 0)),
            )
        })
        .sample_size(100),
    );

    let vec = source_vec.clone();
    let modified_vec = source_modified_vec.clone();
    c.bench(
        &format!("vec_of_{}_hashes", n),
        Benchmark::new("cache_update", move |b| {
            b.iter_with_setup(
                || {
                    let cache = TreeHashCache::new(&vec, 0).unwrap();
                    (cache, modified_vec.clone())
                },
                |(mut cache, modified_vec)| black_box(cache.update(&modified_vec)),
            )
        })
        .sample_size(100),
    );

    c.bench(
        &format!("{}_hashes", n),
        Benchmark::new("hash_64_bytes", move |b| {
            b.iter(|| {
                for _ in 0..n {
                    let _digest = hash(&[42; 64]);
                }
            })
        })
        .sample_size(100),
    );
}

criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);