// lighthouse/consensus/ssz_derive/src/lib.rs

#![recursion_limit = "256"]
//! Provides procedural derive macros for the `Encode` and `Decode` traits of the `eth2_ssz` crate.
//!
//! Supports field attributes, see each derive macro for more information.
use proc_macro::TokenStream;
use quote::quote;
use syn::{parse_macro_input, DataEnum, DataStruct, DeriveInput};
/// Returns a Vec of `syn::Ident` for each named field in the struct, whilst filtering out fields
/// that should not be serialized.
///
/// # Panics
/// Panics at compile time if any struct field is unnamed (e.g., in a tuple struct).
fn get_serializable_named_field_idents(struct_data: &syn::DataStruct) -> Vec<&syn::Ident> {
struct_data
.fields
.iter()
.filter_map(|f| {
if should_skip_serializing(f) {
None
} else {
Some(match &f.ident {
Some(ref ident) => ident,
_ => panic!("ssz_derive only supports named struct fields."),
})
}
})
.collect()
}
/// Returns a Vec of `syn::Type` for each named field in the struct, whilst filtering out fields
/// that should not be serialized.
fn get_serializable_field_types(struct_data: &syn::DataStruct) -> Vec<&syn::Type> {
struct_data
.fields
.iter()
.filter_map(|f| {
if should_skip_serializing(f) {
None
} else {
Some(&f.ty)
}
})
.collect()
}
/// Returns true if the field has an attribute declaring it should not be serialized.
///
/// The field attribute is: `#[ssz(skip_serializing)]`
fn should_skip_serializing(field: &syn::Field) -> bool {
field.attrs.iter().any(|attr| {
attr.path.is_ident("ssz")
&& attr.tokens.to_string().replace(" ", "") == "(skip_serializing)"
})
}
/// Implements `ssz::Encode` for some `struct` or `enum`.
///
/// Fields are encoded in the order they are defined.
///
/// ## Field attributes
///
/// - `#[ssz(skip_serializing)]`: the field will not be serialized.
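///
/// ## Example
///
/// A minimal usage sketch; the struct and its fields are illustrative, not part of this crate:
///
/// ```ignore
/// use ssz::Encode;
/// use ssz_derive::Encode;
///
/// #[derive(Encode)]
/// struct Foo {
///     a: u64,
///     b: Vec<u8>,
///     #[ssz(skip_serializing)]
///     cache: u64, // never written to the SSZ byte stream
/// }
///
/// let foo = Foo { a: 42, b: vec![0, 1], cache: 0 };
/// let bytes = foo.as_ssz_bytes(); // encodes `a` then `b`, in definition order
/// ```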
#[proc_macro_derive(Encode, attributes(ssz))]
pub fn ssz_encode_derive(input: TokenStream) -> TokenStream {
let item = parse_macro_input!(input as DeriveInput);
match &item.data {
syn::Data::Struct(s) => ssz_encode_derive_struct(&item, s),
syn::Data::Enum(s) => ssz_encode_derive_enum(&item, s),
_ => panic!("ssz_derive only supports structs and enums"),
}
}
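/// Derives `ssz::Encode` for a `struct`, encoding its serializable fields in definition order.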
fn ssz_encode_derive_struct(derive_input: &DeriveInput, struct_data: &DataStruct) -> TokenStream {
let name = &derive_input.ident;
let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl();
let field_idents = get_serializable_named_field_idents(struct_data);
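// Take a separate copy of the field idents/types for each `quote!` repetition that interpolates
// them below.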
let field_idents_a = get_serializable_named_field_idents(struct_data);
let field_types_a = get_serializable_field_types(struct_data);
let field_types_b = field_types_a.clone();
let field_types_d = field_types_a.clone();
let field_types_e = field_types_a.clone();
let field_types_f = field_types_a.clone();
let output = quote! {
impl #impl_generics ssz::Encode for #name #ty_generics #where_clause {
fn is_ssz_fixed_len() -> bool {
#(
<#field_types_a as ssz::Encode>::is_ssz_fixed_len() &&
)*
true
}
fn ssz_fixed_len() -> usize {
if <Self as ssz::Encode>::is_ssz_fixed_len() {
let mut len: usize = 0;
#(
len = len
.checked_add(<#field_types_b as ssz::Encode>::ssz_fixed_len())
.expect("encode ssz_fixed_len length overflow");
)*
len
} else {
ssz::BYTES_PER_LENGTH_OFFSET
}
}
fn ssz_bytes_len(&self) -> usize {
if <Self as ssz::Encode>::is_ssz_fixed_len() {
<Self as ssz::Encode>::ssz_fixed_len()
} else {
let mut len: usize = 0;
#(
if <#field_types_d as ssz::Encode>::is_ssz_fixed_len() {
len = len
.checked_add(<#field_types_e as ssz::Encode>::ssz_fixed_len())
.expect("encode ssz_bytes_len length overflow");
} else {
len = len
.checked_add(ssz::BYTES_PER_LENGTH_OFFSET)
.expect("encode ssz_bytes_len length overflow for offset");
len = len
.checked_add(self.#field_idents_a.ssz_bytes_len())
.expect("encode ssz_bytes_len length overflow for bytes");
}
)*
len
}
}
fn ssz_append(&self, buf: &mut Vec<u8>) {
let mut offset: usize = 0;
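// Compute where the variable-length data begins: the sum of every field's fixed length
// (for variable-size fields, `ssz_fixed_len()` is the size of a length offset).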
#(
offset = offset
.checked_add(<#field_types_f as ssz::Encode>::ssz_fixed_len())
.expect("encode ssz_append offset overflow");
)*
let mut encoder = ssz::SszEncoder::container(buf, offset);
#(
encoder.append(&self.#field_idents);
)*
encoder.finalize();
}
}
};
output.into()
}
/// Derive `Encode` for a restricted subset of all possible enum types.
///
/// Only supports:
/// - Enums with a single field per variant, where
/// - All fields are variably sized from an SSZ-perspective (not fixed size).
///
/// Will panic at compile-time if the single field requirement isn't met, but will panic *at run
/// time* if the variable-size requirement isn't met.
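///
/// ## Example
///
/// A sketch of the kind of enum this derive is intended for; the types are illustrative, and each
/// variant's single field must be variable-size from an SSZ perspective:
///
/// ```ignore
/// use ssz_derive::Encode;
///
/// #[derive(Encode)]
/// enum Wrapper {
///     Bytes(Vec<u8>),
///     Numbers(Vec<u64>),
/// }
/// ```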
fn ssz_encode_derive_enum(derive_input: &DeriveInput, enum_data: &DataEnum) -> TokenStream {
let name = &derive_input.ident;
let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl();
let (patterns, assert_exprs): (Vec<_>, Vec<_>) = enum_data
.variants
.iter()
.map(|variant| {
let variant_name = &variant.ident;
if variant.fields.len() != 1 {
panic!("ssz::Encode can only be derived for enums with 1 field per variant");
}
let pattern = quote! {
#name::#variant_name(ref inner)
};
let ty = &variant.fields.iter().next().unwrap().ty;
let type_assert = quote! {
!<#ty as ssz::Encode>::is_ssz_fixed_len()
};
(pattern, type_assert)
})
.unzip();
let output = quote! {
impl #impl_generics ssz::Encode for #name #ty_generics #where_clause {
fn is_ssz_fixed_len() -> bool {
assert!(
#(
#assert_exprs &&
)* true,
"not all enum variants are variably-sized"
);
false
}
fn ssz_bytes_len(&self) -> usize {
match self {
#(
#patterns => inner.ssz_bytes_len(),
)*
}
}
fn ssz_append(&self, buf: &mut Vec<u8>) {
match self {
#(
#patterns => inner.ssz_append(buf),
)*
}
}
}
};
output.into()
}
/// Returns true if the field has an attribute declaring it should not be deserialized.
///
/// The field attribute is: `#[ssz(skip_deserializing)]`
fn should_skip_deserializing(field: &syn::Field) -> bool {
field.attrs.iter().any(|attr| {
attr.path.is_ident("ssz")
&& attr.tokens.to_string().replace(" ", "") == "(skip_deserializing)"
})
}
/// Implements `ssz::Decode` for some `struct`.
///
/// Fields are decoded in the order they are defined.
///
/// ## Field attributes
///
/// - `#[ssz(skip_deserializing)]`: during deserialization the field will be instantiated from its
/// `Default` implementation. The decoder assumes the field was not serialized at all
/// (i.e., if the field was serialized, decoding will return an error rather than letting
/// `Default` silently override the serialized value).
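///
/// ## Example
///
/// A minimal round-trip sketch; the struct and its fields are illustrative, not part of this crate:
///
/// ```ignore
/// use ssz::{Decode, Encode};
/// use ssz_derive::{Decode, Encode};
///
/// #[derive(Debug, PartialEq, Encode, Decode)]
/// struct Foo {
///     a: u64,
///     b: Vec<u8>,
///     #[ssz(skip_serializing)]
///     #[ssz(skip_deserializing)]
///     cache: u64, // never serialized; filled in via `Default` when decoding
/// }
///
/// let foo = Foo { a: 42, b: vec![0, 1], cache: 0 };
/// let decoded = Foo::from_ssz_bytes(&foo.as_ssz_bytes()).unwrap();
/// assert_eq!(decoded, foo);
/// ```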
#[proc_macro_derive(Decode, attributes(ssz))]
pub fn ssz_decode_derive(input: TokenStream) -> TokenStream {
let item = parse_macro_input!(input as DeriveInput);
let name = &item.ident;
let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl();
let struct_data = match &item.data {
syn::Data::Struct(s) => s,
_ => panic!("ssz_derive only supports structs."),
};
let mut register_types = vec![];
let mut field_names = vec![];
let mut fixed_decodes = vec![];
let mut decodes = vec![];
let mut is_fixed_lens = vec![];
let mut fixed_lens = vec![];
// Build quotes for fields that should be deserialized and those that should be built from
// `Default`.
for field in &struct_data.fields {
match &field.ident {
Some(ref ident) => {
field_names.push(quote! {
#ident
});
if should_skip_deserializing(field) {
// Field should not be deserialized; use a `Default` impl to instantiate.
decodes.push(quote! {
let #ident = <_>::default();
});
fixed_decodes.push(quote! {
let #ident = <_>::default();
});
} else {
let ty = &field.ty;
register_types.push(quote! {
builder.register_type::<#ty>()?;
});
decodes.push(quote! {
let #ident = decoder.decode_next()?;
});
fixed_decodes.push(quote! {
let #ident = decode_field!(#ty);
});
is_fixed_lens.push(quote! {
<#ty as ssz::Decode>::is_ssz_fixed_len()
});
fixed_lens.push(quote! {
<#ty as ssz::Decode>::ssz_fixed_len()
});
}
}
_ => panic!("ssz_derive only supports named struct fields."),
};
}
let output = quote! {
impl #impl_generics ssz::Decode for #name #ty_generics #where_clause {
fn is_ssz_fixed_len() -> bool {
#(
#is_fixed_lens &&
)*
true
}
fn ssz_fixed_len() -> usize {
if <Self as ssz::Decode>::is_ssz_fixed_len() {
let mut len: usize = 0;
#(
len = len
.checked_add(#fixed_lens)
.expect("decode ssz_fixed_len overflow");
)*
len
} else {
ssz::BYTES_PER_LENGTH_OFFSET
}
}
fn from_ssz_bytes(bytes: &[u8]) -> std::result::Result<Self, ssz::DecodeError> {
if <Self as ssz::Decode>::is_ssz_fixed_len() {
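// Fixed-size struct: every field occupies a statically-known slice of `bytes`.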
if bytes.len() != <Self as ssz::Decode>::ssz_fixed_len() {
return Err(ssz::DecodeError::InvalidByteLength {
len: bytes.len(),
expected: <Self as ssz::Decode>::ssz_fixed_len(),
});
}
let mut start: usize = 0;
let mut end = start;
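// Decodes the next fixed-size field from `bytes`, advancing the `start`/`end` cursors.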
macro_rules! decode_field {
($type: ty) => {{
start = end;
end = end
.checked_add(<$type as ssz::Decode>::ssz_fixed_len())
.ok_or_else(|| ssz::DecodeError::OutOfBoundsByte {
i: usize::max_value()
})?;
let slice = bytes.get(start..end)
.ok_or_else(|| ssz::DecodeError::InvalidByteLength {
len: bytes.len(),
expected: end
})?;
<$type as ssz::Decode>::from_ssz_bytes(slice)?
}};
}
#(
#fixed_decodes
)*
Ok(Self {
#(
#field_names,
)*
})
} else {
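// Variable-size struct: delegate offset bookkeeping to `SszDecoderBuilder`.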
let mut builder = ssz::SszDecoderBuilder::new(bytes);
#(
#register_types
)*
let mut decoder = builder.build()?;
#(
#decodes
)*
Ok(Self {
#(
#field_names,
)*
})
}
}
}
};
output.into()
}