Merge latest master
commit 9e416a9bcd

.github/workflows/test-suite.yml (vendored, 11 lines changed)
@@ -75,11 +75,14 @@ jobs:
     - name: Typecheck benchmark code without running it
       run: make check-benches
+  install-lcli:
+    runs-on: ubuntu-latest
+    needs: cargo-fmt
+    steps:
+    - uses: actions/checkout@v1
+    - name: Get latest version of stable Rust
+      run: rustup update stable
+    - name: Build lcli via Makefile
+      run: make install-lcli
   clippy:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
     - uses: actions/checkout@v1
     - name: Lint code for quality and style with Clippy
       run: make lint
Cargo.lock (generated, 107 lines changed)
@@ -140,7 +140,7 @@ name = "atty"
 version = "0.2.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "hermit-abi 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hermit-abi 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
  "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -160,7 +160,7 @@ name = "backtrace"
 version = "0.3.46"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "backtrace-sys 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
+ "backtrace-sys 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)",
  "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-demangle 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -168,7 +168,7 @@ dependencies = [

 [[package]]
 name = "backtrace-sys"
-version = "0.1.35"
+version = "0.1.36"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -226,6 +226,7 @@ dependencies = [
  "proto_array_fork_choice 0.2.0",
  "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "safe_arith 0.1.0",
  "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -383,7 +384,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"

 [[package]]
 name = "bs58"
-version = "0.3.0"
+version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"

 [[package]]
@@ -477,7 +478,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "num-integer 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)",
  "num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
- "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)",
+ "time 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
@@ -622,7 +623,7 @@ name = "cookie"
 version = "0.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)",
+ "time 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)",
  "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

@@ -638,7 +639,7 @@ dependencies = [
  "publicsuffix 1.5.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)",
- "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)",
+ "time 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)",
  "try_from 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -681,7 +682,7 @@ dependencies = [
  "oorandom 11.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "plotters 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
  "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -898,7 +899,7 @@ dependencies = [
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
  "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -1009,7 +1010,7 @@ version = "0.1.0-alpha.3"
 source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3"
 dependencies = [
  "base64 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "bs58 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bs58 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "ed25519-dalek 1.0.0-pre.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "hex 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)",
@@ -1030,7 +1031,7 @@ dependencies = [
  "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

@@ -1042,7 +1043,7 @@ dependencies = [
  "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

@@ -1218,7 +1219,7 @@ dependencies = [
  "serde_yaml 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "tree_hash 0.1.1",
  "tree_hash_derive 0.2.0",
- "typenum 1.11.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "typenum 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
@@ -1444,7 +1445,7 @@ version = "0.1.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
@@ -1457,7 +1458,7 @@ name = "generic-array"
 version = "0.12.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "typenum 1.11.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "typenum 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
@@ -1563,7 +1564,7 @@ dependencies = [

 [[package]]
 name = "hermit-abi"
-version = "0.1.10"
+version = "0.1.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1651,8 +1652,8 @@ dependencies = [
  "language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "time 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)",
  "traitobject 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1676,7 +1677,7 @@ dependencies = [
  "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)",
+ "time 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio-buf 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1886,7 +1887,7 @@ dependencies = [
  "genesis 0.2.0",
  "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_yaml 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "simple_logger 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1975,7 +1976,7 @@ version = "0.13.2"
 source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3"
 dependencies = [
  "asn1_der 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "bs58 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bs58 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
  "ed25519-dalek 1.0.0-pre.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "failure 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2071,7 +2072,7 @@ name = "libp2p-floodsub"
 version = "0.13.1"
 source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3"
 dependencies = [
- "bs58 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bs58 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
  "cuckoofilter 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2370,7 +2371,7 @@ dependencies = [
  "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "sha2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "subtle 2.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "typenum 1.11.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "typenum 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
@@ -2517,6 +2518,7 @@ dependencies = [
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "quickcheck 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "quickcheck_macros 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "safe_arith 0.1.0",
 ]

 [[package]]
@@ -2748,10 +2750,10 @@ dependencies = [

 [[package]]
 name = "num_cpus"
-version = "1.12.0"
+version = "1.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "hermit-abi 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hermit-abi 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

@@ -2834,7 +2836,7 @@ version = "0.6.0"
 source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3"
 dependencies = [
  "arrayref 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "bs58 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bs58 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
  "data-encoding 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -3083,7 +3085,7 @@ dependencies = [
  "error-chain 0.12.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

@@ -3327,7 +3329,7 @@ dependencies = [
  "crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
@@ -3355,7 +3357,7 @@ dependencies = [

 [[package]]
 name = "regex"
-version = "1.3.6"
+version = "1.3.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "aho-corasick 0.7.10 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -3425,7 +3427,7 @@ dependencies = [
  "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_urlencoded 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)",
+ "time 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -3573,6 +3575,10 @@ name = "ryu"
 version = "1.0.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"

+[[package]]
+name = "safe_arith"
+version = "0.1.0"
+
 [[package]]
 name = "safemem"
 version = "0.3.3"
@@ -3583,7 +3589,7 @@ name = "same-file"
 version = "1.0.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "winapi-util 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
@@ -3839,7 +3845,7 @@ name = "slog-kvfilter"
 version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

@@ -3894,7 +3900,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "chrono 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "libflate 0.1.27 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)",
  "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -3993,6 +3999,7 @@ dependencies = [
  "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "merkle_proof 0.2.0",
  "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "safe_arith 0.1.0",
  "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_yaml 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -4156,7 +4163,7 @@ name = "termcolor"
 version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "winapi-util 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
@@ -4185,11 +4192,10 @@ dependencies = [

 [[package]]
 name = "time"
-version = "0.1.42"
+version = "0.1.43"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
- "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)",
  "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

@@ -4231,7 +4237,7 @@ dependencies = [
  "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
  "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
  "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio-current-thread 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -4354,7 +4360,7 @@ dependencies = [
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -4408,7 +4414,7 @@ dependencies = [
  "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -4572,7 +4578,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"

 [[package]]
 name = "typenum"
-version = "1.11.2"
+version = "1.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"

 [[package]]
@@ -4600,6 +4606,7 @@ dependencies = [
  "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand_xorshift 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "safe_arith 0.1.0",
  "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -4801,7 +4808,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi-util 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
@@ -5069,7 +5076,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"

 [[package]]
 name = "winapi-util"
-version = "0.1.4"
+version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -5190,7 +5197,7 @@ dependencies = [
 "checksum autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2"
 "checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
 "checksum backtrace 0.3.46 (registry+https://github.com/rust-lang/crates.io-index)" = "b1e692897359247cc6bb902933361652380af0f1b7651ae5c5013407f30e109e"
-"checksum backtrace-sys 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)" = "7de8aba10a69c8e8d7622c5710229485ec32e9d55fdad160ea559c086fdcd118"
+"checksum backtrace-sys 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)" = "78848718ee1255a2485d1309ad9cdecfc2e7d0362dd11c6829364c6b35ae1bc7"
 "checksum base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e"
 "checksum base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7"
 "checksum base64 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7d5ca2cd0adc3f48f9e9ea5a6bbdf9ccc0bfade884847e484d452414c7ccffb3"
@@ -5206,7 +5213,7 @@ dependencies = [
 "checksum block-cipher-trait 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1c924d49bd09e7c06003acda26cd9742e796e34282ec6c1189404dee0c1f4774"
 "checksum block-padding 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5"
 "checksum bs58 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "c95ee6bba9d950218b6cc910cf62bc9e0a171d0f4537e3627b0f54d08549b188"
-"checksum bs58 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b170cd256a3f9fa6b9edae3e44a7dfdfc77e8124dbc3e2612d75f9c3e2396dae"
+"checksum bs58 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb"
 "checksum bstr 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "2889e6d50f394968c8bf4240dc3f2a7eb4680844d27308f798229ac9d4725f41"
 "checksum bumpalo 3.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "12ae9db68ad7fac5fe51304d20f016c911539251075a214f8e663babefa35187"
 "checksum byte-slice-cast 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "b0a5e3906bcbf133e33c1d4d95afc664ad37fbdb9f6568d8043e7ea8c27d93d3"
@@ -5299,7 +5306,7 @@ dependencies = [
 "checksum hashbrown 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e1de41fb8dba9714efd92241565cdff73f78508c95697dd56787d3cba27e2353"
 "checksum hashbrown 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8e6073d0ca812575946eb5f35ff68dbe519907b25c42530389ff946dc84c6ead"
 "checksum heapsize 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1679e6ea370dee694f91f1dc469bf94cf8f52051d147aec3e1f9497c6fc22461"
-"checksum hermit-abi 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "725cf19794cf90aa94e65050cb4191ff5d8fa87a498383774c47b332e3af952e"
+"checksum hermit-abi 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8a0d737e0f947a1864e93d33fdef4af8445a00d1ed8dc0c8ddb73139ea6abf15"
 "checksum hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "805026a5d0141ffc30abb3be3173848ad46a1b1664fe632428479619a3644d77"
 "checksum hex 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35"
 "checksum hkdf 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3fa08a006102488bd9cd5b8013aabe84955cf5ae22e304c2caf655b633aefae3"
@@ -5391,7 +5398,7 @@ dependencies = [
 "checksum num-bigint 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304"
 "checksum num-integer 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba"
 "checksum num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096"
-"checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6"
+"checksum num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3"
 "checksum oorandom 11.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ebcec7c9c2a95cacc7cd0ecb89d8a8454eca13906f6deb55258ffff0adeb9405"
 "checksum opaque-debug 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c"
 "checksum openssl 0.10.29 (registry+https://github.com/rust-lang/crates.io-index)" = "cee6d85f4cb4c4f59a6a85d5b68a233d280c82e29e822913b9c8b129fbf20bdd"
@@ -5453,7 +5460,7 @@ dependencies = [
 "checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
 "checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84"
 "checksum redox_users 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431"
-"checksum regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7f6946991529684867e47d86474e3a6d0c0ab9b82d5821e314b1ede31fa3a4b3"
+"checksum regex 1.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "a6020f034922e3194c711b82a627453881bc4682166cabb07134a10c26ba7692"
 "checksum regex-automata 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4"
 "checksum regex-syntax 0.6.17 (registry+https://github.com/rust-lang/crates.io-index)" = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae"
 "checksum remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e"
@@ -5528,7 +5535,7 @@ dependencies = [
 "checksum termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f"
 "checksum textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
 "checksum thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14"
-"checksum time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f"
+"checksum time 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)" = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438"
 "checksum tiny-keccak 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d8a021c69bb74a44ccedb824a046447e2c84a01df9e5c20779750acb38e11b2"
 "checksum tinytemplate 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "57a3c6667d3e65eb1bc3aed6fd14011c6cbc3a0665218ab7f5daf040b9ec371a"
 "checksum tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)" = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6"
@@ -5560,7 +5567,7 @@ dependencies = [
 "checksum try_from 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "283d3b89e1368717881a9d51dad843cc435380d8109c9e47d38780a324698d8b"
 "checksum twofish 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712d261e83e727c8e2dbb75dacac67c36e35db36a958ee504f2164fc052434e1"
 "checksum typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1410f6f91f21d1612654e7cc69193b0334f909dcf2c790c4826254fbb86f8887"
-"checksum typenum 1.11.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6d2783fe2d6b8c1101136184eb41be8b1ad379e4657050b8aaff0c79ee7575f9"
+"checksum typenum 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33"
 "checksum uint 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2143cded94692b156c356508d92888acc824db5bffc0b4089732264c6fcf86d4"
 "checksum uint 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e75a4cdd7b87b28840dba13c483b9a88ee6bbf16ba5c951ee1ecfcf723078e0d"
 "checksum unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7f4765f83163b74f957c797ad9253caf97f103fb064d3999aea9568d09fc8a33"
@@ -5603,7 +5610,7 @@ dependencies = [
 "checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6"
 "checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
 "checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
-"checksum winapi-util 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "fa515c5163a99cc82bab70fd3bfdd36d827be85de63737b40fcef2ce084a436e"
+"checksum winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
 "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
 "checksum winreg 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9"
 "checksum ws 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c51a2c47b5798ccc774ffb93ff536aec7c4275d722fd9c740c83cdd1af1f2d94"
Cargo.toml
@@ -18,6 +18,7 @@ members = [
     "eth2/utils/lighthouse_metrics",
     "eth2/utils/merkle_proof",
     "eth2/utils/int_to_bytes",
+    "eth2/utils/safe_arith",
     "eth2/utils/serde_hex",
     "eth2/utils/slot_clock",
     "eth2/utils/rest_types",
Makefile (5 lines changed)
@@ -45,6 +45,11 @@ test: test-release
 # Runs the entire test suite, downloading test vectors if required.
 test-full: cargo-fmt test-release test-debug test-ef
 
+# Lints the code for bad style and potentially unsafe arithmetic using Clippy.
+# Clippy lints are opt-in per-crate for now, which is why we allow all by default.
+lint:
+	cargo clippy --all -- -A clippy::all
+
 # Runs the makefile in the `ef_tests` repo.
 #
 # May download and extract an archive of test vectors from the ethereum
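The new `lint` target allows every Clippy lint workspace-wide (`-A clippy::all`), so stricter checking happens crate by crate. As a hedged illustration (not part of this diff; `clippy::integer_arithmetic` was a real Clippy lint at the time, chosen here only because it matches this commit's safe-arithmetic theme), a crate root could opt back in to a single lint like so:

    // lib.rs of a hypothetical crate opting back in to one lint:
    #![warn(clippy::integer_arithmetic)]

    pub fn increment(x: u64) -> Option<u64> {
        // A bare `x + 1` would trigger the lint; `checked_add` satisfies it.
        x.checked_add(1)
    }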
@ -42,6 +42,7 @@ rand = "0.7.2"
|
||||
proto_array_fork_choice = { path = "../../eth2/proto_array_fork_choice" }
|
||||
lru = "0.4.3"
|
||||
tempfile = "3.1.0"
|
||||
safe_arith = { path = "../../eth2/utils/safe_arith" }
|
||||
|
||||
[dev-dependencies]
|
||||
lazy_static = "1.4.0"
|
||||
|
beacon_node/beacon_chain/src/beacon_chain.rs
@@ -8,6 +8,7 @@ use crate::events::{EventHandler, EventKind};
 use crate::fork_choice::{Error as ForkChoiceError, ForkChoice};
 use crate::head_tracker::HeadTracker;
 use crate::metrics;
+use crate::migrate::Migrate;
 use crate::naive_aggregation_pool::{Error as NaiveAggregationError, NaiveAggregationPool};
 use crate::persisted_beacon_chain::PersistedBeaconChain;
 use crate::shuffling_cache::ShufflingCache;
@@ -28,12 +29,16 @@ use state_processing::{
 };
 use std::borrow::Cow;
 use std::cmp::Ordering;
+use std::collections::HashMap;
 use std::collections::HashSet;
+use std::io::prelude::*;
 use std::sync::Arc;
 use std::time::{Duration, Instant};
 use store::iter::{
-    BlockRootsIterator, ReverseBlockRootIterator, ReverseStateRootIterator, StateRootsIterator,
+    BlockRootsIterator, ParentRootBlockIterator, ReverseBlockRootIterator,
+    ReverseStateRootIterator, StateRootsIterator,
 };
-use store::{Error as DBError, Migrate, Store};
+use store::{Error as DBError, Store};
 use types::*;
 
 // Text included in blocks.
@@ -159,7 +164,7 @@ pub struct HeadInfo {
 
 pub trait BeaconChainTypes: Send + Sync + 'static {
     type Store: store::Store<Self::EthSpec>;
-    type StoreMigrator: store::Migrate<Self::Store, Self::EthSpec>;
+    type StoreMigrator: Migrate<Self::Store, Self::EthSpec>;
     type SlotClock: slot_clock::SlotClock;
     type Eth1Chain: Eth1ChainBackend<Self::EthSpec, Self::Store>;
     type EthSpec: types::EthSpec;
@@ -199,7 +204,7 @@ pub struct BeaconChain<T: BeaconChainTypes> {
     /// A handler for events generated by the beacon chain.
     pub event_handler: T::EventHandler,
     /// Used to track the heads of the beacon chain.
-    pub(crate) head_tracker: HeadTracker,
+    pub(crate) head_tracker: Arc<HeadTracker>,
     /// A cache dedicated to block processing.
     pub(crate) snapshot_cache: TimeoutRwLock<SnapshotCache<T::EthSpec>>,
     /// Caches the shuffling for a given epoch and state root.
@@ -497,6 +502,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         self.head_tracker.heads()
     }
 
+    pub fn knows_head(&self, block_hash: &SignedBeaconBlockHash) -> bool {
+        self.head_tracker.contains_head((*block_hash).into())
+    }
+
     /// Returns the `BeaconState` at the given slot.
     ///
     /// Returns `None` when the state is not found in the database or there is an error skipping
@ -1230,6 +1239,76 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Check that the shuffling at `block_root` is equal to one of the shufflings of `state`.
|
||||
///
|
||||
/// The `target_epoch` argument determines which shuffling to check compatibility with, it
|
||||
/// should be equal to the current or previous epoch of `state`, or else `false` will be
|
||||
/// returned.
|
||||
///
|
||||
/// The compatibility check is designed to be fast: we check that the block that
|
||||
/// determined the RANDAO mix for the `target_epoch` matches the ancestor of the block
|
||||
/// identified by `block_root` (at that slot).
|
||||
pub fn shuffling_is_compatible(
|
||||
&self,
|
||||
block_root: &Hash256,
|
||||
target_epoch: Epoch,
|
||||
state: &BeaconState<T::EthSpec>,
|
||||
) -> bool {
|
||||
let slots_per_epoch = T::EthSpec::slots_per_epoch();
|
||||
let shuffling_lookahead = 1 + self.spec.min_seed_lookahead.as_u64();
|
||||
|
||||
// Shuffling can't have changed if we're in the first few epochs
|
||||
if state.current_epoch() < shuffling_lookahead {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Otherwise the shuffling is determined by the block at the end of the target epoch
|
||||
// minus the shuffling lookahead (usually 2). We call this the "pivot".
|
||||
let pivot_slot =
|
||||
if target_epoch == state.previous_epoch() || target_epoch == state.current_epoch() {
|
||||
(target_epoch - shuffling_lookahead).end_slot(slots_per_epoch)
|
||||
} else {
|
||||
return false;
|
||||
};
|
||||
|
||||
let state_pivot_block_root = match state.get_block_root(pivot_slot) {
|
||||
Ok(root) => *root,
|
||||
Err(e) => {
|
||||
warn!(
|
||||
&self.log,
|
||||
"Missing pivot block root for attestation";
|
||||
"slot" => pivot_slot,
|
||||
"error" => format!("{:?}", e),
|
||||
);
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
// Use fork choice's view of the block DAG to quickly evaluate whether the attestation's
|
||||
// pivot block is the same as the current state's pivot block. If it is, then the
|
||||
// attestation's shuffling is the same as the current state's.
|
||||
// To account for skipped slots, find the first block at *or before* the pivot slot.
|
||||
let fork_choice_lock = self.fork_choice.core_proto_array();
|
||||
let pivot_block_root = fork_choice_lock
|
||||
.iter_block_roots(block_root)
|
||||
.find(|(_, slot)| *slot <= pivot_slot)
|
||||
.map(|(block_root, _)| block_root);
|
||||
drop(fork_choice_lock);
|
||||
|
||||
match pivot_block_root {
|
||||
Some(root) => root == state_pivot_block_root,
|
||||
None => {
|
||||
debug!(
|
||||
&self.log,
|
||||
"Discarding attestation because of missing ancestor";
|
||||
"pivot_slot" => pivot_slot.as_u64(),
|
||||
"block_root" => format!("{:?}", block_root),
|
||||
);
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Accept some exit and queue it for inclusion in an appropriate block.
|
||||
pub fn process_voluntary_exit(
|
||||
&self,
|
||||
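To make the pivot arithmetic above concrete, here is a small self-contained sketch (assuming mainnet-style parameters, 32 slots per epoch and MIN_SEED_LOOKAHEAD = 1, so the lookahead is 2; the real code uses the `Slot`/`Epoch` newtypes from `types`): the shuffling for target epoch 10 is fixed by the block at the last slot of epoch 8.

    fn pivot_slot(target_epoch: u64) -> u64 {
        let slots_per_epoch = 32;
        let shuffling_lookahead = 1 + 1; // 1 + MIN_SEED_LOOKAHEAD
        let pivot_epoch = target_epoch - shuffling_lookahead;
        (pivot_epoch + 1) * slots_per_epoch - 1 // end slot of the pivot epoch
    }

    fn main() {
        assert_eq!(pivot_slot(10), 287); // epoch 8 ends at slot 287
    }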
@ -1749,6 +1828,21 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
.deposits_for_block_inclusion(&state, ð1_data, &self.spec)?
|
||||
.into();
|
||||
|
||||
// Map from attestation head block root to shuffling compatibility.
|
||||
// Used to memoize the `attestation_shuffling_is_compatible` function.
|
||||
let mut shuffling_filter_cache = HashMap::new();
|
||||
let attestation_filter = |att: &&Attestation<T::EthSpec>| -> bool {
|
||||
*shuffling_filter_cache
|
||||
.entry((att.data.beacon_block_root, att.data.target.epoch))
|
||||
.or_insert_with(|| {
|
||||
self.shuffling_is_compatible(
|
||||
&att.data.beacon_block_root,
|
||||
att.data.target.epoch,
|
||||
&state,
|
||||
)
|
||||
})
|
||||
};
|
||||
|
||||
let mut block = SignedBeaconBlock {
|
||||
message: BeaconBlock {
|
||||
slot: state.slot,
|
||||
@ -1763,7 +1857,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
attester_slashings: attester_slashings.into(),
|
||||
attestations: self
|
||||
.op_pool
|
||||
.get_attestations(&state, &self.spec)
|
||||
.get_attestations(&state, attestation_filter, &self.spec)
|
||||
.map_err(BlockProductionError::OpPoolError)?
|
||||
.into(),
|
||||
deposits,
|
||||
@ -1821,6 +1915,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
let beacon_block_root = self.fork_choice.find_head(&self)?;
|
||||
|
||||
let current_head = self.head_info()?;
|
||||
let old_finalized_root = current_head.finalized_checkpoint.root;
|
||||
|
||||
if beacon_block_root == current_head.block_root {
|
||||
return Ok(());
|
||||
@ -1948,7 +2043,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
});
|
||||
|
||||
if new_finalized_epoch != old_finalized_epoch {
|
||||
self.after_finalization(old_finalized_epoch, finalized_root)?;
|
||||
self.after_finalization(
|
||||
old_finalized_epoch,
|
||||
finalized_root,
|
||||
old_finalized_root.into(),
|
||||
)?;
|
||||
}
|
||||
|
||||
let _ = self.event_handler.register(EventKind::BeaconHeadChanged {
|
||||
@ -1977,6 +2076,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
&self,
|
||||
old_finalized_epoch: Epoch,
|
||||
finalized_block_root: Hash256,
|
||||
old_finalized_root: SignedBeaconBlockHash,
|
||||
) -> Result<(), Error> {
|
||||
let finalized_block = self
|
||||
.store
|
||||
@ -2016,10 +2116,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
|
||||
// TODO: configurable max finality distance
|
||||
let max_finality_distance = 0;
|
||||
self.store_migrator.freeze_to_state(
|
||||
self.store_migrator.process_finalization(
|
||||
finalized_block.state_root,
|
||||
finalized_state,
|
||||
max_finality_distance,
|
||||
Arc::clone(&self.head_tracker),
|
||||
old_finalized_root,
|
||||
finalized_block_root.into(),
|
||||
);
|
||||
|
||||
let _ = self.event_handler.register(EventKind::BeaconFinalization {
|
||||
@ -2103,6 +2206,100 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
self.slot_clock
|
||||
.duration_to_slot(epoch.start_slot(T::EthSpec::slots_per_epoch()))
|
||||
}
|
||||
|
||||
pub fn dump_as_dot<W: Write>(&self, output: &mut W) {
|
||||
let canonical_head_hash = self
|
||||
.canonical_head
|
||||
.try_read_for(HEAD_LOCK_TIMEOUT)
|
||||
.ok_or_else(|| Error::CanonicalHeadLockTimeout)
|
||||
.unwrap()
|
||||
.beacon_block_root;
|
||||
let mut visited: HashSet<Hash256> = HashSet::new();
|
||||
let mut finalized_blocks: HashSet<Hash256> = HashSet::new();
|
||||
|
||||
let genesis_block_hash = Hash256::zero();
|
||||
write!(output, "digraph beacon {{\n").unwrap();
|
||||
write!(output, "\t_{:?}[label=\"genesis\"];\n", genesis_block_hash).unwrap();
|
||||
|
||||
// Canonical head needs to be processed first as otherwise finalized blocks aren't detected
|
||||
// properly.
|
||||
let heads = {
|
||||
let mut heads = self.heads();
|
||||
let canonical_head_index = heads
|
||||
.iter()
|
||||
.position(|(block_hash, _)| *block_hash == canonical_head_hash)
|
||||
.unwrap();
|
||||
let (canonical_head_hash, canonical_head_slot) =
|
||||
heads.swap_remove(canonical_head_index);
|
||||
heads.insert(0, (canonical_head_hash, canonical_head_slot));
|
||||
heads
|
||||
};
|
||||
|
||||
for (head_hash, _head_slot) in heads {
|
||||
for (block_hash, signed_beacon_block) in
|
||||
ParentRootBlockIterator::new(&*self.store, head_hash)
|
||||
{
|
||||
if visited.contains(&block_hash) {
|
||||
break;
|
||||
}
|
||||
visited.insert(block_hash);
|
||||
|
||||
if signed_beacon_block.slot() % T::EthSpec::slots_per_epoch() == 0 {
|
||||
let block = self.get_block(&block_hash).unwrap().unwrap();
|
||||
let state = self
|
||||
.get_state(&block.state_root(), Some(block.slot()))
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
finalized_blocks.insert(state.finalized_checkpoint.root);
|
||||
}
|
||||
|
||||
if block_hash == canonical_head_hash {
|
||||
write!(
|
||||
output,
|
||||
"\t_{:?}[label=\"{} ({})\" shape=box3d];\n",
|
||||
block_hash,
|
||||
block_hash,
|
||||
signed_beacon_block.slot()
|
||||
)
|
||||
.unwrap();
|
||||
} else if finalized_blocks.contains(&block_hash) {
|
||||
write!(
|
||||
output,
|
||||
"\t_{:?}[label=\"{} ({})\" shape=Msquare];\n",
|
||||
block_hash,
|
||||
block_hash,
|
||||
signed_beacon_block.slot()
|
||||
)
|
||||
.unwrap();
|
||||
} else {
|
||||
write!(
|
||||
output,
|
||||
"\t_{:?}[label=\"{} ({})\" shape=box];\n",
|
||||
block_hash,
|
||||
block_hash,
|
||||
signed_beacon_block.slot()
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
write!(
|
||||
output,
|
||||
"\t_{:?} -> _{:?};\n",
|
||||
block_hash,
|
||||
signed_beacon_block.parent_root()
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
write!(output, "}}\n").unwrap();
|
||||
}
|
||||
|
||||
// Used for debugging
|
||||
#[allow(dead_code)]
|
||||
pub fn dump_dot_file(&self, file_name: &str) {
|
||||
let mut file = std::fs::File::create(file_name).unwrap();
|
||||
self.dump_as_dot(&mut file);
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> Drop for BeaconChain<T> {
|
||||
|
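For reference, the `dump_as_dot` helper added above emits ordinary Graphviz: the canonical head is drawn as `box3d`, finalized blocks as `Msquare`, all other blocks as `box`, with an edge from each block to its parent. A file written via `dump_dot_file("beacon.dot")` should therefore be renderable with the standard Graphviz tooling (e.g. `dot -Tpng beacon.dot`); that invocation is an assumption for illustration, not part of this diff.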
beacon_node/beacon_chain/src/builder.rs
@@ -5,6 +5,7 @@ use crate::eth1_chain::{CachingEth1Backend, SszEth1};
 use crate::events::NullEventHandler;
 use crate::fork_choice::SszForkChoice;
 use crate::head_tracker::HeadTracker;
+use crate::migrate::Migrate;
 use crate::persisted_beacon_chain::PersistedBeaconChain;
 use crate::shuffling_cache::ShufflingCache;
 use crate::snapshot_cache::{SnapshotCache, DEFAULT_SNAPSHOT_CACHE_SIZE};
@@ -47,7 +48,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
 for Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
 where
     TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
+    TStoreMigrator: Migrate<TStore, TEthSpec> + 'static,
     TSlotClock: SlotClock + 'static,
     TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
     TEthSpec: EthSpec + 'static,
@@ -97,7 +98,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
 >
 where
     TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
+    TStoreMigrator: Migrate<TStore, TEthSpec> + 'static,
     TSlotClock: SlotClock + 'static,
     TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
     TEthSpec: EthSpec + 'static,
@@ -442,7 +443,7 @@ where
             event_handler: self
                 .event_handler
                 .ok_or_else(|| "Cannot build without an event handler".to_string())?,
-            head_tracker: self.head_tracker.unwrap_or_default(),
+            head_tracker: Arc::new(self.head_tracker.unwrap_or_default()),
             snapshot_cache: TimeoutRwLock::new(SnapshotCache::new(
                 DEFAULT_SNAPSHOT_CACHE_SIZE,
                 canonical_head,
@@ -475,7 +476,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
 >
 where
     TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
+    TStoreMigrator: Migrate<TStore, TEthSpec> + 'static,
     TSlotClock: SlotClock + 'static,
     TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
     TEthSpec: EthSpec + 'static,
@@ -545,7 +546,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEthSpec, TEventHandler>
 >
 where
     TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
+    TStoreMigrator: Migrate<TStore, TEthSpec> + 'static,
     TSlotClock: SlotClock + 'static,
     TEthSpec: EthSpec + 'static,
     TEventHandler: EventHandler<TEthSpec> + 'static,
@@ -583,7 +584,7 @@ impl<TStore, TStoreMigrator, TEth1Backend, TEthSpec, TEventHandler>
 >
 where
     TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
+    TStoreMigrator: Migrate<TStore, TEthSpec> + 'static,
     TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
     TEthSpec: EthSpec + 'static,
     TEventHandler: EventHandler<TEthSpec> + 'static,
@@ -622,7 +623,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec>
 >
 where
     TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
+    TStoreMigrator: Migrate<TStore, TEthSpec> + 'static,
     TSlotClock: SlotClock + 'static,
     TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
     TEthSpec: EthSpec + 'static,
@@ -654,12 +655,12 @@ fn genesis_block<T: EthSpec>(
 #[cfg(test)]
 mod test {
     use super::*;
+    use crate::migrate::{MemoryStore, NullMigrator};
     use eth2_hashing::hash;
    use genesis::{generate_deterministic_keypairs, interop_genesis_state};
     use sloggers::{null::NullLoggerBuilder, Build};
     use ssz::Encode;
     use std::time::Duration;
-    use store::{migrate::NullMigrator, MemoryStore};
     use tempfile::tempdir;
     use types::{EthSpec, MinimalEthSpec, Slot};
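The repeated bound rewrites in this file are one mechanical refactor: the `Migrate` trait now lives in the beacon chain crate's own `migrate` module (added later in this commit) rather than in `store`. A reduced, self-contained sketch of the same wiring, with every name hypothetical except `Migrate`:

    mod store {
        pub trait Store {}
    }

    mod migrate {
        // The trait now lives beside the pruning logic.
        pub trait Migrate<S: super::store::Store> {}
    }

    // A builder bound written against the new trait location:
    #[allow(dead_code)]
    fn assert_bounds<S, M>()
    where
        S: store::Store + 'static,
        M: migrate::Migrate<S> + 'static, // previously `store::Migrate<S>`
    {
    }

    fn main() {}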
beacon_node/beacon_chain/src/eth1_chain.rs
@@ -44,6 +44,14 @@ pub enum Error {
     ///
     /// The eth1 caches are stale, or a junk value was voted into the chain.
     UnknownPreviousEth1BlockHash,
+    /// An arithmetic error occurred.
+    ArithError(safe_arith::ArithError),
+}
+
+impl From<safe_arith::ArithError> for Error {
+    fn from(e: safe_arith::ArithError) -> Self {
+        Self::ArithError(e)
+    }
 }
 
 #[derive(Encode, Decode, Clone)]
@@ -371,7 +379,7 @@ impl<T: EthSpec, S: Store<T>> Eth1ChainBackend<T, S> for CachingEth1Backend<T, S>
         _spec: &ChainSpec,
     ) -> Result<Vec<Deposit>, Error> {
         let deposit_index = state.eth1_deposit_index;
-        let deposit_count = if let Some(new_eth1_data) = get_new_eth1_data(state, eth1_data_vote) {
+        let deposit_count = if let Some(new_eth1_data) = get_new_eth1_data(state, eth1_data_vote)? {
             new_eth1_data.deposit_count
         } else {
             state.eth1_data.deposit_count
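The new `ArithError` variant and its `From` impl let checked-arithmetic failures propagate with `?`, as the now-fallible `get_new_eth1_data(...)?` call above suggests. A minimal self-contained sketch of the pattern (`ArithError` and `safe_add` here are simplified stand-ins for the `safe_arith` crate's types, which are assumed, not quoted from this diff):

    #[derive(Debug)]
    struct ArithError;

    #[derive(Debug)]
    enum Error {
        ArithError(ArithError),
    }

    impl From<ArithError> for Error {
        fn from(e: ArithError) -> Self {
            Self::ArithError(e)
        }
    }

    fn safe_add(a: u64, b: u64) -> Result<u64, ArithError> {
        a.checked_add(b).ok_or(ArithError)
    }

    fn next_deposit_index(index: u64) -> Result<u64, Error> {
        // `?` uses the From impl to convert ArithError into Error.
        Ok(safe_add(index, 1)?)
    }

    fn main() {
        assert!(next_deposit_index(u64::MAX).is_err());
    }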
beacon_node/beacon_chain/src/head_tracker.rs
@@ -25,11 +25,22 @@ impl HeadTracker {
     /// the upstream user.
     pub fn register_block<E: EthSpec>(&self, block_root: Hash256, block: &BeaconBlock<E>) {
         let mut map = self.0.write();
-
         map.remove(&block.parent_root);
         map.insert(block_root, block.slot);
     }
 
+    /// Removes abandoned head.
+    pub fn remove_head(&self, block_root: Hash256) {
+        let mut map = self.0.write();
+        debug_assert!(map.contains_key(&block_root));
+        map.remove(&block_root);
+    }
+
+    /// Returns true iff `block_root` is a recognized head.
+    pub fn contains_head(&self, block_root: Hash256) -> bool {
+        self.0.read().contains_key(&block_root)
+    }
+
     /// Returns the list of heads in the chain.
     pub fn heads(&self) -> Vec<(Hash256, Slot)> {
         self.0
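The tracker's invariant is that the map only ever holds leaf blocks: registering a block evicts its parent, and the new `remove_head` prunes abandoned leaves. A minimal self-contained model of that behaviour (toy `u64` roots; the real `HeadTracker` wraps the map in a lock and uses `Hash256`/`Slot`):

    use std::collections::HashMap;

    #[derive(Default)]
    struct Heads(HashMap<u64, u64>); // block root -> slot

    impl Heads {
        fn register_block(&mut self, root: u64, parent: u64, slot: u64) {
            self.0.remove(&parent); // the parent stops being a tip
            self.0.insert(root, slot);
        }
        fn contains_head(&self, root: u64) -> bool {
            self.0.contains_key(&root)
        }
    }

    fn main() {
        let mut heads = Heads::default();
        heads.register_block(1, 0, 10);
        heads.register_block(2, 1, 11); // extends block 1
        assert!(!heads.contains_head(1));
        assert!(heads.contains_head(2));
    }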
beacon_node/beacon_chain/src/lib.rs
@@ -12,6 +12,7 @@ pub mod events;
 mod fork_choice;
 mod head_tracker;
 mod metrics;
+pub mod migrate;
 mod naive_aggregation_pool;
 mod persisted_beacon_chain;
 mod shuffling_cache;
beacon_node/beacon_chain/src/migrate.rs (new file, 349 lines)
@ -0,0 +1,349 @@
|
||||
use crate::errors::BeaconChainError;
|
||||
use crate::head_tracker::HeadTracker;
|
||||
use parking_lot::Mutex;
|
||||
use slog::{debug, warn, Logger};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::iter::FromIterator;
|
||||
use std::mem;
|
||||
use std::sync::mpsc;
use std::sync::Arc;
use std::thread;
use store::iter::{ParentRootBlockIterator, RootsIterator};
use store::{hot_cold_store::HotColdDBError, Error, SimpleDiskStore, Store};
pub use store::{DiskStore, MemoryStore};
use types::*;
use types::{BeaconState, EthSpec, Hash256, Slot};

/// Trait for migration processes that update the database upon finalization.
pub trait Migrate<S: Store<E>, E: EthSpec>: Send + Sync + 'static {
    fn new(db: Arc<S>, log: Logger) -> Self;

    fn process_finalization(
        &self,
        _state_root: Hash256,
        _new_finalized_state: BeaconState<E>,
        _max_finality_distance: u64,
        _head_tracker: Arc<HeadTracker>,
        _old_finalized_block_hash: SignedBeaconBlockHash,
        _new_finalized_block_hash: SignedBeaconBlockHash,
    ) {
    }

    /// Traverses live heads and prunes blocks and states of chains that we know can't be built
    /// upon because finalization would prohibit it. This is an optimisation intended to save disk
    /// space.
    ///
    /// Assumptions:
    /// * It is called after every finalization.
    fn prune_abandoned_forks(
        store: Arc<S>,
        head_tracker: Arc<HeadTracker>,
        old_finalized_block_hash: SignedBeaconBlockHash,
        new_finalized_block_hash: SignedBeaconBlockHash,
        new_finalized_slot: Slot,
    ) -> Result<(), BeaconChainError> {
        let old_finalized_slot = store
            .get_block(&old_finalized_block_hash.into())?
            .ok_or_else(|| BeaconChainError::MissingBeaconBlock(old_finalized_block_hash.into()))?
            .slot();

        // Collect hashes from new_finalized_block back to old_finalized_block (inclusive)
        let mut found_block = false; // hack for `take_until`
        let newly_finalized_blocks: HashMap<SignedBeaconBlockHash, Slot> = HashMap::from_iter(
            ParentRootBlockIterator::new(&*store, new_finalized_block_hash.into())
                .take_while(|(block_hash, _)| {
                    if found_block {
                        false
                    } else {
                        found_block |= *block_hash == old_finalized_block_hash.into();
                        true
                    }
                })
                .map(|(block_hash, block)| (block_hash.into(), block.slot())),
        );

        // We don't know which blocks are shared among abandoned chains, so we buffer and delete
        // everything in one fell swoop.
        let mut abandoned_blocks: HashSet<SignedBeaconBlockHash> = HashSet::new();
        let mut abandoned_states: HashSet<(Slot, BeaconStateHash)> = HashSet::new();
        let mut abandoned_heads: HashSet<Hash256> = HashSet::new();

        for (head_hash, head_slot) in head_tracker.heads() {
            let mut potentially_abandoned_head: Option<Hash256> = Some(head_hash);
            let mut potentially_abandoned_blocks: Vec<(
                Slot,
                Option<SignedBeaconBlockHash>,
                Option<BeaconStateHash>,
            )> = Vec::new();

            let head_state_hash = store
                .get_block(&head_hash)?
                .ok_or_else(|| BeaconStateError::MissingBeaconBlock(head_hash.into()))?
                .state_root();

            let iterator = std::iter::once((head_hash, head_state_hash, head_slot))
                .chain(RootsIterator::from_block(Arc::clone(&store), head_hash)?);
            for (block_hash, state_hash, slot) in iterator {
                if slot < old_finalized_slot {
                    // We must assume here any candidate chains include old_finalized_block_hash,
                    // i.e. there aren't any forks starting at a block that is a strict ancestor of
                    // old_finalized_block_hash.
                    break;
                }
                match newly_finalized_blocks.get(&block_hash.into()).copied() {
                    // Block is not finalized, mark it and its state for deletion
                    None => {
                        potentially_abandoned_blocks.push((
                            slot,
                            Some(block_hash.into()),
                            Some(state_hash.into()),
                        ));
                    }
                    Some(finalized_slot) => {
                        // Block root is finalized, and we have reached the slot it was finalized
                        // at: we've hit a shared part of the chain.
                        if finalized_slot == slot {
                            // The first finalized block of a candidate chain lies after (in terms
                            // of slot order) the newly finalized block. It's not a candidate for
                            // pruning.
                            if finalized_slot == new_finalized_slot {
                                potentially_abandoned_blocks.clear();
                                potentially_abandoned_head.take();
                            }

                            break;
                        }
                        // Block root is finalized, but we're at a skip slot: delete the state only.
                        else {
                            potentially_abandoned_blocks.push((
                                slot,
                                None,
                                Some(state_hash.into()),
                            ));
                        }
                    }
                }
            }

            abandoned_heads.extend(potentially_abandoned_head.into_iter());
            if !potentially_abandoned_blocks.is_empty() {
                abandoned_blocks.extend(
                    potentially_abandoned_blocks
                        .iter()
                        .filter_map(|(_, maybe_block_hash, _)| *maybe_block_hash),
                );
                abandoned_states.extend(potentially_abandoned_blocks.iter().filter_map(
                    |(slot, _, maybe_state_hash)| match maybe_state_hash {
                        None => None,
                        Some(state_hash) => Some((*slot, *state_hash)),
                    },
                ));
            }
        }

        // XXX Should be performed atomically, see
        // https://github.com/sigp/lighthouse/issues/692
        for block_hash in abandoned_blocks.into_iter() {
            store.delete_block(&block_hash.into())?;
        }
        for (slot, state_hash) in abandoned_states.into_iter() {
            store.delete_state(&state_hash.into(), slot)?;
        }
        for head_hash in abandoned_heads.into_iter() {
            head_tracker.remove_head(head_hash);
        }

        Ok(())
    }
}
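// Illustrative aside (not part of the commit): the `found_block` flag in
// `prune_abandoned_forks` emulates an inclusive "take until", i.e. a `take_while`
// that also yields the first matching element. A minimal standalone sketch of the
// same pattern, using only the standard library:
//
//     let mut found = false;
//     let up_to_first_even: Vec<u32> = vec![1, 3, 4, 5, 6]
//         .into_iter()
//         .take_while(|n| {
//             if found {
//                 false
//             } else {
//                 found = n % 2 == 0;
//                 true // yield the matching element too
//             }
//         })
//         .collect();
//     assert_eq!(up_to_first_even, vec![1, 3, 4]);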

/// Migrator that does nothing, for stores that don't need migration.
pub struct NullMigrator;

impl<E: EthSpec> Migrate<SimpleDiskStore<E>, E> for NullMigrator {
    fn new(_: Arc<SimpleDiskStore<E>>, _: Logger) -> Self {
        NullMigrator
    }
}

impl<E: EthSpec> Migrate<MemoryStore<E>, E> for NullMigrator {
    fn new(_: Arc<MemoryStore<E>>, _: Logger) -> Self {
        NullMigrator
    }
}

/// Migrator that immediately calls the store's migration function, blocking the current execution.
///
/// Mostly useful for tests.
pub struct BlockingMigrator<S> {
    db: Arc<S>,
}

impl<E: EthSpec, S: Store<E>> Migrate<S, E> for BlockingMigrator<S> {
    fn new(db: Arc<S>, _: Logger) -> Self {
        BlockingMigrator { db }
    }

    fn process_finalization(
        &self,
        state_root: Hash256,
        new_finalized_state: BeaconState<E>,
        _max_finality_distance: u64,
        head_tracker: Arc<HeadTracker>,
        old_finalized_block_hash: SignedBeaconBlockHash,
        new_finalized_block_hash: SignedBeaconBlockHash,
    ) {
        if let Err(e) = S::process_finalization(self.db.clone(), state_root, &new_finalized_state) {
            // This migrator is only used for testing, so we just log to stderr without a logger.
            eprintln!("Migration error: {:?}", e);
        }

        if let Err(e) = Self::prune_abandoned_forks(
            self.db.clone(),
            head_tracker,
            old_finalized_block_hash,
            new_finalized_block_hash,
            new_finalized_state.slot,
        ) {
            eprintln!("Pruning error: {:?}", e);
        }
    }
}

type MpscSender<E> = mpsc::Sender<(
    Hash256,
    BeaconState<E>,
    Arc<HeadTracker>,
    SignedBeaconBlockHash,
    SignedBeaconBlockHash,
    Slot,
)>;
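// Annotation (not in the commit): the tuple fields are, in order, the finalized
// state root, the new finalized state, the head tracker handle, the old and new
// finalized block hashes, and the new finalized slot — mirroring the arguments of
// `process_finalization` above.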

/// Migrator that runs a background thread to migrate state from the hot to the cold database.
pub struct BackgroundMigrator<E: EthSpec> {
    db: Arc<DiskStore<E>>,
    tx_thread: Mutex<(MpscSender<E>, thread::JoinHandle<()>)>,
    log: Logger,
}

impl<E: EthSpec> Migrate<DiskStore<E>, E> for BackgroundMigrator<E> {
    fn new(db: Arc<DiskStore<E>>, log: Logger) -> Self {
        let tx_thread = Mutex::new(Self::spawn_thread(db.clone(), log.clone()));
        Self { db, tx_thread, log }
    }

    /// Perform the freezing operation on the database.
    fn process_finalization(
        &self,
        finalized_state_root: Hash256,
        new_finalized_state: BeaconState<E>,
        max_finality_distance: u64,
        head_tracker: Arc<HeadTracker>,
        old_finalized_block_hash: SignedBeaconBlockHash,
        new_finalized_block_hash: SignedBeaconBlockHash,
    ) {
        if !self.needs_migration(new_finalized_state.slot, max_finality_distance) {
            return;
        }

        let (ref mut tx, ref mut thread) = *self.tx_thread.lock();

        let new_finalized_slot = new_finalized_state.slot;
        if let Err(tx_err) = tx.send((
            finalized_state_root,
            new_finalized_state,
            head_tracker,
            old_finalized_block_hash,
            new_finalized_block_hash,
            new_finalized_slot,
        )) {
            let (new_tx, new_thread) = Self::spawn_thread(self.db.clone(), self.log.clone());

            drop(mem::replace(tx, new_tx));
            let old_thread = mem::replace(thread, new_thread);

            // Join the old thread, which will probably have panicked, or may have
            // halted normally just now as a result of us dropping the old `mpsc::Sender`.
            if let Err(thread_err) = old_thread.join() {
                warn!(
                    self.log,
                    "Migration thread died, so it was restarted";
                    "reason" => format!("{:?}", thread_err)
                );
            }

            // Retry at most once, we could recurse but that would risk overflowing the stack.
            let _ = tx.send(tx_err.0);
        }
    }
}

impl<E: EthSpec> BackgroundMigrator<E> {
    /// Return true if a migration needs to be performed, given a new `finalized_slot`.
    fn needs_migration(&self, finalized_slot: Slot, max_finality_distance: u64) -> bool {
        let finality_distance = finalized_slot - self.db.get_split_slot();
        finality_distance > max_finality_distance
    }
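    // Worked example (annotation, not in the commit): if the hot/cold split is at
    // slot 64, the chain finalizes slot 96, and `max_finality_distance` is 16, then
    // `finality_distance = 96 - 64 = 32 > 16`, so a migration is triggered. With a
    // finality distance of, say, 8, the freeze would be deferred until finality
    // advances further past the split.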

    /// Spawn a new child thread to run the migration process.
    ///
    /// Return a channel handle for sending new finalized states to the thread.
    fn spawn_thread(
        db: Arc<DiskStore<E>>,
        log: Logger,
    ) -> (
        mpsc::Sender<(
            Hash256,
            BeaconState<E>,
            Arc<HeadTracker>,
            SignedBeaconBlockHash,
            SignedBeaconBlockHash,
            Slot,
        )>,
        thread::JoinHandle<()>,
    ) {
        let (tx, rx) = mpsc::channel();
        let thread = thread::spawn(move || {
            while let Ok((
                state_root,
                state,
                head_tracker,
                old_finalized_block_hash,
                new_finalized_block_hash,
                new_finalized_slot,
            )) = rx.recv()
            {
                match DiskStore::process_finalization(db.clone(), state_root, &state) {
                    Ok(()) => {}
                    Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => {
                        debug!(
                            log,
                            "Database migration postponed, unaligned finalized block";
                            "slot" => slot.as_u64()
                        );
                    }
                    Err(e) => {
                        warn!(
                            log,
                            "Database migration failed";
                            "error" => format!("{:?}", e)
                        );
                    }
                };

                match Self::prune_abandoned_forks(
                    db.clone(),
                    head_tracker,
                    old_finalized_block_hash,
                    new_finalized_block_hash,
                    new_finalized_slot,
                ) {
                    Ok(()) => {}
                    Err(e) => warn!(log, "Block pruning failed: {:?}", e),
                }
            }
        });

        (tx, thread)
    }
}
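// Illustrative sketch (not part of the commit) of the restart-on-send-failure
// pattern used above: sending on a channel whose receiver has died returns the
// message inside `SendError`, so the caller can respawn the worker and retry
// once. All names here are local to the sketch.
//
//     use std::sync::mpsc;
//     use std::thread;
//
//     fn spawn_worker() -> (mpsc::Sender<u64>, thread::JoinHandle<()>) {
//         let (tx, rx) = mpsc::channel();
//         // Worker drains messages until the sender is dropped.
//         (tx, thread::spawn(move || while let Ok(n) = rx.recv() { drop(n) }))
//     }
//
//     let (mut tx, mut handle) = spawn_worker();
//     if let Err(send_err) = tx.send(42) {
//         // Receiver is gone: respawn, join the dead worker, retry exactly once.
//         let (new_tx, new_handle) = spawn_worker();
//         tx = new_tx;
//         let _ = std::mem::replace(&mut handle, new_handle).join();
//         let _ = tx.send(send_err.0);
//     }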
@ -1,6 +1,7 @@
pub use crate::beacon_chain::{
    BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY,
};
use crate::migrate::{BlockingMigrator, Migrate, NullMigrator};
pub use crate::persisted_beacon_chain::PersistedBeaconChain;
use crate::{
    builder::{BeaconChainBuilder, Witness},
@ -14,16 +15,16 @@ use sloggers::{null::NullLoggerBuilder, Build};
use slot_clock::TestingSlotClock;
use state_processing::per_slot_processing;
use std::borrow::Cow;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use store::{
    migrate::{BlockingMigrator, NullMigrator},
    DiskStore, MemoryStore, Migrate, Store,
};
use store::{DiskStore, MemoryStore, Store};
use tempfile::{tempdir, TempDir};
use tree_hash::TreeHash;
use types::{
    AggregateSignature, Attestation, BeaconState, ChainSpec, Domain, EthSpec, Hash256, Keypair,
    SecretKey, Signature, SignedBeaconBlock, SignedRoot, Slot,
    AggregateSignature, Attestation, BeaconState, BeaconStateHash, ChainSpec, Domain, EthSpec,
    Hash256, Keypair, SecretKey, Signature, SignedBeaconBlock, SignedBeaconBlockHash, SignedRoot,
    Slot,
};

pub use types::test_utils::generate_deterministic_keypairs;
@ -135,7 +136,10 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
            .logger(log.clone())
            .custom_spec(spec.clone())
            .store(store.clone())
            .store_migrator(<BlockingMigrator<_> as Migrate<_, E>>::new(store))
            .store_migrator(<BlockingMigrator<_> as Migrate<_, E>>::new(
                store,
                log.clone(),
            ))
            .data_dir(data_dir.path().to_path_buf())
            .genesis_state(
                interop_genesis_state::<E>(&keypairs, HARNESS_GENESIS_TIME, &spec)
@ -175,7 +179,10 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
            .logger(log.clone())
            .custom_spec(spec)
            .store(store.clone())
            .store_migrator(<BlockingMigrator<_> as Migrate<_, E>>::new(store))
            .store_migrator(<BlockingMigrator<_> as Migrate<_, E>>::new(
                store,
                log.clone(),
            ))
            .data_dir(data_dir.path().to_path_buf())
            .resume_from_db()
            .expect("should resume beacon chain from db")
@ -272,6 +279,123 @@ where
        head_block_root.expect("did not produce any blocks")
    }

    /// Returns the current canonical head slot.
    pub fn get_chain_slot(&self) -> Slot {
        self.chain.slot().unwrap()
    }

    /// Returns the current canonical head state.
    pub fn get_head_state(&self) -> BeaconState<E> {
        self.chain.head().unwrap().beacon_state
    }

    /// Adds a single block (synchronously) onto either the canonical chain (block_strategy ==
    /// OnCanonicalHead) or a fork (block_strategy == ForkCanonicalChainAt).
    pub fn add_block(
        &self,
        state: &BeaconState<E>,
        block_strategy: BlockStrategy,
        slot: Slot,
        validators: &[usize],
    ) -> (SignedBeaconBlockHash, BeaconState<E>) {
        while self.chain.slot().expect("should have a slot") < slot {
            self.advance_slot();
        }

        let (block, new_state) = self.build_block(state.clone(), slot, block_strategy);

        let block_root = self
            .chain
            .process_block(block)
            .expect("should not error during block processing");

        self.chain.fork_choice().expect("should find head");

        let attestation_strategy = AttestationStrategy::SomeValidators(validators.to_vec());
        self.add_free_attestations(&attestation_strategy, &new_state, block_root, slot);
        (block_root.into(), new_state)
    }

    /// `add_block()` repeated `num_blocks` times.
    pub fn add_blocks(
        &self,
        mut state: BeaconState<E>,
        mut slot: Slot,
        num_blocks: usize,
        attesting_validators: &[usize],
        block_strategy: BlockStrategy,
    ) -> (
        HashMap<Slot, SignedBeaconBlockHash>,
        HashMap<Slot, BeaconStateHash>,
        Slot,
        SignedBeaconBlockHash,
        BeaconState<E>,
    ) {
        let mut blocks: HashMap<Slot, SignedBeaconBlockHash> = HashMap::with_capacity(num_blocks);
        let mut states: HashMap<Slot, BeaconStateHash> = HashMap::with_capacity(num_blocks);
        for _ in 0..num_blocks {
            let (new_root_hash, new_state) =
                self.add_block(&state, block_strategy, slot, attesting_validators);
            blocks.insert(slot, new_root_hash);
            states.insert(slot, new_state.tree_hash_root().into());
            state = new_state;
            slot += 1;
        }
        let head_hash = blocks[&(slot - 1)];
        (blocks, states, slot, head_hash, state)
    }
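    // Usage sketch (annotation, not in the commit): extend the canonical chain by
    // an epoch of blocks attested by all validators. Names mirror the test code
    // further below.
    //
    //     let (blocks, states, slot, head, state) = harness.add_blocks(
    //         harness.get_head_state(),
    //         harness.get_chain_slot(),
    //         E::slots_per_epoch() as usize,
    //         &(0..VALIDATOR_COUNT).collect::<Vec<_>>(),
    //         BlockStrategy::OnCanonicalHead,
    //     );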

    /// A wrapper on `add_blocks()` to avoid passing enums explicitly.
    pub fn add_canonical_chain_blocks(
        &self,
        state: BeaconState<E>,
        slot: Slot,
        num_blocks: usize,
        attesting_validators: &[usize],
    ) -> (
        HashMap<Slot, SignedBeaconBlockHash>,
        HashMap<Slot, BeaconStateHash>,
        Slot,
        SignedBeaconBlockHash,
        BeaconState<E>,
    ) {
        let block_strategy = BlockStrategy::OnCanonicalHead;
        self.add_blocks(
            state,
            slot,
            num_blocks,
            attesting_validators,
            block_strategy,
        )
    }

    /// A wrapper on `add_blocks()` to avoid passing enums explicitly.
    pub fn add_stray_blocks(
        &self,
        state: BeaconState<E>,
        slot: Slot,
        num_blocks: usize,
        attesting_validators: &[usize],
    ) -> (
        HashMap<Slot, SignedBeaconBlockHash>,
        HashMap<Slot, BeaconStateHash>,
        Slot,
        SignedBeaconBlockHash,
        BeaconState<E>,
    ) {
        let block_strategy = BlockStrategy::ForkCanonicalChainAt {
            previous_slot: slot,
            first_slot: slot + 2,
        };
        self.add_blocks(
            state,
            slot + 2,
            num_blocks,
            attesting_validators,
            block_strategy,
        )
    }
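    // Annotation (not in the commit): `ForkCanonicalChainAt { previous_slot: slot,
    // first_slot: slot + 2 }` leaves slot + 1 empty, so the stray chain forks off
    // the canonical chain by skipping one slot:
    //
    //     canonical: ... -> slot -> slot+1 -> slot+2  -> ...
    //     stray:     ... -> slot ----------> slot+2' -> ...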

    /// Returns a newly created block, signed by the proposer for the given slot.
    fn build_block(
        &self,
@ -347,7 +471,9 @@ where
                .process_attestation(attestation, AttestationType::Aggregated)
                .expect("should not error during attestation processing")
            {
                AttestationProcessingOutcome::Processed => (),
                // PastEpoch can occur if we fork over several epochs
                AttestationProcessingOutcome::Processed
                | AttestationProcessingOutcome::PastEpoch { .. } => (),
                other => panic!("did not successfully process attestation: {:?}", other),
            }
        });
@ -40,8 +40,8 @@ fn produces_attestations() {

    let state = &harness.chain.head().expect("should get head").beacon_state;
    assert_eq!(state.slot, num_blocks_produced, "head should have updated");
    assert!(
        state.finalized_checkpoint.epoch > 0,
    assert_ne!(
        state.finalized_checkpoint.epoch, 0,
        "head should have updated"
    );

@ -6,9 +6,12 @@ extern crate lazy_static
use beacon_chain::test_utils::{
    AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType,
};
use beacon_chain::{AttestationProcessingOutcome, AttestationType};
use beacon_chain::BeaconSnapshot;
use beacon_chain::{AttestationProcessingOutcome, AttestationType, StateSkipConfig};
use rand::Rng;
use sloggers::{null::NullLoggerBuilder, Build};
use std::collections::HashMap;
use std::collections::HashSet;
use std::sync::Arc;
use store::{
    iter::{BlockRootsIterator, StateRootsIterator},
@ -20,11 +23,12 @@ use types::test_utils::{SeedableRng, XorShiftRng};
use types::*;

// Should ideally be divisible by 3.
pub const VALIDATOR_COUNT: usize = 24;
pub const LOW_VALIDATOR_COUNT: usize = 24;
pub const HIGH_VALIDATOR_COUNT: usize = 64;

lazy_static! {
    /// A cached set of keys.
    static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
    static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(HIGH_VALIDATOR_COUNT);
}

type E = MinimalEthSpec;
@ -57,7 +61,7 @@ fn full_participation_no_skips() {
    let num_blocks_produced = E::slots_per_epoch() * 5;
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), VALIDATOR_COUNT);
    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);

    harness.extend_chain(
        num_blocks_produced as usize,
@ -77,7 +81,7 @@ fn randomised_skips() {
    let mut num_blocks_produced = 0;
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), VALIDATOR_COUNT);
    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);
    let rng = &mut XorShiftRng::from_seed([42; 16]);

    let mut head_slot = 0;
@ -113,14 +117,16 @@
fn long_skip() {
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), VALIDATOR_COUNT);
    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);

    // Number of blocks to create in the first run, intentionally not falling on an epoch
    // boundary in order to check that the DB hot -> cold migration is capable of reaching
    // back across the skip distance, and correctly migrating those extra non-finalized states.
    let initial_blocks = E::slots_per_epoch() * 5 + E::slots_per_epoch() / 2;
    let skip_slots = E::slots_per_historical_root() as u64 * 8;
    let final_blocks = E::slots_per_epoch() * 4;
    // Create the minimum ~2.5 epochs of extra blocks required to re-finalize the chain.
    // Having this set lower ensures that we start justifying and finalizing quickly after a skip.
    let final_blocks = 2 * E::slots_per_epoch() + E::slots_per_epoch() / 2;

    harness.extend_chain(
        initial_blocks as usize,
@ -223,7 +229,7 @@ fn split_slot_restore() {

    let split_slot = {
        let store = get_store(&db_path);
        let harness = get_harness(store.clone(), VALIDATOR_COUNT);
        let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);

        let num_blocks = 4 * E::slots_per_epoch();

@ -251,10 +257,10 @@ fn epoch_boundary_state_attestation_processing() {
    let num_blocks_produced = E::slots_per_epoch() * 5;
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), VALIDATOR_COUNT);
    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);

    let late_validators = vec![0, 1];
    let timely_validators = (2..VALIDATOR_COUNT).collect::<Vec<_>>();
    let timely_validators = (2..LOW_VALIDATOR_COUNT).collect::<Vec<_>>();

    let mut late_attestations = vec![];

@ -333,7 +339,7 @@
fn delete_blocks_and_states() {
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), VALIDATOR_COUNT);
    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);

    let unforked_blocks = 4 * E::slots_per_epoch();

@ -345,13 +351,11 @@
    );

    // Create a fork post-finalization.
    let two_thirds = (VALIDATOR_COUNT / 3) * 2;
    let two_thirds = (LOW_VALIDATOR_COUNT / 3) * 2;
    let honest_validators: Vec<usize> = (0..two_thirds).collect();
    let faulty_validators: Vec<usize> = (two_thirds..VALIDATOR_COUNT).collect();
    let faulty_validators: Vec<usize> = (two_thirds..LOW_VALIDATOR_COUNT).collect();

    // NOTE: should remove this -1 and/or write a similar test once #845 is resolved
    // https://github.com/sigp/lighthouse/issues/845
    let fork_blocks = 2 * E::slots_per_epoch() - 1;
    let fork_blocks = 2 * E::slots_per_epoch();

    let (honest_head, faulty_head) = harness.generate_two_forks_by_skipping_a_block(
        &honest_validators,
@ -425,6 +429,825 @@
    check_chain_dump(&harness, unforked_blocks + fork_blocks + 1);
}

// Check that we never produce invalid blocks when there is deep forking that changes the shuffling.
// See https://github.com/sigp/lighthouse/issues/845
fn multi_epoch_fork_valid_blocks_test(
    initial_blocks: usize,
    num_fork1_blocks: usize,
    num_fork2_blocks: usize,
    num_fork1_validators: usize,
) -> (TempDir, TestHarness, Hash256, Hash256) {
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);

    // Create the initial portion of the chain
    if initial_blocks > 0 {
        harness.extend_chain(
            initial_blocks,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::AllValidators,
        );
    }

    assert!(num_fork1_validators <= LOW_VALIDATOR_COUNT);
    let fork1_validators: Vec<usize> = (0..num_fork1_validators).collect();
    let fork2_validators: Vec<usize> = (num_fork1_validators..LOW_VALIDATOR_COUNT).collect();

    let (head1, head2) = harness.generate_two_forks_by_skipping_a_block(
        &fork1_validators,
        &fork2_validators,
        num_fork1_blocks,
        num_fork2_blocks,
    );

    (db_path, harness, head1, head2)
}

// This is the minimal test of block production with different shufflings.
#[test]
fn block_production_different_shuffling_early() {
    let slots_per_epoch = E::slots_per_epoch() as usize;
    multi_epoch_fork_valid_blocks_test(
        slots_per_epoch - 2,
        slots_per_epoch + 3,
        slots_per_epoch + 3,
        LOW_VALIDATOR_COUNT / 2,
    );
}

#[test]
fn block_production_different_shuffling_long() {
    let slots_per_epoch = E::slots_per_epoch() as usize;
    multi_epoch_fork_valid_blocks_test(
        2 * slots_per_epoch - 2,
        3 * slots_per_epoch,
        3 * slots_per_epoch,
        LOW_VALIDATOR_COUNT / 2,
    );
}

// Check that the op pool safely includes multiple attestations per block when necessary.
// This checks the correctness of the shuffling compatibility memoization.
#[test]
fn multiple_attestations_per_block() {
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store, HIGH_VALIDATOR_COUNT);
    let chain = &harness.chain;

    harness.extend_chain(
        MainnetEthSpec::slots_per_epoch() as usize * 3,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );

    let head = chain.head().unwrap();
    let committees_per_slot = head
        .beacon_state
        .get_committee_count_at_slot(head.beacon_state.slot)
        .unwrap();
    assert!(committees_per_slot > 1);

    for snapshot in chain.chain_dump().unwrap() {
        assert_eq!(
            snapshot.beacon_block.message.body.attestations.len() as u64,
            if snapshot.beacon_block.slot() <= 1 {
                0
            } else {
                committees_per_slot
            }
        );
    }
}

#[test]
fn shuffling_compatible_linear_chain() {
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);

    // Skip the block at the end of the first epoch.
    let head_block_root = harness.extend_chain(
        4 * E::slots_per_epoch() as usize,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );

    check_shuffling_compatible(
        &harness,
        &get_state_for_block(&harness, head_block_root),
        head_block_root,
        true,
        true,
        None,
        None,
    );
}

#[test]
fn shuffling_compatible_missing_pivot_block() {
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);

    // Skip the block at the end of the first epoch.
    harness.extend_chain(
        E::slots_per_epoch() as usize - 2,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );
    harness.advance_slot();
    harness.advance_slot();
    let head_block_root = harness.extend_chain(
        2 * E::slots_per_epoch() as usize,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );

    check_shuffling_compatible(
        &harness,
        &get_state_for_block(&harness, head_block_root),
        head_block_root,
        true,
        true,
        Some(E::slots_per_epoch() - 2),
        Some(E::slots_per_epoch() - 2),
    );
}

#[test]
fn shuffling_compatible_simple_fork() {
    let slots_per_epoch = E::slots_per_epoch() as usize;
    let (db_path, harness, head1, head2) = multi_epoch_fork_valid_blocks_test(
        2 * slots_per_epoch,
        3 * slots_per_epoch,
        3 * slots_per_epoch,
        LOW_VALIDATOR_COUNT / 2,
    );

    let head1_state = get_state_for_block(&harness, head1);
    let head2_state = get_state_for_block(&harness, head2);

    check_shuffling_compatible(&harness, &head1_state, head1, true, true, None, None);
    check_shuffling_compatible(&harness, &head1_state, head2, false, false, None, None);
    check_shuffling_compatible(&harness, &head2_state, head1, false, false, None, None);
    check_shuffling_compatible(&harness, &head2_state, head2, true, true, None, None);

    drop(db_path);
}

#[test]
fn shuffling_compatible_short_fork() {
    let slots_per_epoch = E::slots_per_epoch() as usize;
    let (db_path, harness, head1, head2) = multi_epoch_fork_valid_blocks_test(
        2 * slots_per_epoch - 2,
        slots_per_epoch + 2,
        slots_per_epoch + 2,
        LOW_VALIDATOR_COUNT / 2,
    );

    let head1_state = get_state_for_block(&harness, head1);
    let head2_state = get_state_for_block(&harness, head2);

    check_shuffling_compatible(&harness, &head1_state, head1, true, true, None, None);
    check_shuffling_compatible(&harness, &head1_state, head2, false, true, None, None);
    // NOTE: don't check this case, as block 14 from the first chain appears valid on the second
    // chain due to it matching the second chain's block 15.
    // check_shuffling_compatible(&harness, &head2_state, head1, false, true, None, None);
    check_shuffling_compatible(
        &harness,
        &head2_state,
        head2,
        true,
        true,
        // Required because of the skipped slot.
        Some(2 * E::slots_per_epoch() - 2),
        None,
    );

    drop(db_path);
}

fn get_state_for_block(harness: &TestHarness, block_root: Hash256) -> BeaconState<E> {
    let head_block = harness.chain.get_block(&block_root).unwrap().unwrap();
    harness
        .chain
        .get_state(&head_block.state_root(), Some(head_block.slot()))
        .unwrap()
        .unwrap()
}

/// Check the invariants that apply to `shuffling_is_compatible`.
fn check_shuffling_compatible(
    harness: &TestHarness,
    head_state: &BeaconState<E>,
    head_block_root: Hash256,
    current_epoch_valid: bool,
    previous_epoch_valid: bool,
    current_epoch_cutoff_slot: Option<u64>,
    previous_epoch_cutoff_slot: Option<u64>,
) {
    let shuffling_lookahead = harness.chain.spec.min_seed_lookahead.as_u64() + 1;
    let current_pivot_slot =
        (head_state.current_epoch() - shuffling_lookahead).end_slot(E::slots_per_epoch());
    let previous_pivot_slot =
        (head_state.previous_epoch() - shuffling_lookahead).end_slot(E::slots_per_epoch());

    for (block_root, slot) in harness
        .chain
        .rev_iter_block_roots_from(head_block_root)
        .unwrap()
    {
        // Shuffling is compatible targeting the current epoch,
        // iff slot is greater than or equal to the current epoch pivot block
        assert_eq!(
            harness.chain.shuffling_is_compatible(
                &block_root,
                head_state.current_epoch(),
                &head_state
            ),
            current_epoch_valid
                && slot >= current_epoch_cutoff_slot.unwrap_or(current_pivot_slot.as_u64())
        );
        // Similarly for the previous epoch
        assert_eq!(
            harness.chain.shuffling_is_compatible(
                &block_root,
                head_state.previous_epoch(),
                &head_state
            ),
            previous_epoch_valid
                && slot >= previous_epoch_cutoff_slot.unwrap_or(previous_pivot_slot.as_u64())
        );
        // Targeting the next epoch should always return false
        assert_eq!(
            harness.chain.shuffling_is_compatible(
                &block_root,
                head_state.current_epoch() + 1,
                &head_state
            ),
            false
        );
        // Targeting two epochs before the current epoch should also always return false
        if head_state.current_epoch() >= 2 {
            assert_eq!(
                harness.chain.shuffling_is_compatible(
                    &block_root,
                    head_state.current_epoch() - 2,
                    &head_state
                ),
                false
            );
        }
    }
}
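// Worked example (annotation, not in the commit): with the minimal spec
// (8 slots per epoch) and `min_seed_lookahead = 1`, `shuffling_lookahead` is 2,
// so for a head in epoch 5 the current-epoch pivot is the end slot of epoch 3,
// i.e. slot 31. Blocks at or after the pivot share the head's shuffling.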

// Ensure blocks from abandoned forks are pruned from the Hot DB
#[test]
fn prunes_abandoned_fork_between_two_finalized_checkpoints() {
    const VALIDATOR_COUNT: usize = 24;
    const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2;
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT);
    const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY;
    let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
    let faulty_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
    let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize;

    let slot = harness.get_chain_slot();
    let state = harness.get_head_state();
    let (canonical_blocks_pre_finalization, _, slot, _, state) =
        harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators);
    let (stray_blocks, stray_states, _, stray_head, _) = harness.add_stray_blocks(
        harness.get_head_state(),
        slot,
        slots_per_epoch - 1,
        &faulty_validators,
    );

    // Precondition: Ensure all stray_blocks blocks are still known
    for &block_hash in stray_blocks.values() {
        let block = harness.chain.get_block(&block_hash.into()).unwrap();
        assert!(
            block.is_some(),
            "stray block {} should be still present",
            block_hash
        );
    }

    for (&slot, &state_hash) in &stray_states {
        let state = harness
            .chain
            .get_state(&state_hash.into(), Some(slot))
            .unwrap();
        assert!(
            state.is_some(),
            "stray state {} at slot {} should be still present",
            state_hash,
            slot
        );
    }

    // Precondition: Only genesis is finalized
    let chain_dump = harness.chain.chain_dump().unwrap();
    assert_eq!(
        get_finalized_epoch_boundary_blocks(&chain_dump),
        vec![Hash256::zero().into()].into_iter().collect(),
    );

    assert!(harness.chain.knows_head(&stray_head));

    // Trigger finalization
    let (canonical_blocks_post_finalization, _, _, _, _) =
        harness.add_canonical_chain_blocks(state, slot, slots_per_epoch * 5, &honest_validators);

    // Postcondition: New blocks got finalized
    let chain_dump = harness.chain.chain_dump().unwrap();
    let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump);
    assert_eq!(
        finalized_blocks,
        vec![
            Hash256::zero().into(),
            canonical_blocks_pre_finalization[&Slot::new(slots_per_epoch as u64)],
            canonical_blocks_post_finalization[&Slot::new((slots_per_epoch * 2) as u64)],
        ]
        .into_iter()
        .collect()
    );

    // Postcondition: Ensure all stray_blocks blocks have been pruned
    for &block_hash in stray_blocks.values() {
        let block = harness.chain.get_block(&block_hash.into()).unwrap();
        assert!(
            block.is_none(),
            "abandoned block {} should have been pruned",
            block_hash
        );
    }

    for (&slot, &state_hash) in &stray_states {
        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
        assert!(
            state.is_none(),
            "stray state {} at slot {} should have been deleted",
            state_hash,
            slot
        );
    }

    assert!(!harness.chain.knows_head(&stray_head));
}
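// Arithmetic note (annotation, not in the commit): VALIDATOR_SUPERMAJORITY is
// (24 / 3) * 2 = 16, i.e. exactly the 2/3 attesting weight that Casper FFG needs
// to justify and finalize checkpoints, so the honest two-thirds can finalize the
// canonical chain while the faulty third builds the doomed fork.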

#[test]
fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() {
    const VALIDATOR_COUNT: usize = 24;
    const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2;
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT);
    const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY;
    let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
    let faulty_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
    let all_validators: Vec<usize> = (0..VALIDATOR_COUNT).collect();
    let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize;

    // Fill up 0th epoch
    let slot = harness.get_chain_slot();
    let state = harness.get_head_state();
    let (canonical_blocks_zeroth_epoch, _, slot, _, state) =
        harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators);

    // Fill up 1st epoch
    let (_, _, canonical_slot, shared_head, canonical_state) =
        harness.add_canonical_chain_blocks(state, slot, 1, &all_validators);
    let (stray_blocks, stray_states, _, stray_head, _) = harness.add_stray_blocks(
        canonical_state.clone(),
        canonical_slot,
        1,
        &faulty_validators,
    );

    // Preconditions
    for &block_hash in stray_blocks.values() {
        let block = harness.chain.get_block(&block_hash.into()).unwrap();
        assert!(
            block.is_some(),
            "stray block {} should be still present",
            block_hash
        );
    }

    for (&slot, &state_hash) in &stray_states {
        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
        assert!(
            state.is_some(),
            "stray state {} at slot {} should be still present",
            state_hash,
            slot
        );
    }

    let chain_dump = harness.chain.chain_dump().unwrap();
    assert_eq!(
        get_finalized_epoch_boundary_blocks(&chain_dump),
        vec![Hash256::zero().into()].into_iter().collect(),
    );

    assert!(get_blocks(&chain_dump).contains(&shared_head));

    // Trigger finalization
    let (canonical_blocks, _, _, _, _) = harness.add_canonical_chain_blocks(
        canonical_state,
        canonical_slot,
        slots_per_epoch * 5,
        &honest_validators,
    );

    // Postconditions
    let chain_dump = harness.chain.chain_dump().unwrap();
    let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump);
    assert_eq!(
        finalized_blocks,
        vec![
            Hash256::zero().into(),
            canonical_blocks_zeroth_epoch[&Slot::new(slots_per_epoch as u64)],
            canonical_blocks[&Slot::new((slots_per_epoch * 2) as u64)],
        ]
        .into_iter()
        .collect()
    );

    for &block_hash in stray_blocks.values() {
        assert!(
            harness
                .chain
                .get_block(&block_hash.into())
                .unwrap()
                .is_none(),
            "stray block {} should have been pruned",
            block_hash,
        );
    }

    for (&slot, &state_hash) in &stray_states {
        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
        assert!(
            state.is_none(),
            "stray state {} at slot {} should have been deleted",
            state_hash,
            slot
        );
    }

    assert!(!harness.chain.knows_head(&stray_head));
    assert!(get_blocks(&chain_dump).contains(&shared_head));
}

#[test]
fn pruning_does_not_touch_blocks_prior_to_finalization() {
    const VALIDATOR_COUNT: usize = 24;
    const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2;
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT);
    const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY;
    let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
    let faulty_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
    let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize;

    // Fill up 0th epoch with canonical chain blocks
    let slot = harness.get_chain_slot();
    let state = harness.get_head_state();
    let (canonical_blocks_zeroth_epoch, _, slot, _, state) =
        harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators);

    // Fill up 1st epoch. Contains a fork.
    let (stray_blocks, stray_states, _, stray_head, _) =
        harness.add_stray_blocks(state.clone(), slot, slots_per_epoch - 1, &faulty_validators);

    // Preconditions
    for &block_hash in stray_blocks.values() {
        let block = harness.chain.get_block(&block_hash.into()).unwrap();
        assert!(
            block.is_some(),
            "stray block {} should be still present",
            block_hash
        );
    }

    for (&slot, &state_hash) in &stray_states {
        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
        assert!(
            state.is_some(),
            "stray state {} at slot {} should be still present",
            state_hash,
            slot
        );
    }

    let chain_dump = harness.chain.chain_dump().unwrap();
    assert_eq!(
        get_finalized_epoch_boundary_blocks(&chain_dump),
        vec![Hash256::zero().into()].into_iter().collect(),
    );

    // Trigger finalization
    let (_, _, _, _, _) =
        harness.add_canonical_chain_blocks(state, slot, slots_per_epoch * 4, &honest_validators);

    // Postconditions
    let chain_dump = harness.chain.chain_dump().unwrap();
    let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump);
    assert_eq!(
        finalized_blocks,
        vec![
            Hash256::zero().into(),
            canonical_blocks_zeroth_epoch[&Slot::new(slots_per_epoch as u64)],
        ]
        .into_iter()
        .collect()
    );

    for &block_hash in stray_blocks.values() {
        let block = harness.chain.get_block(&block_hash.into()).unwrap();
        assert!(
            block.is_some(),
            "stray block {} should be still present",
            block_hash
        );
    }

    for (&slot, &state_hash) in &stray_states {
        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
        assert!(
            state.is_some(),
            "stray state {} at slot {} should be still present",
            state_hash,
            slot
        );
    }

    assert!(harness.chain.knows_head(&stray_head));
}

#[test]
fn prunes_fork_running_past_finalized_checkpoint() {
    const VALIDATOR_COUNT: usize = 24;
    const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2;
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT);
    const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY;
    let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
    let faulty_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
    let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize;

    // Fill up 0th epoch with canonical chain blocks
    let slot = harness.get_chain_slot();
    let state = harness.get_head_state();
    let (canonical_blocks_zeroth_epoch, _, slot, _, state) =
        harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators);

    // Fill up 1st epoch. Contains a fork.
    let (stray_blocks_first_epoch, stray_states_first_epoch, stray_slot, _, stray_state) =
        harness.add_stray_blocks(state.clone(), slot, slots_per_epoch, &faulty_validators);

    let (canonical_blocks_first_epoch, _, canonical_slot, _, canonical_state) =
        harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators);

    // Fill up 2nd epoch. Extends both the canonical chain and the fork.
    let (stray_blocks_second_epoch, stray_states_second_epoch, _, stray_head, _) = harness
        .add_stray_blocks(
            stray_state,
            stray_slot,
            slots_per_epoch - 1,
            &faulty_validators,
        );

    // Precondition: Ensure all stray_blocks blocks are still known
    let stray_blocks: HashMap<Slot, SignedBeaconBlockHash> = stray_blocks_first_epoch
        .into_iter()
        .chain(stray_blocks_second_epoch.into_iter())
        .collect();

    let stray_states: HashMap<Slot, BeaconStateHash> = stray_states_first_epoch
        .into_iter()
        .chain(stray_states_second_epoch.into_iter())
        .collect();

    for &block_hash in stray_blocks.values() {
        let block = harness.chain.get_block(&block_hash.into()).unwrap();
        assert!(
            block.is_some(),
            "stray block {} should be still present",
            block_hash
        );
    }

    for (&slot, &state_hash) in &stray_states {
        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
        assert!(
            state.is_some(),
            "stray state {} at slot {} should be still present",
            state_hash,
            slot
        );
    }

    // Precondition: Only genesis is finalized
    let chain_dump = harness.chain.chain_dump().unwrap();
    assert_eq!(
        get_finalized_epoch_boundary_blocks(&chain_dump),
        vec![Hash256::zero().into()].into_iter().collect(),
    );

    assert!(harness.chain.knows_head(&stray_head));

    // Trigger finalization
    let (canonical_blocks_second_epoch, _, _, _, _) = harness.add_canonical_chain_blocks(
        canonical_state,
        canonical_slot,
        slots_per_epoch * 4,
        &honest_validators,
    );

    // Postconditions
    let canonical_blocks: HashMap<Slot, SignedBeaconBlockHash> = canonical_blocks_zeroth_epoch
        .into_iter()
        .chain(canonical_blocks_first_epoch.into_iter())
        .chain(canonical_blocks_second_epoch.into_iter())
        .collect();

    // Postcondition: New blocks got finalized
    let chain_dump = harness.chain.chain_dump().unwrap();
    let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump);
    assert_eq!(
        finalized_blocks,
        vec![
            Hash256::zero().into(),
            canonical_blocks[&Slot::new(slots_per_epoch as u64)],
            canonical_blocks[&Slot::new((slots_per_epoch * 2) as u64)],
        ]
        .into_iter()
        .collect()
    );

    // Postcondition: Ensure all stray_blocks blocks have been pruned
    for &block_hash in stray_blocks.values() {
        let block = harness.chain.get_block(&block_hash.into()).unwrap();
        assert!(
            block.is_none(),
            "abandoned block {} should have been pruned",
            block_hash
        );
    }

    for (&slot, &state_hash) in &stray_states {
        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
        assert!(
            state.is_none(),
            "stray state {} at slot {} should have been deleted",
            state_hash,
            slot
        );
    }

    assert!(!harness.chain.knows_head(&stray_head));
}

// This is to check that states outside of normal block processing are pruned correctly.
#[test]
fn prunes_skipped_slots_states() {
    const VALIDATOR_COUNT: usize = 24;
    const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2;
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT);
    const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY;
    let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
    let faulty_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
    let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize;

    // Arrange skipped slots so as to cross the epoch boundary. That way, we exercise the code
    // responsible for storing state outside of normal block processing.

    let canonical_slot = harness.get_chain_slot();
    let canonical_state = harness.get_head_state();
    let (canonical_blocks_zeroth_epoch, _, canonical_slot, _, canonical_state) = harness
        .add_canonical_chain_blocks(
            canonical_state,
            canonical_slot,
            slots_per_epoch - 1,
            &honest_validators,
        );

    let (stray_blocks, stray_states, stray_slot, _, _) = harness.add_stray_blocks(
        canonical_state.clone(),
        canonical_slot,
        slots_per_epoch,
        &faulty_validators,
    );

    // Preconditions
    for &block_hash in stray_blocks.values() {
        let block = harness.chain.get_block(&block_hash.into()).unwrap();
        assert!(
            block.is_some(),
            "stray block {} should be still present",
            block_hash
        );
    }

    for (&slot, &state_hash) in &stray_states {
        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
        assert!(
            state.is_some(),
            "stray state {} at slot {} should be still present",
            state_hash,
            slot
        );
    }

    let chain_dump = harness.chain.chain_dump().unwrap();
    assert_eq!(
        get_finalized_epoch_boundary_blocks(&chain_dump),
        vec![Hash256::zero().into()].into_iter().collect(),
    );

    // Make sure slots were skipped
    let stray_state = harness
        .chain
        .state_at_slot(stray_slot, StateSkipConfig::WithoutStateRoots)
        .unwrap();
    let block_root = stray_state.get_block_root(canonical_slot - 1);
    assert_eq!(stray_state.get_block_root(canonical_slot), block_root);
    assert_eq!(stray_state.get_block_root(canonical_slot + 1), block_root);

    let skipped_slots = vec![canonical_slot, canonical_slot + 1];
    for &slot in &skipped_slots {
        assert_eq!(stray_state.get_block_root(slot), block_root);
        let state_hash = stray_state.get_state_root(slot).unwrap();
        assert!(
            harness
                .chain
                .get_state(&state_hash, Some(slot))
                .unwrap()
                .is_some(),
            "skipped slots state should be still present"
        );
    }

    // Trigger finalization
    let (canonical_blocks_post_finalization, _, _, _, _) = harness.add_canonical_chain_blocks(
        canonical_state,
        canonical_slot,
        slots_per_epoch * 5,
        &honest_validators,
    );

    // Postconditions
    let chain_dump = harness.chain.chain_dump().unwrap();
    let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump);
    let canonical_blocks: HashMap<Slot, SignedBeaconBlockHash> = canonical_blocks_zeroth_epoch
        .into_iter()
        .chain(canonical_blocks_post_finalization.into_iter())
        .collect();
    assert_eq!(
        finalized_blocks,
        vec![
            Hash256::zero().into(),
            canonical_blocks[&Slot::new(slots_per_epoch as u64)],
        ]
        .into_iter()
        .collect()
    );

    for (&slot, &state_hash) in &stray_states {
        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
        assert!(
            state.is_none(),
            "stray state {} at slot {} should have been deleted",
            state_hash,
            slot
        );
    }

    for &slot in &skipped_slots {
        assert_eq!(stray_state.get_block_root(slot), block_root);
        let state_hash = stray_state.get_state_root(slot).unwrap();
        assert!(
            harness
                .chain
                .get_state(&state_hash, Some(slot))
                .unwrap()
                .is_none(),
            "skipped slot states should have been pruned"
        );
    }
}

/// Check that the head state's slot matches `expected_slot`.
fn check_slot(harness: &TestHarness, expected_slot: u64) {
    let state = &harness.chain.head().expect("should get head").beacon_state;
@ -548,3 +1371,19 @@ fn check_iterators(harness: &TestHarness) {
        Some(Slot::new(0))
    );
}

fn get_finalized_epoch_boundary_blocks(
    dump: &[BeaconSnapshot<MinimalEthSpec>],
) -> HashSet<SignedBeaconBlockHash> {
    dump.iter()
        .cloned()
        .map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint.root.into())
        .collect()
}

fn get_blocks(dump: &[BeaconSnapshot<MinimalEthSpec>]) -> HashSet<SignedBeaconBlockHash> {
    dump.iter()
        .cloned()
        .map(|checkpoint| checkpoint.beacon_block_root.into())
        .collect()
}
@ -4,11 +4,9 @@ use crate::Client;
use beacon_chain::{
    builder::{BeaconChainBuilder, Witness},
    eth1_chain::{CachingEth1Backend, Eth1Chain},
    slot_clock::{SlotClock, SystemTimeSlotClock},
    store::{
        migrate::{BackgroundMigrator, Migrate, NullMigrator},
        DiskStore, MemoryStore, SimpleDiskStore, Store, StoreConfig,
    },
    slot_clock::{SlotClock, SystemTimeSlotClock},
    store::{DiskStore, MemoryStore, SimpleDiskStore, Store, StoreConfig},
    BeaconChain, BeaconChainTypes, Eth1ChainBackend, EventHandler,
};
use environment::RuntimeContext;
@ -68,7 +66,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
>
where
    TStore: Store<TEthSpec> + 'static,
    TStoreMigrator: store::Migrate<TStore, TEthSpec>,
    TStoreMigrator: Migrate<TStore, TEthSpec>,
    TSlotClock: SlotClock + Clone + 'static,
    TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
    TEthSpec: EthSpec + 'static,
@ -403,7 +401,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
>
where
    TStore: Store<TEthSpec> + 'static,
    TStoreMigrator: store::Migrate<TStore, TEthSpec>,
    TStoreMigrator: Migrate<TStore, TEthSpec>,
    TSlotClock: SlotClock + Clone + 'static,
    TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
    TEthSpec: EthSpec + 'static,
@ -450,7 +448,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec>
>
where
    TStore: Store<TEthSpec> + 'static,
    TStoreMigrator: store::Migrate<TStore, TEthSpec>,
    TStoreMigrator: Migrate<TStore, TEthSpec>,
    TSlotClock: SlotClock + 'static,
    TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
    TEthSpec: EthSpec + 'static,
@ -498,7 +496,7 @@ impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
>
where
    TSlotClock: SlotClock + 'static,
    TStoreMigrator: store::Migrate<DiskStore<TEthSpec>, TEthSpec> + 'static,
    TStoreMigrator: Migrate<DiskStore<TEthSpec>, TEthSpec> + 'static,
    TEth1Backend: Eth1ChainBackend<TEthSpec, DiskStore<TEthSpec>> + 'static,
    TEthSpec: EthSpec + 'static,
    TEventHandler: EventHandler<TEthSpec> + 'static,
@ -540,7 +538,7 @@ impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
>
where
    TSlotClock: SlotClock + 'static,
    TStoreMigrator: store::Migrate<SimpleDiskStore<TEthSpec>, TEthSpec> + 'static,
    TStoreMigrator: Migrate<SimpleDiskStore<TEthSpec>, TEthSpec> + 'static,
    TEth1Backend: Eth1ChainBackend<TEthSpec, SimpleDiskStore<TEthSpec>> + 'static,
    TEthSpec: EthSpec + 'static,
    TEventHandler: EventHandler<TEthSpec> + 'static,
@ -600,10 +598,15 @@ where
    TEventHandler: EventHandler<TEthSpec> + 'static,
{
    pub fn background_migrator(mut self) -> Result<Self, String> {
        let context = self
            .runtime_context
            .as_ref()
            .ok_or_else(|| "disk_store requires a log".to_string())?
            .service_context("freezer_db".into());
        let store = self.store.clone().ok_or_else(|| {
            "background_migrator requires the store to be initialized".to_string()
        })?;
        self.store_migrator = Some(BackgroundMigrator::new(store));
        self.store_migrator = Some(BackgroundMigrator::new(store, context.log.clone()));
        Ok(self)
    }
}
@ -621,7 +624,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEthSpec, TEventHandler>
>
where
    TStore: Store<TEthSpec> + 'static,
    TStoreMigrator: store::Migrate<TStore, TEthSpec>,
    TStoreMigrator: Migrate<TStore, TEthSpec>,
    TSlotClock: SlotClock + 'static,
    TEthSpec: EthSpec + 'static,
    TEventHandler: EventHandler<TEthSpec> + 'static,
@ -727,7 +730,7 @@ impl<TStore, TStoreMigrator, TEth1Backend, TEthSpec, TEventHandler>
>
where
    TStore: Store<TEthSpec> + 'static,
    TStoreMigrator: store::Migrate<TStore, TEthSpec>,
    TStoreMigrator: Migrate<TStore, TEthSpec>,
    TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
    TEthSpec: EthSpec + 'static,
    TEventHandler: EventHandler<TEthSpec> + 'static,
@ -383,7 +383,8 @@ impl Eth1GenesisService {
                .map_err(|e| format!("Error whilst processing deposit: {:?}", e))
        })?;

        process_activations(&mut local_state, spec);
        process_activations(&mut local_state, spec)
            .map_err(|e| format!("Error whilst processing activations: {:?}", e))?;
        let is_valid = is_valid_genesis_state(&local_state, spec);

        trace!(
@ -10,6 +10,7 @@ pub use client::{Client, ClientBuilder, ClientConfig, ClientGenesis};
pub use config::{get_data_dir, get_eth2_testnet_config, get_testnet_dir};
pub use eth2_config::Eth2Config;

use beacon_chain::migrate::{BackgroundMigrator, DiskStore};
use beacon_chain::{
    builder::Witness, eth1_chain::CachingEth1Backend, events::WebSocketSender,
    slot_clock::SystemTimeSlotClock,
@ -20,7 +21,6 @@ use environment::RuntimeContext;
use futures::{Future, IntoFuture};
use slog::{info, warn};
use std::ops::{Deref, DerefMut};
use store::{migrate::BackgroundMigrator, DiskStore};
use types::EthSpec;

/// A type-alias to tighten the definition of a production-intended `Client`.
@ -204,7 +204,7 @@ impl<E: EthSpec> Store<E> for HotColdDB<E> {
    }

    /// Advance the split point of the store, moving new finalized states to the freezer.
    fn freeze_to_state(
    fn process_finalization(
        store: Arc<Self>,
        frozen_head_root: Hash256,
        frozen_head: &BeaconState<E>,
@ -1,4 +1,4 @@
use crate::Store;
use crate::{Error, Store};
use std::borrow::Cow;
use std::marker::PhantomData;
use std::sync::Arc;
@ -43,12 +43,95 @@ impl<'a, U: Store<E>, E: EthSpec> AncestorIter<U, E, StateRootsIterator<'a, E, U
}

pub struct StateRootsIterator<'a, T: EthSpec, U> {
    inner: RootsIterator<'a, T, U>,
}

impl<'a, T: EthSpec, U> Clone for StateRootsIterator<'a, T, U> {
    fn clone(&self) -> Self {
        Self {
            inner: self.inner.clone(),
        }
    }
}

impl<'a, T: EthSpec, U: Store<T>> StateRootsIterator<'a, T, U> {
    pub fn new(store: Arc<U>, beacon_state: &'a BeaconState<T>) -> Self {
        Self {
            inner: RootsIterator::new(store, beacon_state),
        }
    }

    pub fn owned(store: Arc<U>, beacon_state: BeaconState<T>) -> Self {
        Self {
            inner: RootsIterator::owned(store, beacon_state),
        }
    }
}

impl<'a, T: EthSpec, U: Store<T>> Iterator for StateRootsIterator<'a, T, U> {
    type Item = (Hash256, Slot);

    fn next(&mut self) -> Option<Self::Item> {
        self.inner
            .next()
            .map(|(_, state_root, slot)| (state_root, slot))
    }
}

/// Iterates backwards through block roots. If any specified slot is unable to be retrieved, the
/// iterator returns `None` indefinitely.
///
/// Uses the `block_roots` field of `BeaconState` as the source of block roots and will
/// perform a lookup on the `Store` for a prior `BeaconState` if `block_roots` has been
/// exhausted.
///
/// Returns `None` for roots prior to genesis or when there is an error reading from `Store`.
pub struct BlockRootsIterator<'a, T: EthSpec, U> {
    inner: RootsIterator<'a, T, U>,
}

impl<'a, T: EthSpec, U> Clone for BlockRootsIterator<'a, T, U> {
    fn clone(&self) -> Self {
        Self {
            inner: self.inner.clone(),
        }
    }
}

impl<'a, T: EthSpec, U: Store<T>> BlockRootsIterator<'a, T, U> {
    /// Create a new iterator over all block roots in the given `beacon_state` and prior states.
    pub fn new(store: Arc<U>, beacon_state: &'a BeaconState<T>) -> Self {
        Self {
            inner: RootsIterator::new(store, beacon_state),
        }
    }

    /// Create a new iterator over all block roots in the given `beacon_state` and prior states.
    pub fn owned(store: Arc<U>, beacon_state: BeaconState<T>) -> Self {
        Self {
            inner: RootsIterator::owned(store, beacon_state),
        }
    }
}

impl<'a, T: EthSpec, U: Store<T>> Iterator for BlockRootsIterator<'a, T, U> {
    type Item = (Hash256, Slot);

    fn next(&mut self) -> Option<Self::Item> {
        self.inner
            .next()
            .map(|(block_root, _, slot)| (block_root, slot))
    }
}
|
||||
|
||||
/// Iterator over state and block roots that backtracks using the vectors from a `BeaconState`.
|
||||
pub struct RootsIterator<'a, T: EthSpec, U> {
|
||||
store: Arc<U>,
|
||||
beacon_state: Cow<'a, BeaconState<T>>,
|
||||
slot: Slot,
|
||||
}
|
||||
|
||||
impl<'a, T: EthSpec, U> Clone for StateRootsIterator<'a, T, U> {
|
||||
impl<'a, T: EthSpec, U> Clone for RootsIterator<'a, T, U> {
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
store: self.store.clone(),
|
||||
@ -58,7 +141,7 @@ impl<'a, T: EthSpec, U> Clone for StateRootsIterator<'a, T, U> {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: EthSpec, U: Store<T>> StateRootsIterator<'a, T, U> {
|
||||
impl<'a, T: EthSpec, U: Store<T>> RootsIterator<'a, T, U> {
|
||||
pub fn new(store: Arc<U>, beacon_state: &'a BeaconState<T>) -> Self {
|
||||
Self {
|
||||
store,
|
||||
@ -74,10 +157,21 @@ impl<'a, T: EthSpec, U: Store<T>> StateRootsIterator<'a, T, U> {
|
||||
beacon_state: Cow::Owned(beacon_state),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_block(store: Arc<U>, block_hash: Hash256) -> Result<Self, Error> {
|
||||
let block = store
|
||||
.get_block(&block_hash)?
|
||||
.ok_or_else(|| BeaconStateError::MissingBeaconBlock(block_hash.into()))?;
|
||||
let state = store
|
||||
.get_state(&block.state_root(), Some(block.slot()))?
|
||||
.ok_or_else(|| BeaconStateError::MissingBeaconState(block.state_root().into()))?;
|
||||
Ok(Self::owned(store, state))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: EthSpec, U: Store<T>> Iterator for StateRootsIterator<'a, T, U> {
|
||||
type Item = (Hash256, Slot);
|
||||
impl<'a, T: EthSpec, U: Store<T>> Iterator for RootsIterator<'a, T, U> {
|
||||
/// (block_root, state_root, slot)
|
||||
type Item = (Hash256, Hash256, Slot);
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
if self.slot == 0 || self.slot > self.beacon_state.slot {
|
||||
@ -86,18 +180,22 @@ impl<'a, T: EthSpec, U: Store<T>> Iterator for StateRootsIterator<'a, T, U> {
|
||||
|
||||
self.slot -= 1;
|
||||
|
||||
match self.beacon_state.get_state_root(self.slot) {
|
||||
Ok(root) => Some((*root, self.slot)),
|
||||
Err(BeaconStateError::SlotOutOfBounds) => {
|
||||
match (
|
||||
self.beacon_state.get_block_root(self.slot),
|
||||
self.beacon_state.get_state_root(self.slot),
|
||||
) {
|
||||
(Ok(block_root), Ok(state_root)) => Some((*block_root, *state_root, self.slot)),
|
||||
(Err(BeaconStateError::SlotOutOfBounds), Err(BeaconStateError::SlotOutOfBounds)) => {
|
||||
// Read a `BeaconState` from the store that has access to prior historical roots.
|
||||
let beacon_state =
|
||||
next_historical_root_backtrack_state(&*self.store, &self.beacon_state)?;
|
||||
|
||||
self.beacon_state = Cow::Owned(beacon_state);
|
||||
|
||||
let root = self.beacon_state.get_state_root(self.slot).ok()?;
|
||||
let block_root = *self.beacon_state.get_block_root(self.slot).ok()?;
|
||||
let state_root = *self.beacon_state.get_state_root(self.slot).ok()?;
|
||||
|
||||
Some((*root, self.slot))
|
||||
Some((block_root, state_root, self.slot))
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
@ -165,79 +263,7 @@ impl<'a, T: EthSpec, U: Store<T>> Iterator for BlockIterator<'a, T, U> {
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
let (root, _slot) = self.roots.next()?;
|
||||
self.roots.store.get_block(&root).ok()?
|
||||
}
|
||||
}
|
||||
|
||||
/// Iterates backwards through block roots. If any specified slot is unable to be retrieved, the
|
||||
/// iterator returns `None` indefinitely.
|
||||
///
|
||||
/// Uses the `block_roots` field of `BeaconState` to as the source of block roots and will
|
||||
/// perform a lookup on the `Store` for a prior `BeaconState` if `block_roots` has been
|
||||
/// exhausted.
|
||||
///
|
||||
/// Returns `None` for roots prior to genesis or when there is an error reading from `Store`.
|
||||
pub struct BlockRootsIterator<'a, T: EthSpec, U> {
|
||||
store: Arc<U>,
|
||||
beacon_state: Cow<'a, BeaconState<T>>,
|
||||
slot: Slot,
|
||||
}
|
||||
|
||||
impl<'a, T: EthSpec, U> Clone for BlockRootsIterator<'a, T, U> {
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
store: self.store.clone(),
|
||||
beacon_state: self.beacon_state.clone(),
|
||||
slot: self.slot,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: EthSpec, U: Store<T>> BlockRootsIterator<'a, T, U> {
|
||||
/// Create a new iterator over all block roots in the given `beacon_state` and prior states.
|
||||
pub fn new(store: Arc<U>, beacon_state: &'a BeaconState<T>) -> Self {
|
||||
Self {
|
||||
store,
|
||||
slot: beacon_state.slot,
|
||||
beacon_state: Cow::Borrowed(beacon_state),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new iterator over all block roots in the given `beacon_state` and prior states.
|
||||
pub fn owned(store: Arc<U>, beacon_state: BeaconState<T>) -> Self {
|
||||
Self {
|
||||
store,
|
||||
slot: beacon_state.slot,
|
||||
beacon_state: Cow::Owned(beacon_state),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: EthSpec, U: Store<T>> Iterator for BlockRootsIterator<'a, T, U> {
|
||||
type Item = (Hash256, Slot);
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
if self.slot == 0 || self.slot > self.beacon_state.slot {
|
||||
return None;
|
||||
}
|
||||
|
||||
self.slot -= 1;
|
||||
|
||||
match self.beacon_state.get_block_root(self.slot) {
|
||||
Ok(root) => Some((*root, self.slot)),
|
||||
Err(BeaconStateError::SlotOutOfBounds) => {
|
||||
// Read a `BeaconState` from the store that has access to prior historical roots.
|
||||
let beacon_state =
|
||||
next_historical_root_backtrack_state(&*self.store, &self.beacon_state)?;
|
||||
|
||||
self.beacon_state = Cow::Owned(beacon_state);
|
||||
|
||||
let root = self.beacon_state.get_block_root(self.slot).ok()?;
|
||||
|
||||
Some((*root, self.slot))
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
self.roots.inner.store.get_block(&root).ok()?
|
||||
}
|
||||
}
|
||||
|
||||
|
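Editor's note: the hunks above replace two near-duplicate iterators with thin wrappers over a shared `RootsIterator` that yields `(block_root, state_root, slot)` tuples; each public iterator is just a projection of that tuple. Below is a minimal sketch of the same wrapper pattern (not Lighthouse code; the types are plain-integer stand-ins for `Hash256` and `Slot`):

```rust
// One inner iterator walks the combined (block_root, state_root, slot) tuples;
// a public iterator projects out just the fields it needs.
struct Roots {
    items: Vec<(u64, u64, u64)>, // (block_root, state_root, slot) stand-ins
}

impl Iterator for Roots {
    type Item = (u64, u64, u64);
    fn next(&mut self) -> Option<Self::Item> {
        self.items.pop()
    }
}

struct BlockRoots(Roots);

impl Iterator for BlockRoots {
    type Item = (u64, u64); // (block_root, slot)
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next().map(|(block, _, slot)| (block, slot))
    }
}

fn main() {
    let inner = Roots { items: vec![(10, 20, 1), (11, 21, 2)] };
    for (block_root, slot) in BlockRoots(inner) {
        println!("block {} at slot {}", block_root, slot);
    }
}
```

The payoff is the one in the diff: the backtracking logic (reading an older `BeaconState` when the in-state root vectors are exhausted) lives in exactly one place.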
@ -15,7 +15,7 @@ pub mod chunked_vector;
pub mod config;
mod errors;
mod forwards_iter;
mod hot_cold_store;
pub mod hot_cold_store;
mod impls;
mod leveldb_store;
mod memory_store;
@ -24,7 +24,6 @@ mod partial_beacon_state;
mod state_batch;

pub mod iter;
pub mod migrate;

use std::sync::Arc;

@ -32,7 +31,6 @@ pub use self::config::StoreConfig;
pub use self::hot_cold_store::{HotColdDB as DiskStore, HotStateSummary};
pub use self::leveldb_store::LevelDB as SimpleDiskStore;
pub use self::memory_store::MemoryStore;
pub use self::migrate::Migrate;
pub use self::partial_beacon_state::PartialBeaconState;
pub use errors::Error;
pub use impls::beacon_state::StorageContainer as BeaconStateStorageContainer;
@ -132,7 +130,7 @@ pub trait Store<E: EthSpec>: Sync + Send + Sized + 'static {
}

/// (Optionally) Move all data before the frozen slot to the freezer database.
fn freeze_to_state(
fn process_finalization(
_store: Arc<Self>,
_frozen_head_root: Hash256,
_frozen_head: &BeaconState<E>,

@ -1,153 +0,0 @@
use crate::{
hot_cold_store::HotColdDBError, DiskStore, Error, MemoryStore, SimpleDiskStore, Store,
};
use parking_lot::Mutex;
use slog::{debug, warn};
use std::mem;
use std::sync::mpsc;
use std::sync::Arc;
use std::thread;
use types::{BeaconState, EthSpec, Hash256, Slot};

/// Trait for migration processes that update the database upon finalization.
pub trait Migrate<S, E: EthSpec>: Send + Sync + 'static {
fn new(db: Arc<S>) -> Self;

fn freeze_to_state(
&self,
_state_root: Hash256,
_state: BeaconState<E>,
_max_finality_distance: u64,
) {
}
}

/// Migrator that does nothing, for stores that don't need migration.
pub struct NullMigrator;

impl<E: EthSpec> Migrate<SimpleDiskStore<E>, E> for NullMigrator {
fn new(_: Arc<SimpleDiskStore<E>>) -> Self {
NullMigrator
}
}

impl<E: EthSpec> Migrate<MemoryStore<E>, E> for NullMigrator {
fn new(_: Arc<MemoryStore<E>>) -> Self {
NullMigrator
}
}

/// Migrator that immediately calls the store's migration function, blocking the current execution.
///
/// Mostly useful for tests.
pub struct BlockingMigrator<S>(Arc<S>);

impl<E: EthSpec, S: Store<E>> Migrate<S, E> for BlockingMigrator<S> {
fn new(db: Arc<S>) -> Self {
BlockingMigrator(db)
}

fn freeze_to_state(
&self,
state_root: Hash256,
state: BeaconState<E>,
_max_finality_distance: u64,
) {
if let Err(e) = S::freeze_to_state(self.0.clone(), state_root, &state) {
// This migrator is only used for testing, so we just log to stderr without a logger.
eprintln!("Migration error: {:?}", e);
}
}
}

type MpscSender<E> = mpsc::Sender<(Hash256, BeaconState<E>)>;

/// Migrator that runs a background thread to migrate state from the hot to the cold database.
pub struct BackgroundMigrator<E: EthSpec> {
db: Arc<DiskStore<E>>,
tx_thread: Mutex<(MpscSender<E>, thread::JoinHandle<()>)>,
}

impl<E: EthSpec> Migrate<DiskStore<E>, E> for BackgroundMigrator<E> {
fn new(db: Arc<DiskStore<E>>) -> Self {
let tx_thread = Mutex::new(Self::spawn_thread(db.clone()));
Self { db, tx_thread }
}

/// Perform the freezing operation on the database,
fn freeze_to_state(
&self,
finalized_state_root: Hash256,
finalized_state: BeaconState<E>,
max_finality_distance: u64,
) {
if !self.needs_migration(finalized_state.slot, max_finality_distance) {
return;
}

let (ref mut tx, ref mut thread) = *self.tx_thread.lock();

if let Err(tx_err) = tx.send((finalized_state_root, finalized_state)) {
let (new_tx, new_thread) = Self::spawn_thread(self.db.clone());

drop(mem::replace(tx, new_tx));
let old_thread = mem::replace(thread, new_thread);

// Join the old thread, which will probably have panicked, or may have
// halted normally just now as a result of us dropping the old `mpsc::Sender`.
if let Err(thread_err) = old_thread.join() {
warn!(
self.db.log,
"Migration thread died, so it was restarted";
"reason" => format!("{:?}", thread_err)
);
}

// Retry at most once, we could recurse but that would risk overflowing the stack.
let _ = tx.send(tx_err.0);
}
}
}

impl<E: EthSpec> BackgroundMigrator<E> {
/// Return true if a migration needs to be performed, given a new `finalized_slot`.
fn needs_migration(&self, finalized_slot: Slot, max_finality_distance: u64) -> bool {
let finality_distance = finalized_slot - self.db.get_split_slot();
finality_distance > max_finality_distance
}

/// Spawn a new child thread to run the migration process.
///
/// Return a channel handle for sending new finalized states to the thread.
fn spawn_thread(
db: Arc<DiskStore<E>>,
) -> (
mpsc::Sender<(Hash256, BeaconState<E>)>,
thread::JoinHandle<()>,
) {
let (tx, rx) = mpsc::channel();
let thread = thread::spawn(move || {
while let Ok((state_root, state)) = rx.recv() {
match DiskStore::freeze_to_state(db.clone(), state_root, &state) {
Ok(()) => {}
Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => {
debug!(
db.log,
"Database migration postponed, unaligned finalized block";
"slot" => slot.as_u64()
);
}
Err(e) => {
warn!(
db.log,
"Database migration failed";
"error" => format!("{:?}", e)
);
}
}
}
});

(tx, thread)
}
}

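Editor's note: the deleted `BackgroundMigrator` above (it moves into `beacon_chain::migrate` and, per the builder hunk earlier, gains a logger argument) uses a noteworthy idiom: when a send into the worker channel fails, the dead thread is replaced and the message is retried exactly once. A minimal sketch of that send-or-respawn idiom using only the standard library (not Lighthouse code):

```rust
use std::sync::mpsc;
use std::thread;

fn spawn_worker() -> (mpsc::Sender<u64>, thread::JoinHandle<()>) {
    let (tx, rx) = mpsc::channel();
    let handle = thread::spawn(move || {
        while let Ok(job) = rx.recv() {
            println!("processing job {}", job);
        }
        // The thread exits once every sender has been dropped.
    });
    (tx, handle)
}

fn main() {
    let (mut tx, mut handle) = spawn_worker();
    for job in 0..3u64 {
        if let Err(failed) = tx.send(job) {
            // Replace the dead worker, join the old one, retry once.
            // Retrying once (instead of recursing) bounds stack usage, as the
            // original comment notes.
            let (new_tx, new_handle) = spawn_worker();
            tx = new_tx;
            let old = std::mem::replace(&mut handle, new_handle);
            let _ = old.join();
            let _ = tx.send(failed.0); // SendError returns the unsent payload
        }
    }
    drop(tx); // Close the channel so the worker can finish.
    let _ = handle.join();
}
```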
@ -98,10 +98,14 @@ impl<T: EthSpec> OperationPool<T> {

/// Get a list of attestations for inclusion in a block.
///
/// NOTE: Assumes that all attestations in the operation_pool are valid.
/// The `validity_filter` is a closure that provides extra filtering of the attestations
/// before an approximately optimal bundle is constructed. We use it to provide access
/// to the fork choice data from the `BeaconChain` struct that doesn't logically belong
/// in the operation pool.
pub fn get_attestations(
&self,
state: &BeaconState<T>,
validity_filter: impl FnMut(&&Attestation<T>) -> bool,
spec: &ChainSpec,
) -> Result<Vec<Attestation<T>>, OpPoolError> {
// Attestations for the current fork, which may be from the current or previous epoch.
@ -143,6 +147,7 @@ impl<T: EthSpec> OperationPool<T> {
)
.is_ok()
})
.filter(validity_filter)
.flat_map(|att| AttMaxCover::new(att, state, total_active_balance, spec));

Ok(maximum_cover(
@ -584,7 +589,7 @@ mod release_tests {
state.slot -= 1;
assert_eq!(
op_pool
.get_attestations(state, spec)
.get_attestations(state, |_| true, spec)
.expect("should have attestations")
.len(),
0
@ -594,7 +599,7 @@ mod release_tests {
state.slot += spec.min_attestation_inclusion_delay;

let block_attestations = op_pool
.get_attestations(state, spec)
.get_attestations(state, |_| true, spec)
.expect("Should have block attestations");
assert_eq!(block_attestations.len(), committees.len());

@ -764,7 +769,7 @@ mod release_tests {

state.slot += spec.min_attestation_inclusion_delay;
let best_attestations = op_pool
.get_attestations(state, spec)
.get_attestations(state, |_| true, spec)
.expect("should have best attestations");
assert_eq!(best_attestations.len(), max_attestations);

@ -839,7 +844,7 @@ mod release_tests {

state.slot += spec.min_attestation_inclusion_delay;
let best_attestations = op_pool
.get_attestations(state, spec)
.get_attestations(state, |_| true, spec)
.expect("should have valid best attestations");
assert_eq!(best_attestations.len(), max_attestations);

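Editor's note: the new `validity_filter` parameter lets the caller (the `BeaconChain`, which knows about fork choice) inject an extra predicate without coupling the pool to it; the tests simply pass `|_| true`. A minimal sketch of how a caller-supplied filter composes with selection (not Lighthouse code; `take(max)` stands in for the real greedy `maximum_cover` packing):

```rust
// The pool stays ignorant of fork choice; the caller injects that knowledge
// as a closure. Note the `&&T` shape: `iter()` yields `&T` and `filter`
// hands the closure a reference to that, matching the diff's signature.
fn select_valid<T: Copy>(
    candidates: &[T],
    mut validity_filter: impl FnMut(&&T) -> bool,
    max: usize,
) -> Vec<T> {
    candidates
        .iter()
        .filter(|item| validity_filter(item))
        .take(max) // stand-in for approximately-optimal bundle construction
        .copied()
        .collect()
}

fn main() {
    let scores = [3_u64, 7, 2, 9];
    // A hypothetical "known to fork choice" check stands in for the real one.
    let picked = select_valid(&scores, |s| **s > 2, 2);
    assert_eq!(picked, vec![3, 7]);
}
```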
@ -407,4 +407,41 @@ impl ProtoArray {
&& (node.finalized_epoch == self.finalized_epoch
|| self.finalized_epoch == Epoch::new(0))
}

/// Return a reverse iterator over the nodes which comprise the chain ending at `block_root`.
pub fn iter_nodes<'a>(&'a self, block_root: &Hash256) -> Iter<'a> {
let next_node_index = self.indices.get(block_root).copied();
Iter {
next_node_index,
proto_array: self,
}
}

/// Return a reverse iterator over the block roots of the chain ending at `block_root`.
///
/// Note that unlike many other iterators, this one WILL NOT yield anything at skipped slots.
pub fn iter_block_roots<'a>(
&'a self,
block_root: &Hash256,
) -> impl Iterator<Item = (Hash256, Slot)> + 'a {
self.iter_nodes(block_root)
.map(|node| (node.root, node.slot))
}
}

/// Reverse iterator over one path through a `ProtoArray`.
pub struct Iter<'a> {
next_node_index: Option<usize>,
proto_array: &'a ProtoArray,
}

impl<'a> Iterator for Iter<'a> {
type Item = &'a ProtoNode;

fn next(&mut self) -> Option<Self::Item> {
let next_node_index = self.next_node_index?;
let node = self.proto_array.nodes.get(next_node_index)?;
self.next_node_index = node.parent;
Some(node)
}
}

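Editor's note: the new `Iter` walks parent indices through the flat node table until it reaches a node without a parent, so iteration runs from the given block back toward genesis. A self-contained sketch of that parent-pointer walk (not Lighthouse code; plain integers stand in for `Hash256`):

```rust
struct Node {
    root: u64,             // stand-in for Hash256
    parent: Option<usize>, // index of the parent node, if any
}

struct Chain<'a> {
    nodes: &'a [Node],
    next: Option<usize>,
}

impl<'a> Iterator for Chain<'a> {
    type Item = &'a Node;
    fn next(&mut self) -> Option<Self::Item> {
        let node = self.nodes.get(self.next?)?;
        self.next = node.parent;
        Some(node)
    }
}

fn main() {
    // genesis <- a <- b: iterating from `b` yields b, a, genesis (reverse order).
    let nodes = [
        Node { root: 0, parent: None },
        Node { root: 1, parent: Some(0) },
        Node { root: 2, parent: Some(1) },
    ];
    let roots: Vec<u64> = Chain { nodes: &nodes, next: Some(2) }
        .map(|n| n.root)
        .collect();
    assert_eq!(roots, vec![2, 1, 0]);
}
```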
@ -27,6 +27,7 @@ eth2_ssz = "0.1.2"
eth2_ssz_types = { path = "../utils/ssz_types" }
merkle_proof = { path = "../utils/merkle_proof" }
log = "0.4.8"
safe_arith = { path = "../utils/safe_arith" }
tree_hash = "0.1.0"
tree_hash_derive = "0.2"
types = { path = "../types" }

@ -1,6 +1,7 @@
use eth2_hashing::hash;
use int_to_bytes::int_to_bytes32;
use merkle_proof::{MerkleTree, MerkleTreeError};
use safe_arith::SafeArith;
use types::Hash256;

/// Emulates the eth1 deposit contract merkle tree.
@ -46,7 +47,7 @@ impl DepositDataTree {
/// Add a deposit to the merkle tree.
pub fn push_leaf(&mut self, leaf: Hash256) -> Result<(), MerkleTreeError> {
self.tree.push_leaf(leaf, self.depth)?;
self.mix_in_length += 1;
self.mix_in_length.increment()?;
Ok(())
}
}

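Editor's note: this is the first appearance of the workspace `safe_arith` crate that the rest of the diff leans on. Its method set (`safe_add`, `safe_sub`, `safe_mul`, `safe_div`, `safe_rem`, `safe_add_assign`, `increment`) is inferred here from the call sites in this commit; the real crate's API may differ in detail (e.g. a macro-generated impl over all integer types). A minimal stand-in for `u64` only, to show the shape:

```rust
#[derive(Debug, PartialEq)]
pub struct ArithError;

pub trait SafeArith: Sized {
    fn safe_add(self, other: Self) -> Result<Self, ArithError>;
    fn safe_mul(self, other: Self) -> Result<Self, ArithError>;
    fn safe_div(self, other: Self) -> Result<Self, ArithError>;
    fn increment(&mut self) -> Result<(), ArithError>;
}

impl SafeArith for u64 {
    fn safe_add(self, other: Self) -> Result<Self, ArithError> {
        self.checked_add(other).ok_or(ArithError)
    }
    fn safe_mul(self, other: Self) -> Result<Self, ArithError> {
        self.checked_mul(other).ok_or(ArithError)
    }
    fn safe_div(self, other: Self) -> Result<Self, ArithError> {
        self.checked_div(other).ok_or(ArithError) // division by zero -> error
    }
    fn increment(&mut self) -> Result<(), ArithError> {
        *self = self.safe_add(1)?;
        Ok(())
    }
}

fn main() {
    let mut len = u64::max_value();
    // `len += 1` would panic in debug builds and wrap in release builds;
    // `increment` turns both outcomes into an explicit error.
    assert_eq!(len.increment(), Err(ArithError));
}
```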
@ -1,4 +1,5 @@
use integer_sqrt::IntegerSquareRoot;
use safe_arith::SafeArith;
use types::*;

/// Returns the base reward for some validator.
@ -14,10 +15,10 @@ pub fn get_base_reward<T: EthSpec>(
if total_active_balance == 0 {
Ok(0)
} else {
Ok(
state.get_effective_balance(index, spec)? * spec.base_reward_factor
/ total_active_balance.integer_sqrt()
/ spec.base_rewards_per_epoch,
)
Ok(state
.get_effective_balance(index, spec)?
.safe_mul(spec.base_reward_factor)?
.safe_div(total_active_balance.integer_sqrt())?
.safe_div(spec.base_rewards_per_epoch)?)
}
}

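Editor's note: the rewrite keeps the spec's order of operations (multiply first, divide last, which preserves precision in integer arithmetic) while making each step overflow-checked. A sketch with checked std ops and assumed mainnet-style constants (the constant values are not taken from this diff):

```rust
fn integer_sqrt(n: u64) -> u64 {
    // Simple Newton iteration; the real code uses the `integer_sqrt` crate.
    let mut x = n;
    let mut y = (x + 1) / 2;
    while y < x {
        x = y;
        y = (x + n / x) / 2;
    }
    x
}

fn base_reward(effective_balance: u64, total_active_balance: u64) -> Option<u64> {
    const BASE_REWARD_FACTOR: u64 = 64; // assumed value
    const BASE_REWARDS_PER_EPOCH: u64 = 4; // assumed value
    effective_balance
        .checked_mul(BASE_REWARD_FACTOR)?
        .checked_div(integer_sqrt(total_active_balance))? // zero balance -> None
        .checked_div(BASE_REWARDS_PER_EPOCH)
}

fn main() {
    let effective_balance = 32_000_000_000; // 32 ETH in Gwei
    let total_active_balance = 1_000_000_000_000_000; // ~1M ETH staked (illustrative)
    println!(
        "base reward: {:?} Gwei",
        base_reward(effective_balance, total_active_balance)
    );
}
```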
@ -1,4 +1,5 @@
use crate::common::initiate_validator_exit;
use safe_arith::SafeArith;
use std::cmp;
use types::{BeaconStateError as Error, *};

@ -27,18 +28,21 @@ pub fn slash_validator<T: EthSpec>(
let validator_effective_balance = state.get_effective_balance(slashed_index, spec)?;
state.set_slashings(
epoch,
state.get_slashings(epoch)? + validator_effective_balance,
state
.get_slashings(epoch)?
.safe_add(validator_effective_balance)?,
)?;
safe_sub_assign!(
state.balances[slashed_index],
validator_effective_balance / spec.min_slashing_penalty_quotient
validator_effective_balance.safe_div(spec.min_slashing_penalty_quotient)?
);

// Apply proposer and whistleblower rewards
let proposer_index = state.get_beacon_proposer_index(state.slot, spec)?;
let whistleblower_index = opt_whistleblower_index.unwrap_or(proposer_index);
let whistleblower_reward = validator_effective_balance / spec.whistleblower_reward_quotient;
let proposer_reward = whistleblower_reward / spec.proposer_reward_quotient;
let whistleblower_reward =
validator_effective_balance.safe_div(spec.whistleblower_reward_quotient)?;
let proposer_reward = whistleblower_reward.safe_div(spec.proposer_reward_quotient)?;

safe_add_assign!(state.balances[proposer_index], proposer_reward);
safe_add_assign!(

@ -1,5 +1,6 @@
use super::per_block_processing::{errors::BlockProcessingError, process_deposit};
use crate::common::DepositDataTree;
use safe_arith::SafeArith;
use tree_hash::TreeHash;
use types::DEPOSIT_TREE_DEPTH;
use types::*;
@ -14,8 +15,9 @@ pub fn initialize_beacon_state_from_eth1<T: EthSpec>(
deposits: Vec<Deposit>,
spec: &ChainSpec,
) -> Result<BeaconState<T>, BlockProcessingError> {
let genesis_time =
eth1_timestamp - eth1_timestamp % spec.min_genesis_delay + 2 * spec.min_genesis_delay;
let genesis_time = eth1_timestamp
.safe_sub(eth1_timestamp.safe_rem(spec.min_genesis_delay)?)?
.safe_add(2.safe_mul(spec.min_genesis_delay)?)?;
let eth1_data = Eth1Data {
// Temporary deposit root
deposit_root: Hash256::zero(),
@ -37,7 +39,7 @@ pub fn initialize_beacon_state_from_eth1<T: EthSpec>(
process_deposit(&mut state, &deposit, spec, true)?;
}

process_activations(&mut state, spec);
process_activations(&mut state, spec)?;

// Now that we have our validators, initialize the caches (including the committees)
state.build_all_caches(spec)?;
@ -60,11 +62,14 @@ pub fn is_valid_genesis_state<T: EthSpec>(state: &BeaconState<T>, spec: &ChainSp
/// Activate genesis validators, if their balance is acceptable.
///
/// Spec v0.11.1
pub fn process_activations<T: EthSpec>(state: &mut BeaconState<T>, spec: &ChainSpec) {
pub fn process_activations<T: EthSpec>(
state: &mut BeaconState<T>,
spec: &ChainSpec,
) -> Result<(), Error> {
for (index, validator) in state.validators.iter_mut().enumerate() {
let balance = state.balances[index];
validator.effective_balance = std::cmp::min(
balance - balance % spec.effective_balance_increment,
balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?,
spec.max_effective_balance,
);
if validator.effective_balance == spec.max_effective_balance {
@ -72,4 +77,5 @@ pub fn process_activations<T: EthSpec>(state: &mut BeaconState<T>, spec: &ChainS
validator.activation_epoch = T::genesis_epoch();
}
}
Ok(())
}

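Editor's note: the genesis-time expression rounds the eth1 timestamp down to a multiple of `min_genesis_delay` and then pushes it two delay periods into the future, so genesis always lands on a delay boundary. A sketch with checked std ops (not Lighthouse code; the 86,400 s delay is a hypothetical figure):

```rust
fn genesis_time(eth1_timestamp: u64, min_genesis_delay: u64) -> Option<u64> {
    eth1_timestamp
        .checked_sub(eth1_timestamp.checked_rem(min_genesis_delay)?)? // round down
        .checked_add(2u64.checked_mul(min_genesis_delay)?) // push two periods ahead
}

fn main() {
    // A mid-day timestamp snaps to the start of the day, plus two days.
    assert_eq!(genesis_time(1_585_311_437, 86_400), Some(1_585_440_000));
}
```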
@ -1,3 +1,5 @@
#![deny(clippy::integer_arithmetic)]

#[macro_use]
mod macros;

@ -1,6 +1,7 @@
use crate::common::{initiate_validator_exit, slash_validator};
use errors::{BlockOperationError, BlockProcessingError, HeaderInvalid, IntoWithIndex};
use rayon::prelude::*;
use safe_arith::{ArithError, SafeArith};
use signature_sets::{block_proposal_signature_set, get_pubkey_from_state, randao_signature_set};
use std::convert::TryInto;
use tree_hash::TreeHash;
@ -239,7 +240,7 @@ pub fn process_eth1_data<T: EthSpec>(
state: &mut BeaconState<T>,
eth1_data: &Eth1Data,
) -> Result<(), Error> {
if let Some(new_eth1_data) = get_new_eth1_data(state, eth1_data) {
if let Some(new_eth1_data) = get_new_eth1_data(state, eth1_data)? {
state.eth1_data = new_eth1_data;
}

@ -248,14 +249,14 @@ pub fn process_eth1_data<T: EthSpec>(
Ok(())
}

/// Returns `Some(eth1_data)` if adding the given `eth1_data` to `state.eth1_data_votes` would
/// Returns `Ok(Some(eth1_data))` if adding the given `eth1_data` to `state.eth1_data_votes` would
/// result in a change to `state.eth1_data`.
///
/// Spec v0.11.1
pub fn get_new_eth1_data<T: EthSpec>(
state: &BeaconState<T>,
eth1_data: &Eth1Data,
) -> Option<Eth1Data> {
) -> Result<Option<Eth1Data>, ArithError> {
let num_votes = state
.eth1_data_votes
.iter()
@ -263,10 +264,10 @@ pub fn get_new_eth1_data<T: EthSpec>(
.count();

// The +1 is to account for the `eth1_data` supplied to the function.
if 2 * (num_votes + 1) > T::SlotsPerEth1VotingPeriod::to_usize() {
Some(eth1_data.clone())
if num_votes.safe_add(1)?.safe_mul(2)? > T::SlotsPerEth1VotingPeriod::to_usize() {
Ok(Some(eth1_data.clone()))
} else {
None
Ok(None)
}
}

@ -318,7 +319,8 @@ pub fn process_attester_slashings<T: EthSpec>(
) -> Result<(), BlockProcessingError> {
// Verify the `IndexedAttestation`s in parallel (these are the resource-consuming objects, not
// the `AttesterSlashing`s themselves).
let mut indexed_attestations: Vec<&_> = Vec::with_capacity(attester_slashings.len() * 2);
let mut indexed_attestations: Vec<&_> =
Vec::with_capacity(attester_slashings.len().safe_mul(2)?);
for attester_slashing in attester_slashings {
indexed_attestations.push(&attester_slashing.attestation_1);
indexed_attestations.push(&attester_slashing.attestation_2);
@ -432,7 +434,12 @@ pub fn process_deposits<T: EthSpec>(
.par_iter()
.enumerate()
.try_for_each(|(i, deposit)| {
verify_deposit_merkle_proof(state, deposit, state.eth1_deposit_index + i as u64, spec)
verify_deposit_merkle_proof(
state,
deposit,
state.eth1_deposit_index.safe_add(i as u64)?,
spec,
)
.map_err(|e| e.into_with_index(i))
})?;

@ -459,7 +466,7 @@ pub fn process_deposit<T: EthSpec>(
.map_err(|e| e.into_with_index(deposit_index))?;
}

state.eth1_deposit_index += 1;
state.eth1_deposit_index.increment()?;

// Ensure the state's pubkey cache is fully up-to-date, it will be used to check to see if the
// depositing validator already exists in the registry.
@ -495,7 +502,7 @@ pub fn process_deposit<T: EthSpec>(
exit_epoch: spec.far_future_epoch,
withdrawable_epoch: spec.far_future_epoch,
effective_balance: std::cmp::min(
amount - amount % spec.effective_balance_increment,
amount.safe_sub(amount.safe_rem(spec.effective_balance_increment)?)?,
spec.max_effective_balance,
),
slashed: false,

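Editor's note: the rewritten `get_new_eth1_data` keeps the spec's strict-majority test: counting the incoming vote (`+1`) and comparing `2 * votes > period` asks "more than half" without a lossy integer division. A sketch of that check (not Lighthouse code; the 1024-slot voting period is an assumed figure):

```rust
fn would_change_eth1_data(num_existing_votes: usize, voting_period: usize) -> Option<bool> {
    let votes = num_existing_votes.checked_add(1)?; // count the incoming vote
    Some(votes.checked_mul(2)? > voting_period)
}

fn main() {
    // 512 existing matching votes plus this one is 513: a strict majority of 1024.
    assert_eq!(would_change_eth1_data(512, 1024), Some(true));
    assert_eq!(would_change_eth1_data(511, 1024), Some(false));
}
```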
@ -1,3 +1,5 @@
#![allow(clippy::integer_arithmetic)]

use super::signature_sets::{Error as SignatureSetError, Result as SignatureSetResult, *};
use crate::common::get_indexed_attestation;
use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError};

@ -1,9 +1,10 @@
use super::signature_sets::Error as SignatureSetError;
use merkle_proof::MerkleTreeError;
use safe_arith::ArithError;
use types::*;

/// The error returned from the `per_block_processing` function. Indicates that a block is either
/// invalid, or we were unable to determine it's validity (we encountered an unexpected error).
/// invalid, or we were unable to determine its validity (we encountered an unexpected error).
///
/// Any of the `...Error` variants indicate that at some point during block (and block operation)
/// verification, there was an error. There is no indication as to _where_ that error happened
@ -48,6 +49,7 @@ pub enum BlockProcessingError {
SignatureSetError(SignatureSetError),
SszTypesError(ssz_types::Error),
MerkleTreeError(MerkleTreeError),
ArithError(ArithError),
}

impl From<BeaconStateError> for BlockProcessingError {
@ -68,6 +70,12 @@ impl From<ssz_types::Error> for BlockProcessingError {
}
}

impl From<ArithError> for BlockProcessingError {
fn from(e: ArithError) -> Self {
BlockProcessingError::ArithError(e)
}
}

impl From<BlockOperationError<HeaderInvalid>> for BlockProcessingError {
fn from(e: BlockOperationError<HeaderInvalid>) -> BlockProcessingError {
match e {
@ -75,6 +83,7 @@ impl From<BlockOperationError<HeaderInvalid>> for BlockProcessingError {
BlockOperationError::BeaconStateError(e) => BlockProcessingError::BeaconStateError(e),
BlockOperationError::SignatureSetError(e) => BlockProcessingError::SignatureSetError(e),
BlockOperationError::SszTypesError(e) => BlockProcessingError::SszTypesError(e),
BlockOperationError::ArithError(e) => BlockProcessingError::ArithError(e),
}
}
}
@ -101,6 +110,7 @@ macro_rules! impl_into_block_processing_error_with_index {
BlockOperationError::BeaconStateError(e) => BlockProcessingError::BeaconStateError(e),
BlockOperationError::SignatureSetError(e) => BlockProcessingError::SignatureSetError(e),
BlockOperationError::SszTypesError(e) => BlockProcessingError::SszTypesError(e),
BlockOperationError::ArithError(e) => BlockProcessingError::ArithError(e),
}
}
}
@ -130,6 +140,7 @@ pub enum BlockOperationError<T> {
BeaconStateError(BeaconStateError),
SignatureSetError(SignatureSetError),
SszTypesError(ssz_types::Error),
ArithError(ArithError),
}

impl<T> BlockOperationError<T> {
@ -155,6 +166,12 @@ impl<T> From<ssz_types::Error> for BlockOperationError<T> {
}
}

impl<T> From<ArithError> for BlockOperationError<T> {
fn from(e: ArithError) -> Self {
BlockOperationError::ArithError(e)
}
}

#[derive(Debug, PartialEq, Clone)]
pub enum HeaderInvalid {
ProposalSignatureInvalid,
@ -267,6 +284,7 @@ impl From<BlockOperationError<IndexedAttestationInvalid>>
BlockOperationError::BeaconStateError(e) => BlockOperationError::BeaconStateError(e),
BlockOperationError::SignatureSetError(e) => BlockOperationError::SignatureSetError(e),
BlockOperationError::SszTypesError(e) => BlockOperationError::SszTypesError(e),
BlockOperationError::ArithError(e) => BlockOperationError::ArithError(e),
}
}
}

@ -3,6 +3,7 @@ use crate::per_block_processing::signature_sets::{
deposit_pubkey_signature_message, deposit_signature_set,
};
use merkle_proof::verify_merkle_proof;
use safe_arith::SafeArith;
use tree_hash::TreeHash;
use types::*;

@ -59,7 +60,7 @@ pub fn verify_deposit_merkle_proof<T: EthSpec>(
verify_merkle_proof(
leaf,
&deposit.proof[..],
spec.deposit_contract_tree_depth as usize + 1,
spec.deposit_contract_tree_depth.safe_add(1)? as usize,
deposit_index as usize,
state.eth1_data.deposit_root,
),

@ -1,4 +1,5 @@
use errors::EpochProcessingError as Error;
use safe_arith::SafeArith;
use tree_hash::TreeHash;
use types::*;

@ -90,7 +91,11 @@ pub fn process_justification_and_finalization<T: EthSpec>(
state.previous_justified_checkpoint = state.current_justified_checkpoint.clone();
state.justification_bits.shift_up(1)?;

if total_balances.previous_epoch_target_attesters() * 3 >= total_balances.current_epoch() * 2 {
if total_balances
.previous_epoch_target_attesters()
.safe_mul(3)?
>= total_balances.current_epoch().safe_mul(2)?
{
state.current_justified_checkpoint = Checkpoint {
epoch: previous_epoch,
root: *state.get_block_root_at_epoch(previous_epoch)?,
@ -98,7 +103,11 @@ pub fn process_justification_and_finalization<T: EthSpec>(
state.justification_bits.set(1, true)?;
}
// If the current epoch gets justified, fill the last bit.
if total_balances.current_epoch_target_attesters() * 3 >= total_balances.current_epoch() * 2 {
if total_balances
.current_epoch_target_attesters()
.safe_mul(3)?
>= total_balances.current_epoch().safe_mul(2)?
{
state.current_justified_checkpoint = Checkpoint {
epoch: current_epoch,
root: *state.get_block_root_at_epoch(current_epoch)?,
@ -152,17 +161,19 @@ pub fn process_final_updates<T: EthSpec>(
}

// Update effective balances with hysteresis (lag).
let hysteresis_increment = spec.effective_balance_increment / spec.hysteresis_quotient;
let downward_threshold = hysteresis_increment * spec.hysteresis_downward_multiplier;
let upward_threshold = hysteresis_increment * spec.hysteresis_upward_multiplier;
let hysteresis_increment = spec
.effective_balance_increment
.safe_div(spec.hysteresis_quotient)?;
let downward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_downward_multiplier)?;
let upward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_upward_multiplier)?;
for (index, validator) in state.validators.iter_mut().enumerate() {
let balance = state.balances[index];

if balance + downward_threshold < validator.effective_balance
|| validator.effective_balance + upward_threshold < balance
if balance.safe_add(downward_threshold)? < validator.effective_balance
|| validator.effective_balance.safe_add(upward_threshold)? < balance
{
validator.effective_balance = std::cmp::min(
balance - balance % spec.effective_balance_increment,
balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?,
spec.max_effective_balance,
);
}
@ -175,7 +186,11 @@ pub fn process_final_updates<T: EthSpec>(
state.set_randao_mix(next_epoch, *state.get_randao_mix(current_epoch)?)?;

// Set historical root accumulator
if next_epoch.as_u64() % (T::SlotsPerHistoricalRoot::to_u64() / T::slots_per_epoch()) == 0 {
if next_epoch
.as_u64()
.safe_rem(T::SlotsPerHistoricalRoot::to_u64().safe_div(T::slots_per_epoch())?)?
== 0
{
let historical_batch = state.historical_batch();
state
.historical_roots

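Editor's note: the justification hunks keep the spec's cross-multiplied two-thirds test. Comparing `3 * attesting >= 2 * total` checks `attesting / total >= 2/3` exactly, with no integer-division rounding; the `safe_mul` calls surface overflow instead of wrapping. A sketch (not Lighthouse code):

```rust
fn is_supermajority(attesting_balance: u64, total_balance: u64) -> Option<bool> {
    // Cross-multiply instead of dividing: exact, and overflow-checked.
    Some(attesting_balance.checked_mul(3)? >= total_balance.checked_mul(2)?)
}

fn main() {
    assert_eq!(is_supermajority(20, 30), Some(true)); // exactly 2/3 qualifies
    assert_eq!(is_supermajority(19, 30), Some(false));
}
```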
@ -1,6 +1,7 @@
use super::super::common::get_base_reward;
use super::validator_statuses::{TotalBalances, ValidatorStatus, ValidatorStatuses};
use super::Error;
use safe_arith::SafeArith;

use types::*;

@ -13,21 +14,21 @@ pub struct Delta {

impl Delta {
/// Reward the validator with the `reward`.
pub fn reward(&mut self, reward: u64) {
self.rewards += reward;
pub fn reward(&mut self, reward: u64) -> Result<(), Error> {
self.rewards = self.rewards.safe_add(reward)?;
Ok(())
}

/// Penalize the validator with the `penalty`.
pub fn penalize(&mut self, penalty: u64) {
self.penalties += penalty;
pub fn penalize(&mut self, penalty: u64) -> Result<(), Error> {
self.penalties = self.penalties.safe_add(penalty)?;
Ok(())
}
}

impl std::ops::AddAssign for Delta {
/// Use wrapping addition as that is how it's defined in the spec.
fn add_assign(&mut self, other: Delta) {
self.rewards += other.rewards;
self.penalties += other.penalties;
/// Combine two deltas.
fn combine(&mut self, other: Delta) -> Result<(), Error> {
self.reward(other.rewards)?;
self.penalize(other.penalties)
}
}

@ -56,9 +57,10 @@ pub fn process_rewards_and_penalties<T: EthSpec>(

get_proposer_deltas(&mut deltas, state, validator_statuses, spec)?;

// Apply the deltas, over-flowing but not under-flowing (saturating at 0 instead).
// Apply the deltas, erroring on overflow above but not on overflow below (saturating at 0
// instead).
for (i, delta) in deltas.iter().enumerate() {
state.balances[i] += delta.rewards;
state.balances[i] = state.balances[i].safe_add(delta.rewards)?;
state.balances[i] = state.balances[i].saturating_sub(delta.penalties);
}

@ -91,7 +93,8 @@ fn get_proposer_deltas<T: EthSpec>(
return Err(Error::ValidatorStatusesInconsistent);
}

deltas[inclusion.proposer_index].reward(base_reward / spec.proposer_reward_quotient);
deltas[inclusion.proposer_index]
.reward(base_reward.safe_div(spec.proposer_reward_quotient)?)?;
}
}

@ -123,9 +126,9 @@ fn get_attestation_deltas<T: EthSpec>(
base_reward,
finality_delay,
spec,
);
)?;

deltas[index] += delta;
deltas[index].combine(delta)?;
}

Ok(())
@ -140,7 +143,7 @@ fn get_attestation_delta<T: EthSpec>(
base_reward: u64,
finality_delay: u64,
spec: &ChainSpec,
) -> Delta {
) -> Result<Delta, Error> {
let mut delta = Delta::default();

// Is this validator eligible to be rewarded or penalized?
@ -149,7 +152,7 @@ fn get_attestation_delta<T: EthSpec>(
|| (validator.is_slashed && !validator.is_withdrawable_in_current_epoch);

if !is_eligible {
return delta;
return Ok(delta);
}

// Handle integer overflow by dividing these quantities by EFFECTIVE_BALANCE_INCREMENT
@ -157,59 +160,78 @@ fn get_attestation_delta<T: EthSpec>(
// - increment = EFFECTIVE_BALANCE_INCREMENT
// - reward_numerator = get_base_reward(state, index) * (attesting_balance // increment)
// - rewards[index] = reward_numerator // (total_balance // increment)
let total_balance_ebi = total_balances.current_epoch() / spec.effective_balance_increment;
let total_attesting_balance_ebi =
total_balances.previous_epoch_attesters() / spec.effective_balance_increment;
let matching_target_balance_ebi =
total_balances.previous_epoch_target_attesters() / spec.effective_balance_increment;
let matching_head_balance_ebi =
total_balances.previous_epoch_head_attesters() / spec.effective_balance_increment;
let total_balance_ebi = total_balances
.current_epoch()
.safe_div(spec.effective_balance_increment)?;
let total_attesting_balance_ebi = total_balances
.previous_epoch_attesters()
.safe_div(spec.effective_balance_increment)?;
let matching_target_balance_ebi = total_balances
.previous_epoch_target_attesters()
.safe_div(spec.effective_balance_increment)?;
let matching_head_balance_ebi = total_balances
.previous_epoch_head_attesters()
.safe_div(spec.effective_balance_increment)?;

// Expected FFG source.
// Spec:
// - validator index in `get_unslashed_attesting_indices(state, matching_source_attestations)`
if validator.is_previous_epoch_attester && !validator.is_slashed {
delta.reward(base_reward * total_attesting_balance_ebi / total_balance_ebi);
delta.reward(
base_reward
.safe_mul(total_attesting_balance_ebi)?
.safe_div(total_balance_ebi)?,
)?;
// Inclusion speed bonus
let proposer_reward = base_reward / spec.proposer_reward_quotient;
let max_attester_reward = base_reward - proposer_reward;
let proposer_reward = base_reward.safe_div(spec.proposer_reward_quotient)?;
let max_attester_reward = base_reward.safe_sub(proposer_reward)?;
let inclusion = validator
.inclusion_info
.expect("It is a logic error for an attester not to have an inclusion delay.");
delta.reward(max_attester_reward / inclusion.delay);
delta.reward(max_attester_reward.safe_div(inclusion.delay)?)?;
} else {
delta.penalize(base_reward);
delta.penalize(base_reward)?;
}

// Expected FFG target.
// Spec:
// - validator index in `get_unslashed_attesting_indices(state, matching_target_attestations)`
if validator.is_previous_epoch_target_attester && !validator.is_slashed {
delta.reward(base_reward * matching_target_balance_ebi / total_balance_ebi);
delta.reward(
base_reward
.safe_mul(matching_target_balance_ebi)?
.safe_div(total_balance_ebi)?,
)?;
} else {
delta.penalize(base_reward);
delta.penalize(base_reward)?;
}

// Expected head.
// Spec:
// - validator index in `get_unslashed_attesting_indices(state, matching_head_attestations)`
if validator.is_previous_epoch_head_attester && !validator.is_slashed {
delta.reward(base_reward * matching_head_balance_ebi / total_balance_ebi);
delta.reward(
base_reward
.safe_mul(matching_head_balance_ebi)?
.safe_div(total_balance_ebi)?,
)?;
} else {
delta.penalize(base_reward);
delta.penalize(base_reward)?;
}

// Inactivity penalty
if finality_delay > spec.min_epochs_to_inactivity_penalty {
// All eligible validators are penalized
delta.penalize(spec.base_rewards_per_epoch * base_reward);
delta.penalize(spec.base_rewards_per_epoch.safe_mul(base_reward)?)?;

// Additionally, all validators whose FFG target didn't match are penalized extra
if !validator.is_previous_epoch_target_attester {
delta.penalize(
validator.current_epoch_effective_balance * finality_delay
/ spec.inactivity_penalty_quotient,
);
validator
.current_epoch_effective_balance
.safe_mul(finality_delay)?
.safe_div(spec.inactivity_penalty_quotient)?,
)?;
}
}

@ -218,5 +240,5 @@ fn get_attestation_delta<T: EthSpec>(
// This function only computes the delta for a single validator, so it cannot also return a
// delta for a validator.

delta
Ok(delta)
}

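Editor's note: the `Delta` reshaping above swaps a wrapping `AddAssign` for a fallible `combine`, so an overflowing reward is reported instead of silently wrapped. A sketch of the change of shape (not Lighthouse code; a local `ArithError` stands in for the real error type):

```rust
#[derive(Debug, PartialEq)]
struct ArithError;

#[derive(Default, Debug)]
struct Delta {
    rewards: u64,
    penalties: u64,
}

impl Delta {
    fn reward(&mut self, reward: u64) -> Result<(), ArithError> {
        self.rewards = self.rewards.checked_add(reward).ok_or(ArithError)?;
        Ok(())
    }
    fn penalize(&mut self, penalty: u64) -> Result<(), ArithError> {
        self.penalties = self.penalties.checked_add(penalty).ok_or(ArithError)?;
        Ok(())
    }
    // The fallible replacement for `+=`: both fields are checked.
    fn combine(&mut self, other: Delta) -> Result<(), ArithError> {
        self.reward(other.rewards)?;
        self.penalize(other.penalties)
    }
}

fn main() {
    let mut total = Delta::default();
    total.combine(Delta { rewards: 5, penalties: 2 }).unwrap();
    total.rewards = u64::max_value();
    // A second combine now overflows the rewards field and errors out.
    assert_eq!(total.combine(Delta { rewards: 1, penalties: 0 }), Err(ArithError));
}
```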
@ -18,6 +18,7 @@ pub enum EpochProcessingError {
BeaconStateError(BeaconStateError),
InclusionError(InclusionError),
SszTypesError(ssz_types::Error),
ArithError(safe_arith::ArithError),
}

impl From<InclusionError> for EpochProcessingError {
@ -38,6 +39,12 @@ impl From<ssz_types::Error> for EpochProcessingError {
}
}

impl From<safe_arith::ArithError> for EpochProcessingError {
fn from(e: safe_arith::ArithError) -> EpochProcessingError {
EpochProcessingError::ArithError(e)
}
}

#[derive(Debug, PartialEq)]
pub enum InclusionError {
/// The validator did not participate in an attestation in this period.

@ -1,3 +1,4 @@
use safe_arith::SafeArith;
use types::{BeaconStateError as Error, *};

/// Process slashings.
@ -13,12 +14,17 @@ pub fn process_slashings<T: EthSpec>(

for (index, validator) in state.validators.iter().enumerate() {
if validator.slashed
&& epoch + T::EpochsPerSlashingsVector::to_u64() / 2 == validator.withdrawable_epoch
&& epoch + T::EpochsPerSlashingsVector::to_u64().safe_div(2)?
== validator.withdrawable_epoch
{
let increment = spec.effective_balance_increment;
let penalty_numerator = validator.effective_balance / increment
* std::cmp::min(sum_slashings * 3, total_balance);
let penalty = penalty_numerator / total_balance * increment;
let penalty_numerator = validator
.effective_balance
.safe_div(increment)?
.safe_mul(std::cmp::min(sum_slashings.safe_mul(3)?, total_balance))?;
let penalty = penalty_numerator
.safe_div(total_balance)?
.safe_mul(increment)?;

safe_sub_assign!(state.balances[index], penalty);
}

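Editor's note: the slashing penalty preserves the spec's ordering: dividing the effective balance by the increment before multiplying keeps the intermediate product small enough for a `u64`, and the checked ops make any remaining overflow explicit. A sketch with illustrative (assumed) figures:

```rust
fn slashing_penalty(
    effective_balance: u64,
    sum_slashings: u64,
    total_balance: u64,
    increment: u64,
) -> Option<u64> {
    let adjusted = std::cmp::min(sum_slashings.checked_mul(3)?, total_balance);
    effective_balance
        .checked_div(increment)? // divide first to bound the product below
        .checked_mul(adjusted)?
        .checked_div(total_balance)?
        .checked_mul(increment) // re-quantize to whole increments
}

fn main() {
    // A 32 ETH validator while ~3% of a 1M ETH total was recently slashed
    // (assumed figures, all in Gwei).
    let penalty = slashing_penalty(
        32_000_000_000,
        30_000_000_000_000,
        1_000_000_000_000_000,
        1_000_000_000,
    );
    println!("penalty: {:?} Gwei", penalty);
}
```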
@ -1,4 +1,5 @@
use crate::common::get_attesting_indices;
use safe_arith::SafeArith;
use types::*;

/// Sets the boolean `var` on `self` to be true if it is true on `other`. Otherwise leaves `self`
@ -198,12 +199,16 @@ impl ValidatorStatuses {

if validator.is_active_at(state.current_epoch()) {
status.is_active_in_current_epoch = true;
total_balances.current_epoch += effective_balance;
total_balances
.current_epoch
.safe_add_assign(effective_balance)?;
}

if validator.is_active_at(state.previous_epoch()) {
status.is_active_in_previous_epoch = true;
total_balances.previous_epoch += effective_balance;
total_balances
.previous_epoch
.safe_add_assign(effective_balance)?;
}

statuses.push(status);
@ -275,19 +280,29 @@ impl ValidatorStatuses {
let validator_balance = state.get_effective_balance(index, spec)?;

if v.is_current_epoch_attester {
self.total_balances.current_epoch_attesters += validator_balance;
self.total_balances
.current_epoch_attesters
.safe_add_assign(validator_balance)?;
}
if v.is_current_epoch_target_attester {
self.total_balances.current_epoch_target_attesters += validator_balance;
self.total_balances
.current_epoch_target_attesters
.safe_add_assign(validator_balance)?;
}
if v.is_previous_epoch_attester {
self.total_balances.previous_epoch_attesters += validator_balance;
self.total_balances
.previous_epoch_attesters
.safe_add_assign(validator_balance)?;
}
if v.is_previous_epoch_target_attester {
self.total_balances.previous_epoch_target_attesters += validator_balance;
self.total_balances
.previous_epoch_target_attesters
.safe_add_assign(validator_balance)?;
}
if v.is_previous_epoch_head_attester {
self.total_balances.previous_epoch_head_attesters += validator_balance;
self.total_balances
.previous_epoch_head_attesters
.safe_add_assign(validator_balance)?;
}
}
}

@ -23,6 +23,7 @@ log = "0.4.8"
merkle_proof = { path = "../utils/merkle_proof" }
rayon = "1.2.0"
rand = "0.7.2"
safe_arith = { path = "../utils/safe_arith" }
serde = "1.0.102"
serde_derive = "1.0.102"
slog = "2.5.2"

@ -7,11 +7,13 @@ use compare_fields_derive::CompareFields;
use eth2_hashing::hash;
use int_to_bytes::{int_to_bytes4, int_to_bytes8};
use pubkey_cache::PubkeyCache;
use safe_arith::{ArithError, SafeArith};
use serde_derive::{Deserialize, Serialize};
use ssz::ssz_encode;
use ssz_derive::{Decode, Encode};
use ssz_types::{typenum::Unsigned, BitVector, FixedVector};
use std::convert::TryInto;
use std::fmt;
use swap_or_not_shuffle::compute_shuffled_index;
use test_random_derive::TestRandom;
use tree_hash::TreeHash;
@ -76,6 +78,12 @@ pub enum Error {
deposit_count: u64,
deposit_index: u64,
},
/// An arithmetic operation occurred which would have overflowed or divided by 0.
///
/// This represents a serious bug in either the spec or Lighthouse!
ArithError(ArithError),
MissingBeaconBlock(SignedBeaconBlockHash),
MissingBeaconState(BeaconStateHash),
}

/// Control whether an epoch-indexed field can be indexed at the next epoch or not.
@ -94,6 +102,33 @@ impl AllowNextEpoch {
}
}

#[derive(PartialEq, Eq, Hash, Clone, Copy)]
pub struct BeaconStateHash(Hash256);

impl fmt::Debug for BeaconStateHash {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "BeaconStateHash({:?})", self.0)
}
}

impl fmt::Display for BeaconStateHash {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.0)
}
}

impl From<Hash256> for BeaconStateHash {
fn from(hash: Hash256) -> BeaconStateHash {
BeaconStateHash(hash)
}
}

impl From<BeaconStateHash> for Hash256 {
fn from(beacon_state_hash: BeaconStateHash) -> Hash256 {
beacon_state_hash.0
}
}

/// The state of the `BeaconChain` at some slot.
///
/// Spec v0.11.1
@ -413,7 +448,7 @@ impl<T: EthSpec> BeaconState<T> {
let mut i = 0;
loop {
let candidate_index = indices[compute_shuffled_index(
i % indices.len(),
i.safe_rem(indices.len())?,
indices.len(),
seed,
spec.shuffle_round_count,
@ -421,17 +456,19 @@ impl<T: EthSpec> BeaconState<T> {
.ok_or(Error::UnableToShuffle)?];
let random_byte = {
let mut preimage = seed.to_vec();
preimage.append(&mut int_to_bytes8((i / 32) as u64));
preimage.append(&mut int_to_bytes8(i.safe_div(32)? as u64));
let hash = hash(&preimage);
hash[i % 32]
hash[i.safe_rem(32)?]
};
let effective_balance = self.validators[candidate_index].effective_balance;
if effective_balance * MAX_RANDOM_BYTE
>= spec.max_effective_balance * u64::from(random_byte)
if effective_balance.safe_mul(MAX_RANDOM_BYTE)?
>= spec
.max_effective_balance
.safe_mul(u64::from(random_byte))?
{
return Ok(candidate_index);
}
i += 1;
i.increment()?;
}
}

@ -502,8 +539,8 @@ impl<T: EthSpec> BeaconState<T> {
///
/// Spec v0.11.1
fn get_latest_block_roots_index(&self, slot: Slot) -> Result<usize, Error> {
if (slot < self.slot) && (self.slot <= slot + self.block_roots.len() as u64) {
Ok(slot.as_usize() % self.block_roots.len())
if slot < self.slot && self.slot <= slot + self.block_roots.len() as u64 {
Ok(slot.as_usize().safe_rem(self.block_roots.len())?)
} else {
Err(BeaconStateError::SlotOutOfBounds)
}
@ -555,7 +592,7 @@ impl<T: EthSpec> BeaconState<T> {
let len = T::EpochsPerHistoricalVector::to_u64();

if current_epoch < epoch + len && epoch <= allow_next_epoch.upper_bound_of(current_epoch) {
Ok(epoch.as_usize() % len as usize)
Ok(epoch.as_usize().safe_rem(len as usize)?)
} else {
Err(Error::EpochOutOfBounds)
}
@ -569,7 +606,9 @@ impl<T: EthSpec> BeaconState<T> {
///
/// Spec v0.11.1
pub fn update_randao_mix(&mut self, epoch: Epoch, signature: &Signature) -> Result<(), Error> {
let i = epoch.as_usize() % T::EpochsPerHistoricalVector::to_usize();
let i = epoch
.as_usize()
.safe_rem(T::EpochsPerHistoricalVector::to_usize())?;

let signature_hash = Hash256::from_slice(&hash(&ssz_encode(signature)));

@ -599,8 +638,8 @@ impl<T: EthSpec> BeaconState<T> {
///
/// Spec v0.11.1
fn get_latest_state_roots_index(&self, slot: Slot) -> Result<usize, Error> {
if (slot < self.slot) && (self.slot <= slot + Slot::from(self.state_roots.len())) {
Ok(slot.as_usize() % self.state_roots.len())
if slot < self.slot && self.slot <= slot + self.state_roots.len() as u64 {
Ok(slot.as_usize().safe_rem(self.state_roots.len())?)
} else {
Err(BeaconStateError::SlotOutOfBounds)
}
@ -631,6 +670,14 @@ impl<T: EthSpec> BeaconState<T> {
Ok(&self.block_roots[i])
}

pub fn get_block_state_roots(
&self,
slot: Slot,
) -> Result<(SignedBeaconBlockHash, BeaconStateHash), Error> {
let i = self.get_latest_block_roots_index(slot)?;
Ok((self.block_roots[i].into(), self.state_roots[i].into()))
}

/// Sets the latest state root for slot.
///
/// Spec v0.11.1
@ -654,7 +701,9 @@ impl<T: EthSpec> BeaconState<T> {
if current_epoch < epoch + T::EpochsPerSlashingsVector::to_u64()
&& epoch <= allow_next_epoch.upper_bound_of(current_epoch)
{
Ok(epoch.as_usize() % T::EpochsPerSlashingsVector::to_usize())
Ok(epoch
.as_usize()
.safe_rem(T::EpochsPerSlashingsVector::to_usize())?)
} else {
Err(Error::EpochOutOfBounds)
}
@ -713,20 +762,20 @@ impl<T: EthSpec> BeaconState<T> {
// == 0`.
let mix = {
let i = epoch + T::EpochsPerHistoricalVector::to_u64() - spec.min_seed_lookahead - 1;
self.randao_mixes[i.as_usize() % self.randao_mixes.len()]
self.randao_mixes[i.as_usize().safe_rem(self.randao_mixes.len())?]
};
let domain_bytes = int_to_bytes4(spec.get_domain_constant(domain_type));
let epoch_bytes = int_to_bytes8(epoch.as_u64());

const NUM_DOMAIN_BYTES: usize = 4;
const NUM_EPOCH_BYTES: usize = 8;
const MIX_OFFSET: usize = NUM_DOMAIN_BYTES + NUM_EPOCH_BYTES;
const NUM_MIX_BYTES: usize = 32;

let mut preimage = [0; NUM_DOMAIN_BYTES + NUM_EPOCH_BYTES + NUM_MIX_BYTES];
preimage[0..NUM_DOMAIN_BYTES].copy_from_slice(&domain_bytes);
preimage[NUM_DOMAIN_BYTES..NUM_DOMAIN_BYTES + NUM_EPOCH_BYTES]
.copy_from_slice(&epoch_bytes);
preimage[NUM_DOMAIN_BYTES + NUM_EPOCH_BYTES..].copy_from_slice(mix.as_bytes());
preimage[NUM_DOMAIN_BYTES..MIX_OFFSET].copy_from_slice(&epoch_bytes);
preimage[MIX_OFFSET..].copy_from_slice(mix.as_bytes());

Ok(Hash256::from_slice(&hash(&preimage)))
}
@ -760,9 +809,10 @@ impl<T: EthSpec> BeaconState<T> {
pub fn get_churn_limit(&self, spec: &ChainSpec) -> Result<u64, Error> {
Ok(std::cmp::max(
spec.min_per_epoch_churn_limit,
self.committee_cache(RelativeEpoch::Current)?
.active_validator_count() as u64
/ spec.churn_limit_quotient,
(self
.committee_cache(RelativeEpoch::Current)?
.active_validator_count() as u64)
.safe_div(spec.churn_limit_quotient)?,
))
}

@ -792,7 +842,7 @@ impl<T: EthSpec> BeaconState<T> {
) -> Result<u64, Error> {
validator_indices.iter().try_fold(0_u64, |acc, i| {
self.get_effective_balance(*i, spec)
.and_then(|bal| Ok(bal + acc))
.and_then(|bal| Ok(acc.safe_add(bal)?))
})
}

@ -1098,3 +1148,9 @@ impl From<tree_hash::Error> for Error {
Error::TreeHashError(e)
}
}

impl From<ArithError> for Error {
fn from(e: ArithError) -> Error {
Error::ArithError(e)
}
}

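Editor's note: the `get_seed` hunk replaces an inline offset sum with a named `MIX_OFFSET`, making the 44-byte preimage layout (domain | epoch | randao mix) explicit. A sketch of that layout (not Lighthouse code; little-endian epoch serialization is assumed here, matching what `int_to_bytes8` appears to produce):

```rust
const NUM_DOMAIN_BYTES: usize = 4;
const NUM_EPOCH_BYTES: usize = 8;
const MIX_OFFSET: usize = NUM_DOMAIN_BYTES + NUM_EPOCH_BYTES;
const NUM_MIX_BYTES: usize = 32;

fn seed_preimage(domain: [u8; 4], epoch: u64, mix: [u8; 32]) -> [u8; 44] {
    let mut preimage = [0u8; NUM_DOMAIN_BYTES + NUM_EPOCH_BYTES + NUM_MIX_BYTES];
    preimage[0..NUM_DOMAIN_BYTES].copy_from_slice(&domain);
    preimage[NUM_DOMAIN_BYTES..MIX_OFFSET].copy_from_slice(&epoch.to_le_bytes());
    preimage[MIX_OFFSET..].copy_from_slice(&mix);
    preimage
}

fn main() {
    let preimage = seed_preimage([1, 0, 0, 0], 42, [0xab; 32]);
    assert_eq!(preimage.len(), 44);
    assert_eq!(&preimage[4..12], &42u64.to_le_bytes());
}
```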
@ -1,3 +1,5 @@
#![allow(clippy::integer_arithmetic)]

use super::BeaconState;
use crate::*;
use core::num::NonZeroUsize;
@ -43,7 +45,7 @@ impl CommitteeCache {
        }

        let committees_per_slot =
            T::get_committee_count_per_slot(active_validator_indices.len(), spec) as u64;
            T::get_committee_count_per_slot(active_validator_indices.len(), spec)? as u64;

        let seed = state.get_seed(epoch, Domain::BeaconAttester, spec)?;

@ -56,7 +58,7 @@ impl CommitteeCache {
            .ok_or_else(|| Error::UnableToShuffle)?;

        // The use of `NonZeroUsize` reduces the maximum number of possible validators by one.
        if state.validators.len() > usize::max_value() - 1 {
        if state.validators.len() == usize::max_value() {
            return Err(Error::TooManyValidators);
        }

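Why the cap sits one below usize::MAX: the shuffling code stores lengths as NonZeroUsize, which cannot represent zero, so exactly one value of the usize domain is sacrificed; the rewritten condition states the same bound more directly. A sketch of the boundary, using a hypothetical helper rather than code from the diff:

    use core::num::NonZeroUsize;

    fn check_validator_count(len: usize) -> Result<NonZeroUsize, &'static str> {
        if len == usize::max_value() {
            return Err("TooManyValidators");
        }
        // len + 1 can no longer overflow and is non-zero by construction.
        Ok(NonZeroUsize::new(len + 1).expect("non-zero by construction"))
    }

    fn main() {
        assert!(check_validator_count(10).is_ok());
        assert!(check_validator_count(usize::max_value()).is_err());
    }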
@ -1,4 +1,5 @@
use super::{BeaconStateError, ChainSpec, Epoch, Validator};
use safe_arith::SafeArith;
use serde_derive::{Deserialize, Serialize};
use std::collections::HashMap;

@ -50,7 +51,10 @@ impl ExitCache {
    /// Must only be called once per exiting validator.
    pub fn record_validator_exit(&mut self, exit_epoch: Epoch) -> Result<(), BeaconStateError> {
        self.check_initialized()?;
        *self.exits_per_epoch.entry(exit_epoch).or_insert(0) += 1;
        self.exits_per_epoch
            .entry(exit_epoch)
            .or_insert(0)
            .increment()?;
        Ok(())
    }

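The counter bump above now goes through SafeArith's increment, so a saturated u64 errors rather than silently wrapping. A minimal sketch with a plain u64 key standing in for Epoch:

    use safe_arith::SafeArith;
    use std::collections::HashMap;

    fn main() {
        let mut exits_per_epoch: HashMap<u64, u64> = HashMap::new();
        // entry().or_insert(0) yields &mut u64, on which increment() is called.
        exits_per_epoch.entry(42).or_insert(0).increment().unwrap();
        exits_per_epoch.entry(42).or_insert(0).increment().unwrap();
        assert_eq!(exits_per_epoch[&42], 2);
    }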
@ -23,6 +23,7 @@ impl PubkeyCache {
    ///
    /// The added index must equal the number of validators already added to the map. This ensures
    /// that an index is never skipped.
    #[allow(clippy::integer_arithmetic)]
    pub fn insert(&mut self, pubkey: PublicKeyBytes, index: ValidatorIndex) -> bool {
        if index == self.len {
            self.map.insert(pubkey, index);
@ -1,3 +1,5 @@
#![allow(clippy::integer_arithmetic)]

use super::Error;
use crate::{BeaconState, EthSpec, Hash256, Unsigned, Validator};
use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache};
@ -195,7 +197,7 @@ impl ValidatorsListTreeHashCache {
    /// This function makes assumptions that the `validators` list will only change in accordance
    /// with valid per-block/per-slot state transitions.
    fn recalculate_tree_hash_root(&mut self, validators: &[Validator]) -> Result<Hash256, Error> {
        let mut list_arena = std::mem::replace(&mut self.list_arena, CacheArena::default());
        let mut list_arena = std::mem::take(&mut self.list_arena);

        let leaves = self
            .values
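std::mem::take is exact shorthand for mem::replace with Default::default(), so the arena swap above is behaviourally identical, just terser:

    fn main() {
        let mut v = vec![1, 2, 3];
        let taken = std::mem::take(&mut v);
        assert_eq!(taken, vec![1, 2, 3]);
        assert!(v.is_empty()); // the original binding is left holding Vec::default()
    }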
@ -270,10 +270,10 @@ impl ChainSpec {
            /*
             * Gwei values
             */
            min_deposit_amount: u64::pow(2, 0) * u64::pow(10, 9),
            max_effective_balance: u64::pow(2, 5) * u64::pow(10, 9),
            ejection_balance: u64::pow(2, 4) * u64::pow(10, 9),
            effective_balance_increment: u64::pow(2, 0) * u64::pow(10, 9),
            min_deposit_amount: u64::pow(2, 0).saturating_mul(u64::pow(10, 9)),
            max_effective_balance: u64::pow(2, 5).saturating_mul(u64::pow(10, 9)),
            ejection_balance: u64::pow(2, 4).saturating_mul(u64::pow(10, 9)),
            effective_balance_increment: u64::pow(2, 0).saturating_mul(u64::pow(10, 9)),

            /*
             * Initial Values
@ -560,6 +560,7 @@ impl Default for YamlConfig {

/// Spec v0.11.1
impl YamlConfig {
    #[allow(clippy::integer_arithmetic)]
    pub fn from_spec<T: EthSpec>(spec: &ChainSpec) -> Self {
        Self {
            // ChainSpec
@ -595,7 +596,7 @@ impl YamlConfig {
            proposer_reward_quotient: spec.proposer_reward_quotient,
            inactivity_penalty_quotient: spec.inactivity_penalty_quotient,
            min_slashing_penalty_quotient: spec.min_slashing_penalty_quotient,
            genesis_fork_version: spec.genesis_fork_version.clone(),
            genesis_fork_version: spec.genesis_fork_version,
            safe_slots_to_update_justified: spec.safe_slots_to_update_justified,
            domain_beacon_proposer: spec.domain_beacon_proposer,
            domain_beacon_attester: spec.domain_beacon_attester,
@ -680,7 +681,7 @@ impl YamlConfig {
            effective_balance_increment: self.effective_balance_increment,
            genesis_slot: Slot::from(self.genesis_slot),
            bls_withdrawal_prefix_byte: self.bls_withdrawal_prefix,
            milliseconds_per_slot: self.seconds_per_slot * 1000,
            milliseconds_per_slot: self.seconds_per_slot.saturating_mul(1000),
            min_attestation_inclusion_delay: self.min_attestation_inclusion_delay,
            min_seed_lookahead: Epoch::from(self.min_seed_lookahead),
            max_seed_lookahead: Epoch::from(self.max_seed_lookahead),
@ -700,7 +701,7 @@ impl YamlConfig {
            domain_deposit: self.domain_deposit,
            domain_voluntary_exit: self.domain_voluntary_exit,
            boot_nodes: chain_spec.boot_nodes.clone(),
            genesis_fork_version: self.genesis_fork_version.clone(),
            genesis_fork_version: self.genesis_fork_version,
            eth1_follow_distance: self.eth1_follow_distance,
            ..*chain_spec
        })
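saturating_mul clamps at u64::MAX instead of wrapping, which satisfies the new deny(clippy::integer_arithmetic) lint without changing any of these constants; the products are far below the limit. For instance:

    fn main() {
        let max_effective_balance = u64::pow(2, 5).saturating_mul(u64::pow(10, 9));
        assert_eq!(max_effective_balance, 32_000_000_000); // 32 ETH in Gwei
        // At the boundary the result saturates rather than wrapping around.
        assert_eq!(u64::max_value().saturating_mul(2), u64::max_value());
    }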
@ -1,4 +1,5 @@
use crate::*;
use safe_arith::SafeArith;
use serde_derive::{Deserialize, Serialize};
use ssz_types::typenum::{
    Unsigned, U0, U1, U1024, U1099511627776, U128, U16, U16777216, U2, U2048, U32, U4, U4096, U64,
@ -64,16 +65,21 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq {
    /// the `active_validator_count` during the slot's epoch.
    ///
    /// Spec v0.11.1
    fn get_committee_count_per_slot(active_validator_count: usize, spec: &ChainSpec) -> usize {
    fn get_committee_count_per_slot(
        active_validator_count: usize,
        spec: &ChainSpec,
    ) -> Result<usize, Error> {
        let slots_per_epoch = Self::SlotsPerEpoch::to_usize();

        std::cmp::max(
        Ok(std::cmp::max(
            1,
            std::cmp::min(
                spec.max_committees_per_slot,
                active_validator_count / slots_per_epoch / spec.target_committee_size,
                active_validator_count
                    .safe_div(slots_per_epoch)?
                    .safe_div(spec.target_committee_size)?,
            ),
        )
        ))
    }

    /// Returns the minimum number of validators required for this spec.
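A standalone sketch of the committee-count formula with the new safe division; the numbers follow mainnet parameters (64 max committees per slot, 32 slots per epoch, target committee size 128), and the free function is illustrative rather than the trait method itself:

    use safe_arith::{ArithError, SafeArith};

    fn committee_count_per_slot(
        active_validators: usize,
        max_committees_per_slot: usize,
        slots_per_epoch: usize,
        target_committee_size: usize,
    ) -> Result<usize, ArithError> {
        Ok(std::cmp::max(
            1,
            std::cmp::min(
                max_committees_per_slot,
                active_validators
                    .safe_div(slots_per_epoch)?
                    .safe_div(target_committee_size)?,
            ),
        ))
    }

    fn main() {
        // 500k validators: min(64, 500000/32/128) = min(64, 122) = 64.
        assert_eq!(committee_count_per_slot(500_000, 64, 32, 128), Ok(64));
        // Tiny validator sets still get at least one committee per slot.
        assert_eq!(committee_count_per_slot(10, 64, 32, 128), Ok(1));
        // A zero divisor is now an Err for the caller, not a panic.
        assert_eq!(
            committee_count_per_slot(10, 64, 0, 128),
            Err(ArithError::DivisionByZero)
        );
    }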
@ -2,6 +2,8 @@

// Required for big type-level numbers
#![recursion_limit = "128"]
// Clippy lint set up
#![deny(clippy::integer_arithmetic)]

#[macro_use]
pub mod test_utils;
@ -76,11 +78,11 @@ pub use crate::proposer_slashing::ProposerSlashing;
pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch};
pub use crate::selection_proof::SelectionProof;
pub use crate::signed_aggregate_and_proof::SignedAggregateAndProof;
pub use crate::signed_beacon_block::SignedBeaconBlock;
pub use crate::signed_beacon_block::{SignedBeaconBlock, SignedBeaconBlockHash};
pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader;
pub use crate::signed_voluntary_exit::SignedVoluntaryExit;
pub use crate::signing_root::{SignedRoot, SigningRoot};
pub use crate::slot_epoch::{Epoch, Slot, FAR_FUTURE_EPOCH};
pub use crate::slot_epoch::{Epoch, Slot};
pub use crate::subnet_id::SubnetId;
pub use crate::validator::Validator;
pub use crate::voluntary_exit::VoluntaryExit;
@ -1,11 +1,40 @@
use crate::{test_utils::TestRandom, BeaconBlock, EthSpec, Hash256, Slot};
use bls::Signature;

use std::fmt;

use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
use test_random_derive::TestRandom;
use tree_hash::TreeHash;

#[derive(PartialEq, Eq, Hash, Clone, Copy)]
pub struct SignedBeaconBlockHash(Hash256);

impl fmt::Debug for SignedBeaconBlockHash {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "SignedBeaconBlockHash({:?})", self.0)
    }
}

impl fmt::Display for SignedBeaconBlockHash {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl From<Hash256> for SignedBeaconBlockHash {
    fn from(hash: Hash256) -> SignedBeaconBlockHash {
        SignedBeaconBlockHash(hash)
    }
}

impl From<SignedBeaconBlockHash> for Hash256 {
    fn from(signed_beacon_block_hash: SignedBeaconBlockHash) -> Hash256 {
        signed_beacon_block_hash.0
    }
}

/// A `BeaconBlock` and a signature from its proposer.
///
/// Spec v0.11.1
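The newtype makes block-root/state-root mix-ups a type error while keeping conversions explicit. A short usage sketch (the value is illustrative):

    use types::{Hash256, SignedBeaconBlockHash};

    fn main() {
        let raw = Hash256::repeat_byte(0xab);
        let block_hash: SignedBeaconBlockHash = raw.into();
        // Round-trip back to the underlying Hash256 when an API requires it.
        let as_hash: Hash256 = block_hash.into();
        assert_eq!(as_hash, raw);
        println!("{:?}", block_hash); // prints SignedBeaconBlockHash(0xabab..)
    }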
@ -14,7 +14,6 @@ use crate::test_utils::TestRandom;
use crate::SignedRoot;
use rand::RngCore;
use serde_derive::{Deserialize, Serialize};
use slog;
use ssz::{ssz_encode, Decode, DecodeError, Encode};
use std::cmp::{Ord, Ordering};
use std::fmt;
@ -22,13 +21,11 @@ use std::hash::{Hash, Hasher};
use std::iter::Iterator;
use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssign};

pub const FAR_FUTURE_EPOCH: Epoch = Epoch(u64::max_value());

#[derive(Eq, Debug, Clone, Copy, Default, Serialize, Deserialize)]
#[derive(Eq, Clone, Copy, Default, Serialize, Deserialize)]
#[serde(transparent)]
pub struct Slot(u64);

#[derive(Eq, Debug, Clone, Copy, Default, Serialize, Deserialize)]
#[derive(Eq, Clone, Copy, Default, Serialize, Deserialize)]
pub struct Epoch(u64);

impl_common!(Slot);
@ -40,7 +37,7 @@ impl Slot {
    }

    pub fn epoch(self, slots_per_epoch: u64) -> Epoch {
        Epoch::from(self.0 / slots_per_epoch)
        Epoch::from(self.0) / Epoch::from(slots_per_epoch)
    }

    pub fn max_value() -> Slot {
@ -79,8 +76,8 @@ impl Epoch {
        let start = self.start_slot(slots_per_epoch);
        let end = self.end_slot(slots_per_epoch);

        if (slot >= start) && (slot <= end) {
            Some(slot.as_usize() - start.as_usize())
        if slot >= start && slot <= end {
            slot.as_usize().checked_sub(start.as_usize())
        } else {
            None
        }
@ -113,7 +110,7 @@ impl<'a> Iterator for SlotIter<'a> {
        } else {
            let start_slot = self.epoch.start_slot(self.slots_per_epoch);
            let previous = self.current_iteration;
            self.current_iteration += 1;
            self.current_iteration = self.current_iteration.checked_add(1)?;
            Some(start_slot + previous)
        }
    }
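Routing Slot::epoch through the Epoch division operator means all Slot/Epoch division now funnels into the single checked_div path defined in the macros below, so a zero slots_per_epoch fails with one explicit message instead of a raw divide fault. Behaviour for normal inputs is unchanged:

    use types::{Epoch, Slot};

    fn main() {
        let slot = Slot::new(75);
        assert_eq!(slot.epoch(32), Epoch::new(2)); // 75 / 32 = 2
    }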
@ -107,20 +107,21 @@ macro_rules! impl_math_between {

            fn div(self, rhs: $other) -> $main {
                let rhs: u64 = rhs.into();
                if rhs == 0 {
                    panic!("Cannot divide by zero-valued Slot/Epoch")
                }
                $main::from(self.0 / rhs)
                $main::from(
                    self.0
                        .checked_div(rhs)
                        .expect("Cannot divide by zero-valued Slot/Epoch"),
                )
            }
        }

        impl DivAssign<$other> for $main {
            fn div_assign(&mut self, rhs: $other) {
                let rhs: u64 = rhs.into();
                if rhs == 0 {
                    panic!("Cannot divide by zero-valued Slot/Epoch")
                }
                self.0 = self.0 / rhs
                self.0 = self
                    .0
                    .checked_div(rhs)
                    .expect("Cannot divide by zero-valued Slot/Epoch");
            }
        }

@ -129,7 +130,11 @@ macro_rules! impl_math_between {

            fn rem(self, modulus: $other) -> $main {
                let modulus: u64 = modulus.into();
                $main::from(self.0 % modulus)
                $main::from(
                    self.0
                        .checked_rem(modulus)
                        .expect("Cannot divide by zero-valued Slot/Epoch"),
                )
            }
        }
    };
@ -190,6 +195,16 @@ macro_rules! impl_display {
    };
}

macro_rules! impl_debug {
    ($type: ident) => {
        impl fmt::Debug for $type {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                write!(f, "{}({:?})", stringify!($type), self.0)
            }
        }
    };
}

macro_rules! impl_ssz {
    ($type: ident) => {
        impl Encode for $type {
@ -234,7 +249,7 @@ macro_rules! impl_ssz {
            }

            fn tree_hash_packing_factor() -> usize {
                32 / 8
                32usize.wrapping_div(8)
            }

            fn tree_hash_root(&self) -> tree_hash::Hash256 {
@ -270,6 +285,7 @@ macro_rules! impl_common {
        impl_math_between!($type, u64);
        impl_math!($type);
        impl_display!($type);
        impl_debug!($type);
        impl_ssz!($type);
        impl_hash!($type);
    };
@ -2,7 +2,6 @@ use super::super::{generate_deterministic_keypairs, KeypairsFile};
use crate::test_utils::{AttestationTestTask, TestingPendingAttestationBuilder};
use crate::*;
use bls::get_withdrawal_credentials;
use dirs;
use log::debug;
use rayon::prelude::*;
use std::path::{Path, PathBuf};
@ -36,7 +36,7 @@ impl TestingDepositBuilder {
        let mut secret_key = keypair.sk.clone();

        match test_task {
            DepositTestTask::BadPubKey => pubkeybytes = PublicKeyBytes::from(new_key.pk.clone()),
            DepositTestTask::BadPubKey => pubkeybytes = PublicKeyBytes::from(new_key.pk),
            DepositTestTask::InvalidPubKey => {
                // Creating invalid public key bytes
                let mut public_key_bytes: Vec<u8> = vec![0; 48];
@ -50,7 +50,7 @@ impl TestingProposerSlashingBuilder {
            message: BeaconBlockHeader {
                parent_root: hash_2,
                slot: slot_2,
                ..signed_header_1.message.clone()
                ..signed_header_1.message
            },
            signature: Signature::empty_signature(),
        };
@ -1,3 +1,5 @@
#![allow(clippy::integer_arithmetic)]

#[macro_use]
mod macros;
mod builders;
@ -32,13 +32,11 @@ impl CachedTreeHash<TreeHashCache> for Validator {
                // Fields pubkey and withdrawal_credentials are constant
                if (i == 0 || i == 1) && cache.initialized {
                    None
                } else {
                    if process_field_by_index(self, i, leaf, !cache.initialized) {
                } else if process_field_by_index(self, i, leaf, !cache.initialized) {
                    Some(i)
                } else {
                    None
                }
                }
            })
            .collect();

@ -1,4 +1,3 @@
use hex;
use serde::de::Error;
use serde::{Deserialize, Deserializer, Serializer};

@ -19,9 +19,7 @@ impl AggregatePublicKey {

    pub fn from_bytes(bytes: &[u8]) -> Result<Self, DecodeError> {
        let pubkey = RawAggregatePublicKey::from_bytes(&bytes).map_err(|_| {
            DecodeError::BytesInvalid(
                format!("Invalid AggregatePublicKey bytes: {:?}", bytes).to_string(),
            )
            DecodeError::BytesInvalid(format!("Invalid AggregatePublicKey bytes: {:?}", bytes))
        })?;

        Ok(AggregatePublicKey(pubkey))
@ -164,8 +164,8 @@ impl TreeHashCache {
    fn lift_dirty(dirty_indices: &[usize]) -> SmallVec8<usize> {
        let mut new_dirty = SmallVec8::with_capacity(dirty_indices.len());

        for i in 0..dirty_indices.len() {
            new_dirty.push(dirty_indices[i] / 2)
        for index in dirty_indices {
            new_dirty.push(index / 2)
        }

        new_dirty.dedup();
@ -89,6 +89,7 @@ impl<T: Encode + Decode> CacheArena<T> {
    /// To reiterate, the given `range` should be relative to the given `alloc_id`, not
    /// `self.backing`. E.g., if the allocation has an offset of `20` and the range is `0..1`, then
    /// the splice will translate to `self.backing[20..21]`.
    #[allow(clippy::comparison_chain)]
    fn splice_forgetful<I: IntoIterator<Item = T>>(
        &mut self,
        alloc_id: usize,
@ -8,6 +8,7 @@ edition = "2018"
ethereum-types = "0.8.0"
eth2_hashing = "0.1.0"
lazy_static = "1.4.0"
safe_arith = { path = "../safe_arith" }

[dev-dependencies]
quickcheck = "0.9.0"
@ -1,6 +1,7 @@
use eth2_hashing::{hash, hash32_concat, ZERO_HASHES};
use ethereum_types::H256;
use lazy_static::lazy_static;
use safe_arith::ArithError;

const MAX_TREE_DEPTH: usize = 32;
const EMPTY_SLICE: &[H256] = &[];
@ -38,6 +39,8 @@ pub enum MerkleTreeError {
    Invalid,
    // Incorrect Depth provided
    DepthTooSmall,
    // Overflow occurred
    ArithError,
}

impl MerkleTree {
@ -232,6 +235,12 @@ fn merkle_root_from_branch(leaf: H256, branch: &[H256], depth: usize, index: usi
    H256::from_slice(&merkle_root)
}

impl From<ArithError> for MerkleTreeError {
    fn from(_: ArithError) -> Self {
        MerkleTreeError::ArithError
    }
}

#[cfg(test)]
mod tests {
    use super::*;
9
eth2/utils/safe_arith/Cargo.toml
Normal file
@ -0,0 +1,9 @@
[package]
name = "safe_arith"
version = "0.1.0"
authors = ["Michael Sproul <michael@sigmaprime.io>"]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
161
eth2/utils/safe_arith/src/lib.rs
Normal file
@ -0,0 +1,161 @@
//! Library for safe arithmetic on integers, avoiding overflow and division by zero.

/// Error representing the failure of an arithmetic operation.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum ArithError {
    Overflow,
    DivisionByZero,
}

type Result<T> = std::result::Result<T, ArithError>;

macro_rules! assign_method {
    ($name:ident, $op:ident, $doc_op:expr) => {
        assign_method!($name, $op, Self, $doc_op);
    };
    ($name:ident, $op:ident, $rhs_ty:ty, $doc_op:expr) => {
        #[doc = "Safe variant of `"]
        #[doc = $doc_op]
        #[doc = "`."]
        fn $name(&mut self, other: $rhs_ty) -> Result<()> {
            *self = self.$op(other)?;
            Ok(())
        }
    };
}

/// Trait providing safe arithmetic operations for built-in types.
pub trait SafeArith: Sized + Copy {
    const ZERO: Self;
    const ONE: Self;

    /// Safe variant of `+` that guards against overflow.
    fn safe_add(&self, other: Self) -> Result<Self>;

    /// Safe variant of `-` that guards against overflow.
    fn safe_sub(&self, other: Self) -> Result<Self>;

    /// Safe variant of `*` that guards against overflow.
    fn safe_mul(&self, other: Self) -> Result<Self>;

    /// Safe variant of `/` that guards against division by 0.
    fn safe_div(&self, other: Self) -> Result<Self>;

    /// Safe variant of `%` that guards against division by 0.
    fn safe_rem(&self, other: Self) -> Result<Self>;

    /// Safe variant of `<<` that guards against overflow.
    fn safe_shl(&self, other: u32) -> Result<Self>;

    /// Safe variant of `>>` that guards against overflow.
    fn safe_shr(&self, other: u32) -> Result<Self>;

    assign_method!(safe_add_assign, safe_add, "+=");
    assign_method!(safe_sub_assign, safe_sub, "-=");
    assign_method!(safe_mul_assign, safe_mul, "*=");
    assign_method!(safe_div_assign, safe_div, "/=");
    assign_method!(safe_rem_assign, safe_rem, "%=");
    assign_method!(safe_shl_assign, safe_shl, u32, "<<=");
    assign_method!(safe_shr_assign, safe_shr, u32, ">>=");

    /// Mutate `self` by adding 1, erroring on overflow.
    fn increment(&mut self) -> Result<()> {
        self.safe_add_assign(Self::ONE)
    }
}

macro_rules! impl_safe_arith {
    ($typ:ty) => {
        impl SafeArith for $typ {
            const ZERO: Self = 0;
            const ONE: Self = 1;

            fn safe_add(&self, other: Self) -> Result<Self> {
                self.checked_add(other).ok_or(ArithError::Overflow)
            }

            fn safe_sub(&self, other: Self) -> Result<Self> {
                self.checked_sub(other).ok_or(ArithError::Overflow)
            }

            fn safe_mul(&self, other: Self) -> Result<Self> {
                self.checked_mul(other).ok_or(ArithError::Overflow)
            }

            fn safe_div(&self, other: Self) -> Result<Self> {
                self.checked_div(other).ok_or(ArithError::DivisionByZero)
            }

            fn safe_rem(&self, other: Self) -> Result<Self> {
                self.checked_rem(other).ok_or(ArithError::DivisionByZero)
            }

            fn safe_shl(&self, other: u32) -> Result<Self> {
                self.checked_shl(other).ok_or(ArithError::Overflow)
            }

            fn safe_shr(&self, other: u32) -> Result<Self> {
                self.checked_shr(other).ok_or(ArithError::Overflow)
            }
        }
    };
}

impl_safe_arith!(u8);
impl_safe_arith!(u16);
impl_safe_arith!(u32);
impl_safe_arith!(u64);
impl_safe_arith!(usize);
impl_safe_arith!(i8);
impl_safe_arith!(i16);
impl_safe_arith!(i32);
impl_safe_arith!(i64);
impl_safe_arith!(isize);

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn basic() {
        let x = 10u32;
        let y = 11;
        assert_eq!(x.safe_add(y), Ok(x + y));
        assert_eq!(y.safe_sub(x), Ok(y - x));
        assert_eq!(x.safe_mul(y), Ok(x * y));
        assert_eq!(x.safe_div(y), Ok(x / y));
        assert_eq!(x.safe_rem(y), Ok(x % y));

        assert_eq!(x.safe_shl(1), Ok(x << 1));
        assert_eq!(x.safe_shr(1), Ok(x >> 1));
    }

    #[test]
    fn mutate() {
        let mut x = 0u8;
        x.increment().unwrap();
        x.increment().unwrap();
        assert_eq!(x, 2);
        x.safe_sub_assign(1).unwrap();
        assert_eq!(x, 1);
        x.safe_shl_assign(1).unwrap();
        assert_eq!(x, 2);
        x.safe_mul_assign(3).unwrap();
        assert_eq!(x, 6);
        x.safe_div_assign(4).unwrap();
        assert_eq!(x, 1);
        x.safe_shr_assign(1).unwrap();
        assert_eq!(x, 0);
    }

    #[test]
    fn errors() {
        assert!(u32::max_value().safe_add(1).is_err());
        assert!(u32::min_value().safe_sub(1).is_err());
        assert!(u32::max_value().safe_mul(2).is_err());
        assert!(u32::max_value().safe_div(0).is_err());
        assert!(u32::max_value().safe_rem(0).is_err());
        assert!(u32::max_value().safe_shl(32).is_err());
        assert!(u32::max_value().safe_shr(32).is_err());
    }
}
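A quick sketch of what this crate looks like from a consumer (the values are illustrative):

    use safe_arith::{ArithError, SafeArith};

    fn apply_penalty(balance: u64, penalty: u64) -> Result<u64, ArithError> {
        // safe_sub returns Err(ArithError::Overflow) instead of wrapping or panicking.
        balance.safe_sub(penalty)
    }

    fn main() {
        assert_eq!(apply_penalty(100, 40), Ok(60));
        assert_eq!(apply_penalty(40, 100), Err(ArithError::Overflow));

        // Assign variants mutate in place and report failures as Results.
        let mut epochs = 0u64;
        epochs.increment().unwrap();
        epochs.safe_mul_assign(3).unwrap();
        assert_eq!(epochs, 3);
    }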
@ -1,4 +1,3 @@
use hex;
use hex::ToHex;
use serde::de::{self, Visitor};
use std::fmt;
@ -21,10 +21,72 @@ pub enum DecodeError {
    /// length items (i.e., `length[0] < BYTES_PER_LENGTH_OFFSET`).
    /// - When decoding variable-length items, the `n`'th offset was less than the `n-1`'th offset.
    OutOfBoundsByte { i: usize },
    /// An offset points “backwards” into the fixed-bytes portion of the message, essentially
    /// double-decoding bytes that will also be decoded as fixed-length.
    ///
    /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#1-Offset-into-fixed-portion
    OffsetIntoFixedPortion(usize),
    /// The first offset does not point to the byte that follows the fixed byte portion,
    /// essentially skipping a variable-length byte.
    ///
    /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#2-Skip-first-variable-byte
    OffsetSkipsVariableBytes(usize),
    /// An offset points to bytes prior to the previous offset. Depending on how you look at it,
    /// this either double-decodes bytes or makes the first offset a negative-length.
    ///
    /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#3-Offsets-are-decreasing
    OffsetsAreDecreasing(usize),
    /// An offset references byte indices that do not exist in the source bytes.
    ///
    /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#4-Offsets-are-out-of-bounds
    OffsetOutOfBounds(usize),
    /// A variable-length list does not have a fixed portion that is cleanly divisible by
    /// `BYTES_PER_LENGTH_OFFSET`.
    InvalidListFixedBytesLen(usize),
    /// Some item has a `ssz_fixed_len` of zero. This is illegal.
    ZeroLengthItem,
    /// The given bytes were invalid for some application-level reason.
    BytesInvalid(String),
}

/// Performs checks on the `offset` based upon the other parameters provided.
///
/// ## Detail
///
/// - `offset`: the offset bytes (e.g., result of `read_offset(..)`).
/// - `previous_offset`: unless this is the first offset in the SSZ object, the value of the
///   previously-read offset. Used to ensure offsets are not decreasing.
/// - `num_bytes`: the total number of bytes in the SSZ object. Used to ensure the offset is not
///   out of bounds.
/// - `num_fixed_bytes`: the number of fixed-bytes in the struct, if it is known. Used to ensure
///   that the first offset doesn't skip any variable bytes.
///
/// ## References
///
/// The checks here are derived from this document:
///
/// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view
pub fn sanitize_offset(
    offset: usize,
    previous_offset: Option<usize>,
    num_bytes: usize,
    num_fixed_bytes: Option<usize>,
) -> Result<usize, DecodeError> {
    if num_fixed_bytes.map_or(false, |fixed_bytes| offset < fixed_bytes) {
        Err(DecodeError::OffsetIntoFixedPortion(offset))
    } else if previous_offset.is_none()
        && num_fixed_bytes.map_or(false, |fixed_bytes| offset != fixed_bytes)
    {
        Err(DecodeError::OffsetSkipsVariableBytes(offset))
    } else if offset > num_bytes {
        Err(DecodeError::OffsetOutOfBounds(offset))
    } else if previous_offset.map_or(false, |prev| prev > offset) {
        Err(DecodeError::OffsetsAreDecreasing(offset))
    } else {
        Ok(offset)
    }
}

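Taken together, sanitize_offset maps each malformed-offset case to a dedicated error variant. A hedged illustration of the checks, assuming the function is re-exported at the ssz crate root:

    use ssz::{sanitize_offset, DecodeError};

    fn main() {
        // The first offset in an object with an 8-byte fixed portion must equal 8.
        assert_eq!(sanitize_offset(8, None, 32, Some(8)), Ok(8));
        // Pointing back into the fixed portion is rejected.
        assert_eq!(
            sanitize_offset(4, None, 32, Some(8)),
            Err(DecodeError::OffsetIntoFixedPortion(4))
        );
        // A later offset may not decrease relative to the previous one.
        assert_eq!(
            sanitize_offset(10, Some(12), 32, Some(8)),
            Err(DecodeError::OffsetsAreDecreasing(10))
        );
    }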
/// Provides SSZ decoding (de-serialization) via the `from_ssz_bytes(&bytes)` method.
///
/// See `examples/` for manual implementations or the crate root for implementations using
@ -97,21 +159,14 @@ impl<'a> SszDecoderBuilder<'a> {

            self.items.push(slice);
        } else {
            let offset = read_offset(&self.bytes[self.items_index..])?;

            let previous_offset = self
                .offsets
                .last()
                .map(|o| o.offset)
                .unwrap_or_else(|| BYTES_PER_LENGTH_OFFSET);

            if (previous_offset > offset) || (offset > self.bytes.len()) {
                return Err(DecodeError::OutOfBoundsByte { i: offset });
            }

            self.offsets.push(Offset {
                position: self.items.len(),
                offset,
                offset: sanitize_offset(
                    read_offset(&self.bytes[self.items_index..])?,
                    self.offsets.last().map(|o| o.offset),
                    self.bytes.len(),
                    None,
                )?,
            });

            // Push an empty slice into items; it will be replaced later.
@ -124,13 +179,13 @@ impl<'a> SszDecoderBuilder<'a> {
    }

    fn finalize(&mut self) -> Result<(), DecodeError> {
        if !self.offsets.is_empty() {
        if let Some(first_offset) = self.offsets.first().map(|o| o.offset) {
            // Check to ensure the first offset points to the byte immediately following the
            // fixed-length bytes.
            if self.offsets[0].offset != self.items_index {
                return Err(DecodeError::OutOfBoundsByte {
                    i: self.offsets[0].offset,
                });
            if first_offset < self.items_index {
                return Err(DecodeError::OffsetIntoFixedPortion(first_offset));
            } else if first_offset > self.items_index {
                return Err(DecodeError::OffsetSkipsVariableBytes(first_offset));
            }

            // Iterate through each pair of offsets, grabbing the slice between each of the offsets.
@ -366,7 +366,7 @@ impl_decodable_for_u8_array!(4);
impl_decodable_for_u8_array!(32);

macro_rules! impl_for_vec {
    ($type: ty) => {
    ($type: ty, $max_len: expr) => {
        impl<T: Decode> Decode for $type {
            fn is_ssz_fixed_len() -> bool {
                false
@ -381,22 +381,22 @@ macro_rules! impl_for_vec {
                        .map(|chunk| T::from_ssz_bytes(chunk))
                        .collect()
                } else {
                    decode_list_of_variable_length_items(bytes).map(|vec| vec.into())
                    decode_list_of_variable_length_items(bytes, $max_len).map(|vec| vec.into())
                }
            }
        }
    };
}

impl_for_vec!(Vec<T>);
impl_for_vec!(SmallVec<[T; 1]>);
impl_for_vec!(SmallVec<[T; 2]>);
impl_for_vec!(SmallVec<[T; 3]>);
impl_for_vec!(SmallVec<[T; 4]>);
impl_for_vec!(SmallVec<[T; 5]>);
impl_for_vec!(SmallVec<[T; 6]>);
impl_for_vec!(SmallVec<[T; 7]>);
impl_for_vec!(SmallVec<[T; 8]>);
impl_for_vec!(Vec<T>, None);
impl_for_vec!(SmallVec<[T; 1]>, Some(1));
impl_for_vec!(SmallVec<[T; 2]>, Some(2));
impl_for_vec!(SmallVec<[T; 3]>, Some(3));
impl_for_vec!(SmallVec<[T; 4]>, Some(4));
impl_for_vec!(SmallVec<[T; 5]>, Some(5));
impl_for_vec!(SmallVec<[T; 6]>, Some(6));
impl_for_vec!(SmallVec<[T; 7]>, Some(7));
impl_for_vec!(SmallVec<[T; 8]>, Some(8));

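With the max_len plumbing, a SmallVec-backed list now refuses to decode more variable-length items than its inline capacity, while Vec (passing None) stays unbounded. A sketch of the boundary, assuming smallvec and ssz are available as dependencies (the inner Vec<u16> items are variable-length, so the guarded path runs):

    use smallvec::SmallVec;
    use ssz::{Decode, Encode};

    fn main() {
        let three_lists: Vec<Vec<u16>> = vec![vec![1], vec![2], vec![3]];
        let bytes = three_lists.as_ssz_bytes();

        // Three items exceed the Some(2) maximum of the two-slot SmallVec.
        assert!(<SmallVec<[Vec<u16>; 2]>>::from_ssz_bytes(&bytes).is_err());
        // Vec passes None for max_len, so any length still decodes.
        assert!(<Vec<Vec<u16>>>::from_ssz_bytes(&bytes).is_ok());
    }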
/// Decodes `bytes` as if it were a list of variable-length items.
///
@ -405,43 +405,52 @@ impl_for_vec!(SmallVec<[T; 8]>);
/// differing types.
pub fn decode_list_of_variable_length_items<T: Decode>(
    bytes: &[u8],
    max_len: Option<usize>,
) -> Result<Vec<T>, DecodeError> {
    let mut next_variable_byte = read_offset(bytes)?;

    // The value of the first offset must not point back into the same bytes that defined
    // it.
    if next_variable_byte < BYTES_PER_LENGTH_OFFSET {
        return Err(DecodeError::OutOfBoundsByte {
            i: next_variable_byte,
        });
    if bytes.is_empty() {
        return Ok(vec![]);
    }

    let num_items = next_variable_byte / BYTES_PER_LENGTH_OFFSET;
    let first_offset = read_offset(bytes)?;
    sanitize_offset(first_offset, None, bytes.len(), Some(first_offset))?;

    // The fixed-length section must be a clean multiple of `BYTES_PER_LENGTH_OFFSET`.
    if next_variable_byte != num_items * BYTES_PER_LENGTH_OFFSET {
        return Err(DecodeError::InvalidByteLength {
            len: next_variable_byte,
            expected: num_items * BYTES_PER_LENGTH_OFFSET,
        });
    if first_offset % BYTES_PER_LENGTH_OFFSET != 0 || first_offset < BYTES_PER_LENGTH_OFFSET {
        return Err(DecodeError::InvalidListFixedBytesLen(first_offset));
    }

    let mut values = Vec::with_capacity(num_items);
    for i in 1..=num_items {
        let slice_option = if i == num_items {
            bytes.get(next_variable_byte..)
    let num_items = first_offset / BYTES_PER_LENGTH_OFFSET;

    if max_len.map_or(false, |max| num_items > max) {
        return Err(DecodeError::BytesInvalid(format!(
            "Variable length list of {} items exceeds maximum of {:?}",
            num_items, max_len
        )));
    }

    // Only initialize the vec with a capacity if a maximum length is provided.
    //
    // We assume that if a max length is provided then the application is able to handle an
    // allocation of this size.
    let mut values = if max_len.is_some() {
        Vec::with_capacity(num_items)
    } else {
        let offset = read_offset(&bytes[(i * BYTES_PER_LENGTH_OFFSET)..])?;

        let start = next_variable_byte;
        next_variable_byte = offset;

        bytes.get(start..next_variable_byte)
        vec![]
    };

    let slice = slice_option.ok_or_else(|| DecodeError::OutOfBoundsByte {
        i: next_variable_byte,
    })?;
    let mut offset = first_offset;
    for i in 1..=num_items {
        let slice_option = if i == num_items {
            bytes.get(offset..)
        } else {
            let start = offset;

            let next_offset = read_offset(&bytes[(i * BYTES_PER_LENGTH_OFFSET)..])?;
            offset = sanitize_offset(next_offset, Some(offset), bytes.len(), Some(first_offset))?;

            bytes.get(start..offset)
        };

        let slice = slice_option.ok_or_else(|| DecodeError::OutOfBoundsByte { i: offset })?;

        values.push(T::from_ssz_bytes(slice)?);
    }
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_list() {
|
||||
let vec: Vec<Vec<u16>> = vec![];
|
||||
let bytes = vec.as_ssz_bytes();
|
||||
assert!(bytes.is_empty());
|
||||
assert_eq!(Vec::from_ssz_bytes(&bytes), Ok(vec),);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn first_length_points_backwards() {
|
||||
assert_eq!(
|
||||
<Vec<Vec<u16>>>::from_ssz_bytes(&[0, 0, 0, 0]),
|
||||
Err(DecodeError::OutOfBoundsByte { i: 0 })
|
||||
Err(DecodeError::InvalidListFixedBytesLen(0))
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
<Vec<Vec<u16>>>::from_ssz_bytes(&[1, 0, 0, 0]),
|
||||
Err(DecodeError::OutOfBoundsByte { i: 1 })
|
||||
Err(DecodeError::InvalidListFixedBytesLen(1))
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
<Vec<Vec<u16>>>::from_ssz_bytes(&[2, 0, 0, 0]),
|
||||
Err(DecodeError::OutOfBoundsByte { i: 2 })
|
||||
Err(DecodeError::InvalidListFixedBytesLen(2))
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
<Vec<Vec<u16>>>::from_ssz_bytes(&[3, 0, 0, 0]),
|
||||
Err(DecodeError::OutOfBoundsByte { i: 3 })
|
||||
Err(DecodeError::InvalidListFixedBytesLen(3))
|
||||
);
|
||||
}
|
||||
|
||||
@ -546,7 +563,7 @@ mod tests {
|
||||
fn lengths_are_decreasing() {
|
||||
assert_eq!(
|
||||
<Vec<Vec<u16>>>::from_ssz_bytes(&[12, 0, 0, 0, 14, 0, 0, 0, 12, 0, 0, 0, 1, 0, 1, 0]),
|
||||
Err(DecodeError::OutOfBoundsByte { i: 12 })
|
||||
Err(DecodeError::OffsetsAreDecreasing(12))
|
||||
);
|
||||
}
|
||||
|
||||
@ -554,10 +571,7 @@ mod tests {
|
||||
fn awkward_fixed_length_portion() {
|
||||
assert_eq!(
|
||||
<Vec<Vec<u16>>>::from_ssz_bytes(&[10, 0, 0, 0, 10, 0, 0, 0, 0, 0]),
|
||||
Err(DecodeError::InvalidByteLength {
|
||||
len: 10,
|
||||
expected: 8
|
||||
})
|
||||
Err(DecodeError::InvalidListFixedBytesLen(10))
|
||||
);
|
||||
}
|
||||
|
||||
@ -565,14 +579,15 @@ mod tests {
|
||||
fn length_out_of_bounds() {
|
||||
assert_eq!(
|
||||
<Vec<Vec<u16>>>::from_ssz_bytes(&[5, 0, 0, 0]),
|
||||
Err(DecodeError::InvalidByteLength {
|
||||
len: 5,
|
||||
expected: 4
|
||||
})
|
||||
Err(DecodeError::OffsetOutOfBounds(5))
|
||||
);
|
||||
assert_eq!(
|
||||
<Vec<Vec<u16>>>::from_ssz_bytes(&[8, 0, 0, 0, 9, 0, 0, 0]),
|
||||
Err(DecodeError::OutOfBoundsByte { i: 9 })
|
||||
Err(DecodeError::OffsetOutOfBounds(9))
|
||||
);
|
||||
assert_eq!(
|
||||
<Vec<Vec<u16>>>::from_ssz_bytes(&[8, 0, 0, 0, 16, 0, 0, 0]),
|
||||
Err(DecodeError::OffsetOutOfBounds(16))
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -152,7 +152,7 @@ mod round_trip {
|
||||
|
||||
assert_eq!(
|
||||
VariableLen::from_ssz_bytes(&bytes),
|
||||
Err(DecodeError::OutOfBoundsByte { i: 9 })
|
||||
Err(DecodeError::OffsetIntoFixedPortion(9))
|
||||
);
|
||||
}
|
||||
|
||||
@ -182,7 +182,7 @@ mod round_trip {
|
||||
|
||||
assert_eq!(
|
||||
VariableLen::from_ssz_bytes(&bytes),
|
||||
Err(DecodeError::OutOfBoundsByte { i: 11 })
|
||||
Err(DecodeError::OffsetSkipsVariableBytes(11))
|
||||
);
|
||||
}
|
||||
|
||||
@ -284,7 +284,7 @@ mod round_trip {
|
||||
|
||||
assert_eq!(
|
||||
ThreeVariableLen::from_ssz_bytes(&bytes),
|
||||
Err(DecodeError::OutOfBoundsByte { i: 14 })
|
||||
Err(DecodeError::OffsetsAreDecreasing(14))
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -86,6 +86,7 @@ pub fn ssz_encode_derive(input: TokenStream) -> TokenStream {
    let field_types_f = field_types_a.clone();

    let output = quote! {
        #[allow(clippy::integer_arithmetic)]
        impl #impl_generics ssz::Encode for #name #ty_generics #where_clause {
            fn is_ssz_fixed_len() -> bool {
                #(
@ -221,6 +222,7 @@ pub fn ssz_decode_derive(input: TokenStream) -> TokenStream {
    }

    let output = quote! {
        #[allow(clippy::integer_arithmetic)]
        impl #impl_generics ssz::Decode for #name #ty_generics #where_clause {
            fn is_ssz_fixed_len() -> bool {
                #(
@ -224,29 +224,44 @@ where
    }

    fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, ssz::DecodeError> {
        let fixed_len = N::to_usize();

        if bytes.is_empty() {
            Err(ssz::DecodeError::InvalidByteLength {
                len: 0,
                expected: 1,
            })
        } else if T::is_ssz_fixed_len() {
            let num_items = bytes
                .len()
                .checked_div(T::ssz_fixed_len())
                .ok_or_else(|| ssz::DecodeError::ZeroLengthItem)?;

            if num_items != fixed_len {
                return Err(ssz::DecodeError::BytesInvalid(format!(
                    "FixedVector of {} items has {} items",
                    num_items, fixed_len
                )));
            }

            bytes
                .chunks(T::ssz_fixed_len())
                .map(|chunk| T::from_ssz_bytes(chunk))
                .collect::<Result<Vec<T>, _>>()
                .and_then(|vec| {
                    if vec.len() == N::to_usize() {
                    if vec.len() == fixed_len {
                        Ok(vec.into())
                    } else {
                        Err(ssz::DecodeError::BytesInvalid(format!(
                            "wrong number of vec elements, got: {}, expected: {}",
                            "Wrong number of FixedVector elements, got: {}, expected: {}",
                            vec.len(),
                            N::to_usize()
                        )))
                    }
                })
        } else {
            ssz::decode_list_of_variable_length_items(bytes).and_then(|vec| Ok(vec.into()))
            ssz::decode_list_of_variable_length_items(bytes, Some(fixed_len))
                .and_then(|vec| Ok(vec.into()))
        }
    }
}
@ -218,22 +218,41 @@ where
    }
}

impl<T, N: Unsigned> ssz::Decode for VariableList<T, N>
impl<T, N> ssz::Decode for VariableList<T, N>
where
    T: ssz::Decode,
    N: Unsigned,
{
    fn is_ssz_fixed_len() -> bool {
        <Vec<T>>::is_ssz_fixed_len()
    }

    fn ssz_fixed_len() -> usize {
        <Vec<T>>::ssz_fixed_len()
        false
    }

    fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, ssz::DecodeError> {
        let vec = <Vec<T>>::from_ssz_bytes(bytes)?;
        let max_len = N::to_usize();

        Self::new(vec).map_err(|e| ssz::DecodeError::BytesInvalid(format!("VariableList {:?}", e)))
        if bytes.is_empty() {
            Ok(vec![].into())
        } else if T::is_ssz_fixed_len() {
            let num_items = bytes
                .len()
                .checked_div(T::ssz_fixed_len())
                .ok_or_else(|| ssz::DecodeError::ZeroLengthItem)?;

            if num_items > max_len {
                return Err(ssz::DecodeError::BytesInvalid(format!(
                    "VariableList of {} items exceeds maximum of {}",
                    num_items, max_len
                )));
            }

            bytes
                .chunks(T::ssz_fixed_len())
                .map(|chunk| T::from_ssz_bytes(chunk))
                .collect::<Result<Vec<_>, _>>()
                .map(Into::into)
        } else {
            ssz::decode_list_of_variable_length_items(bytes, Some(max_len)).map(|vec| vec.into())
        }
    }
}

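Usage-wise, a VariableList now enforces its type-level bound during decoding instead of after building an unbounded Vec. A brief sketch with a four-element bound:

    use ssz::{Decode, Encode};
    use ssz_types::{typenum::U4, VariableList};

    fn main() {
        let list: VariableList<u64, U4> = VariableList::new(vec![1, 2, 3]).unwrap();
        let bytes = list.as_ssz_bytes();
        let decoded = <VariableList<u64, U4>>::from_ssz_bytes(&bytes).unwrap();
        assert_eq!(&decoded[..], &[1u64, 2, 3][..]);

        // Five fixed-length u64s cannot decode into a list with maximum length four.
        let too_long = vec![0u64; 5].as_ssz_bytes();
        assert!(<VariableList<u64, U4>>::from_ssz_bytes(&too_long).is_err());
    }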
@ -274,8 +274,7 @@ impl MerkleHasher {
        loop {
            if let Some(root) = self.root {
                break Ok(root);
            } else {
                if let Some(node) = self.half_nodes.last() {
            } else if let Some(node) = self.half_nodes.last() {
                let right_child = node.id * 2 + 1;
                self.process_right_node(right_child, self.zero_hash(right_child));
            } else if self.next_leaf == 1 {
@ -292,7 +291,6 @@ impl MerkleHasher {
                }
            }
        }
    }
}

/// Process a node that will become the left-hand node of some parent. The supplied `id` is
/// that of the node (not the parent). The `preimage` is the value of the node (i.e., if this
@ -29,6 +29,14 @@ pub fn parse_path_with_default_in_home_dir(
        })
}

pub fn parse_path(matches: &ArgMatches, name: &'static str) -> Result<PathBuf, String> {
    matches
        .value_of(name)
        .ok_or_else(|| format!("{} not specified", name))?
        .parse::<PathBuf>()
        .map_err(|e| format!("Unable to parse {}: {}", name, e))
}

pub fn parse_u64(matches: &ArgMatches, name: &'static str) -> Result<u64, String> {
    matches
        .value_of(name)
40
lcli/src/parse_ssz.rs
Normal file
@ -0,0 +1,40 @@
use crate::helpers::parse_path;
use clap::ArgMatches;
use serde::Serialize;
use ssz::Decode;
use std::fs::File;
use std::io::Read;
use types::{EthSpec, SignedBeaconBlock};

pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
    let type_str = matches
        .value_of("type")
        .ok_or_else(|| "No type supplied".to_string())?;
    let path = parse_path(matches, "path")?;

    info!("Type: {:?}", type_str);

    let mut bytes = vec![];
    let mut file = File::open(&path).map_err(|e| format!("Unable to open {:?}: {}", path, e))?;
    file.read_to_end(&mut bytes)
        .map_err(|e| format!("Unable to read {:?}: {}", path, e))?;

    match type_str {
        "SignedBeaconBlock" => decode_and_print::<SignedBeaconBlock<T>>(&bytes)?,
        other => return Err(format!("Unknown type: {}", other)),
    };

    Ok(())
}

fn decode_and_print<T: Decode + Serialize>(bytes: &[u8]) -> Result<(), String> {
    let item = T::from_ssz_bytes(&bytes).map_err(|e| format!("Ssz decode failed: {:?}", e))?;

    println!(
        "{}",
        serde_yaml::to_string(&item)
            .map_err(|e| format!("Unable to write object to YAML: {:?}", e))?
    );

    Ok(())
}