Merge latest master in v0.2.0

Commit: 1779aa6a8a
.github/ISSUE_TEMPLATE.md (vendored, 5 lines changed)
@@ -2,6 +2,11 @@
 
 Please provide a brief description of the issue.
 
+## Version
+
+Please provide your Lighthouse and Rust version. Are you building from
+`master`, which commit?
+
 ## Present Behaviour
 
 Describe the present behaviour of the application, with regards to this
.github/workflows/test-suite.yml (vendored, 7 lines changed)
@@ -67,3 +67,10 @@ jobs:
         run: sudo npm install -g ganache-cli
       - name: Run the beacon chain sim without an eth1 connection
         run: cargo run --release --bin simulator no-eth1-sim
+  check-benchmarks:
+    runs-on: ubuntu-latest
+    needs: cargo-fmt
+    steps:
+      - uses: actions/checkout@v1
+      - name: Typecheck benchmark code without running it
+        run: make check-benches
Cargo.lock (generated, 142 lines changed)

Dependency updates (registry checksums updated accordingly):

bitvec: 0.15.2 -> 0.17.4 (dependencies now "either", "radium")
deposit_contract 0.2.0: dependency "ethabi 9.0.1" -> "ethabi 11.0.0"
environment 0.2.0: dependency list updated (beacon_node, clap, ctrlc, env_logger 0.6.2, eth2_config, eth2_testnet_config, futures, logging, parking_lot 0.7.1, ...)
ethabi: 9.0.1 -> 11.0.0 (dependencies include error-chain, ethereum-types 0.8.0, rustc-hex, serde, serde_derive, serde_json, tiny-keccak, uint 0.8.2)
hermit-abi: 0.1.8 -> 0.1.10
js-sys: 0.3.36 -> 0.3.37
lcli 0.2.0: two dependency-list hunks touched (clap, deposit_contract, dirs, environment, eth1_test_rig, ..., serde_yaml, simple_logger, state_processing, tree_hash, types, web3)
openssl: 0.10.28 -> 0.10.29
openssl-sys: 0.9.54 -> 0.9.55
parity-scale-codec: 1.2.0 -> 1.3.0
proc-macro-hack: 0.5.14 -> 0.5.15
proc-macro2: 1.0.9 -> 1.0.10 (dependent crates such as quote, serde_derive, syn, synstructure and the wasm-bindgen family now reference "proc-macro2 1.0.10")
radium 0.3.0: new package
ring: 0.16.11 -> 0.16.12
security-framework: 0.4.1 -> 0.4.2
security-framework-sys: 0.4.1 -> 0.4.2
serde: 1.0.105 -> 1.0.106
serde_derive: 1.0.105 -> 1.0.106
serde_json: 1.0.48 -> 1.0.51
wasm-bindgen, wasm-bindgen-backend, wasm-bindgen-macro, wasm-bindgen-macro-support, wasm-bindgen-shared: 0.2.59 -> 0.2.60
wasm-bindgen-futures: 0.4.9 -> 0.4.10
wasm-bindgen-test, wasm-bindgen-test-macro: 0.3.9 -> 0.3.10
web-sys: 0.3.36 -> 0.3.37
winapi-util: 0.1.3 -> 0.1.4
One further dependency-list hunk (eth2_ssz_derive, rayon, serde, state_processing, tree_hash, types, ...) also gains an entry.
Makefile (4 lines changed)
@@ -22,6 +22,10 @@ test-debug:
 cargo-fmt:
 	cargo fmt --all -- --check
 
+# Typechecks benchmark code
+check-benches:
+	cargo check --all --benches
+
 # Runs only the ef-test vectors.
 run-ef-tests:
 	cargo test --release --manifest-path=$(EF_TESTS)/Cargo.toml --features "ef_tests"
@@ -39,7 +39,7 @@ Like all Ethereum 2.0 clients, Lighthouse is a work-in-progress.
 
 Current development overview:
 
-- Specification `v0.10.1` implemented, optimized and passing test vectors.
+- Specification `v0.11.1` implemented, optimized and passing test vectors.
 - Rust-native libp2p with Gossipsub and Discv5.
 - RESTful JSON API via HTTP server.
 - Events via WebSocket.
@@ -140,6 +140,8 @@ pub struct HeadInfo {
     pub current_justified_checkpoint: types::Checkpoint,
     pub finalized_checkpoint: types::Checkpoint,
     pub fork: Fork,
+    pub genesis_time: u64,
+    pub genesis_validators_root: Hash256,
 }
 
 pub trait BeaconChainTypes: Send + Sync + 'static {

@@ -176,6 +178,8 @@ pub struct BeaconChain<T: BeaconChainTypes> {
     pub(crate) canonical_head: TimeoutRwLock<BeaconSnapshot<T::EthSpec>>,
     /// The root of the genesis block.
     pub genesis_block_root: Hash256,
+    /// The root of the list of genesis validators, used during syncing.
+    pub genesis_validators_root: Hash256,
     /// A state-machine that is updated with information from the network and chooses a canonical
     /// head block.
     pub fork_choice: ForkChoice<T>,

@@ -468,6 +472,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             current_justified_checkpoint: head.beacon_state.current_justified_checkpoint.clone(),
             finalized_checkpoint: head.beacon_state.finalized_checkpoint.clone(),
             fork: head.beacon_state.fork.clone(),
+            genesis_time: head.beacon_state.genesis_time,
+            genesis_validators_root: head.beacon_state.genesis_validators_root,
         })
     }

@@ -814,7 +820,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                     root: target_root,
                 },
             },
-            signature: AggregateSignature::new(),
+            signature: AggregateSignature::empty_signature(),
         })
     }

@@ -1084,11 +1090,16 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
             .ok_or_else(|| Error::ValidatorPubkeyCacheLockTimeout)?;
 
-        let fork = self
+        let (fork, genesis_validators_root) = self
             .canonical_head
             .try_read_for(HEAD_LOCK_TIMEOUT)
             .ok_or_else(|| Error::CanonicalHeadLockTimeout)
-            .map(|head| head.beacon_state.fork.clone())?;
+            .map(|head| {
+                (
+                    head.beacon_state.fork.clone(),
+                    head.beacon_state.genesis_validators_root,
+                )
+            })?;
 
         let signature_set = indexed_attestation_signature_set_from_pubkeys(
             |validator_index| {

@@ -1099,6 +1110,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             &attestation.signature,
             &indexed_attestation,
             &fork,
+            genesis_validators_root,
             &self.spec,
         )
         .map_err(Error::SignatureSetError)?;

@@ -1175,10 +1187,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let index = attestation.data.index;
         let slot = attestation.data.slot;
 
-        match self
-            .op_pool
-            .insert_attestation(attestation, &fork, &self.spec)
-        {
+        match self.op_pool.insert_attestation(
+            attestation,
+            &fork,
+            genesis_validators_root,
+            &self.spec,
+        ) {
             Ok(_) => {}
             Err(e) => {
                 error!(

@@ -1674,6 +1688,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let mut block = SignedBeaconBlock {
             message: BeaconBlock {
                 slot: state.slot,
+                proposer_index: state.get_beacon_proposer_index(state.slot, &self.spec)? as u64,
                 parent_root,
                 state_root: Hash256::zero(),
                 body: BeaconBlockBody {

@@ -2014,7 +2029,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         // If we are unable to read the slot clock we assume that it is prior to genesis and
         // therefore use the genesis slot.
         let slot = self.slot().unwrap_or_else(|_| self.spec.genesis_slot);
-        self.spec.enr_fork_id(slot)
+
+        self.spec.enr_fork_id(slot, self.genesis_validators_root)
     }
 
     /// Calculates the `Duration` to the next fork, if one exists.
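The common thread in the hunks above is that `genesis_validators_root` now feeds into every signing-domain computation (an eth2 spec v0.11 change), so signatures produced for one chain cannot be replayed on a chain that shares fork versions but has a different genesis state. A minimal, self-contained sketch of that domain derivation, assuming the spec's `compute_domain`/`compute_fork_data_root` shape rather than Lighthouse's actual internal API, and with the SSZ hash stubbed out:

// Illustrative only: how a signing domain can depend on both the fork version and the
// genesis validators root. `fork_data_root` stands in for SSZ hash_tree_root(ForkData{..}).
type Hash256 = [u8; 32];

/// Stand-in for `hash_tree_root(ForkData { current_version, genesis_validators_root })`.
/// A real implementation hashes the SSZ encoding; this stub just packs bytes so the
/// example compiles without a hashing dependency.
fn fork_data_root(current_version: [u8; 4], genesis_validators_root: Hash256) -> Hash256 {
    let mut root = [0u8; 32];
    root[..4].copy_from_slice(&current_version);
    root[4..].copy_from_slice(&genesis_validators_root[..28]);
    root
}

/// `compute_domain`: 4-byte domain type followed by the first 28 bytes of the fork data root.
fn compute_domain(
    domain_type: [u8; 4],
    fork_version: [u8; 4],
    genesis_validators_root: Hash256,
) -> Hash256 {
    let root = fork_data_root(fork_version, genesis_validators_root);
    let mut domain = [0u8; 32];
    domain[..4].copy_from_slice(&domain_type);
    domain[4..].copy_from_slice(&root[..28]);
    domain
}

fn main() {
    let domain_beacon_attester = [1, 0, 0, 0];
    let fork_version = [0, 0, 0, 1];
    let genesis_validators_root = [0u8; 32];
    let domain = compute_domain(domain_beacon_attester, fork_version, genesis_validators_root);
    // A different genesis_validators_root would yield a different domain, hence different signatures.
    println!("domain = {:02x?}", domain);
}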
@@ -187,6 +187,19 @@ where
             .map_err(|e| format!("DB error whilst reading eth1 cache: {:?}", e))
     }
 
+    /// Returns true if `self.store` contains a persisted beacon chain.
+    pub fn store_contains_beacon_chain(&self) -> Result<bool, String> {
+        let store = self
+            .store
+            .clone()
+            .ok_or_else(|| "load_from_store requires a store.".to_string())?;
+
+        Ok(store
+            .get::<PersistedBeaconChain>(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY))
+            .map_err(|e| format!("DB error when reading persisted beacon chain: {:?}", e))?
+            .is_some())
+    }
+
     /// Attempt to load an existing chain from the builder's `Store`.
     ///
     /// May initialize several components; including the op_pool and finalized checkpoints.

@@ -418,6 +431,7 @@ where
             // TODO: allow for persisting and loading the pool from disk.
             naive_aggregation_pool: <_>::default(),
             eth1_chain: self.eth1_chain,
+            genesis_validators_root: canonical_head.beacon_state.genesis_validators_root,
             canonical_head: TimeoutRwLock::new(canonical_head.clone()),
             genesis_block_root: self
                 .genesis_block_root
@@ -274,11 +274,12 @@ mod tests {
         a
     }
 
-    fn sign(a: &mut Attestation<E>, i: usize) {
+    fn sign(a: &mut Attestation<E>, i: usize, genesis_validators_root: Hash256) {
         a.sign(
             &generate_deterministic_keypair(i).sk,
             i,
             &Fork::default(),
+            genesis_validators_root,
             &E::default_spec(),
         )
         .expect("should sign attestation");

@@ -302,7 +303,7 @@ mod tests {
             "should not accept attestation without any signatures"
         );
 
-        sign(&mut a, 0);
+        sign(&mut a, 0, Hash256::random());
 
         assert_eq!(
             pool.insert(&a),

@@ -324,7 +325,7 @@ mod tests {
             "retrieved attestation should equal the one inserted"
         );
 
-        sign(&mut a, 1);
+        sign(&mut a, 1, Hash256::random());
 
         assert_eq!(
             pool.insert(&a),

@@ -338,8 +339,9 @@ mod tests {
         let mut a_0 = get_attestation(Slot::new(0));
         let mut a_1 = a_0.clone();
 
-        sign(&mut a_0, 0);
-        sign(&mut a_1, 1);
+        let genesis_validators_root = Hash256::random();
+        sign(&mut a_0, 0, genesis_validators_root);
+        sign(&mut a_1, 1, genesis_validators_root);
 
         let pool = NaiveAggregationPool::default();

@@ -374,7 +376,7 @@ mod tests {
         let mut a_different = a_0.clone();
         let different_root = Hash256::from_low_u64_be(1337);
         unset_bit(&mut a_different, 0);
-        sign(&mut a_different, 2);
+        sign(&mut a_different, 2, genesis_validators_root);
         assert!(a_different.data.beacon_block_root != different_root);
         a_different.data.beacon_block_root = different_root;

@@ -396,7 +398,7 @@ mod tests {
     #[test]
     fn auto_pruning() {
         let mut base = get_attestation(Slot::new(0));
-        sign(&mut base, 0);
+        sign(&mut base, 0, Hash256::random());
 
         let pool = NaiveAggregationPool::default();

@@ -450,7 +452,7 @@ mod tests {
     #[test]
     fn max_attestations() {
         let mut base = get_attestation(Slot::new(0));
-        sign(&mut base, 0);
+        sign(&mut base, 0, Hash256::random());
 
         let pool = NaiveAggregationPool::default();
@@ -307,7 +307,9 @@ where
 
         let randao_reveal = {
             let epoch = slot.epoch(E::slots_per_epoch());
-            let domain = self.spec.get_domain(epoch, Domain::Randao, fork);
+            let domain =
+                self.spec
+                    .get_domain(epoch, Domain::Randao, fork, state.genesis_validators_root);
             let message = epoch.signing_root(domain);
             Signature::new(message.as_bytes(), sk)
         };

@@ -317,7 +319,7 @@ where
             .produce_block_on_state(state, slot, randao_reveal)
             .expect("should produce block");
 
-        let signed_block = block.sign(sk, &state.fork, &self.spec);
+        let signed_block = block.sign(sk, &state.fork, state.genesis_validators_root, &self.spec);
 
         (signed_block, state)
     }

@@ -402,6 +404,7 @@ where
             attestation.data.target.epoch,
             Domain::BeaconAttester,
             fork,
+            state.genesis_validators_root,
         );
 
         let message = attestation.data.signing_root(domain);
@@ -106,7 +106,7 @@ fn produces_attestations() {
             );
             assert_eq!(
                 attestation.signature,
-                AggregateSignature::new(),
+                AggregateSignature::empty_signature(),
                 "bad signature"
             );
             assert_eq!(data.index, index, "bad index");
@@ -89,12 +89,12 @@ fn update_proposal_signatures(
             .get(proposer_index)
             .expect("proposer keypair should be available");
 
-        snapshot.beacon_block =
-            snapshot
-                .beacon_block
-                .message
-                .clone()
-                .sign(&keypair.sk, &state.fork, spec);
+        snapshot.beacon_block = snapshot.beacon_block.message.clone().sign(
+            &keypair.sk,
+            &state.fork,
+            state.genesis_validators_root,
+            spec,
+        );
     }
 }

@@ -352,7 +352,6 @@ fn invalid_signatures() {
     */
     let mut snapshots = CHAIN_SEGMENT.clone();
     let proposer_slashing = ProposerSlashing {
-        proposer_index: 0,
         signed_header_1: SignedBeaconBlockHeader {
             message: snapshots[block_index].beacon_block.message.block_header(),
             signature: junk_signature(),
@@ -153,6 +153,26 @@ where
                 Ok((builder, spec, context))
             })
             .and_then(move |(builder, spec, context)| {
+                let chain_exists = builder
+                    .store_contains_beacon_chain()
+                    .unwrap_or_else(|_| false);
+
+                // If the client is expect to resume but there's no beacon chain in the database,
+                // use the `DepositContract` method. This scenario is quite common when the client
+                // is shutdown before finding genesis via eth1.
+                //
+                // Alternatively, if there's a beacon chain in the database then always resume
+                // using it.
+                let client_genesis = if client_genesis == ClientGenesis::Resume && !chain_exists {
+                    info!(context.log, "Defaulting to deposit contract genesis");
+
+                    ClientGenesis::DepositContract
+                } else if chain_exists {
+                    ClientGenesis::Resume
+                } else {
+                    client_genesis
+                };
+
                 let genesis_state_future: Box<dyn Future<Item = _, Error = _> + Send> =
                     match client_genesis {
                         ClientGenesis::Interop {
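The new comment block above describes a three-way decision. A small stand-alone restatement of that decision as a pure function may make it easier to scan; this uses a stripped-down stand-in for the crate's `ClientGenesis` enum and is illustrative only, the real logic being the `if`/`else if` chain shown above:

// Minimal stand-in for the crate's `ClientGenesis`; only what the decision needs.
#[derive(Clone, Copy, PartialEq, Debug)]
enum ClientGenesis {
    Resume,
    DepositContract,
    Other, // placeholder for any other explicitly requested method
}

// Same decision as the builder's `if`/`else if` chain, written as a pure function.
fn effective_genesis(requested: ClientGenesis, chain_exists: bool) -> ClientGenesis {
    match (requested, chain_exists) {
        // Asked to resume but nothing is persisted yet: fall back to deposit-contract genesis.
        (ClientGenesis::Resume, false) => ClientGenesis::DepositContract,
        // A beacon chain already exists on disk: always resume from it.
        (_, true) => ClientGenesis::Resume,
        // Otherwise honour whatever was requested.
        (other, false) => other,
    }
}

fn main() {
    assert_eq!(
        effective_genesis(ClientGenesis::Resume, false),
        ClientGenesis::DepositContract
    );
    assert_eq!(effective_genesis(ClientGenesis::Other, true), ClientGenesis::Resume);
    assert_eq!(effective_genesis(ClientGenesis::Other, false), ClientGenesis::Other);
}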
@@ -12,7 +12,7 @@ const TESTNET_SPEC_CONSTANTS: &str = "minimal";
 const DEFAULT_FREEZER_DB_DIR: &str = "freezer_db";
 
 /// Defines how the client should initialize the `BeaconChain` and other components.
-#[derive(Debug, Clone, Serialize, Deserialize)]
+#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)]
 pub enum ClientGenesis {
     /// Reads the genesis state and other persisted data from the `Store`.
     Resume,
beacon_node/network/src/sync/block_processor.rs (new file, 214 lines)
@@ -0,0 +1,214 @@
use crate::router::processor::FUTURE_SLOT_TOLERANCE;
use crate::sync::manager::SyncMessage;
use crate::sync::range_sync::BatchId;
use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError};
use eth2_libp2p::PeerId;
use slog::{crit, debug, error, trace, warn};
use std::sync::{Arc, Weak};
use tokio::sync::mpsc;
use types::SignedBeaconBlock;

/// Id associated to a block processing request, either a batch or a single block.
#[derive(Clone, Debug, PartialEq)]
pub enum ProcessId {
    /// Processing Id of a range syncing batch.
    RangeBatchId(BatchId),
    /// Processing Id of the parent lookup of a block
    ParentLookup(PeerId),
}

/// The result of a block processing request.
// TODO: When correct batch error handling occurs, we will include an error type.
#[derive(Debug)]
pub enum BatchProcessResult {
    /// The batch was completed successfully.
    Success,
    /// The batch processing failed.
    Failed,
}

/// Spawns a thread handling the block processing of a request: range syncing or parent lookup.
pub fn spawn_block_processor<T: BeaconChainTypes>(
    chain: Weak<BeaconChain<T>>,
    process_id: ProcessId,
    downloaded_blocks: Vec<SignedBeaconBlock<T::EthSpec>>,
    mut sync_send: mpsc::UnboundedSender<SyncMessage<T::EthSpec>>,
    log: slog::Logger,
) {
    std::thread::spawn(move || {
        match process_id {
            // this a request from the range sync
            ProcessId::RangeBatchId(batch_id) => {
                debug!(log, "Processing batch"; "id" => *batch_id, "blocks" => downloaded_blocks.len());
                let result = match process_blocks(chain, downloaded_blocks.iter(), &log) {
                    Ok(_) => {
                        debug!(log, "Batch processed"; "id" => *batch_id );
                        BatchProcessResult::Success
                    }
                    Err(e) => {
                        debug!(log, "Batch processing failed"; "id" => *batch_id, "error" => e);
                        BatchProcessResult::Failed
                    }
                };

                let msg = SyncMessage::BatchProcessed {
                    batch_id: batch_id,
                    downloaded_blocks: downloaded_blocks,
                    result,
                };
                sync_send.try_send(msg).unwrap_or_else(|_| {
                    debug!(
                        log,
                        "Block processor could not inform range sync result. Likely shutting down."
                    );
                });
            }
            // this a parent lookup request from the sync manager
            ProcessId::ParentLookup(peer_id) => {
                debug!(log, "Processing parent lookup"; "last_peer_id" => format!("{}", peer_id), "blocks" => downloaded_blocks.len());
                // parent blocks are ordered from highest slot to lowest, so we need to process in
                // reverse
                match process_blocks(chain, downloaded_blocks.iter().rev(), &log) {
                    Err(e) => {
                        warn!(log, "Parent lookup failed"; "last_peer_id" => format!("{}", peer_id), "error" => e);
                        sync_send
                            .try_send(SyncMessage::ParentLookupFailed(peer_id))
                            .unwrap_or_else(|_| {
                                // on failure, inform to downvote the peer
                                debug!(
                                    log,
                                    "Block processor could not inform parent lookup result. Likely shutting down."
                                );
                            });
                    }
                    Ok(_) => {
                        debug!(log, "Parent lookup processed successfully");
                    }
                }
            }
        }
    });
}

/// Helper function to process blocks batches which only consumes the chain and blocks to process.
// TODO: Verify the fork choice logic and the correct error handling from `process_chain_segment`.
// Ensure fork-choice doesn't need to be run during the failed errors.
fn process_blocks<
    'a,
    T: BeaconChainTypes,
    I: Iterator<Item = &'a SignedBeaconBlock<T::EthSpec>>,
>(
    chain: Weak<BeaconChain<T>>,
    downloaded_blocks: I,
    log: &slog::Logger,
) -> Result<(), String> {
    if let Some(chain) = chain.upgrade() {
        let blocks = downloaded_blocks.cloned().collect::<Vec<_>>();
        match chain.process_chain_segment(blocks) {
            Ok(roots) => {
                if roots.is_empty() {
                    debug!(log, "All blocks already known");
                } else {
                    debug!(
                        log, "Imported blocks from network";
                        "count" => roots.len(),
                    );
                    // Batch completed successfully with at least one block, run fork choice.
                    // TODO: Verify this logic
                    run_fork_choice(chain, log);
                }
            }
            Err(BlockError::ParentUnknown(parent)) => {
                // blocks should be sequential and all parents should exist
                warn!(
                    log, "Parent block is unknown";
                    "parent_root" => format!("{}", parent),
                );
                return Err(format!("Block has an unknown parent: {}", parent));
            }
            Err(BlockError::BlockIsAlreadyKnown) => {
                // TODO: Check handling of this
                crit!(log, "Unknown handling of block error");
            }
            Err(BlockError::FutureSlot {
                present_slot,
                block_slot,
            }) => {
                if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot {
                    // The block is too far in the future, drop it.
                    warn!(
                        log, "Block is ahead of our slot clock";
                        "msg" => "block for future slot rejected, check your time",
                        "present_slot" => present_slot,
                        "block_slot" => block_slot,
                        "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE,
                    );
                } else {
                    // The block is in the future, but not too far.
                    debug!(
                        log, "Block is slightly ahead of our slot clock, ignoring.";
                        "present_slot" => present_slot,
                        "block_slot" => block_slot,
                        "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE,
                    );
                }
                return Err(format!(
                    "Block with slot {} is higher than the current slot {}",
                    block_slot, present_slot
                ));
            }
            Err(BlockError::WouldRevertFinalizedSlot { .. }) => {
                //TODO: Check handling. Run fork choice?
                debug!(
                    log, "Finalized or earlier block processed";
                );
                // block reached our finalized slot or was earlier, move to the next block
                // TODO: How does this logic happen for the chain segment. We would want to
                // continue processing in this case.
            }
            Err(BlockError::GenesisBlock) => {
                debug!(
                    log, "Genesis block was processed";
                );
                // TODO: Similarly here. Prefer to continue processing.
            }
            Err(BlockError::BeaconChainError(e)) => {
                // TODO: Run fork choice?
                warn!(
                    log, "BlockProcessingFailure";
                    "msg" => "unexpected condition in processing block.",
                    "outcome" => format!("{:?}", e)
                );
                return Err(format!("Internal error whilst processing block: {:?}", e));
            }
            other => {
                // TODO: Run fork choice?
                warn!(
                    log, "Invalid block received";
                    "msg" => "peer sent invalid block",
                    "outcome" => format!("{:?}", other),
                );
                return Err(format!("Peer sent invalid block. Reason: {:?}", other));
            }
        }
    }
    Ok(())
}

/// Runs fork-choice on a given chain. This is used during block processing after one successful
/// block import.
fn run_fork_choice<T: BeaconChainTypes>(chain: Arc<BeaconChain<T>>, log: &slog::Logger) {
    match chain.fork_choice() {
        Ok(()) => trace!(
            log,
            "Fork choice success";
            "location" => "batch processing"
        ),
        Err(e) => error!(
            log,
            "Fork choice failed";
            "error" => format!("{:?}", e),
            "location" => "batch import error"
        ),
    }
}
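The new module above hands each processing request to a dedicated thread and reports the outcome back to the sync manager over a channel, treating a closed channel as a normal shutdown rather than an error. A simplified, self-contained sketch of that pattern, using `std::thread` and `std::sync::mpsc` instead of the tokio channel and the real beacon-chain types:

// Simplified illustration of the worker-thread + report-back-channel pattern.
use std::sync::mpsc;
use std::thread;

#[derive(Debug)]
enum BatchProcessResult {
    Success,
    Failed,
}

fn spawn_processor(blocks: Vec<u64>, tx: mpsc::Sender<(usize, BatchProcessResult)>) {
    thread::spawn(move || {
        // Pretend "processing" succeeds whenever the batch is non-empty.
        let result = if blocks.is_empty() {
            BatchProcessResult::Failed
        } else {
            BatchProcessResult::Success
        };
        // If the receiver is gone, the sync side is shutting down; just ignore the error.
        let _ = tx.send((blocks.len(), result));
    });
}

fn main() {
    let (tx, rx) = mpsc::channel();
    spawn_processor(vec![1, 2, 3], tx);
    if let Ok((count, result)) = rx.recv() {
        println!("processed {} blocks: {:?}", count, result);
    }
}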
@@ -33,8 +33,9 @@
 //! if an attestation references an unknown block) this manager can search for the block and
 //! subsequently search for parents if needed.
 
+use super::block_processor::{spawn_block_processor, BatchProcessResult, ProcessId};
 use super::network_context::SyncNetworkContext;
-use super::range_sync::{Batch, BatchProcessResult, RangeSync};
+use super::range_sync::{BatchId, RangeSync};
 use crate::router::processor::PeerSyncInfo;
 use crate::service::NetworkMessage;
 use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome};

@@ -99,10 +100,13 @@ pub enum SyncMessage<T: EthSpec> {
 
     /// A batch has been processed by the block processor thread.
     BatchProcessed {
-        process_id: u64,
-        batch: Box<Batch<T>>,
+        batch_id: BatchId,
+        downloaded_blocks: Vec<SignedBeaconBlock<T>>,
         result: BatchProcessResult,
     },
+
+    /// A parent lookup has failed for a block given by this `peer_id`.
+    ParentLookupFailed(PeerId),
 }
 
 /// Maintains a sequential list of parents to lookup and the lookup's current state.

@@ -172,6 +176,9 @@ pub struct SyncManager<T: BeaconChainTypes> {
 
     /// The logger for the import manager.
     log: Logger,
+
+    /// The sending part of input_channel
+    sync_send: mpsc::UnboundedSender<SyncMessage<T::EthSpec>>,
 }
 
 /// Spawns a new `SyncManager` thread which has a weak reference to underlying beacon

@@ -202,6 +209,7 @@ pub fn spawn<T: BeaconChainTypes>(
         single_block_lookups: FnvHashMap::default(),
         full_peers: HashSet::new(),
         log: log.clone(),
+        sync_send: sync_send.clone(),
     };
 
     // spawn the sync manager thread

@@ -590,8 +598,6 @@ impl<T: BeaconChainTypes> SyncManager<T> {
         // If the last block in the queue has an unknown parent, we continue the parent
         // lookup-search.
 
-        let total_blocks_to_process = parent_request.downloaded_blocks.len();
-
         if let Some(chain) = self.chain.upgrade() {
             let newest_block = parent_request
                 .downloaded_blocks

@@ -606,7 +612,15 @@ impl<T: BeaconChainTypes> SyncManager<T> {
                     return;
                 }
                 Ok(BlockProcessingOutcome::Processed { .. })
-                | Ok(BlockProcessingOutcome::BlockIsAlreadyKnown { .. }) => {}
+                | Ok(BlockProcessingOutcome::BlockIsAlreadyKnown { .. }) => {
+                    spawn_block_processor(
+                        self.chain.clone(),
+                        ProcessId::ParentLookup(parent_request.last_submitted_peer.clone()),
+                        parent_request.downloaded_blocks,
+                        self.sync_send.clone(),
+                        self.log.clone(),
+                    );
+                }
                 Ok(outcome) => {
                     // all else we consider the chain a failure and downvote the peer that sent
                     // us the last block

@@ -634,64 +648,6 @@ impl<T: BeaconChainTypes> SyncManager<T> {
             // chain doesn't exist, drop the parent queue and return
             return;
         }
-
-        //TODO: Shift this to a block processing thread
-        [... remaining removed lines elided: the inline loop that re-processed each remaining
-        block in `parent_request.downloaded_blocks` with `chain.process_block`, downvoted
-        `parent_request.last_submitted_peer` on any failure, and then ran `chain.fork_choice()`;
-        that work now happens on the block processor thread ...]

@@ -782,17 +738,20 @@ impl<T: BeaconChainTypes> Future for SyncManager<T> {
                         self.inject_error(peer_id, request_id);
                     }
                     SyncMessage::BatchProcessed {
-                        process_id,
-                        batch,
+                        batch_id,
+                        downloaded_blocks,
                         result,
                     } => {
                         self.range_sync.handle_block_process_result(
                             &mut self.network,
-                            process_id,
-                            *batch,
+                            batch_id,
+                            downloaded_blocks,
                             result,
                         );
                     }
+                    SyncMessage::ParentLookupFailed(peer_id) => {
+                        self.network.downvote_peer(peer_id);
+                    }
                 },
                 Ok(Async::NotReady) => break,
                 Ok(Async::Ready(None)) => {
@@ -1,6 +1,7 @@
 //! Syncing for lighthouse.
 //!
 //! Stores the various syncing methods for the beacon chain.
+mod block_processor;
 pub mod manager;
 mod network_context;
 mod range_sync;
The range_sync `batch_processing` module (166 lines) is deleted.
@@ -1,166 +0,0 @@
-[entire module removed: `BatchProcessResult`, `spawn_batch_processor`, `process_batch` and
-`run_fork_choice`. The same logic, generalised to cover parent lookups as well as range
-batches, now lives in the new sync/block_processor.rs shown above; the `BlockError` match
-arms there are carried over from this file almost verbatim.]
@@ -1,5 +1,5 @@
 use super::batch::{Batch, BatchId, PendingBatches};
-use super::batch_processing::{spawn_batch_processor, BatchProcessResult};
+use crate::sync::block_processor::{spawn_block_processor, BatchProcessResult, ProcessId};
 use crate::sync::network_context::SyncNetworkContext;
 use crate::sync::SyncMessage;
 use beacon_chain::{BeaconChain, BeaconChainTypes};

@@ -76,7 +76,7 @@ pub struct SyncingChain<T: BeaconChainTypes> {
 
     /// A random id given to a batch process request. This is None if there is no ongoing batch
     /// process.
-    current_processing_id: Option<u64>,
+    current_processing_batch: Option<Batch<T::EthSpec>>,
 
     /// A send channel to the sync manager. This is given to the batch processor thread to report
     /// back once batch processing has completed.

@@ -120,7 +120,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
             to_be_downloaded_id: BatchId(1),
             to_be_processed_id: BatchId(1),
             state: ChainSyncingState::Stopped,
-            current_processing_id: None,
+            current_processing_batch: None,
             sync_send,
             chain,
             log,

@@ -167,15 +167,16 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
         // An entire batch of blocks has been received. This functions checks to see if it can be processed,
         // remove any batches waiting to be verified and if this chain is syncing, request new
         // blocks for the peer.
-        debug!(self.log, "Completed batch received"; "id"=> *batch.id, "blocks"=>batch.downloaded_blocks.len(), "awaiting_batches" => self.completed_batches.len());
+        debug!(self.log, "Completed batch received"; "id"=> *batch.id, "blocks" => &batch.downloaded_blocks.len(), "awaiting_batches" => self.completed_batches.len());
 
         // verify the range of received blocks
         // Note that the order of blocks is verified in block processing
         if let Some(last_slot) = batch.downloaded_blocks.last().map(|b| b.slot()) {
             // the batch is non-empty
-            if batch.start_slot > batch.downloaded_blocks[0].slot() || batch.end_slot < last_slot {
+            let first_slot = batch.downloaded_blocks[0].slot();
+            if batch.start_slot > first_slot || batch.end_slot < last_slot {
                 warn!(self.log, "BlocksByRange response returned out of range blocks";
-                    "response_initial_slot" => batch.downloaded_blocks[0].slot(),
+                    "response_initial_slot" => first_slot,
                     "requested_initial_slot" => batch.start_slot);
                 network.downvote_peer(batch.current_peer);
                 self.to_be_processed_id = batch.id; // reset the id back to here, when incrementing, it will check against completed batches

@@ -218,7 +219,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
         }
 
         // Only process one batch at a time
-        if self.current_processing_id.is_some() {
+        if self.current_processing_batch.is_some() {
             return;
         }

@@ -238,14 +239,14 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
     }
 
     /// Sends a batch to the batch processor.
-    fn process_batch(&mut self, batch: Batch<T::EthSpec>) {
-        // only spawn one instance at a time
-        let processing_id: u64 = rand::random();
-        self.current_processing_id = Some(processing_id);
-        spawn_batch_processor(
+    fn process_batch(&mut self, mut batch: Batch<T::EthSpec>) {
+        let downloaded_blocks = std::mem::replace(&mut batch.downloaded_blocks, Vec::new());
+        let batch_id = ProcessId::RangeBatchId(batch.id.clone());
+        self.current_processing_batch = Some(batch);
+        spawn_block_processor(
             self.chain.clone(),
-            processing_id,
-            batch,
+            batch_id,
+            downloaded_blocks,
             self.sync_send.clone(),
             self.log.clone(),
         );

@@ -256,30 +257,41 @@
     pub fn on_batch_process_result(
         &mut self,
         network: &mut SyncNetworkContext<T::EthSpec>,
-        processing_id: u64,
-        batch: &mut Option<Batch<T::EthSpec>>,
+        batch_id: BatchId,
+        downloaded_blocks: &mut Option<Vec<SignedBeaconBlock<T::EthSpec>>>,
         result: &BatchProcessResult,
     ) -> Option<ProcessingResult> {
-        if Some(processing_id) != self.current_processing_id {
-            // batch process doesn't belong to this chain
-            return None;
-        }
+        if let Some(current_batch) = &self.current_processing_batch {
+            if current_batch.id != batch_id {
+                // batch process does not belong to this chain
+                return None;
+            }
+            // Continue. This is our processing request
+        } else {
+            // not waiting on a processing result
+            return None;
+        }
 
-        // Consume the batch option
-        let batch = batch.take().or_else(|| {
+        // claim the result by consuming the option
+        let downloaded_blocks = downloaded_blocks.take().or_else(|| {
             // if taken by another chain, we are no longer waiting on a result.
+            self.current_processing_batch = None;
             crit!(self.log, "Processed batch taken by another chain");
             None
         })?;
 
+        // No longer waiting on a processing result
+        let mut batch = self.current_processing_batch.take().unwrap();
+        // These are the blocks of this batch
+        batch.downloaded_blocks = downloaded_blocks;
+
         // double check batches are processed in order TODO: Remove for prod
         if batch.id != self.to_be_processed_id {
             crit!(self.log, "Batch processed out of order";
-                "processed_batch_id" => *batch.id,
-                "expected_id" => *self.to_be_processed_id);
+                  "processed_batch_id" => *batch.id,
+                  "expected_id" => *self.to_be_processed_id);
         }
 
-        self.current_processing_id = None;
-
         let res = match result {
             BatchProcessResult::Success => {
                 *self.to_be_processed_id += 1;
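The `on_batch_process_result` rewrite above leans on `Option::take`: the same one-shot result is offered to several syncing chains in turn, and exactly one of them consumes the downloaded blocks while the rest see `None`. A small stand-alone illustration of that hand-off, using hypothetical ids and a plain `Vec` payload rather than the real `Batch`/`BatchId` types:

// Several candidate consumers are offered the same payload; only the matching one claims it.
fn try_claim(current_id: Option<u64>, incoming_id: u64, payload: &mut Option<Vec<u64>>) -> Option<Vec<u64>> {
    // Only the chain that is actually waiting on this id may claim the blocks.
    if current_id != Some(incoming_id) {
        return None;
    }
    // `take()` moves the payload out, leaving `None` behind for every later candidate.
    payload.take()
}

fn main() {
    let mut payload = Some(vec![1, 2, 3]);
    assert!(try_claim(Some(7), 9, &mut payload).is_none()); // wrong chain: payload untouched
    assert_eq!(try_claim(Some(9), 9, &mut payload), Some(vec![1, 2, 3])); // claimed
    assert!(payload.is_none()); // nothing left for anyone else
}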
@@ -2,11 +2,10 @@
 //! peers.
 
 mod batch;
-mod batch_processing;
 mod chain;
 mod chain_collection;
 mod range;
 
 pub use batch::Batch;
-pub use batch_processing::BatchProcessResult;
+pub use batch::BatchId;
 pub use range::RangeSync;
@@ -41,8 +41,9 @@
 
 use super::chain::ProcessingResult;
 use super::chain_collection::{ChainCollection, SyncState};
-use super::{Batch, BatchProcessResult};
+use super::BatchId;
 use crate::router::processor::PeerSyncInfo;
+use crate::sync::block_processor::BatchProcessResult;
 use crate::sync::manager::SyncMessage;
 use crate::sync::network_context::SyncNetworkContext;
 use beacon_chain::{BeaconChain, BeaconChainTypes};

@@ -130,8 +131,8 @@ impl<T: BeaconChainTypes> RangeSync<T> {
             },
             None => {
                 return warn!(self.log,
-                    "Beacon chain dropped. Peer not considered for sync";
-                    "peer_id" => format!("{:?}", peer_id));
+                             "Beacon chain dropped. Peer not considered for sync";
+                             "peer_id" => format!("{:?}", peer_id));
             }
         };

@@ -256,15 +257,15 @@
     pub fn handle_block_process_result(
         &mut self,
         network: &mut SyncNetworkContext<T::EthSpec>,
-        processing_id: u64,
-        batch: Batch<T::EthSpec>,
+        batch_id: BatchId,
+        downloaded_blocks: Vec<SignedBeaconBlock<T::EthSpec>>,
         result: BatchProcessResult,
     ) {
-        // build an option for passing the batch to each chain
-        let mut batch = Some(batch);
+        // build an option for passing the downloaded_blocks to each chain
+        let mut downloaded_blocks = Some(downloaded_blocks);
 
         match self.chains.finalized_request(|chain| {
-            chain.on_batch_process_result(network, processing_id, &mut batch, &result)
+            chain.on_batch_process_result(network, batch_id, &mut downloaded_blocks, &result)
         }) {
             Some((index, ProcessingResult::RemoveChain)) => {
                 let chain = self.chains.remove_finalized_chain(index);

@@ -293,7 +294,12 @@ impl<T: BeaconChainTypes> RangeSync<T> {
             Some((_, ProcessingResult::KeepChain)) => {}
             None => {
                 match self.chains.head_request(|chain| {
-                    chain.on_batch_process_result(network, processing_id, &mut batch, &result)
+                    chain.on_batch_process_result(
+                        network,
+                        batch_id,
+                        &mut downloaded_blocks,
+                        &result,
+                    )
                 }) {
                     Some((index, ProcessingResult::RemoveChain)) => {
                         let chain = self.chains.remove_head_chain(index);

@@ -308,7 +314,7 @@ impl<T: BeaconChainTypes> RangeSync<T> {
                     None => {
                         // This can happen if a chain gets purged due to being out of date whilst a
                        // batch process is in progress.
-                        debug!(self.log, "No chains match the block processing id"; "id" => processing_id);
+                        debug!(self.log, "No chains match the block processing id"; "id" => *batch_id);
                     }
                 }
             }
@ -438,7 +438,15 @@ pub fn get_genesis_time<T: BeaconChainTypes>(
|
||||
req: Request<Body>,
|
||||
beacon_chain: Arc<BeaconChain<T>>,
|
||||
) -> ApiResult {
|
||||
ResponseBuilder::new(&req)?.body(&beacon_chain.head()?.beacon_state.genesis_time)
|
||||
ResponseBuilder::new(&req)?.body(&beacon_chain.head_info()?.genesis_time)
|
||||
}
|
||||
|
||||
/// Read the `genesis_validators_root` from the current beacon chain state.
|
||||
pub fn get_genesis_validators_root<T: BeaconChainTypes>(
|
||||
req: Request<Body>,
|
||||
beacon_chain: Arc<BeaconChain<T>>,
|
||||
) -> ApiResult {
|
||||
ResponseBuilder::new(&req)?.body(&beacon_chain.head_info()?.genesis_validators_root)
|
||||
}
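For context, a minimal client-side sketch of consuming this new endpoint. It assumes a locally running beacon node with `--http` enabled on the default `5052` port and the `reqwest` crate with its `blocking` feature; none of this is part of the change itself:

```rust
// Hypothetical client: fetch the genesis validators root from a local node.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let url = "http://127.0.0.1:5052/beacon/genesis_validators_root";
    // Print the raw response body (typically a quoted hex string when JSON encoding is used).
    let body = reqwest::blocking::get(url)?.text()?;
    println!("genesis_validators_root: {}", body);
    Ok(())
}
```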
|
||||
|
||||
pub fn proposer_slashing<T: BeaconChainTypes>(
|
||||
|
@ -4,11 +4,12 @@ use crate::{ApiError, ApiResult, BoxFut, UrlQuery};
|
||||
use beacon_chain::{BeaconChain, BeaconChainTypes};
|
||||
use futures::{Future, Stream};
|
||||
use hyper::{Body, Request};
|
||||
use rest_types::{IndividualVotesRequest, IndividualVotesResponse};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use state_processing::per_epoch_processing::{TotalBalances, ValidatorStatus, ValidatorStatuses};
|
||||
use state_processing::per_epoch_processing::{TotalBalances, ValidatorStatuses};
|
||||
use std::sync::Arc;
|
||||
use types::{Epoch, EthSpec, PublicKeyBytes};
|
||||
use types::EthSpec;
|
||||
|
||||
/// The results of validators voting during an epoch.
|
||||
///
|
||||
@ -37,13 +38,13 @@ pub struct VoteCount {
|
||||
impl Into<VoteCount> for TotalBalances {
|
||||
fn into(self) -> VoteCount {
|
||||
VoteCount {
|
||||
current_epoch_active_gwei: self.current_epoch,
|
||||
previous_epoch_active_gwei: self.previous_epoch,
|
||||
current_epoch_attesting_gwei: self.current_epoch_attesters,
|
||||
current_epoch_target_attesting_gwei: self.current_epoch_target_attesters,
|
||||
previous_epoch_attesting_gwei: self.previous_epoch_attesters,
|
||||
previous_epoch_target_attesting_gwei: self.previous_epoch_target_attesters,
|
||||
previous_epoch_head_attesting_gwei: self.previous_epoch_head_attesters,
|
||||
current_epoch_active_gwei: self.current_epoch(),
|
||||
previous_epoch_active_gwei: self.previous_epoch(),
|
||||
current_epoch_attesting_gwei: self.current_epoch_attesters(),
|
||||
current_epoch_target_attesting_gwei: self.current_epoch_target_attesters(),
|
||||
previous_epoch_attesting_gwei: self.previous_epoch_attesters(),
|
||||
previous_epoch_target_attesting_gwei: self.previous_epoch_target_attesters(),
|
||||
previous_epoch_head_attesting_gwei: self.previous_epoch_head_attesters(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -70,68 +71,6 @@ pub fn get_vote_count<T: BeaconChainTypes>(
|
||||
ResponseBuilder::new(&req)?.body(&report)
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)]
|
||||
pub struct IndividualVotesRequest {
|
||||
pub epoch: Epoch,
|
||||
pub pubkeys: Vec<PublicKeyBytes>,
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)]
|
||||
pub struct IndividualVote {
|
||||
/// True if the validator has been slashed, ever.
|
||||
pub is_slashed: bool,
|
||||
/// True if the validator can withdraw in the current epoch.
|
||||
pub is_withdrawable_in_current_epoch: bool,
|
||||
/// True if the validator was active in the state's _current_ epoch.
|
||||
pub is_active_in_current_epoch: bool,
|
||||
/// True if the validator was active in the state's _previous_ epoch.
|
||||
pub is_active_in_previous_epoch: bool,
|
||||
/// The validator's effective balance in the _current_ epoch.
|
||||
pub current_epoch_effective_balance_gwei: u64,
|
||||
/// True if the validator had an attestation included in the _current_ epoch.
|
||||
pub is_current_epoch_attester: bool,
|
||||
/// True if the validator's beacon block root attestation for the first slot of the _current_
|
||||
/// epoch matches the block root known to the state.
|
||||
pub is_current_epoch_target_attester: bool,
|
||||
/// True if the validator had an attestation included in the _previous_ epoch.
|
||||
pub is_previous_epoch_attester: bool,
|
||||
/// True if the validator's beacon block root attestation for the first slot of the _previous_
|
||||
/// epoch matches the block root known to the state.
|
||||
pub is_previous_epoch_target_attester: bool,
|
||||
/// True if the validator's beacon block root attestation in the _previous_ epoch at the
|
||||
/// attestation's slot (`attestation_data.slot`) matches the block root known to the state.
|
||||
pub is_previous_epoch_head_attester: bool,
|
||||
}
|
||||
|
||||
impl Into<IndividualVote> for ValidatorStatus {
|
||||
fn into(self) -> IndividualVote {
|
||||
IndividualVote {
|
||||
is_slashed: self.is_slashed,
|
||||
is_withdrawable_in_current_epoch: self.is_withdrawable_in_current_epoch,
|
||||
is_active_in_current_epoch: self.is_active_in_current_epoch,
|
||||
is_active_in_previous_epoch: self.is_active_in_previous_epoch,
|
||||
current_epoch_effective_balance_gwei: self.current_epoch_effective_balance,
|
||||
is_current_epoch_attester: self.is_current_epoch_attester,
|
||||
is_current_epoch_target_attester: self.is_current_epoch_target_attester,
|
||||
is_previous_epoch_attester: self.is_previous_epoch_attester,
|
||||
is_previous_epoch_target_attester: self.is_previous_epoch_target_attester,
|
||||
is_previous_epoch_head_attester: self.is_previous_epoch_head_attester,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)]
|
||||
pub struct IndividualVotesResponse {
|
||||
/// The epoch which is considered the "current" epoch.
|
||||
pub epoch: Epoch,
|
||||
/// The validators public key.
|
||||
pub pubkey: PublicKeyBytes,
|
||||
/// The index of the validator in state.validators.
|
||||
pub validator_index: Option<usize>,
|
||||
/// Voting statistics for the validator, if they voted in the given epoch.
|
||||
pub vote: Option<IndividualVote>,
|
||||
}
|
||||
|
||||
pub fn post_individual_votes<T: BeaconChainTypes>(
|
||||
req: Request<Body>,
|
||||
beacon_chain: Arc<BeaconChain<T>>,
|
||||
@ -156,12 +95,16 @@ pub fn post_individual_votes<T: BeaconChainTypes>(
|
||||
// This is the last slot of the given epoch (one prior to the first slot of the next epoch).
|
||||
let target_slot = (epoch + 1).start_slot(T::EthSpec::slots_per_epoch()) - 1;
|
||||
|
||||
let (_root, state) = state_at_slot(&beacon_chain, target_slot)?;
|
||||
let (_root, mut state) = state_at_slot(&beacon_chain, target_slot)?;
|
||||
let spec = &beacon_chain.spec;
|
||||
|
||||
let mut validator_statuses = ValidatorStatuses::new(&state, spec)?;
|
||||
validator_statuses.process_attestations(&state, spec)?;
|
||||
|
||||
state.update_pubkey_cache().map_err(|e| {
|
||||
ApiError::ServerError(format!("Unable to build pubkey cache: {:?}", e))
|
||||
})?;
|
||||
|
||||
body.pubkeys
|
||||
.into_iter()
|
||||
.map(|pubkey| {
|
||||
|
@ -82,6 +82,9 @@ pub fn route<T: BeaconChainTypes>(
|
||||
(&Method::GET, "/beacon/genesis_time") => {
|
||||
into_boxfut(beacon::get_genesis_time::<T>(req, beacon_chain))
|
||||
}
|
||||
(&Method::GET, "/beacon/genesis_validators_root") => {
|
||||
into_boxfut(beacon::get_genesis_validators_root::<T>(req, beacon_chain))
|
||||
}
|
||||
(&Method::GET, "/beacon/validators") => {
|
||||
into_boxfut(beacon::get_validators::<T>(req, beacon_chain))
|
||||
}
|
||||
|
@ -209,19 +209,12 @@ fn return_validator_duties<T: BeaconChainTypes>(
|
||||
// The `beacon_chain` can return a validator index that does not exist in all states.
|
||||
// Therefore, we must check to ensure that the validator index is valid for our
|
||||
// `state`.
|
||||
let validator_index = if let Some(i) = beacon_chain
|
||||
let validator_index = beacon_chain
|
||||
.validator_index(&validator_pubkey)
|
||||
.map_err(|e| {
|
||||
ApiError::ServerError(format!("Unable to get validator index: {:?}", e))
|
||||
})? {
|
||||
if i < state.validators.len() {
|
||||
Some(i)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
ApiError::ServerError(format!("Unable to get validator index: {:?}", e))
|
||||
})?
|
||||
.filter(|i| *i < state.validators.len());
|
||||
|
||||
if let Some(validator_index) = validator_index {
|
||||
let duties = state
|
||||
@ -554,6 +547,7 @@ pub fn publish_aggregate_and_proofs<T: BeaconChainTypes>(
|
||||
// TODO: More efficient way of getting a fork?
|
||||
let fork = &beacon_chain.head()?.beacon_state.fork;
|
||||
|
||||
// TODO: Update to shift this task to dedicated task using await
|
||||
signed_proofs.par_iter().try_for_each(|signed_proof| {
|
||||
let agg_proof = &signed_proof.message;
|
||||
let validator_pubkey = &beacon_chain.validator_pubkey(agg_proof.aggregator_index as usize)?.ok_or_else(|| {
|
||||
@ -573,7 +567,7 @@ pub fn publish_aggregate_and_proofs<T: BeaconChainTypes>(
|
||||
* I (Paul H) will pick this up in a future PR.
|
||||
*/
|
||||
|
||||
if signed_proof.is_valid(validator_pubkey, fork, &beacon_chain.spec) {
|
||||
if signed_proof.is_valid(validator_pubkey, fork, beacon_chain.genesis_validators_root, &beacon_chain.spec) {
|
||||
let attestation = &agg_proof.aggregate;
|
||||
|
||||
match beacon_chain.process_attestation(attestation.clone(), AttestationType::Aggregated) {
|
||||
|
@ -48,17 +48,15 @@ fn get_randao_reveal<T: BeaconChainTypes>(
|
||||
slot: Slot,
|
||||
spec: &ChainSpec,
|
||||
) -> Signature {
|
||||
let fork = beacon_chain
|
||||
.head()
|
||||
.expect("should get head")
|
||||
.beacon_state
|
||||
.fork;
|
||||
let head = beacon_chain.head().expect("should get head");
|
||||
let fork = head.beacon_state.fork;
|
||||
let genesis_validators_root = head.beacon_state.genesis_validators_root;
|
||||
let proposer_index = beacon_chain
|
||||
.block_proposer(slot)
|
||||
.expect("should get proposer index");
|
||||
let keypair = generate_deterministic_keypair(proposer_index);
|
||||
let epoch = slot.epoch(E::slots_per_epoch());
|
||||
let domain = spec.get_domain(epoch, Domain::Randao, &fork);
|
||||
let domain = spec.get_domain(epoch, Domain::Randao, &fork, genesis_validators_root);
|
||||
let message = epoch.signing_root(domain);
|
||||
Signature::new(message.as_bytes(), &keypair.sk)
|
||||
}
|
||||
@ -69,16 +67,14 @@ fn sign_block<T: BeaconChainTypes>(
|
||||
block: BeaconBlock<T::EthSpec>,
|
||||
spec: &ChainSpec,
|
||||
) -> SignedBeaconBlock<T::EthSpec> {
|
||||
let fork = beacon_chain
|
||||
.head()
|
||||
.expect("should get head")
|
||||
.beacon_state
|
||||
.fork;
|
||||
let head = beacon_chain.head().expect("should get head");
|
||||
let fork = head.beacon_state.fork;
|
||||
let genesis_validators_root = head.beacon_state.genesis_validators_root;
|
||||
let proposer_index = beacon_chain
|
||||
.block_proposer(block.slot)
|
||||
.expect("should get proposer index");
|
||||
let keypair = generate_deterministic_keypair(proposer_index);
|
||||
block.sign(&keypair.sk, &fork, spec)
|
||||
block.sign(&keypair.sk, &fork, genesis_validators_root, spec)
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -94,6 +90,7 @@ fn validator_produce_attestation() {
|
||||
.client
|
||||
.beacon_chain()
|
||||
.expect("client should have beacon chain");
|
||||
let genesis_validators_root = beacon_chain.genesis_validators_root;
|
||||
let state = beacon_chain.head().expect("should get head").beacon_state;
|
||||
|
||||
let validator_index = 0;
|
||||
@ -192,6 +189,7 @@ fn validator_produce_attestation() {
|
||||
.attestation_committee_position
|
||||
.expect("should have committee position"),
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
)
|
||||
.expect("should sign attestation");
|
||||
@ -228,6 +226,7 @@ fn validator_produce_attestation() {
|
||||
aggregated_attestation,
|
||||
&keypair.sk,
|
||||
&state.fork,
|
||||
genesis_validators_root,
|
||||
spec,
|
||||
);
|
||||
|
||||
@ -635,6 +634,31 @@ fn genesis_time() {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn genesis_validators_root() {
|
||||
let mut env = build_env();
|
||||
|
||||
let node = build_node(&mut env, testing_client_config());
|
||||
let remote_node = node.remote_node().expect("should produce remote node");
|
||||
|
||||
let genesis_validators_root = env
|
||||
.runtime()
|
||||
.block_on(remote_node.http.beacon().get_genesis_validators_root())
|
||||
.expect("should fetch genesis validators root from http api");
|
||||
|
||||
assert_eq!(
|
||||
node.client
|
||||
.beacon_chain()
|
||||
.expect("should have beacon chain")
|
||||
.head()
|
||||
.expect("should get head")
|
||||
.beacon_state
|
||||
.genesis_validators_root,
|
||||
genesis_validators_root,
|
||||
"should match genesis validators root from head state"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn fork() {
|
||||
let mut env = build_env();
|
||||
@ -974,6 +998,7 @@ fn proposer_slashing() {
|
||||
proposer_index as u64,
|
||||
&key,
|
||||
fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
);
|
||||
|
||||
@ -998,6 +1023,7 @@ fn proposer_slashing() {
|
||||
proposer_index as u64,
|
||||
&key,
|
||||
fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
);
|
||||
invalid_proposer_slashing.signed_header_2 = invalid_proposer_slashing.signed_header_1.clone();
|
||||
@ -1052,6 +1078,7 @@ fn attester_slashing() {
|
||||
&validator_indices[..],
|
||||
&secret_keys[..],
|
||||
fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
);
|
||||
|
||||
@ -1077,6 +1104,7 @@ fn attester_slashing() {
|
||||
&validator_indices[..],
|
||||
&secret_keys[..],
|
||||
fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
);
|
||||
invalid_attester_slashing.attestation_2 = invalid_attester_slashing.attestation_1.clone();
|
||||
|
@ -218,7 +218,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
Arg::with_name("eth1-endpoint")
|
||||
.long("eth1-endpoint")
|
||||
.value_name("HTTP-ENDPOINT")
|
||||
.help("Specifies the server for a web3 connection to the Eth1 chain.")
|
||||
.help("Specifies the server for a web3 connection to the Eth1 chain. Also enables the --eth1 flag.")
|
||||
.takes_value(true)
|
||||
.default_value("http://127.0.0.1:8545")
|
||||
)
|
||||
@ -339,14 +339,5 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
|
||||
.required(true)
|
||||
.help("A file from which to read the state"))
|
||||
)
|
||||
/*
|
||||
* `prysm`
|
||||
*
|
||||
* Connect to the Prysmatic Labs testnet.
|
||||
*/
|
||||
.subcommand(SubCommand::with_name("prysm")
|
||||
.about("Connect to the Prysmatic Labs testnet on Goerli. Not guaranteed to be \
|
||||
up-to-date or functioning.")
|
||||
)
|
||||
)
|
||||
}
|
||||
|
@ -1,5 +1,6 @@
|
||||
use clap::ArgMatches;
|
||||
use client::{config::DEFAULT_DATADIR, ClientConfig, ClientGenesis, Eth2Config};
|
||||
use environment::ETH2_CONFIG_FILENAME;
|
||||
use eth2_config::{read_from_file, write_to_file};
|
||||
use eth2_libp2p::{Enr, Multiaddr};
|
||||
use eth2_testnet_config::Eth2TestnetConfig;
|
||||
@ -14,14 +15,12 @@ use std::path::PathBuf;
|
||||
use types::EthSpec;
|
||||
|
||||
pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml";
|
||||
pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml";
|
||||
pub const BEACON_NODE_DIR: &str = "beacon";
|
||||
pub const NETWORK_DIR: &str = "network";
|
||||
|
||||
type Result<T> = std::result::Result<T, String>;
|
||||
type Config = (ClientConfig, Eth2Config, Logger);
|
||||
|
||||
/// Gets the fully-initialized global client and eth2 configuration objects.
|
||||
/// Gets the fully-initialized global client configuration.
|
||||
///
|
||||
/// The top-level `clap` arguments should be provided as `cli_args`.
|
||||
///
|
||||
@ -29,26 +28,17 @@ type Config = (ClientConfig, Eth2Config, Logger);
|
||||
/// may be influenced by other external services like the contents of the file system or the
|
||||
/// response of some remote server.
|
||||
#[allow(clippy::cognitive_complexity)]
|
||||
pub fn get_configs<E: EthSpec>(
|
||||
pub fn get_config<E: EthSpec>(
|
||||
cli_args: &ArgMatches,
|
||||
mut eth2_config: Eth2Config,
|
||||
eth2_config: Eth2Config,
|
||||
core_log: Logger,
|
||||
) -> Result<Config> {
|
||||
) -> Result<ClientConfig> {
|
||||
let log = core_log.clone();
|
||||
|
||||
let mut client_config = ClientConfig::default();
|
||||
|
||||
client_config.spec_constants = eth2_config.spec_constants.clone();
|
||||
|
||||
// Read the `--datadir` flag.
|
||||
//
|
||||
// If it's not present, try and find the home directory (`~`) and push the default data
|
||||
// directory onto it.
|
||||
client_config.data_dir = cli_args
|
||||
.value_of("datadir")
|
||||
.map(|path| PathBuf::from(path).join(BEACON_NODE_DIR))
|
||||
.or_else(|| dirs::home_dir().map(|home| home.join(DEFAULT_DATADIR).join(BEACON_NODE_DIR)))
|
||||
.unwrap_or_else(|| PathBuf::from("."));
|
||||
client_config.data_dir = get_data_dir(cli_args);
|
||||
|
||||
// Load the client config, if it exists .
|
||||
let path = client_config.data_dir.join(CLIENT_CONFIG_FILENAME);
|
||||
@ -58,32 +48,7 @@ pub fn get_configs<E: EthSpec>(
|
||||
.ok_or_else(|| format!("{:?} file does not exist", path))?;
|
||||
}
|
||||
|
||||
// Load the eth2 config, if it exists .
|
||||
let path = client_config.data_dir.join(ETH2_CONFIG_FILENAME);
|
||||
if path.exists() {
|
||||
let loaded_eth2_config: Eth2Config = read_from_file(path.clone())
|
||||
.map_err(|e| format!("Unable to parse {:?} file: {:?}", path, e))?
|
||||
.ok_or_else(|| format!("{:?} file does not exist", path))?;
|
||||
|
||||
// The loaded spec must be using the same spec constants (e.g., minimal, mainnet) as the
|
||||
// client expects.
|
||||
if loaded_eth2_config.spec_constants == client_config.spec_constants {
|
||||
eth2_config = loaded_eth2_config
|
||||
} else {
|
||||
return Err(
|
||||
format!(
|
||||
"Eth2 config loaded from disk does not match client spec version. Got {} expected {}",
|
||||
&loaded_eth2_config.spec_constants,
|
||||
&client_config.spec_constants
|
||||
)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Read the `--testnet-dir` flag.
|
||||
if let Some(val) = cli_args.value_of("testnet-dir") {
|
||||
client_config.testnet_dir = Some(PathBuf::from(val));
|
||||
}
|
||||
client_config.testnet_dir = get_testnet_dir(cli_args);
|
||||
|
||||
/*
|
||||
* Networking
|
||||
@ -251,12 +216,13 @@ pub fn get_configs<E: EthSpec>(
|
||||
|
||||
// Defines the URL to reach the eth1 node.
|
||||
if let Some(val) = cli_args.value_of("eth1-endpoint") {
|
||||
client_config.sync_eth1_chain = true;
|
||||
client_config.eth1.endpoint = val.to_string();
|
||||
}
|
||||
|
||||
match cli_args.subcommand() {
|
||||
("testnet", Some(sub_cmd_args)) => {
|
||||
process_testnet_subcommand(&mut client_config, &mut eth2_config, sub_cmd_args)?
|
||||
process_testnet_subcommand(&mut client_config, ð2_config, sub_cmd_args)?
|
||||
}
|
||||
// No sub-command assumes a resume operation.
|
||||
_ => {
|
||||
@ -272,7 +238,7 @@ pub fn get_configs<E: EthSpec>(
|
||||
"Starting from an empty database";
|
||||
"data_dir" => format!("{:?}", client_config.data_dir)
|
||||
);
|
||||
init_new_client::<E>(&mut client_config, &mut eth2_config)?
|
||||
init_new_client::<E>(&mut client_config, ð2_config)?
|
||||
} else {
|
||||
info!(
|
||||
log,
|
||||
@ -342,7 +308,42 @@ pub fn get_configs<E: EthSpec>(
|
||||
client_config.websocket_server.port = 0;
|
||||
}
|
||||
|
||||
Ok((client_config, eth2_config, log))
|
||||
Ok(client_config)
|
||||
}
|
||||
|
||||
/// Gets the datadir which should be used.
|
||||
pub fn get_data_dir(cli_args: &ArgMatches) -> PathBuf {
|
||||
// Read the `--datadir` flag.
|
||||
//
|
||||
// If it's not present, try and find the home directory (`~`) and push the default data
|
||||
// directory onto it.
|
||||
cli_args
|
||||
.value_of("datadir")
|
||||
.map(|path| PathBuf::from(path).join(BEACON_NODE_DIR))
|
||||
.or_else(|| dirs::home_dir().map(|home| home.join(DEFAULT_DATADIR).join(BEACON_NODE_DIR)))
|
||||
.unwrap_or_else(|| PathBuf::from("."))
|
||||
}
|
||||
|
||||
/// Gets the testnet dir which should be used.
|
||||
pub fn get_testnet_dir(cli_args: &ArgMatches) -> Option<PathBuf> {
|
||||
// Read the `--testnet-dir` flag.
|
||||
if let Some(val) = cli_args.value_of("testnet-dir") {
|
||||
Some(PathBuf::from(val))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_eth2_testnet_config<E: EthSpec>(
|
||||
testnet_dir: &Option<PathBuf>,
|
||||
) -> Result<Eth2TestnetConfig<E>> {
|
||||
Ok(if let Some(testnet_dir) = testnet_dir {
|
||||
Eth2TestnetConfig::load(testnet_dir.clone())
|
||||
.map_err(|e| format!("Unable to open testnet dir at {:?}: {}", testnet_dir, e))?
|
||||
} else {
|
||||
Eth2TestnetConfig::hard_coded()
|
||||
.map_err(|e| format!("Unable to load hard-coded testnet dir: {}", e))?
|
||||
})
|
||||
}
|
||||
|
||||
/// Load from an existing database.
|
||||
@ -377,37 +378,17 @@ fn load_from_datadir(client_config: &mut ClientConfig) -> Result<()> {
|
||||
/// Create a new client with the default configuration.
|
||||
fn init_new_client<E: EthSpec>(
|
||||
client_config: &mut ClientConfig,
|
||||
eth2_config: &mut Eth2Config,
|
||||
eth2_config: &Eth2Config,
|
||||
) -> Result<()> {
|
||||
let eth2_testnet_config: Eth2TestnetConfig<E> =
|
||||
if let Some(testnet_dir) = &client_config.testnet_dir {
|
||||
Eth2TestnetConfig::load(testnet_dir.clone())
|
||||
.map_err(|e| format!("Unable to open testnet dir at {:?}: {}", testnet_dir, e))?
|
||||
} else {
|
||||
Eth2TestnetConfig::hard_coded()
|
||||
.map_err(|e| format!("Unable to load hard-coded testnet dir: {}", e))?
|
||||
};
|
||||
|
||||
eth2_config.spec = eth2_testnet_config
|
||||
.yaml_config
|
||||
.as_ref()
|
||||
.ok_or_else(|| "The testnet directory must contain a spec config".to_string())?
|
||||
.apply_to_chain_spec::<E>(ð2_config.spec)
|
||||
.ok_or_else(|| {
|
||||
format!(
|
||||
"The loaded config is not compatible with the {} spec",
|
||||
ð2_config.spec_constants
|
||||
)
|
||||
})?;
|
||||
|
||||
let spec = &mut eth2_config.spec;
|
||||
get_eth2_testnet_config(&client_config.testnet_dir)?;
|
||||
|
||||
client_config.eth1.deposit_contract_address =
|
||||
format!("{:?}", eth2_testnet_config.deposit_contract_address()?);
|
||||
client_config.eth1.deposit_contract_deploy_block =
|
||||
eth2_testnet_config.deposit_contract_deploy_block;
|
||||
|
||||
client_config.eth1.follow_distance = spec.eth1_follow_distance / 2;
|
||||
client_config.eth1.follow_distance = eth2_config.spec.eth1_follow_distance / 2;
|
||||
client_config.eth1.lowest_cached_block_number = client_config
|
||||
.eth1
|
||||
.deposit_contract_deploy_block
|
||||
@ -470,7 +451,7 @@ pub fn create_new_datadir(client_config: &ClientConfig, eth2_config: &Eth2Config
|
||||
/// Process the `testnet` CLI subcommand arguments, updating the `builder`.
|
||||
fn process_testnet_subcommand(
|
||||
client_config: &mut ClientConfig,
|
||||
eth2_config: &mut Eth2Config,
|
||||
eth2_config: &Eth2Config,
|
||||
cli_args: &ArgMatches,
|
||||
) -> Result<()> {
|
||||
// Specifies that a random datadir should be used.
|
||||
@ -501,15 +482,6 @@ fn process_testnet_subcommand(
|
||||
client_config.network.propagation_percentage = Some(percentage);
|
||||
}
|
||||
|
||||
// Modify the `SECONDS_PER_SLOT` "constant".
|
||||
if let Some(slot_time) = cli_args.value_of("slot-time") {
|
||||
let slot_time = slot_time
|
||||
.parse::<u64>()
|
||||
.map_err(|e| format!("Unable to parse slot-time: {:?}", e))?;
|
||||
|
||||
eth2_config.spec.milliseconds_per_slot = slot_time;
|
||||
}
|
||||
|
||||
// Start matching on the second subcommand (e.g., `testnet bootstrap ...`).
|
||||
match cli_args.subcommand() {
|
||||
("recent", Some(cli_args)) => {
|
||||
@ -570,24 +542,6 @@ fn process_testnet_subcommand(
|
||||
|
||||
client_config.genesis = start_method;
|
||||
}
|
||||
("prysm", Some(_)) => {
|
||||
let mut spec = &mut eth2_config.spec;
|
||||
|
||||
spec.min_deposit_amount = 100;
|
||||
spec.max_effective_balance = 3_200_000_000;
|
||||
spec.ejection_balance = 1_600_000_000;
|
||||
spec.effective_balance_increment = 100_000_000;
|
||||
spec.min_genesis_time = 0;
|
||||
spec.genesis_fork_version = [0, 0, 0, 2];
|
||||
|
||||
client_config.eth1.deposit_contract_address =
|
||||
"0x802dF6aAaCe28B2EEb1656bb18dF430dDC42cc2e".to_string();
|
||||
client_config.eth1.deposit_contract_deploy_block = 1_487_270;
|
||||
client_config.eth1.follow_distance = 16;
|
||||
client_config.dummy_eth1_backend = false;
|
||||
|
||||
client_config.genesis = ClientGenesis::DepositContract;
|
||||
}
|
||||
(cmd, Some(_)) => {
|
||||
return Err(format!(
|
||||
"Invalid valid method specified: {}. See 'testnet --help'.",
|
||||
|
@ -7,6 +7,7 @@ mod config;
|
||||
pub use beacon_chain;
|
||||
pub use cli::cli_app;
|
||||
pub use client::{Client, ClientBuilder, ClientConfig, ClientGenesis};
|
||||
pub use config::{get_data_dir, get_eth2_testnet_config, get_testnet_dir};
|
||||
pub use eth2_config::Eth2Config;
|
||||
|
||||
use beacon_chain::{
|
||||
@ -14,7 +15,7 @@ use beacon_chain::{
|
||||
slot_clock::SystemTimeSlotClock,
|
||||
};
|
||||
use clap::ArgMatches;
|
||||
use config::get_configs;
|
||||
use config::get_config;
|
||||
use environment::RuntimeContext;
|
||||
use futures::{Future, IntoFuture};
|
||||
use slog::{info, warn};
|
||||
@ -51,20 +52,12 @@ impl<E: EthSpec> ProductionBeaconNode<E> {
|
||||
/// given `matches` and potentially configuration files on the local filesystem or other
|
||||
/// configurations hosted remotely.
|
||||
pub fn new_from_cli<'a, 'b>(
|
||||
mut context: RuntimeContext<E>,
|
||||
context: RuntimeContext<E>,
|
||||
matches: &ArgMatches<'b>,
|
||||
) -> impl Future<Item = Self, Error = String> + 'a {
|
||||
let log = context.log.clone();
|
||||
|
||||
// TODO: the eth2 config in the env is being modified.
|
||||
//
|
||||
// See https://github.com/sigp/lighthouse/issues/602
|
||||
get_configs::<E>(&matches, context.eth2_config.clone(), log)
|
||||
get_config::<E>(&matches, context.eth2_config.clone(), context.log.clone())
|
||||
.into_future()
|
||||
.and_then(move |(client_config, eth2_config, _log)| {
|
||||
context.eth2_config = eth2_config;
|
||||
Self::new(context, client_config)
|
||||
})
|
||||
.and_then(move |client_config| Self::new(context, client_config))
|
||||
}
|
||||
|
||||
/// Starts a new beacon node `Client` in the given `environment`.
|
||||
|
@ -10,7 +10,7 @@ use crate::{
|
||||
leveldb_store::LevelDB, DBColumn, Error, PartialBeaconState, SimpleStoreItem, Store, StoreItem,
|
||||
};
|
||||
use lru::LruCache;
|
||||
use parking_lot::RwLock;
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
use slog::{debug, trace, warn, Logger};
|
||||
use ssz::{Decode, Encode};
|
||||
use ssz_derive::{Decode, Encode};
|
||||
@ -45,7 +45,7 @@ pub struct HotColdDB<E: EthSpec> {
|
||||
/// The hot database also contains all blocks.
|
||||
pub(crate) hot_db: LevelDB<E>,
|
||||
/// LRU cache of deserialized blocks. Updated whenever a block is loaded.
|
||||
block_cache: RwLock<LruCache<Hash256, SignedBeaconBlock<E>>>,
|
||||
block_cache: Mutex<LruCache<Hash256, SignedBeaconBlock<E>>>,
|
||||
/// Chain spec.
|
||||
spec: ChainSpec,
|
||||
/// Logger.
|
||||
@ -109,7 +109,7 @@ impl<E: EthSpec> Store<E> for HotColdDB<E> {
|
||||
self.put(block_root, &block)?;
|
||||
|
||||
// Update cache.
|
||||
self.block_cache.write().put(*block_root, block);
|
||||
self.block_cache.lock().put(*block_root, block);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@ -119,7 +119,7 @@ impl<E: EthSpec> Store<E> for HotColdDB<E> {
|
||||
metrics::inc_counter(&metrics::BEACON_BLOCK_GET_COUNT);
|
||||
|
||||
// Check the cache.
|
||||
if let Some(block) = self.block_cache.write().get(block_root) {
|
||||
if let Some(block) = self.block_cache.lock().get(block_root) {
|
||||
metrics::inc_counter(&metrics::BEACON_BLOCK_CACHE_HIT_COUNT);
|
||||
return Ok(Some(block.clone()));
|
||||
}
|
||||
@ -128,7 +128,7 @@ impl<E: EthSpec> Store<E> for HotColdDB<E> {
|
||||
match self.get::<SignedBeaconBlock<E>>(block_root)? {
|
||||
Some(block) => {
|
||||
// Add to cache.
|
||||
self.block_cache.write().put(*block_root, block.clone());
|
||||
self.block_cache.lock().put(*block_root, block.clone());
|
||||
Ok(Some(block))
|
||||
}
|
||||
None => Ok(None),
|
||||
@ -137,7 +137,7 @@ impl<E: EthSpec> Store<E> for HotColdDB<E> {
|
||||
|
||||
/// Delete a block from the store and the block cache.
|
||||
fn delete_block(&self, block_root: &Hash256) -> Result<(), Error> {
|
||||
self.block_cache.write().pop(block_root);
|
||||
self.block_cache.lock().pop(block_root);
|
||||
self.delete::<SignedBeaconBlock<E>>(block_root)
|
||||
}
|
||||
|
||||
@ -338,7 +338,7 @@ impl<E: EthSpec> HotColdDB<E> {
|
||||
split: RwLock::new(Split::default()),
|
||||
cold_db: LevelDB::open(cold_path)?,
|
||||
hot_db: LevelDB::open(hot_path)?,
|
||||
block_cache: RwLock::new(LruCache::new(config.block_cache_size)),
|
||||
block_cache: Mutex::new(LruCache::new(config.block_cache_size)),
|
||||
config,
|
||||
spec,
|
||||
log,
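A short aside on the `RwLock` → `Mutex` switch above: `lru::LruCache::get` takes `&mut self` because a hit updates the entry's recency, so readers needed the write lock anyway and the `RwLock` bought no parallelism. The sketch below illustrates that access pattern with simplified key/value types (the real cache maps `Hash256` roots to `SignedBeaconBlock`s):

```rust
use lru::LruCache;
use parking_lot::Mutex;

/// Illustrative stand-in for the store's block cache.
struct BlockCache {
    inner: Mutex<LruCache<u64, Vec<u8>>>,
}

impl BlockCache {
    fn new(capacity: usize) -> Self {
        Self {
            inner: Mutex::new(LruCache::new(capacity)),
        }
    }

    /// Even a read takes the lock: `LruCache::get` is `&mut self`.
    fn get(&self, root: u64) -> Option<Vec<u8>> {
        self.inner.lock().get(&root).cloned()
    }

    fn put(&self, root: u64, block: Vec<u8>) {
        self.inner.lock().put(root, block);
    }
}
```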
|
||||
|
@ -11,7 +11,7 @@ use types::*;
|
||||
///
|
||||
/// Utilises lazy-loading from separate storage for its vector fields.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
#[derive(Debug, PartialEq, Clone, Encode, Decode)]
|
||||
pub struct PartialBeaconState<T>
|
||||
where
|
||||
@ -19,6 +19,7 @@ where
|
||||
{
|
||||
// Versioning
|
||||
pub genesis_time: u64,
|
||||
pub genesis_validators_root: Hash256,
|
||||
pub slot: Slot,
|
||||
pub fork: Fork,
|
||||
|
||||
@ -72,6 +73,7 @@ impl<T: EthSpec> PartialBeaconState<T> {
|
||||
// TODO: could use references/Cow for fields to avoid cloning
|
||||
PartialBeaconState {
|
||||
genesis_time: s.genesis_time,
|
||||
genesis_validators_root: s.genesis_validators_root,
|
||||
slot: s.slot,
|
||||
fork: s.fork.clone(),
|
||||
|
||||
@ -181,6 +183,7 @@ impl<E: EthSpec> TryInto<BeaconState<E>> for PartialBeaconState<E> {
|
||||
|
||||
Ok(BeaconState {
|
||||
genesis_time: self.genesis_time,
|
||||
genesis_validators_root: self.genesis_validators_root,
|
||||
slot: self.slot,
|
||||
fork: self.fork,
|
||||
|
||||
|
@ -22,3 +22,4 @@
|
||||
* [Database Configuration](./advanced_database.md)
|
||||
* [Contributing](./contributing.md)
|
||||
* [Development Environment](./setup.md)
|
||||
* [FAQs](./faq.md)
|
||||
|
@ -12,18 +12,28 @@ binary yourself.
|
||||
> experience with docker-compose to integrate your locally built docker image
|
||||
> with the docker-compose environment.
|
||||
|
||||
### 1. Clone the repository
|
||||
## 0. Install Docker Compose
|
||||
|
||||
Once you have docker-compose
|
||||
[installed](https://docs.docker.com/compose/install/), clone the
|
||||
[sigp/lighthouse-docker](https://github.com/sigp/lighthouse-docker) repository.
|
||||
Docker Compose relies on Docker Engine for any meaningful work, so make sure you have Docker Engine installed either locally or remotely, depending on your setup.
|
||||
|
||||
- On desktop systems like [Docker Desktop for Mac](https://docs.docker.com/docker-for-mac/install/) and [Docker Desktop for Windows](https://docs.docker.com/docker-for-windows/install/), Docker Compose is included as part of those desktop installs, so the desktop install is all you need.
|
||||
|
||||
- On Linux systems, you'll need to first [install Docker for your OS](https://docs.docker.com/install/#server) and then [follow the instructions here](https://docs.docker.com/compose/install/#install-compose-on-linux-systems).
|
||||
|
||||
> For more on installing Compose, see [here](https://docs.docker.com/compose/install/).
|
||||
|
||||
|
||||
## 1. Clone the repository
|
||||
|
||||
Once you have Docker Compose installed, clone the
|
||||
[sigp/lighthouse-docker](https://github.com/sigp/lighthouse-docker) repository:
|
||||
|
||||
```bash
|
||||
$ git clone https://github.com/sigp/lighthouse-docker
|
||||
$ cd lighthouse-docker
|
||||
git clone https://github.com/sigp/lighthouse-docker
|
||||
cd lighthouse-docker
|
||||
```
|
||||
|
||||
### 2. Configure the docker environment
|
||||
## 2. Configure the Docker environment
|
||||
|
||||
Then, create a file named `.env` with the following contents (these values are
|
||||
documented
|
||||
@ -41,36 +51,58 @@ DEPOSIT_VALUE=3200000000
|
||||
_This `.env` file should live in the `lighthouse-docker` directory alongside the
|
||||
`docker-compose.yml` file_.
|
||||
|
||||
### 3. Start Lighthouse
|
||||
## 3. Start Lighthouse
|
||||
|
||||
Start the docker-compose environment (you may need to use `sudo`):
|
||||
Start the docker-compose environment (you may need to prefix the command below with `sudo`):
|
||||
|
||||
```bash
|
||||
$ docker-compose up
|
||||
docker-compose up
|
||||
```
|
||||
|
||||
Watch the output of this command for the `Saved new validator to disk` log, as
|
||||
the `voting_pubkey` is the primary identifier for your new validator. This is
|
||||
useful for finding your validator in block explorers. Here's an example of the
|
||||
log:
|
||||
it contains your `voting_pubkey` -- the primary identifier for your new validator. This key is useful for finding your validator in block explorers. Here's an example of the log:
|
||||
|
||||
```bash
|
||||
validator_client_1 | Jan 10 12:06:05.632 INFO Saved new validator to disk voting_pubkey: 0x8fc28504448783b10b0a7f5a321505b07ad2ad8d6a8430b8868a0fcdedee43766bee725855506626085776e020dfa472
|
||||
validator_client_1 | Jan 10 12:06:05.632 INFO Saved new validator to disk
|
||||
voting_pubkey: 0x8fc28504448783b10b0a7f5a321505b07ad2ad8d6a8430b8868a0fcdedee43766bee725855506626085776e020dfa472
|
||||
```
|
||||
This is one of the first logs to appear, so you may need to scroll up or search your terminal to find it.
|
||||
|
||||
> Note: the docker-compose setup includes a fast-synced geth node. You can
|
||||
> Note: `docker-compose up` generates a new sub-directory -- to store your validator's deposit data, along with its voting and withdrawal keys -- in the `.lighthouse/validators` directory. This sub-directory is identified by your validator's `voting_pubkey` (the same `voting_pubkey` you see in the logs). So this is another way you can find it.
|
||||
|
||||
> Note: the docker-compose setup includes a fast-synced geth node. So you can
|
||||
> expect the `beacon_node` to log some eth1-related errors whilst the geth node
|
||||
> boots and becomes synced. This will only happen on the first start of the
|
||||
> compose environment or if geth loses sync.
|
||||
|
||||
### Installation complete!
|
||||
To find an estimate for how long your beacon node will take to finish syncing, look for logs that look like this:
|
||||
|
||||
In the next step you'll need to locate your `eth1_deposit_data.rlp` file from
|
||||
your `.lighthouse/validators` directory.
|
||||
```bash
|
||||
beacon_node_1 | Mar 16 11:33:53.979 INFO Syncing
|
||||
est_time: 47 mins, speed: 16.67 slots/sec, distance: 47296 slots (7 days 14 hrs), peers: 3, service: slot_notifier
|
||||
```
|
||||
|
||||
The `./lighthouse` directory is in the root of the `lighthouse-docker`
|
||||
repository. For example, if you ran Step 1 in `/home/karlm/` then you can find
|
||||
your validator directory in
|
||||
`/home/karlm/lighthouse-docker/.lighthouse/validators/`.
|
||||
You'll find the estimated time under `est_time`. In the example above, that's `47 mins`.
|
||||
|
||||
You can now go to [Become a Validator: Step 2](become-a-validator.html#2-submit-your-deposit-to-goerli).
|
||||
If your beacon node hasn't finished syncing yet, you'll see some ERRO messages indicating that your node hasn't synced yet:
|
||||
|
||||
```bash
|
||||
validator_client_1 | Mar 16 11:34:36.086 ERRO Beacon node is not synced current_epoch: 6999, node_head_epoch: 5531, service: duties
|
||||
```
|
||||
|
||||
It's safest to wait for your node to sync before moving on to the next step; otherwise, your validator may activate before you're able to produce blocks and attestations (and you may be penalized as a result).
|
||||
|
||||
However, since it generally takes somewhere between [4 and 8 hours](./faq.md) after depositing for a validator to become active, if your `est_time` is less than 4 hours, you _should_ be fine to just move on to the next step. After all, this is a testnet and you're only risking Goerli ETH!
|
||||
|
||||
## Installation complete!
|
||||
|
||||
In the [next step](become-a-validator.html#2-submit-your-deposit-to-goerli) you'll need to upload your validator's deposit data. This data is stored in a file called `eth1_deposit_data.rlp`.
|
||||
|
||||
You'll find it in `lighthouse-docker/.lighthouse/validators/` -- in the sub-directory that corresponds to your validator's public key (`voting_pubkey`).
|
||||
|
||||
|
||||
> For example, if you ran [step 1](become-a-validator-docker.html#1-clone-the-repository) in `/home/karlm/`, and your validator's `voting_pubkey` is `0x8592c7..`, then you'll find your `eth1_deposit_data.rlp` file in the following directory:
|
||||
>
|
||||
>`/home/karlm/lighthouse-docker/.lighthouse/validators/0x8592c7../`
|
||||
|
||||
Once you've located `eth1_deposit_data.rlp`, you're ready to move on to [Become a Validator: Step 2](become-a-validator.html#2-submit-your-deposit-to-goerli).
|
||||
|
@ -1,103 +1,106 @@
|
||||
# Become an Validator: Building from Source
|
||||
# Become a Validator: Building from Source
|
||||
|
||||
## 0. Install Rust
|
||||
If you don't have Rust installed already, visit [rustup.rs](https://rustup.rs/) to install it.
|
||||
|
||||
> Note: if you're not familiar with Rust or you'd like more detailed instructions, see our [installation guide](./installation.md).
|
||||
|
||||
|
||||
## 1. Download and install Lighthouse
|
||||
|
||||
If you already have Rust installed, you can install Lighthouse with the
|
||||
following commands (don't forget to use the `testnet5` branch):
|
||||
Once you have Rust installed, you can install Lighthouse with the following commands (don't forget to use the `testnet5` branch):
|
||||
|
||||
- `$ git clone https://github.com/sigp/lighthouse.git`
|
||||
- `$ git checkout testnet5`
|
||||
- `$ cd lighthouse`
|
||||
- `$ make`
|
||||
1. `git clone https://github.com/sigp/lighthouse.git`
|
||||
2. `cd lighthouse`
|
||||
3. `git checkout testnet5`
|
||||
4. `make`
|
||||
|
||||
You may need to open a new terminal window before running `make`.
|
||||
|
||||
You've completed this step when you can run `$ lighthouse --help` and see the
|
||||
help menu.
|
||||
|
||||
> - If you're not familiar with Rust or you'd like more detailed instructions,
|
||||
> see the [Installation Guide](./installation.md) which contains a
|
||||
> [Troubleshooting](installation.html#troubleshooting) section.
|
||||
|
||||
## 2. Start an Eth1 client
|
||||
|
||||
As Eth2 relies upon the Eth1 chain for validator on-boarding and eventually
|
||||
Eth1 may use the Eth2 chain as a finality gadget, all Eth2 validators must have
|
||||
a connection to an Eth1 node.
|
||||
Since Eth2 relies upon the Eth1 chain for validator on-boarding, all Eth2 validators must have a connection to an Eth1 node.
|
||||
|
||||
We provide instructions for using Geth (this is, by chance, what we ended up
|
||||
testing with), but you could use any client that implements the JSON RPC via
|
||||
HTTP. At least for Geth, a fast-synced node is sufficient.
|
||||
We provide instructions for using Geth (the Eth1 client that, by chance, we ended up testing with), but you could use any client that implements the JSON RPC via HTTP. A fast-synced node should be sufficient.
|
||||
|
||||
### Installing Geth
|
||||
If you're using a Mac, follow the instructions [listed here](https://github.com/ethereum/go-ethereum/wiki/Installation-Instructions-for-Mac) to install geth. Otherwise [see here](https://github.com/ethereum/go-ethereum/wiki/Installing-Geth).
|
||||
|
||||
### Starting Geth
|
||||
|
||||
[Install geth](https://github.com/ethereum/go-ethereum/wiki/Installing-Geth)
|
||||
and then use this command (or equivalent) to start your Eth1 node:
|
||||
Once you have geth installed, use this command to start your Eth1 node:
|
||||
|
||||
```bash
|
||||
$ geth --goerli --rpc
|
||||
geth --goerli --rpc
|
||||
```
|
||||
|
||||
## 3. Start your Beacon Node
|
||||
## 3. Start your beacon node
|
||||
|
||||
The beacon node is the core component of Eth2, it connects to other peers over
|
||||
the Internet and maintains a view of the chain.
|
||||
the internet and maintains a view of the chain.
|
||||
|
||||
Start your beacon node with:
|
||||
|
||||
```bash
|
||||
$ lighthouse beacon --eth1 --http
|
||||
lighthouse beacon --eth1 --http
|
||||
```
|
||||
|
||||
> Note: the `--http` flag enables the HTTP API for the validator client, and the `--eth1` flag tells the beacon node that it should sync with an Eth1 node (e.g. Geth). These flags are only required if you wish to run a validator.
|
||||
|
||||
|
||||
Your beacon node has started syncing when you see the following (truncated)
|
||||
log:
|
||||
|
||||
```
|
||||
Dec 09 12:57:18.026 INFO Syncing est_time: 2 hrs ...
|
||||
Dec 09 12:57:18.026 INFO Syncing
|
||||
est_time: 2 hrs ...
|
||||
```
|
||||
|
||||
The `distance` value reports the time since eth2 genesis, whilst the `est_time`
|
||||
reports an estimate of how long it will take your node to become synced.
|
||||
|
||||
It has finished syncing once you see the following (truncated) log:
|
||||
You'll know it's finished syncing once you see the following (truncated) log:
|
||||
|
||||
```
|
||||
Dec 09 12:27:06.010 INFO Synced slot: 16835, ...
|
||||
Dec 09 12:27:06.010 INFO Synced
|
||||
slot: 16835, ...
|
||||
```
|
||||
|
||||
> - The `--http` flag enables the HTTP API for the validator client.
|
||||
> - The `--eth1` flag tells the beacon node that it should sync with an Ethereum
|
||||
> 1 node (e.g., Geth). This is only required if you wish to run a validator.
|
||||
|
||||
## 4. Generate your validator key
|
||||
|
||||
Generate new validator BLS keypairs using:
|
||||
|
||||
```shell
|
||||
$ lighthouse account validator new random
|
||||
```bash
|
||||
lighthouse account validator new random
|
||||
```
|
||||
|
||||
Take note of the `voting_pubkey` of the new validator. This will be the primary
|
||||
identifier of the validator. This is how you can find your validator in block
|
||||
explorers.
|
||||
Take note of the `voting_pubkey` of the new validator:
|
||||
|
||||
You've completed this step when you see the equivalent line:
|
||||
```
|
||||
INFO Saved new validator to disk
|
||||
voting_pubkey: 0xa1625249d80...
|
||||
```
|
||||
|
||||
It's the validator's primary identifier, and will be used to find your validator in block explorers.
|
||||
|
||||
You've completed this step when you see something like the following line:
|
||||
|
||||
```
|
||||
Dec 02 21:42:01.337 INFO Generated validator directories count: 1, base_path: "/home/karl/.lighthouse/validators"
|
||||
```
|
||||
|
||||
> - This will generate a new _validator directory_ in the `.lighthouse/validators`
|
||||
> directory. Your validator directory will be identified by it's public key,
|
||||
> which looks something like `0xc483de...`. You'll need to find this directory
|
||||
> for the next step.
|
||||
> - These keys are good enough for the Lighthouse testnet, however they shouldn't
|
||||
> be considered secure until we've undergone a security audit (planned Jan
|
||||
> 2020).
|
||||
This means you've successfully generated a new sub-directory for your validator in the `.lighthouse/validators` directory. The sub-directory is identified by your validator's public key (`voting_pubkey`) and is used to store your validator's deposit data, along with its voting and withdrawal keys.
|
||||
|
||||
> Note: these keypairs are good enough for the Lighthouse testnet; however, they shouldn't be considered secure until we've undergone a security audit (planned March/April).
|
||||
|
||||
## 5. Start your validator client
|
||||
|
||||
For security reasons, the validator client runs separately to the beacon node.
|
||||
The validator client stores private keys and signs messages generated by the
|
||||
beacon node.
|
||||
Since the validator client stores private keys and signs messages generated by the beacon node, it runs as a separate process for security reasons.
|
||||
|
||||
You'll need both your beacon node _and_ validator client running if you want to
|
||||
stake.
|
||||
@ -105,32 +108,45 @@ stake.
|
||||
Start the validator client with:
|
||||
|
||||
```bash
|
||||
$ lighthouse validator
|
||||
lighthouse validator
|
||||
```
|
||||
|
||||
The validator client is running and has found your validator keys from step 3
|
||||
when you see the following log:
|
||||
You know that your validator client is running and has found the validator keys you generated in [step 4](become-a-validator-source.html#4-generate-your-validator-key) when you see the following logs:
|
||||
|
||||
```
|
||||
Dec 09 13:08:59.171 INFO Loaded validator keypair store voting_validators: 1
|
||||
Dec 09 13:09:09.000 INFO Awaiting activation slot: 17787, ...
|
||||
```
|
||||
|
||||
|
||||
To find an estimate for how long your beacon node will take to finish syncing, look out for the following logs:
|
||||
|
||||
```bash
|
||||
beacon_node_1 | Mar 16 11:33:53.979 INFO Syncing
|
||||
est_time: 47 mins, speed: 16.67 slots/sec, distance: 47296 slots (7 days 14 hrs), peers: 3, service: slot_notifier
|
||||
```
|
||||
|
||||
You'll find the estimated time under `est_time`. In the example log above, that's `47 mins`.
|
||||
|
||||
If your beacon node hasn't finished syncing yet, you'll see some `ERRO`
|
||||
messages indicating that your node isn't synced yet. It is safest to wait for
|
||||
your node to sync before moving onto the next step, otherwise your validator
|
||||
may activate before you're able to produce blocks and attestations. However, it
|
||||
generally takes 4-8+ hours after deposit for a validator to become active. If
|
||||
your `est_time` is less than 4 hours, you _should_ be fine to just move to the
|
||||
next step. After all, this is a testnet and you're only risking Goerli ETH.
|
||||
messages indicating that your node hasn't synced yet:
|
||||
|
||||
```bash
|
||||
validator_client_1 | Mar 16 11:34:36.086 ERRO Beacon node is not synced current_epoch: 6999, node_head_epoch: 5531, service: duties
|
||||
```
|
||||
|
||||
It's safest to wait for your node to sync before moving on to the next step; otherwise, your validator may activate before you're able to produce blocks and attestations (and you may be penalized as a result).
|
||||
|
||||
However, since it generally takes somewhere between [4 and 8 hours](./faq.md) after depositing for a validator to become active, if your `est_time` is less than 4 hours, you _should_ be fine to just move on to the next step. After all, this is a testnet and you're only risking Goerli ETH!
|
||||
|
||||
## Installation complete!
|
||||
|
||||
In the next step you'll need to locate your `eth1_deposit_data.rlp` file from
|
||||
your `.lighthouse/validators` directory.
|
||||
In the [next step](become-a-validator.html#2-submit-your-deposit-to-goerli) you'll need to upload your validator's deposit data. This data is stored in a file called `eth1_deposit_data.rlp`.
|
||||
|
||||
The `./lighthouse` directory is in your `$HOME` directory. For example, if
|
||||
you're in Linux and your user is `karlm`, you can find your validator directory
|
||||
in `/home/karlm/.lighthouse/validators/`.
|
||||
You'll find it in `~/.lighthouse/validators` -- in the sub-directory that corresponds to your validator's public key (`voting_pubkey`).
|
||||
|
||||
You can now go to [Become a Validator: Step 2](become-a-validator.html#2-submit-your-deposit-to-goerli).
|
||||
> For example, if your username is `karlm`, and your validator's public key (aka `voting_pubkey`) is `0x8592c7..`, then you'll find your `eth1_deposit_data.rlp` file in the following directory:
|
||||
>
|
||||
>`/home/karlm/.lighthouse/validators/0x8592c7../`
|
||||
|
||||
Once you've located your `eth1_deposit_data.rlp` file, you're ready to move on to [Become a Validator: Step 2](become-a-validator.html#2-submit-your-deposit-to-goerli).
|
||||
|
@ -1,30 +1,36 @@
|
||||
# Become an Ethereum 2.0 Testnet Validator
|
||||
|
||||
Running Lighthouse validator is easy if you're familiar with the terminal. It
|
||||
runs on Linux, MacOS and Windows and we have a Docker work-flow.
|
||||
Running a Lighthouse validator is easy if you're familiar with the terminal.
|
||||
|
||||
Before you start, you'll need [Metamask](https://metamask.io/) and 3.2 gETH
|
||||
Lighthouse runs on Linux, MacOS and Windows and has a Docker work-flow to make things as simple as possible.
|
||||
|
||||
|
||||
## 0. Acquire Goerli ETH
|
||||
Before you install Lighthouse, you'll need [Metamask](https://metamask.io/) and 3.2 gETH
|
||||
(Goerli ETH). We recommend the [mudit.blog
|
||||
faucet](https://faucet.goerli.mudit.blog/) for those familiar with Goerli, or
|
||||
[goerli.net](https://goerli.net/) for an overview of the testnet.
|
||||
|
||||
> If this is your first time using Metamask and/or interacting with an Ethereum test network, we recommend going through the beginning of [this guide](https://hack.aragon.org/docs/guides-use-metamask) first (up to the *Signing your first transaction with MetaMask* section).
|
||||
|
||||
## 1. Install and start Lighthouse
|
||||
|
||||
There are two different ways to install and start a Lighthouse validator:
|
||||
|
||||
- [Using `docker-compose`](./become-a-validator-docker.md): this is the easiest method.
|
||||
- [Building from source](./become-a-validator-source.md): this is a little more involved, however it
|
||||
1. [Using `docker-compose`](./become-a-validator-docker.md): this is the easiest method.
|
||||
|
||||
2. [Building from source](./become-a-validator-source.md): this is a little more involved, however it
|
||||
gives a more hands-on experience.
|
||||
|
||||
Once you have completed **only one** of these steps, move onto the next step.
|
||||
Once you've completed **either one** of these steps, you can move on to the next step.
|
||||
|
||||
## 2. Submit your deposit to Goerli
|
||||
|
||||
<div class="form-signin" id="uploadDiv">
|
||||
<p>Upload the <code>eth1_deposit_data.rlp</code> file from your validator
|
||||
directory (created in Step 1) to submit your 3.2 Goerli-ETH
|
||||
directory (created in the previous step) to submit your 3.2 Goerli-ETH
|
||||
deposit using Metamask.</p>
|
||||
<p>Hint: the method used in Step 1 will determine where this file is
|
||||
<p>Note that the method you used in step 1 will determine where this file is
|
||||
located.</p>
|
||||
<input id="fileInput" type="file" style="display: none">
|
||||
<button id="uploadButton" class="btn btn-lg btn-primary btn-block"
|
||||
@ -32,7 +38,7 @@ Once you have completed **only one** of these steps, move onto the next step.
|
||||
</div>
|
||||
|
||||
<div class="form-signin" id="waitingDiv" style="display: none">
|
||||
<p>Your validator deposit was submitted and this step is complete.</p>
|
||||
<p style="color: green">Your validator deposit was submitted and this step is complete.</p>
|
||||
<p>See the transaction on <a id="txLink" target="_blank"
|
||||
href="https://etherscan.io">Etherscan</a>
|
||||
or <a href="">reload</a> to perform another deposit.</p>
|
||||
@ -40,16 +46,17 @@ Once you have completed **only one** of these steps, move onto the next step.
|
||||
|
||||
<div class="form-signin" id="errorDiv" style="display: none">
|
||||
<h4 class="h3 mb-3 font-weight-normal">Error</h4>
|
||||
<p id="errorText">Unknown error.</p>
|
||||
<p id="errorText" style="color: red">Unknown error.</p>
|
||||
<p style="color: red">Please refresh to reupload.</p>
|
||||
</div>
|
||||
|
||||
> This deposit is using gETH (Goerli ETH) which has no real value. Don't ever
|
||||
> This deposit is made using gETH (Goerli ETH) which has no real value. Please don't ever
|
||||
> send _real_ ETH to our deposit contract!
|
||||
|
||||
## 3. Leave Lighthouse running
|
||||
|
||||
Leave your beacon node and validator client running and you'll see logs as the
|
||||
beacon node keeps synced with the network and the validator client produces
|
||||
beacon node stays synced with the network while the validator client produces
|
||||
blocks and attestations.
|
||||
|
||||
It will take 4-8+ hours for the beacon chain to process and activate your
|
||||
|
@ -49,7 +49,7 @@ $ docker run lighthouse:local lighthouse --help
|
||||
You can run a Docker beacon node with the following command:
|
||||
|
||||
```bash
|
||||
$ docker run -p 9000:9000 -p 5052:5052 -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse beacon --http
|
||||
$ docker run -p 9000:9000 -p 127.0.0.1:5052:5052 -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse lighthouse beacon --http --http-address 0.0.0.0
|
||||
```
|
||||
|
||||
> The `-p` and `-v` values are described below.
|
||||
@ -78,8 +78,8 @@ $ docker run -p 9000:9000 sigp/lighthouse lighthouse beacon
|
||||
```
|
||||
|
||||
If you use the `--http` flag you may also want to expose the HTTP port with `-p
|
||||
5052:5052`.
|
||||
127.0.0.1:5052:5052`.
|
||||
|
||||
```bash
|
||||
$ docker run -p 9000:9000 -p 5052:5052 sigp/lighthouse lighthouse beacon --http
|
||||
$ docker run -p 9000:9000 -p 127.0.0.1:5052:5052 sigp/lighthouse lighthouse beacon --http --http-address 0.0.0.0
|
||||
```
|
||||
|
81
book/src/faq.md
Normal file
@ -0,0 +1,81 @@
|
||||
# Frequently Asked Questions
|
||||
|
||||
## Why does it take so long for a validator to be activated?
|
||||
|
||||
After validators create their Eth1 deposit transaction there are two waiting
|
||||
periods before they can start producing blocks and attestations:
|
||||
|
||||
1. Waiting for the beacon chain to recognise the Eth1 block containing the
|
||||
deposit (generally 4 to 7.4 hours).
|
||||
1. Waiting in the queue for validator activation (generally 6.4 minutes for
|
||||
every 4 validators in the queue).
|
||||
|
||||
Detailed answers below:
|
||||
|
||||
### 1. Waiting for the beacon chain to detect the Eth1 deposit
|
||||
|
||||
Since the beacon chain uses Eth1 for validator on-boarding, beacon chain
|
||||
validators must listen to event logs from the deposit contract. Since the
|
||||
latest blocks of the Eth1 chain are vulnerable to re-orgs due to minor network
|
||||
partitions, beacon nodes follow the Eth1 chain at a distance of 1,024 blocks
|
||||
(~4 hours) (see
|
||||
[`ETH1_FOLLOW_DISTANCE`](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#misc)).
|
||||
This follow distance protects the beacon chain from on-boarding validators that
|
||||
are likely to be removed due to an Eth1 re-org.
|
||||
|
||||
Now we know there's a 4-hour delay before the beacon nodes even _consider_ an
|
||||
Eth1 block. Once they _are_ considering these blocks, there's a voting period
|
||||
where beacon validators vote on which Eth1 block to include in the beacon chain. This
|
||||
period is defined as 32 epochs (~3.4 hours, see
|
||||
[`ETH1_VOTING_PERIOD`](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#time-parameters)).
|
||||
During this voting period, each beacon block producer includes an
|
||||
[`Eth1Data`](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#eth1data)
|
||||
in their block which counts as a vote towards what that validator considers to
|
||||
be the head of the Eth1 chain at the start of the voting period (with respect
|
||||
to `ETH1_FOLLOW_DISTANCE`, of course). You can see the exact voting logic
|
||||
[here](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#eth1-data).
|
||||
|
||||
These two delays combined represent the time between an Eth1 deposit being
|
||||
included in an Eth1 data vote and that validator appearing in the beacon chain.
|
||||
The `ETH1_FOLLOW_DISTANCE` delay causes a minimum delay of ~4 hours and
|
||||
`ETH1_VOTING_PERIOD` means that if a validator deposit happens just _before_
|
||||
the start of a new voting period then they might not notice this delay at all.
|
||||
However, if the validator deposit happens just _after_ the start of the new
|
||||
voting period, the validator might have to wait ~3.4 hours for the next voting
|
||||
period. In times of very, very severe network issues, the network may even fail
|
||||
to vote in new Eth1 blocks, stopping all new validator deposits!
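As a rough sanity check on those figures, here's a back-of-the-envelope calculation (the ~14 s Eth1 block time and 12 s slot time are assumed averages, not protocol constants):

```rust
fn main() {
    // Assumed averages: ~14 s per Eth1 block, 12 s per beacon slot, 32 slots per epoch.
    let eth1_block_secs = 14.0_f64;
    let slot_secs = 12.0_f64;
    let slots_per_epoch = 32.0_f64;

    // ETH1_FOLLOW_DISTANCE = 1,024 Eth1 blocks.
    let follow_distance_hrs = 1_024.0 * eth1_block_secs / 3_600.0;
    // ETH1_VOTING_PERIOD = 32 epochs of beacon slots.
    let voting_period_hrs = 32.0 * slots_per_epoch * slot_secs / 3_600.0;

    println!("follow distance ≈ {:.1} h", follow_distance_hrs); // ≈ 4.0 h
    println!("voting period   ≈ {:.1} h", voting_period_hrs);   // ≈ 3.4 h
    // Best case ≈ 4 h, worst case ≈ 7.4 h -- the "4 to 7.4 hours" quoted above.
    println!("worst case      ≈ {:.1} h", follow_distance_hrs + voting_period_hrs);
}
```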
|
||||
|
||||
> Note: you can see the list of validators included in the beacon chain using
|
||||
> our REST API: [/beacon/validators/all](./http_beacon.md#beaconvalidatorsall)
|
||||
|
||||
### 2. Waiting for a validator to be activated

If a validator has provided an invalid public key or signature, they will
_never_ be activated or even show up in
[/beacon/validators/all](https://lighthouse-book.sigmaprime.io/http_beacon.html#beaconvalidatorsall).
They will simply be forgotten by the beacon chain! But, if those parameters were
correct, once the Eth1 delays have elapsed and the validator appears in the
beacon chain, there's _another_ delay before the validator becomes "active"
(canonical definition
[here](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#is_active_validator)) and can start producing blocks and attestations.

Firstly, the validator won't become active until their beacon chain balance is
equal to or greater than
[`MAX_EFFECTIVE_BALANCE`](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#gwei-values)
(32 ETH on mainnet, usually 3.2 ETH on testnets). Once this balance is reached,
the validator must wait until the start of the next epoch (up to 6.4 minutes)
for the
[`process_registry_updates`](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#registry-updates)
routine to run. This routine activates validators with respect to a [churn
limit](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#get_validator_churn_limit);
it will only allow the number of validators to increase (churn) by a certain
amount each epoch. Up until there are about 330,000 validators this churn limit
is set to 4, and it increases very slowly as the number of validators grows
from there.

If a new validator isn't within the churn limit from the front of the queue,
they will need to wait another epoch (6.4 minutes) for their next chance. This
repeats until the queue is cleared; the sketch below shows the arithmetic.

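A minimal sketch of that queue arithmetic, assuming the phase 0 churn constants
referenced above (`MIN_PER_EPOCH_CHURN_LIMIT = 4`, `CHURN_LIMIT_QUOTIENT =
65,536`); treat the numbers as illustrative rather than authoritative.

```rust
/// Sketch of the activation-queue wait, per the churn-limit rule described
/// above. Constants mirror the phase 0 spec values referenced in the text.
fn validator_churn_limit(active_validators: u64) -> u64 {
    const MIN_PER_EPOCH_CHURN_LIMIT: u64 = 4;
    const CHURN_LIMIT_QUOTIENT: u64 = 65_536;
    std::cmp::max(
        MIN_PER_EPOCH_CHURN_LIMIT,
        active_validators / CHURN_LIMIT_QUOTIENT,
    )
}

fn main() {
    let active_validators = 100_000_u64;
    let queue_position = 1_000_u64; // validators ahead of ours in the queue

    let churn = validator_churn_limit(active_validators);
    // Each epoch (~6.4 minutes) activates at most `churn` validators.
    let epochs_to_wait = (queue_position + churn - 1) / churn; // ceiling division
    let minutes = epochs_to_wait as f64 * 6.4;

    println!(
        "churn limit = {}, wait ≈ {} epochs (~{:.0} minutes)",
        churn, epochs_to_wait, minutes
    );
}
```
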
Once a validator has been activated, there's no more waiting! It's time to
produce blocks and attestations!

@ -166,6 +166,7 @@ Returns an object containing a single [`SignedBeaconBlock`](https://github.com/e
    "beacon_block": {
        "message": {
            "slot": 0,
            "proposer_index": 14,
            "parent_root": "0x0000000000000000000000000000000000000000000000000000000000000000",
            "state_root": "0xf15690b6be4ed42ea1ee0741eb4bfd4619d37be8229b84b4ddd480fb028dcc8f",
            "body": {

@ -444,7 +445,7 @@ canonical chain.
### Returns

Returns an object containing a single
[`BeaconState`](https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#beaconstate)
[`BeaconState`](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#beaconstate)
and its tree hash root.

### Example Response

@ -454,6 +455,7 @@ and its tree hash root.
    "root": "0x528e54ca5d4c957729a73f40fc513ae312e054c7295775c4a2b21f423416a72b",
    "beacon_state": {
        "genesis_time": 1575652800,
        "genesis_validators_root": "0xa8a9226edee1b2627fb4117d7dea4996e64dec2998f37f6e824f74f2ce39a538",
        "slot": 18478
    }
}

@ -505,7 +507,7 @@ Typical Responses | 200
### Returns

Returns an object containing the genesis
[`BeaconState`](https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#beaconstate).
[`BeaconState`](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#beaconstate).

### Example Response

@ -565,7 +567,7 @@ Typical Responses | 200

### Returns

Returns an object containing the [`Fork`](https://github.com/ethereum/eth2.0-specs/blob/v0.10.1/specs/phase0/beacon-chain.md#fork) of the current head.
Returns an object containing the [`Fork`](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#fork) of the current head.

### Example Response

@ -272,7 +272,7 @@ Returns a `Attestation` object with a default signature. The `signature` field s
            "root": "0xaefd23b384994dc0c1a6b77836bdb2f24f209ebfe6c4819324d9685f4a43b4e1"
        }
    },
    "signature": "0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
    "signature": "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
}
```

@ -1,8 +1,7 @@
use int_to_bytes::int_to_bytes8;
use serde_derive::{Deserialize, Serialize};
use ssz::ssz_encode;
use ssz_derive::{Decode, Encode};
use types::{AttestationData, ChainSpec, Domain, Epoch, Fork};
use types::{AttestationData, ChainSpec, Domain, Epoch, Fork, Hash256};

/// Serialized `AttestationData` augmented with a domain to encode the fork info.
#[derive(
@ -13,21 +12,34 @@ pub struct AttestationId {
}

/// Number of domain bytes that the end of an attestation ID is padded with.
const DOMAIN_BYTES_LEN: usize = 8;
const DOMAIN_BYTES_LEN: usize = std::mem::size_of::<Hash256>();

impl AttestationId {
    pub fn from_data(attestation: &AttestationData, fork: &Fork, spec: &ChainSpec) -> Self {
    pub fn from_data(
        attestation: &AttestationData,
        fork: &Fork,
        genesis_validators_root: Hash256,
        spec: &ChainSpec,
    ) -> Self {
        let mut bytes = ssz_encode(attestation);
        let epoch = attestation.target.epoch;
        bytes.extend_from_slice(&AttestationId::compute_domain_bytes(epoch, fork, spec));
        bytes.extend_from_slice(
            AttestationId::compute_domain_bytes(epoch, fork, genesis_validators_root, spec)
                .as_bytes(),
        );
        AttestationId { v: bytes }
    }

    pub fn compute_domain_bytes(epoch: Epoch, fork: &Fork, spec: &ChainSpec) -> Vec<u8> {
        int_to_bytes8(spec.get_domain(epoch, Domain::BeaconAttester, fork))
    pub fn compute_domain_bytes(
        epoch: Epoch,
        fork: &Fork,
        genesis_validators_root: Hash256,
        spec: &ChainSpec,
    ) -> Hash256 {
        spec.get_domain(epoch, Domain::BeaconAttester, fork, genesis_validators_root)
    }

    pub fn domain_bytes_match(&self, domain_bytes: &[u8]) -> bool {
        &self.v[self.v.len() - DOMAIN_BYTES_LEN..] == domain_bytes
    pub fn domain_bytes_match(&self, domain_bytes: &Hash256) -> bool {
        &self.v[self.v.len() - DOMAIN_BYTES_LEN..] == domain_bytes.as_bytes()
    }
}

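The idea above — appending a fixed-width domain tag to the SSZ bytes and later
grouping entries by matching that suffix — can be shown with a tiny standalone
sketch; the names and tag width here are illustrative, not from the crate.

```rust
/// Standalone illustration of the suffix-tagging idea used by `AttestationId`:
/// append a fixed-width domain tag to a serialized payload, then group or
/// filter entries by comparing that suffix. Names and sizes are illustrative.
const TAG_LEN: usize = 32;

fn make_id(payload: &[u8], domain_tag: &[u8; TAG_LEN]) -> Vec<u8> {
    let mut id = payload.to_vec();
    id.extend_from_slice(domain_tag);
    id
}

fn tag_matches(id: &[u8], domain_tag: &[u8; TAG_LEN]) -> bool {
    id.len() >= TAG_LEN && id[id.len() - TAG_LEN..] == domain_tag[..]
}

fn main() {
    let tag_a = [0xaa_u8; TAG_LEN];
    let tag_b = [0xbb_u8; TAG_LEN];

    let id = make_id(b"some ssz bytes", &tag_a);
    assert!(tag_matches(&id, &tag_a));
    assert!(!tag_matches(&id, &tag_b));
    println!("suffix matching works as expected");
}
```
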
@ -22,7 +22,7 @@ use std::collections::{hash_map, HashMap, HashSet};
|
||||
use std::marker::PhantomData;
|
||||
use types::{
|
||||
typenum::Unsigned, Attestation, AttesterSlashing, BeaconState, BeaconStateError, ChainSpec,
|
||||
EthSpec, Fork, ProposerSlashing, RelativeEpoch, SignedVoluntaryExit, Validator,
|
||||
EthSpec, Fork, Hash256, ProposerSlashing, RelativeEpoch, SignedVoluntaryExit, Validator,
|
||||
};
|
||||
|
||||
#[derive(Default, Debug)]
|
||||
@ -58,9 +58,10 @@ impl<T: EthSpec> OperationPool<T> {
|
||||
&self,
|
||||
attestation: Attestation<T>,
|
||||
fork: &Fork,
|
||||
genesis_validators_root: Hash256,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<(), AttestationValidationError> {
|
||||
let id = AttestationId::from_data(&attestation.data, fork, spec);
|
||||
let id = AttestationId::from_data(&attestation.data, fork, genesis_validators_root, spec);
|
||||
|
||||
// Take a write lock on the attestations map.
|
||||
let mut attestations = self.attestations.write();
|
||||
@ -106,9 +107,18 @@ impl<T: EthSpec> OperationPool<T> {
|
||||
// Attestations for the current fork, which may be from the current or previous epoch.
|
||||
let prev_epoch = state.previous_epoch();
|
||||
let current_epoch = state.current_epoch();
|
||||
let prev_domain_bytes = AttestationId::compute_domain_bytes(prev_epoch, &state.fork, spec);
|
||||
let curr_domain_bytes =
|
||||
AttestationId::compute_domain_bytes(current_epoch, &state.fork, spec);
|
||||
let prev_domain_bytes = AttestationId::compute_domain_bytes(
|
||||
prev_epoch,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
);
|
||||
let curr_domain_bytes = AttestationId::compute_domain_bytes(
|
||||
current_epoch,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
);
|
||||
let reader = self.attestations.read();
|
||||
let active_indices = state
|
||||
.get_cached_active_validator_indices(RelativeEpoch::Current)
|
||||
@ -168,7 +178,7 @@ impl<T: EthSpec> OperationPool<T> {
|
||||
verify_proposer_slashing(&slashing, state, VerifySignatures::True, spec)?;
|
||||
self.proposer_slashings
|
||||
.write()
|
||||
.insert(slashing.proposer_index, slashing);
|
||||
.insert(slashing.signed_header_1.message.proposer_index, slashing);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -181,8 +191,18 @@ impl<T: EthSpec> OperationPool<T> {
|
||||
spec: &ChainSpec,
|
||||
) -> (AttestationId, AttestationId) {
|
||||
(
|
||||
AttestationId::from_data(&slashing.attestation_1.data, &state.fork, spec),
|
||||
AttestationId::from_data(&slashing.attestation_2.data, &state.fork, spec),
|
||||
AttestationId::from_data(
|
||||
&slashing.attestation_1.data,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
),
|
||||
AttestationId::from_data(
|
||||
&slashing.attestation_2.data,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
@ -214,7 +234,7 @@ impl<T: EthSpec> OperationPool<T> {
|
||||
|slashing| {
|
||||
state
|
||||
.validators
|
||||
.get(slashing.proposer_index as usize)
|
||||
.get(slashing.signed_header_1.message.proposer_index as usize)
|
||||
.map_or(false, |validator| !validator.slashed)
|
||||
},
|
||||
T::MaxProposerSlashings::to_usize(),
|
||||
@ -224,7 +244,7 @@ impl<T: EthSpec> OperationPool<T> {
|
||||
// slashings.
|
||||
let mut to_be_slashed = proposer_slashings
|
||||
.iter()
|
||||
.map(|s| s.proposer_index)
|
||||
.map(|s| s.signed_header_1.message.proposer_index)
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
let epoch = state.current_epoch();
|
||||
@ -428,6 +448,7 @@ mod release_tests {
|
||||
signers,
|
||||
&committee_keys,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
);
|
||||
extra_signer.map(|c_idx| {
|
||||
@ -437,6 +458,7 @@ mod release_tests {
|
||||
&[validator_index],
|
||||
&[&keypairs[validator_index].sk],
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
)
|
||||
});
|
||||
@ -549,7 +571,9 @@ mod release_tests {
|
||||
spec,
|
||||
None,
|
||||
);
|
||||
op_pool.insert_attestation(att, &state.fork, spec).unwrap();
|
||||
op_pool
|
||||
.insert_attestation(att, &state.fork, state.genesis_validators_root, spec)
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
@ -618,9 +642,16 @@ mod release_tests {
|
||||
None,
|
||||
);
|
||||
op_pool
|
||||
.insert_attestation(att.clone(), &state.fork, spec)
|
||||
.insert_attestation(
|
||||
att.clone(),
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
)
|
||||
.unwrap();
|
||||
op_pool
|
||||
.insert_attestation(att, &state.fork, state.genesis_validators_root, spec)
|
||||
.unwrap();
|
||||
op_pool.insert_attestation(att, &state.fork, spec).unwrap();
|
||||
}
|
||||
|
||||
assert_eq!(op_pool.num_attestations(), committees.len());
|
||||
@ -657,7 +688,9 @@ mod release_tests {
|
||||
spec,
|
||||
None,
|
||||
);
|
||||
op_pool.insert_attestation(att, &state.fork, spec).unwrap();
|
||||
op_pool
|
||||
.insert_attestation(att, &state.fork, state.genesis_validators_root, spec)
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
@ -705,7 +738,9 @@ mod release_tests {
|
||||
spec,
|
||||
if i == 0 { None } else { Some(0) },
|
||||
);
|
||||
op_pool.insert_attestation(att, &state.fork, spec).unwrap();
|
||||
op_pool
|
||||
.insert_attestation(att, &state.fork, state.genesis_validators_root, spec)
|
||||
.unwrap();
|
||||
}
|
||||
};
|
||||
|
||||
@ -778,7 +813,9 @@ mod release_tests {
|
||||
spec,
|
||||
if i == 0 { None } else { Some(0) },
|
||||
);
|
||||
op_pool.insert_attestation(att, &state.fork, spec).unwrap();
|
||||
op_pool
|
||||
.insert_attestation(att, &state.fork, state.genesis_validators_root, spec)
|
||||
.unwrap();
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -82,7 +82,7 @@ impl<T: EthSpec> PersistedOperationPool<T> {
|
||||
let proposer_slashings = RwLock::new(
|
||||
self.proposer_slashings
|
||||
.into_iter()
|
||||
.map(|slashing| (slashing.proposer_index, slashing))
|
||||
.map(|slashing| (slashing.signed_header_1.message.proposer_index, slashing))
|
||||
.collect(),
|
||||
);
|
||||
let voluntary_exits = RwLock::new(
|
||||
|
@ -4,7 +4,9 @@ use criterion::Criterion;
|
||||
use criterion::{black_box, criterion_group, criterion_main, Benchmark};
|
||||
use ssz::Encode;
|
||||
use state_processing::{test_utils::BlockBuilder, BlockSignatureStrategy, VerifySignatures};
|
||||
use types::{BeaconBlock, BeaconState, ChainSpec, EthSpec, MainnetEthSpec, MinimalEthSpec, Slot};
|
||||
use types::{
|
||||
BeaconState, ChainSpec, EthSpec, MainnetEthSpec, MinimalEthSpec, SignedBeaconBlock, Slot,
|
||||
};
|
||||
|
||||
pub const VALIDATORS_LOW: usize = 32_768;
|
||||
pub const VALIDATORS_HIGH: usize = 300_032;
|
||||
@ -45,7 +47,7 @@ fn worst_bench<T: EthSpec>(c: &mut Criterion, spec_desc: &str, validator_count:
|
||||
fn get_average_block<T: EthSpec>(
|
||||
validator_count: usize,
|
||||
spec: &ChainSpec,
|
||||
) -> (BeaconBlock<T>, BeaconState<T>) {
|
||||
) -> (SignedBeaconBlock<T>, BeaconState<T>) {
|
||||
let mut builder: BlockBuilder<T> = BlockBuilder::new(validator_count, &spec);
|
||||
// builder.num_attestations = T::MaxAttestations::to_usize();
|
||||
builder.num_attestations = 16;
|
||||
@ -59,7 +61,7 @@ fn get_average_block<T: EthSpec>(
|
||||
fn get_worst_block<T: EthSpec>(
|
||||
validator_count: usize,
|
||||
spec: &ChainSpec,
|
||||
) -> (BeaconBlock<T>, BeaconState<T>) {
|
||||
) -> (SignedBeaconBlock<T>, BeaconState<T>) {
|
||||
let mut builder: BlockBuilder<T> = BlockBuilder::new(validator_count, &spec);
|
||||
builder.maximize_block_operations();
|
||||
|
||||
@ -74,7 +76,7 @@ fn get_worst_block<T: EthSpec>(
|
||||
#[allow(clippy::unit_arg)]
|
||||
fn bench_block<T: EthSpec>(
|
||||
c: &mut Criterion,
|
||||
block: BeaconBlock<T>,
|
||||
block: SignedBeaconBlock<T>,
|
||||
state: BeaconState<T>,
|
||||
spec: &ChainSpec,
|
||||
spec_desc: &str,
|
||||
@ -183,9 +185,7 @@ fn bench_block<T: EthSpec>(
|
||||
black_box(
|
||||
state_processing::per_block_processing::process_block_header::<T>(
|
||||
state,
|
||||
&block,
|
||||
None,
|
||||
VerifySignatures::True,
|
||||
&block.message,
|
||||
&spec,
|
||||
)
|
||||
.expect("process_block_header should succeed"),
|
||||
@ -231,7 +231,7 @@ fn bench_block<T: EthSpec>(
|
||||
black_box(
|
||||
state_processing::per_block_processing::process_attestations::<T>(
|
||||
state,
|
||||
&block.body.attestations,
|
||||
&block.message.body.attestations,
|
||||
VerifySignatures::True,
|
||||
&spec,
|
||||
)
|
||||
@ -252,7 +252,7 @@ fn bench_block<T: EthSpec>(
|
||||
Benchmark::new("verify_attestation", move |b| {
|
||||
b.iter_batched_ref(
|
||||
|| {
|
||||
let attestation = &local_block.body.attestations[0];
|
||||
let attestation = &local_block.message.body.attestations[0];
|
||||
|
||||
(local_spec.clone(), local_state.clone(), attestation.clone())
|
||||
},
|
||||
@ -280,13 +280,15 @@ fn bench_block<T: EthSpec>(
|
||||
Benchmark::new("get_indexed_attestation", move |b| {
|
||||
b.iter_batched_ref(
|
||||
|| {
|
||||
let attestation = &local_block.body.attestations[0];
|
||||
|
||||
(local_state.clone(), attestation.clone())
|
||||
let attestation = &local_block.message.body.attestations[0];
|
||||
let committee = local_state
|
||||
.get_beacon_committee(attestation.data.slot, attestation.data.index)
|
||||
.unwrap();
|
||||
(committee.committee, attestation.clone())
|
||||
},
|
||||
|(ref mut state, attestation)| {
|
||||
|(committee, attestation)| {
|
||||
black_box(
|
||||
state_processing::common::get_indexed_attestation(state, &attestation)
|
||||
state_processing::common::get_indexed_attestation(committee, &attestation)
|
||||
.expect("should get indexed attestation"),
|
||||
)
|
||||
},
|
||||
@ -304,9 +306,12 @@ fn bench_block<T: EthSpec>(
|
||||
Benchmark::new("is_valid_indexed_attestation_with_signature", move |b| {
|
||||
b.iter_batched_ref(
|
||||
|| {
|
||||
let attestation = &local_block.body.attestations[0];
|
||||
let attestation = &local_block.message.body.attestations[0];
|
||||
let committee = local_state
|
||||
.get_beacon_committee(attestation.data.slot, attestation.data.index)
|
||||
.unwrap();
|
||||
let indexed_attestation = state_processing::common::get_indexed_attestation(
|
||||
&local_state,
|
||||
&committee.committee,
|
||||
&attestation,
|
||||
)
|
||||
.expect("should get indexed attestation");
|
||||
@ -338,9 +343,12 @@ fn bench_block<T: EthSpec>(
|
||||
Benchmark::new("is_valid_indexed_attestation_without_signature", move |b| {
|
||||
b.iter_batched_ref(
|
||||
|| {
|
||||
let attestation = &local_block.body.attestations[0];
|
||||
let attestation = &local_block.message.body.attestations[0];
|
||||
let committee = local_state
|
||||
.get_beacon_committee(attestation.data.slot, attestation.data.index)
|
||||
.unwrap();
|
||||
let indexed_attestation = state_processing::common::get_indexed_attestation(
|
||||
&local_state,
|
||||
&committee.committee,
|
||||
&attestation,
|
||||
)
|
||||
.expect("should get indexed attestation");
|
||||
@ -371,14 +379,16 @@ fn bench_block<T: EthSpec>(
|
||||
Benchmark::new("get_attesting_indices", move |b| {
|
||||
b.iter_batched_ref(
|
||||
|| {
|
||||
let attestation = &local_block.body.attestations[0];
|
||||
let attestation = &local_block.message.body.attestations[0];
|
||||
let committee = local_state
|
||||
.get_beacon_committee(attestation.data.slot, attestation.data.index)
|
||||
.unwrap();
|
||||
|
||||
(local_state.clone(), attestation.clone())
|
||||
(committee.committee, attestation.clone())
|
||||
},
|
||||
|(ref mut state, attestation)| {
|
||||
black_box(state_processing::common::get_attesting_indices(
|
||||
state,
|
||||
&attestation.data,
|
||||
|(committee, attestation)| {
|
||||
black_box(state_processing::common::get_attesting_indices::<T>(
|
||||
committee,
|
||||
&attestation.aggregation_bits,
|
||||
))
|
||||
},
|
||||
|
@ -2,7 +2,7 @@ use types::*;
|
||||
|
||||
/// Returns validator indices which participated in the attestation, sorted by increasing index.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_attesting_indices<T: EthSpec>(
|
||||
committee: &[usize],
|
||||
bitlist: &BitList<T::MaxValidatorsPerCommittee>,
|
||||
|
@ -3,7 +3,7 @@ use types::*;
|
||||
|
||||
/// Returns the base reward for some validator.
|
||||
///
|
||||
/// Spec v0.9.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_base_reward<T: EthSpec>(
|
||||
state: &BeaconState<T>,
|
||||
index: usize,
|
||||
|
@ -6,7 +6,7 @@ type Result<T> = std::result::Result<T, BlockOperationError<Invalid>>;
|
||||
|
||||
/// Convert `attestation` to (almost) indexed-verifiable form.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_indexed_attestation<T: EthSpec>(
|
||||
committee: &[usize],
|
||||
attestation: &Attestation<T>,
|
||||
|
@ -3,7 +3,7 @@ use types::{BeaconStateError as Error, *};
|
||||
|
||||
/// Initiate the exit of the validator of the given `index`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn initiate_validator_exit<T: EthSpec>(
|
||||
state: &mut BeaconState<T>,
|
||||
index: usize,
|
||||
|
@ -4,7 +4,7 @@ use types::{BeaconStateError as Error, *};
|
||||
|
||||
/// Slash the validator with index ``index``.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn slash_validator<T: EthSpec>(
|
||||
state: &mut BeaconState<T>,
|
||||
slashed_index: usize,
|
||||
|
@ -6,7 +6,7 @@ use types::*;
|
||||
|
||||
/// Initialize a `BeaconState` from genesis data.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
// TODO: this is quite inefficient and we probably want to rethink how we do this
|
||||
pub fn initialize_beacon_state_from_eth1<T: EthSpec>(
|
||||
eth1_block_hash: Hash256,
|
||||
@ -42,12 +42,15 @@ pub fn initialize_beacon_state_from_eth1<T: EthSpec>(
|
||||
// Now that we have our validators, initialize the caches (including the committees)
|
||||
state.build_all_caches(spec)?;
|
||||
|
||||
// Set genesis validators root for domain separation and chain versioning
|
||||
state.genesis_validators_root = state.update_validators_tree_hash_cache()?;
|
||||
|
||||
Ok(state)
|
||||
}
|
||||
|
||||
/// Determine whether a candidate genesis state is suitable for starting the chain.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn is_valid_genesis_state<T: EthSpec>(state: &BeaconState<T>, spec: &ChainSpec) -> bool {
|
||||
state.genesis_time >= spec.min_genesis_time
|
||||
&& state.get_active_validator_indices(T::genesis_epoch()).len() as u64
|
||||
@ -56,7 +59,7 @@ pub fn is_valid_genesis_state<T: EthSpec>(state: &BeaconState<T>, spec: &ChainSp
|
||||
|
||||
/// Activate genesis validators, if their balance is acceptable.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn process_activations<T: EthSpec>(state: &mut BeaconState<T>, spec: &ChainSpec) {
|
||||
for (index, validator) in state.validators.iter_mut().enumerate() {
|
||||
let balance = state.balances[index];
|
||||
|
@ -70,7 +70,7 @@ impl VerifySignatures {
|
||||
/// tree hash root of the block, NOT the signing root of the block. This function takes
|
||||
/// care of mixing in the domain.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn per_block_processing<T: EthSpec>(
|
||||
mut state: &mut BeaconState<T>,
|
||||
signed_block: &SignedBeaconBlock<T>,
|
||||
@ -142,14 +142,26 @@ pub fn per_block_processing<T: EthSpec>(
|
||||
|
||||
/// Processes the block header.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn process_block_header<T: EthSpec>(
|
||||
state: &mut BeaconState<T>,
|
||||
block: &BeaconBlock<T>,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<(), BlockOperationError<HeaderInvalid>> {
|
||||
// Verify that the slots match
|
||||
verify!(block.slot == state.slot, HeaderInvalid::StateSlotMismatch);
|
||||
|
||||
// Verify that proposer index is the correct index
|
||||
let proposer_index = block.proposer_index as usize;
|
||||
let state_proposer_index = state.get_beacon_proposer_index(block.slot, spec)?;
|
||||
verify!(
|
||||
proposer_index == state_proposer_index,
|
||||
HeaderInvalid::ProposerIndexMismatch {
|
||||
block_proposer_index: proposer_index,
|
||||
state_proposer_index,
|
||||
}
|
||||
);
|
||||
|
||||
let expected_previous_block_root = state.latest_block_header.tree_hash_root();
|
||||
verify!(
|
||||
block.parent_root == expected_previous_block_root,
|
||||
@ -162,11 +174,10 @@ pub fn process_block_header<T: EthSpec>(
|
||||
state.latest_block_header = block.temporary_block_header();
|
||||
|
||||
// Verify proposer is not slashed
|
||||
let proposer_idx = state.get_beacon_proposer_index(block.slot, spec)?;
|
||||
let proposer = &state.validators[proposer_idx];
|
||||
let proposer = &state.validators[proposer_index];
|
||||
verify!(
|
||||
!proposer.slashed,
|
||||
HeaderInvalid::ProposerSlashed(proposer_idx)
|
||||
HeaderInvalid::ProposerSlashed(proposer_index)
|
||||
);
|
||||
|
||||
Ok(())
|
||||
@ -174,7 +185,7 @@ pub fn process_block_header<T: EthSpec>(
|
||||
|
||||
/// Verifies the signature of a block.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn verify_block_signature<T: EthSpec>(
|
||||
state: &BeaconState<T>,
|
||||
block: &SignedBeaconBlock<T>,
|
||||
@ -199,7 +210,7 @@ pub fn verify_block_signature<T: EthSpec>(
|
||||
/// Verifies the `randao_reveal` against the block's proposer pubkey and updates
|
||||
/// `state.latest_randao_mixes`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn process_randao<T: EthSpec>(
|
||||
state: &mut BeaconState<T>,
|
||||
block: &BeaconBlock<T>,
|
||||
@ -223,7 +234,7 @@ pub fn process_randao<T: EthSpec>(
|
||||
|
||||
/// Update the `state.eth1_data_votes` based upon the `eth1_data` provided.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn process_eth1_data<T: EthSpec>(
|
||||
state: &mut BeaconState<T>,
|
||||
eth1_data: &Eth1Data,
|
||||
@ -240,7 +251,7 @@ pub fn process_eth1_data<T: EthSpec>(
|
||||
/// Returns `Some(eth1_data)` if adding the given `eth1_data` to `state.eth1_data_votes` would
|
||||
/// result in a change to `state.eth1_data`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_new_eth1_data<T: EthSpec>(
|
||||
state: &BeaconState<T>,
|
||||
eth1_data: &Eth1Data,
|
||||
@ -264,7 +275,7 @@ pub fn get_new_eth1_data<T: EthSpec>(
|
||||
/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
|
||||
/// an `Err` describing the invalid object or cause of failure.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn process_proposer_slashings<T: EthSpec>(
|
||||
state: &mut BeaconState<T>,
|
||||
proposer_slashings: &[ProposerSlashing],
|
||||
@ -282,7 +293,12 @@ pub fn process_proposer_slashings<T: EthSpec>(
|
||||
|
||||
// Update the state.
|
||||
for proposer_slashing in proposer_slashings {
|
||||
slash_validator(state, proposer_slashing.proposer_index as usize, None, spec)?;
|
||||
slash_validator(
|
||||
state,
|
||||
proposer_slashing.signed_header_1.message.proposer_index as usize,
|
||||
None,
|
||||
spec,
|
||||
)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@ -293,7 +309,7 @@ pub fn process_proposer_slashings<T: EthSpec>(
|
||||
/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
|
||||
/// an `Err` describing the invalid object or cause of failure.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn process_attester_slashings<T: EthSpec>(
|
||||
state: &mut BeaconState<T>,
|
||||
attester_slashings: &[AttesterSlashing<T>],
|
||||
@ -347,7 +363,7 @@ pub fn process_attester_slashings<T: EthSpec>(
|
||||
/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
|
||||
/// an `Err` describing the invalid object or cause of failure.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn process_attestations<T: EthSpec>(
|
||||
state: &mut BeaconState<T>,
|
||||
attestations: &[Attestation<T>],
|
||||
@ -393,7 +409,7 @@ pub fn process_attestations<T: EthSpec>(
|
||||
/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
|
||||
/// an `Err` describing the invalid object or cause of failure.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn process_deposits<T: EthSpec>(
|
||||
state: &mut BeaconState<T>,
|
||||
deposits: &[Deposit],
|
||||
@ -401,7 +417,7 @@ pub fn process_deposits<T: EthSpec>(
|
||||
) -> Result<(), BlockProcessingError> {
|
||||
let expected_deposit_len = std::cmp::min(
|
||||
T::MaxDeposits::to_u64(),
|
||||
state.eth1_data.deposit_count - state.eth1_deposit_index,
|
||||
state.get_outstanding_deposit_len()?,
|
||||
);
|
||||
block_verify!(
|
||||
deposits.len() as u64 == expected_deposit_len,
|
||||
@ -430,7 +446,7 @@ pub fn process_deposits<T: EthSpec>(
|
||||
|
||||
/// Process a single deposit, optionally verifying its merkle proof.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn process_deposit<T: EthSpec>(
|
||||
state: &mut BeaconState<T>,
|
||||
deposit: &Deposit,
|
||||
@ -496,7 +512,7 @@ pub fn process_deposit<T: EthSpec>(
|
||||
/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
|
||||
/// an `Err` describing the invalid object or cause of failure.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn process_exits<T: EthSpec>(
|
||||
state: &mut BeaconState<T>,
|
||||
voluntary_exits: &[SignedVoluntaryExit],
|
||||
|
@ -56,9 +56,18 @@ impl<T: EthSpec> BlockProcessingBuilder<T> {
|
||||
let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap();
|
||||
let keypair = &keypairs[proposer_index];
|
||||
|
||||
builder.set_proposer_index(proposer_index as u64);
|
||||
|
||||
match randao_sk {
|
||||
Some(sk) => builder.set_randao_reveal(&sk, &state.fork, spec),
|
||||
None => builder.set_randao_reveal(&keypair.sk, &state.fork, spec),
|
||||
Some(sk) => {
|
||||
builder.set_randao_reveal(&sk, &state.fork, state.genesis_validators_root, spec)
|
||||
}
|
||||
None => builder.set_randao_reveal(
|
||||
&keypair.sk,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
),
|
||||
}
|
||||
|
||||
self.block_builder.insert_deposits(
|
||||
@ -70,7 +79,12 @@ impl<T: EthSpec> BlockProcessingBuilder<T> {
|
||||
spec,
|
||||
);
|
||||
|
||||
let block = self.block_builder.build(&keypair.sk, &state.fork, spec);
|
||||
let block = self.block_builder.build(
|
||||
&keypair.sk,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
);
|
||||
|
||||
(block, state)
|
||||
}
|
||||
@ -96,9 +110,18 @@ impl<T: EthSpec> BlockProcessingBuilder<T> {
|
||||
let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap();
|
||||
let keypair = &keypairs[proposer_index];
|
||||
|
||||
builder.set_proposer_index(proposer_index as u64);
|
||||
|
||||
match randao_sk {
|
||||
Some(sk) => builder.set_randao_reveal(&sk, &state.fork, spec),
|
||||
None => builder.set_randao_reveal(&keypair.sk, &state.fork, spec),
|
||||
Some(sk) => {
|
||||
builder.set_randao_reveal(&sk, &state.fork, state.genesis_validators_root, spec)
|
||||
}
|
||||
None => builder.set_randao_reveal(
|
||||
&keypair.sk,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
),
|
||||
}
|
||||
match test_task {
|
||||
ExitTestTask::AlreadyInitiated => {
|
||||
@ -125,7 +148,12 @@ impl<T: EthSpec> BlockProcessingBuilder<T> {
|
||||
}
|
||||
}
|
||||
|
||||
let block = self.block_builder.build(&keypair.sk, &state.fork, spec);
|
||||
let block = self.block_builder.build(
|
||||
&keypair.sk,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
);
|
||||
|
||||
(block, state)
|
||||
}
|
||||
@ -151,9 +179,18 @@ impl<T: EthSpec> BlockProcessingBuilder<T> {
|
||||
let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap();
|
||||
let keypair = &keypairs[proposer_index];
|
||||
|
||||
builder.set_proposer_index(proposer_index as u64);
|
||||
|
||||
match randao_sk {
|
||||
Some(sk) => builder.set_randao_reveal(&sk, &state.fork, spec),
|
||||
None => builder.set_randao_reveal(&keypair.sk, &state.fork, spec),
|
||||
Some(sk) => {
|
||||
builder.set_randao_reveal(&sk, &state.fork, state.genesis_validators_root, spec)
|
||||
}
|
||||
None => builder.set_randao_reveal(
|
||||
&keypair.sk,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
),
|
||||
}
|
||||
|
||||
let all_secret_keys: Vec<&SecretKey> = keypairs.iter().map(|keypair| &keypair.sk).collect();
|
||||
@ -166,7 +203,12 @@ impl<T: EthSpec> BlockProcessingBuilder<T> {
|
||||
spec,
|
||||
)
|
||||
.unwrap();
|
||||
let block = self.block_builder.build(&keypair.sk, &state.fork, spec);
|
||||
let block = self.block_builder.build(
|
||||
&keypair.sk,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
);
|
||||
|
||||
(block, state)
|
||||
}
|
||||
@ -192,9 +234,18 @@ impl<T: EthSpec> BlockProcessingBuilder<T> {
|
||||
let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap();
|
||||
let keypair = &keypairs[proposer_index];
|
||||
|
||||
builder.set_proposer_index(proposer_index as u64);
|
||||
|
||||
match randao_sk {
|
||||
Some(sk) => builder.set_randao_reveal(&sk, &state.fork, spec),
|
||||
None => builder.set_randao_reveal(&keypair.sk, &state.fork, spec),
|
||||
Some(sk) => {
|
||||
builder.set_randao_reveal(&sk, &state.fork, state.genesis_validators_root, spec)
|
||||
}
|
||||
None => builder.set_randao_reveal(
|
||||
&keypair.sk,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
),
|
||||
}
|
||||
|
||||
let mut validator_indices = vec![];
|
||||
@ -210,10 +261,16 @@ impl<T: EthSpec> BlockProcessingBuilder<T> {
|
||||
&validator_indices,
|
||||
&secret_keys,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
);
|
||||
}
|
||||
let block = self.block_builder.build(&keypair.sk, &state.fork, spec);
|
||||
let block = self.block_builder.build(
|
||||
&keypair.sk,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
);
|
||||
|
||||
(block, state)
|
||||
}
|
||||
@ -239,9 +296,18 @@ impl<T: EthSpec> BlockProcessingBuilder<T> {
|
||||
let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap();
|
||||
let keypair = &keypairs[proposer_index];
|
||||
|
||||
builder.set_proposer_index(proposer_index as u64);
|
||||
|
||||
match randao_sk {
|
||||
Some(sk) => builder.set_randao_reveal(&sk, &state.fork, spec),
|
||||
None => builder.set_randao_reveal(&keypair.sk, &state.fork, spec),
|
||||
Some(sk) => {
|
||||
builder.set_randao_reveal(&sk, &state.fork, state.genesis_validators_root, spec)
|
||||
}
|
||||
None => builder.set_randao_reveal(
|
||||
&keypair.sk,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
),
|
||||
}
|
||||
|
||||
for i in 0..num_proposer_slashings {
|
||||
@ -252,10 +318,16 @@ impl<T: EthSpec> BlockProcessingBuilder<T> {
|
||||
validator_indices,
|
||||
&secret_keys,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
);
|
||||
}
|
||||
let block = self.block_builder.build(&keypair.sk, &state.fork, spec);
|
||||
let block = self.block_builder.build(
|
||||
&keypair.sk,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
);
|
||||
|
||||
(block, state)
|
||||
}
|
||||
@ -279,12 +351,26 @@ impl<T: EthSpec> BlockProcessingBuilder<T> {
|
||||
let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap();
|
||||
let keypair = &keypairs[proposer_index];
|
||||
|
||||
builder.set_proposer_index(proposer_index as u64);
|
||||
|
||||
match randao_sk {
|
||||
Some(sk) => builder.set_randao_reveal(&sk, &state.fork, spec),
|
||||
None => builder.set_randao_reveal(&keypair.sk, &state.fork, spec),
|
||||
Some(sk) => {
|
||||
builder.set_randao_reveal(&sk, &state.fork, state.genesis_validators_root, spec)
|
||||
}
|
||||
None => builder.set_randao_reveal(
|
||||
&keypair.sk,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
),
|
||||
}
|
||||
|
||||
let block = self.block_builder.build(&keypair.sk, &state.fork, spec);
|
||||
let block = self.block_builder.build(
|
||||
&keypair.sk,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
);
|
||||
|
||||
(block, state)
|
||||
}
|
||||
|
@ -53,7 +53,7 @@ where
|
||||
T: EthSpec,
|
||||
F: Fn(usize) -> Option<Cow<'a, G1Point>> + Clone,
|
||||
{
|
||||
get_pubkey: Box<F>,
|
||||
get_pubkey: F,
|
||||
state: &'a BeaconState<T>,
|
||||
spec: &'a ChainSpec,
|
||||
sets: Vec<SignatureSet<'a>>,
|
||||
@ -68,7 +68,7 @@ where
|
||||
/// add signatures, and the `verify`
|
||||
pub fn new(state: &'a BeaconState<T>, get_pubkey: F, spec: &'a ChainSpec) -> Self {
|
||||
Self {
|
||||
get_pubkey: Box::new(get_pubkey),
|
||||
get_pubkey: get_pubkey,
|
||||
state,
|
||||
spec,
|
||||
sets: vec![],
|
||||
|
@ -159,7 +159,14 @@ impl<T> From<ssz_types::Error> for BlockOperationError<T> {
|
||||
pub enum HeaderInvalid {
|
||||
ProposalSignatureInvalid,
|
||||
StateSlotMismatch,
|
||||
ParentBlockRootMismatch { state: Hash256, block: Hash256 },
|
||||
ProposerIndexMismatch {
|
||||
block_proposer_index: usize,
|
||||
state_proposer_index: usize,
|
||||
},
|
||||
ParentBlockRootMismatch {
|
||||
state: Hash256,
|
||||
block: Hash256,
|
||||
},
|
||||
ProposerSlashed(usize),
|
||||
}
|
||||
|
||||
@ -171,6 +178,10 @@ pub enum ProposerSlashingInvalid {
|
||||
///
|
||||
/// (proposal_1_slot, proposal_2_slot)
|
||||
ProposalSlotMismatch(Slot, Slot),
|
||||
/// The two proposals have different proposer indices.
|
||||
///
|
||||
/// (proposer_index_1, proposer_index_2)
|
||||
ProposerIndexMismatch(u64, u64),
|
||||
/// The proposals are identical and therefore not slashable.
|
||||
ProposalsIdentical,
|
||||
/// The specified proposer cannot be slashed because they are already slashed, or not active.
|
||||
|
@ -11,7 +11,7 @@ fn error(reason: Invalid) -> BlockOperationError<Invalid> {
|
||||
|
||||
/// Verify an `IndexedAttestation`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn is_valid_indexed_attestation<T: EthSpec>(
|
||||
state: &BeaconState<T>,
|
||||
indexed_attestation: &IndexedAttestation<T>,
|
||||
|
@ -77,6 +77,7 @@ where
|
||||
block.slot.epoch(T::slots_per_epoch()),
|
||||
Domain::BeaconProposer,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
);
|
||||
|
||||
let message = if let Some(root) = block_root {
|
||||
@ -113,6 +114,7 @@ where
|
||||
block.slot.epoch(T::slots_per_epoch()),
|
||||
Domain::Randao,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
);
|
||||
|
||||
let message = block.slot.epoch(T::slots_per_epoch()).signing_root(domain);
|
||||
@ -135,7 +137,7 @@ where
|
||||
T: EthSpec,
|
||||
F: Fn(usize) -> Option<Cow<'a, G1Point>>,
|
||||
{
|
||||
let proposer_index = proposer_slashing.proposer_index as usize;
|
||||
let proposer_index = proposer_slashing.signed_header_1.message.proposer_index as usize;
|
||||
|
||||
Ok((
|
||||
block_header_signature_set(
|
||||
@ -166,6 +168,7 @@ fn block_header_signature_set<'a, T: EthSpec>(
|
||||
signed_header.message.slot.epoch(T::slots_per_epoch()),
|
||||
Domain::BeaconProposer,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
);
|
||||
|
||||
let message = signed_header
|
||||
@ -206,6 +209,7 @@ where
|
||||
indexed_attestation.data.target.epoch,
|
||||
Domain::BeaconAttester,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
);
|
||||
|
||||
let message = indexed_attestation.data.signing_root(domain);
|
||||
@ -221,6 +225,7 @@ pub fn indexed_attestation_signature_set_from_pubkeys<'a, 'b, T, F>(
|
||||
signature: &'a AggregateSignature,
|
||||
indexed_attestation: &'b IndexedAttestation<T>,
|
||||
fork: &Fork,
|
||||
genesis_validators_root: Hash256,
|
||||
spec: &'a ChainSpec,
|
||||
) -> Result<SignatureSet<'a>>
|
||||
where
|
||||
@ -240,6 +245,7 @@ where
|
||||
indexed_attestation.data.target.epoch,
|
||||
Domain::BeaconAttester,
|
||||
&fork,
|
||||
genesis_validators_root,
|
||||
);
|
||||
|
||||
let message = indexed_attestation.data.signing_root(domain);
|
||||
@ -322,7 +328,12 @@ where
|
||||
let exit = &signed_exit.message;
|
||||
let proposer_index = exit.validator_index as usize;
|
||||
|
||||
let domain = spec.get_domain(exit.epoch, Domain::VoluntaryExit, &state.fork);
|
||||
let domain = spec.get_domain(
|
||||
exit.epoch,
|
||||
Domain::VoluntaryExit,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
);
|
||||
|
||||
let message = exit.signing_root(domain).as_bytes().to_vec();
|
||||
|
||||
|
@ -10,7 +10,7 @@ use types::test_utils::{
|
||||
use types::*;
|
||||
|
||||
pub const NUM_DEPOSITS: u64 = 1;
|
||||
pub const VALIDATOR_COUNT: usize = 10;
|
||||
pub const VALIDATOR_COUNT: usize = 64;
|
||||
pub const SLOT_OFFSET: u64 = 4;
|
||||
pub const EXIT_SLOT_OFFSET: u64 = 2048;
|
||||
pub const NUM_ATTESTATIONS: u64 = 1;
|
||||
@ -93,7 +93,12 @@ fn invalid_block_signature() {
|
||||
|
||||
// sign the block with a keypair that is not the expected proposer
|
||||
let keypair = Keypair::random();
|
||||
let block = block.message.sign(&keypair.sk, &state.fork, &spec);
|
||||
let block = block.message.sign(
|
||||
&keypair.sk,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
&spec,
|
||||
);
|
||||
|
||||
// process block with invalid block signature
|
||||
let result = per_block_processing(
|
||||
@ -630,7 +635,7 @@ fn invalid_attestation_wrong_justified_checkpoint() {
#[test]
fn invalid_attestation_bad_indexed_attestation_bad_signature() {
    let spec = MainnetEthSpec::default_spec();
    let builder = get_builder(&spec, SLOT_OFFSET, 33); // minimum number of validators required for this test
    let builder = get_builder(&spec, SLOT_OFFSET, VALIDATOR_COUNT);
    let test_task = AttestationTestTask::BadIndexedAttestationBadSignature;
    let (block, mut state) =
        builder.build_with_n_attestations(test_task, NUM_ATTESTATIONS, None, None, &spec);

@ -15,7 +15,7 @@ fn error(reason: Invalid) -> BlockOperationError<Invalid> {
|
||||
///
|
||||
/// Optionally verifies the aggregate signature, depending on `verify_signatures`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn verify_attestation_for_block_inclusion<T: EthSpec>(
|
||||
state: &BeaconState<T>,
|
||||
attestation: &Attestation<T>,
|
||||
@ -49,7 +49,7 @@ pub fn verify_attestation_for_block_inclusion<T: EthSpec>(
|
||||
/// Returns a descriptive `Err` if the attestation is malformed or does not accurately reflect the
|
||||
/// prior blocks in `state`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn verify_attestation_for_state<T: EthSpec>(
|
||||
state: &BeaconState<T>,
|
||||
attestation: &Attestation<T>,
|
||||
@ -58,6 +58,13 @@ pub fn verify_attestation_for_state<T: EthSpec>(
|
||||
) -> Result<()> {
|
||||
let data = &attestation.data;
|
||||
|
||||
// This emptiness check is required *in addition* to the length check in `get_attesting_indices`
|
||||
// because we can parse a bitfield and know its length, even if it has no bits set.
|
||||
verify!(
|
||||
!attestation.aggregation_bits.is_zero(),
|
||||
Invalid::AggregationBitfieldIsEmpty
|
||||
);
|
||||
|
||||
verify!(
|
||||
data.index < state.get_committee_count_at_slot(data.slot)?,
|
||||
Invalid::BadCommitteeIndex
|
||||
@ -76,7 +83,7 @@ pub fn verify_attestation_for_state<T: EthSpec>(
|
||||
|
||||
/// Check target epoch and source checkpoint.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
fn verify_casper_ffg_vote<T: EthSpec>(
|
||||
attestation: &Attestation<T>,
|
||||
state: &BeaconState<T>,
|
||||
|
@ -15,7 +15,7 @@ fn error(reason: Invalid) -> BlockOperationError<Invalid> {
|
||||
///
|
||||
/// Returns `Ok(())` if the `AttesterSlashing` is valid, otherwise indicates the reason for invalidity.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn verify_attester_slashing<T: EthSpec>(
|
||||
state: &BeaconState<T>,
|
||||
attester_slashing: &AttesterSlashing<T>,
|
||||
@ -47,7 +47,7 @@ pub fn verify_attester_slashing<T: EthSpec>(
|
||||
///
|
||||
/// Returns Ok(indices) if `indices.len() > 0`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_slashable_indices<T: EthSpec>(
|
||||
state: &BeaconState<T>,
|
||||
attester_slashing: &AttesterSlashing<T>,
|
||||
|
@ -14,7 +14,7 @@ fn error(reason: DepositInvalid) -> BlockOperationError<DepositInvalid> {
|
||||
|
||||
/// Verify `Deposit.pubkey` signed `Deposit.signature`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn verify_deposit_signature(deposit_data: &DepositData, spec: &ChainSpec) -> Result<()> {
|
||||
let deposit_signature_message = deposit_pubkey_signature_message(&deposit_data, spec)
|
||||
.ok_or_else(|| error(DepositInvalid::BadBlsBytes))?;
|
||||
@ -46,7 +46,7 @@ pub fn get_existing_validator_index<T: EthSpec>(
|
||||
/// The deposit index is provided as a parameter so we can check proofs
|
||||
/// before they're due to be processed, and in parallel.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn verify_deposit_merkle_proof<T: EthSpec>(
|
||||
state: &BeaconState<T>,
|
||||
deposit: &Deposit,
|
||||
|
@ -16,7 +16,7 @@ fn error(reason: ExitInvalid) -> BlockOperationError<ExitInvalid> {
|
||||
///
|
||||
/// Returns `Ok(())` if the `Exit` is valid, otherwise indicates the reason for invalidity.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn verify_exit<T: EthSpec>(
|
||||
state: &BeaconState<T>,
|
||||
exit: &SignedVoluntaryExit,
|
||||
@ -28,7 +28,7 @@ pub fn verify_exit<T: EthSpec>(
|
||||
|
||||
/// Like `verify_exit` but doesn't run checks which may become true in future states.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn verify_exit_time_independent_only<T: EthSpec>(
|
||||
state: &BeaconState<T>,
|
||||
exit: &SignedVoluntaryExit,
|
||||
@ -40,7 +40,7 @@ pub fn verify_exit_time_independent_only<T: EthSpec>(
|
||||
|
||||
/// Parametric version of `verify_exit` that skips some checks if `time_independent_only` is true.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
fn verify_exit_parametric<T: EthSpec>(
|
||||
state: &BeaconState<T>,
|
||||
signed_exit: &SignedVoluntaryExit,
|
||||
|
@ -14,38 +14,40 @@ fn error(reason: Invalid) -> BlockOperationError<Invalid> {
|
||||
///
|
||||
/// Returns `Ok(())` if the `ProposerSlashing` is valid, otherwise indicates the reason for invalidity.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn verify_proposer_slashing<T: EthSpec>(
|
||||
proposer_slashing: &ProposerSlashing,
|
||||
state: &BeaconState<T>,
|
||||
verify_signatures: VerifySignatures,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<()> {
|
||||
let proposer = state
|
||||
.validators
|
||||
.get(proposer_slashing.proposer_index as usize)
|
||||
.ok_or_else(|| error(Invalid::ProposerUnknown(proposer_slashing.proposer_index)))?;
|
||||
let header_1 = &proposer_slashing.signed_header_1.message;
|
||||
let header_2 = &proposer_slashing.signed_header_2.message;
|
||||
|
||||
// Verify slots match
|
||||
verify!(
|
||||
proposer_slashing.signed_header_1.message.slot
|
||||
== proposer_slashing.signed_header_2.message.slot,
|
||||
Invalid::ProposalSlotMismatch(
|
||||
proposer_slashing.signed_header_1.message.slot,
|
||||
proposer_slashing.signed_header_2.message.slot
|
||||
)
|
||||
header_1.slot == header_2.slot,
|
||||
Invalid::ProposalSlotMismatch(header_1.slot, header_2.slot)
|
||||
);
|
||||
|
||||
// Verify header proposer indices match
|
||||
verify!(
|
||||
header_1.proposer_index == header_2.proposer_index,
|
||||
Invalid::ProposerIndexMismatch(header_1.proposer_index, header_2.proposer_index)
|
||||
);
|
||||
|
||||
// But the headers are different
|
||||
verify!(
|
||||
proposer_slashing.signed_header_1 != proposer_slashing.signed_header_2,
|
||||
Invalid::ProposalsIdentical
|
||||
);
|
||||
verify!(header_1 != header_2, Invalid::ProposalsIdentical);
|
||||
|
||||
// Check proposer is slashable
|
||||
let proposer = state
|
||||
.validators
|
||||
.get(header_1.proposer_index as usize)
|
||||
.ok_or_else(|| error(Invalid::ProposerUnknown(header_1.proposer_index)))?;
|
||||
|
||||
verify!(
|
||||
proposer.is_slashable_at(state.current_epoch()),
|
||||
Invalid::ProposerNotSlashable(proposer_slashing.proposer_index)
|
||||
Invalid::ProposerNotSlashable(header_1.proposer_index)
|
||||
);
|
||||
|
||||
if verify_signatures.is_true() {
|
||||
|
@ -19,7 +19,7 @@ pub use validator_statuses::{TotalBalances, ValidatorStatus, ValidatorStatuses};
|
||||
/// Mutates the given `BeaconState`, returning early if an error is encountered. If an error is
|
||||
/// returned, a state might be "half-processed" and therefore in an invalid state.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn per_epoch_processing<T: EthSpec>(
|
||||
state: &mut BeaconState<T>,
|
||||
spec: &ChainSpec,
|
||||
@ -45,7 +45,11 @@ pub fn per_epoch_processing<T: EthSpec>(
|
||||
process_registry_updates(state, spec)?;
|
||||
|
||||
// Slashings.
|
||||
process_slashings(state, validator_statuses.total_balances.current_epoch, spec)?;
|
||||
process_slashings(
|
||||
state,
|
||||
validator_statuses.total_balances.current_epoch(),
|
||||
spec,
|
||||
)?;
|
||||
|
||||
// Final updates.
|
||||
process_final_updates(state, spec)?;
|
||||
@ -66,7 +70,7 @@ pub fn per_epoch_processing<T: EthSpec>(
|
||||
/// - `finalized_epoch`
|
||||
/// - `finalized_root`
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
#[allow(clippy::if_same_then_else)] // For readability and consistency with spec.
|
||||
pub fn process_justification_and_finalization<T: EthSpec>(
|
||||
state: &mut BeaconState<T>,
|
||||
@ -86,7 +90,7 @@ pub fn process_justification_and_finalization<T: EthSpec>(
|
||||
state.previous_justified_checkpoint = state.current_justified_checkpoint.clone();
|
||||
state.justification_bits.shift_up(1)?;
|
||||
|
||||
if total_balances.previous_epoch_target_attesters * 3 >= total_balances.current_epoch * 2 {
|
||||
if total_balances.previous_epoch_target_attesters() * 3 >= total_balances.current_epoch() * 2 {
|
||||
state.current_justified_checkpoint = Checkpoint {
|
||||
epoch: previous_epoch,
|
||||
root: *state.get_block_root_at_epoch(previous_epoch)?,
|
||||
@ -94,7 +98,7 @@ pub fn process_justification_and_finalization<T: EthSpec>(
|
||||
state.justification_bits.set(1, true)?;
|
||||
}
|
||||
// If the current epoch gets justified, fill the last bit.
|
||||
if total_balances.current_epoch_target_attesters * 3 >= total_balances.current_epoch * 2 {
|
||||
if total_balances.current_epoch_target_attesters() * 3 >= total_balances.current_epoch() * 2 {
|
||||
state.current_justified_checkpoint = Checkpoint {
|
||||
epoch: current_epoch,
|
||||
root: *state.get_block_root_at_epoch(current_epoch)?,
|
||||
@ -134,7 +138,7 @@ pub fn process_justification_and_finalization<T: EthSpec>(
|
||||
|
||||
/// Finish up an epoch update.
///
/// Spec v0.10.1
/// Spec v0.11.1
pub fn process_final_updates<T: EthSpec>(
    state: &mut BeaconState<T>,
    spec: &ChainSpec,
@ -148,11 +152,14 @@ pub fn process_final_updates<T: EthSpec>(
    }

    // Update effective balances with hysteresis (lag).
    let hysteresis_increment = spec.effective_balance_increment / spec.hysteresis_quotient;
    let downward_threshold = hysteresis_increment * spec.hysteresis_downward_multiplier;
    let upward_threshold = hysteresis_increment * spec.hysteresis_upward_multiplier;
    for (index, validator) in state.validators.iter_mut().enumerate() {
        let balance = state.balances[index];
        let half_increment = spec.effective_balance_increment / 2;
        if balance < validator.effective_balance
            || validator.effective_balance + 3 * half_increment < balance

        if balance + downward_threshold < validator.effective_balance
            || validator.effective_balance + upward_threshold < balance
        {
            validator.effective_balance = std::cmp::min(
                balance - balance % spec.effective_balance_increment,

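As a side note, the hysteresis thresholds introduced here work out as follows
under the usual mainnet constants (assumed for illustration: a 1 ETH effective
balance increment, quotient 4, downward multiplier 1, upward multiplier 5); the
real code reads these from `ChainSpec`.

```rust
/// Sketch of the hysteresis thresholds used above, with assumed mainnet values.
fn main() {
    let effective_balance_increment: u64 = 1_000_000_000; // 1 ETH in Gwei
    let hysteresis_quotient: u64 = 4;
    let hysteresis_downward_multiplier: u64 = 1;
    let hysteresis_upward_multiplier: u64 = 5;

    let hysteresis_increment = effective_balance_increment / hysteresis_quotient;
    let downward_threshold = hysteresis_increment * hysteresis_downward_multiplier;
    let upward_threshold = hysteresis_increment * hysteresis_upward_multiplier;

    // Effective balance only moves once the actual balance drifts more than
    // 0.25 ETH below, or 1.25 ETH above, the current effective balance.
    println!(
        "downward threshold: {} Gwei, upward threshold: {} Gwei",
        downward_threshold, upward_threshold
    );
}
```
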
@ -33,7 +33,7 @@ impl std::ops::AddAssign for Delta {
|
||||
|
||||
/// Apply attester and proposer rewards.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn process_rewards_and_penalties<T: EthSpec>(
|
||||
state: &mut BeaconState<T>,
|
||||
validator_statuses: &mut ValidatorStatuses,
|
||||
@ -67,7 +67,7 @@ pub fn process_rewards_and_penalties<T: EthSpec>(
|
||||
|
||||
/// For each attesting validator, reward the proposer who was first to include their attestation.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
fn get_proposer_deltas<T: EthSpec>(
|
||||
deltas: &mut Vec<Delta>,
|
||||
state: &BeaconState<T>,
|
||||
@ -83,7 +83,7 @@ fn get_proposer_deltas<T: EthSpec>(
|
||||
let base_reward = get_base_reward(
|
||||
state,
|
||||
index,
|
||||
validator_statuses.total_balances.current_epoch,
|
||||
validator_statuses.total_balances.current_epoch(),
|
||||
spec,
|
||||
)?;
|
||||
|
||||
@ -100,7 +100,7 @@ fn get_proposer_deltas<T: EthSpec>(
|
||||
|
||||
/// Apply rewards for participation in attestations during the previous epoch.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
fn get_attestation_deltas<T: EthSpec>(
|
||||
deltas: &mut Vec<Delta>,
|
||||
state: &BeaconState<T>,
|
||||
@ -113,7 +113,7 @@ fn get_attestation_deltas<T: EthSpec>(
|
||||
let base_reward = get_base_reward(
|
||||
state,
|
||||
index,
|
||||
validator_statuses.total_balances.current_epoch,
|
||||
validator_statuses.total_balances.current_epoch(),
|
||||
spec,
|
||||
)?;
|
||||
|
||||
@ -133,7 +133,7 @@ fn get_attestation_deltas<T: EthSpec>(
|
||||
|
||||
/// Determine the delta for a single validator, sans proposer rewards.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
fn get_attestation_delta<T: EthSpec>(
|
||||
validator: &ValidatorStatus,
|
||||
total_balances: &TotalBalances,
|
||||
@ -152,16 +152,24 @@ fn get_attestation_delta<T: EthSpec>(
        return delta;
    }

    let total_balance = total_balances.current_epoch;
    let total_attesting_balance = total_balances.previous_epoch_attesters;
    let matching_target_balance = total_balances.previous_epoch_target_attesters;
    let matching_head_balance = total_balances.previous_epoch_head_attesters;
    // Handle integer overflow by dividing these quantities by EFFECTIVE_BALANCE_INCREMENT
    // Spec:
    // - increment = EFFECTIVE_BALANCE_INCREMENT
    // - reward_numerator = get_base_reward(state, index) * (attesting_balance // increment)
    // - rewards[index] = reward_numerator // (total_balance // increment)
    let total_balance_ebi = total_balances.current_epoch() / spec.effective_balance_increment;
    let total_attesting_balance_ebi =
        total_balances.previous_epoch_attesters() / spec.effective_balance_increment;
    let matching_target_balance_ebi =
        total_balances.previous_epoch_target_attesters() / spec.effective_balance_increment;
    let matching_head_balance_ebi =
        total_balances.previous_epoch_head_attesters() / spec.effective_balance_increment;

    // Expected FFG source.
    // Spec:
    // - validator index in `get_unslashed_attesting_indices(state, matching_source_attestations)`
    if validator.is_previous_epoch_attester && !validator.is_slashed {
        delta.reward(base_reward * total_attesting_balance / total_balance);
        delta.reward(base_reward * total_attesting_balance_ebi / total_balance_ebi);
        // Inclusion speed bonus
        let proposer_reward = base_reward / spec.proposer_reward_quotient;
        let max_attester_reward = base_reward - proposer_reward;

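To see why the balances are scaled down by `EFFECTIVE_BALANCE_INCREMENT` first,
here is a rough sketch with assumed ballpark figures (not taken from the
codebase): with all balances expressed in Gwei, the naive product overflows a
`u64`.

```rust
/// Rough sketch of the overflow the comment above guards against. The figures
/// are assumed ballpark values (≈300k validators at 32 ETH), not from the code.
fn main() {
    let base_reward: u64 = 5_000; // Gwei, ballpark per-epoch base reward
    let attesting_balance: u64 = 9_600_000_000_000_000; // ≈ 9.6M ETH in Gwei
    let increment: u64 = 1_000_000_000; // EFFECTIVE_BALANCE_INCREMENT, 1 ETH

    // The naive product exceeds u64::MAX (≈ 1.8e19), so it would overflow.
    let naive = (base_reward as u128) * (attesting_balance as u128);
    assert!(naive > u64::MAX as u128);

    // Dividing by the increment first keeps the arithmetic comfortably in range.
    let scaled = base_reward * (attesting_balance / increment);
    println!("naive (u128): {}, increment-scaled (u64): {}", naive, scaled);
}
```
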
@ -177,7 +185,7 @@ fn get_attestation_delta<T: EthSpec>(
|
||||
// Spec:
|
||||
// - validator index in `get_unslashed_attesting_indices(state, matching_target_attestations)`
|
||||
if validator.is_previous_epoch_target_attester && !validator.is_slashed {
|
||||
delta.reward(base_reward * matching_target_balance / total_balance);
|
||||
delta.reward(base_reward * matching_target_balance_ebi / total_balance_ebi);
|
||||
} else {
|
||||
delta.penalize(base_reward);
|
||||
}
|
||||
@ -186,7 +194,7 @@ fn get_attestation_delta<T: EthSpec>(
|
||||
// Spec:
|
||||
// - validator index in `get_unslashed_attesting_indices(state, matching_head_attestations)`
|
||||
if validator.is_previous_epoch_head_attester && !validator.is_slashed {
|
||||
delta.reward(base_reward * matching_head_balance / total_balance);
|
||||
delta.reward(base_reward * matching_head_balance_ebi / total_balance_ebi);
|
||||
} else {
|
||||
delta.penalize(base_reward);
|
||||
}
|
||||
|
@ -2,7 +2,7 @@ use types::{BeaconStateError as Error, *};
|
||||
|
||||
/// Process slashings.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn process_slashings<T: EthSpec>(
|
||||
state: &mut BeaconState<T>,
|
||||
total_balance: u64,
|
||||
|
@ -5,7 +5,7 @@ use types::*;
|
||||
|
||||
/// Performs a validator registry update, if required.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn process_registry_updates<T: EthSpec>(
|
||||
state: &mut BeaconState<T>,
|
||||
spec: &ChainSpec,
|
||||
|
@ -12,7 +12,7 @@ macro_rules! set_self_if_other_is_true {
|
||||
}
|
||||
|
||||
/// The information required to reward a block producer for including an attestation in a block.
|
||||
#[derive(Clone, Copy)]
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct InclusionInfo {
|
||||
/// The distance between the attestation slot and the slot that attestation was included in a
|
||||
/// block.
|
||||
@ -43,7 +43,7 @@ impl InclusionInfo {
|
||||
}
|
||||
|
||||
/// Information required to reward some validator during the current and previous epoch.
|
||||
#[derive(Default, Clone)]
|
||||
#[derive(Debug, Default, Clone)]
|
||||
pub struct ValidatorStatus {
|
||||
/// True if the validator has been slashed, ever.
|
||||
pub is_slashed: bool,
|
||||
@ -107,30 +107,64 @@ impl ValidatorStatus {
|
||||
|
||||
/// The total effective balances for different sets of validators during the previous and current
|
||||
/// epochs.
|
||||
#[derive(Default, Clone, Debug)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct TotalBalances {
|
||||
/// The effective balance increment from the spec.
|
||||
effective_balance_increment: u64,
|
||||
/// The total effective balance of all active validators during the _current_ epoch.
|
||||
pub current_epoch: u64,
|
||||
current_epoch: u64,
|
||||
/// The total effective balance of all active validators during the _previous_ epoch.
|
||||
pub previous_epoch: u64,
|
||||
previous_epoch: u64,
|
||||
/// The total effective balance of all validators who attested during the _current_ epoch.
|
||||
pub current_epoch_attesters: u64,
|
||||
current_epoch_attesters: u64,
|
||||
/// The total effective balance of all validators who attested during the _current_ epoch and
|
||||
/// agreed with the state about the beacon block at the first slot of the _current_ epoch.
|
||||
pub current_epoch_target_attesters: u64,
|
||||
current_epoch_target_attesters: u64,
|
||||
/// The total effective balance of all validators who attested during the _previous_ epoch.
|
||||
pub previous_epoch_attesters: u64,
|
||||
previous_epoch_attesters: u64,
|
||||
/// The total effective balance of all validators who attested during the _previous_ epoch and
|
||||
/// agreed with the state about the beacon block at the first slot of the _previous_ epoch.
|
||||
pub previous_epoch_target_attesters: u64,
|
||||
previous_epoch_target_attesters: u64,
|
||||
/// The total effective balance of all validators who attested during the _previous_ epoch and
|
||||
/// agreed with the state about the beacon block at the time of attestation.
|
||||
pub previous_epoch_head_attesters: u64,
|
||||
previous_epoch_head_attesters: u64,
|
||||
}
|
||||
|
||||
// Generate a safe accessor for a balance in `TotalBalances`, as per spec `get_total_balance`.
macro_rules! balance_accessor {
    ($field_name:ident) => {
        pub fn $field_name(&self) -> u64 {
            std::cmp::max(self.effective_balance_increment, self.$field_name)
        }
    };
}

impl TotalBalances {
    pub fn new(spec: &ChainSpec) -> Self {
        Self {
            effective_balance_increment: spec.effective_balance_increment,
            current_epoch: 0,
            previous_epoch: 0,
            current_epoch_attesters: 0,
            current_epoch_target_attesters: 0,
            previous_epoch_attesters: 0,
            previous_epoch_target_attesters: 0,
            previous_epoch_head_attesters: 0,
        }
    }

    balance_accessor!(current_epoch);
    balance_accessor!(previous_epoch);
    balance_accessor!(current_epoch_attesters);
    balance_accessor!(current_epoch_target_attesters);
    balance_accessor!(previous_epoch_attesters);
    balance_accessor!(previous_epoch_target_attesters);
    balance_accessor!(previous_epoch_head_attesters);
}
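// Rough sketch (not part of this diff) of what a single `balance_accessor!` invocation
// expands to, written as a free function over hypothetical inputs. Flooring at
// `effective_balance_increment` mirrors the spec's `get_total_balance`, so the divisions
// in the reward code above can never divide by zero.
fn balance_accessor_expansion_sketch(effective_balance_increment: u64, raw_total: u64) -> u64 {
    std::cmp::max(effective_balance_increment, raw_total)
}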
/// Summarised information about validator participation in the _previous_ and _current_ epochs of
|
||||
/// some `BeaconState`.
|
||||
#[derive(Clone)]
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ValidatorStatuses {
|
||||
/// Information about each individual validator from the state's validator registry.
|
||||
pub statuses: Vec<ValidatorStatus>,
|
||||
@ -144,13 +178,13 @@ impl ValidatorStatuses {
|
||||
/// - Active validators
|
||||
/// - Total balances for the current and previous epochs.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn new<T: EthSpec>(
|
||||
state: &BeaconState<T>,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<Self, BeaconStateError> {
|
||||
let mut statuses = Vec::with_capacity(state.validators.len());
|
||||
let mut total_balances = TotalBalances::default();
|
||||
let mut total_balances = TotalBalances::new(spec);
|
||||
|
||||
for (i, validator) in state.validators.iter().enumerate() {
|
||||
let effective_balance = state.get_effective_balance(i, spec)?;
|
||||
@ -184,7 +218,7 @@ impl ValidatorStatuses {
|
||||
/// Process some attestations from the given `state` updating the `statuses` and
|
||||
/// `total_balances` fields.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn process_attestations<T: EthSpec>(
|
||||
&mut self,
|
||||
state: &BeaconState<T>,
|
||||
@ -221,10 +255,10 @@ impl ValidatorStatuses {
|
||||
|
||||
if target_matches_epoch_start_block(a, state, state.previous_epoch())? {
|
||||
status.is_previous_epoch_target_attester = true;
|
||||
}
|
||||
|
||||
if has_common_beacon_block_root(a, state)? {
|
||||
status.is_previous_epoch_head_attester = true;
|
||||
if has_common_beacon_block_root(a, state)? {
|
||||
status.is_previous_epoch_head_attester = true;
|
||||
}
|
||||
}
|
||||
}
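// Reading note (added commentary, not from the diff): the head-vote check appears to move
// inside the target-vote branch here, so `is_previous_epoch_head_attester` is now only set
// when the attestation also matched the epoch's target checkpoint. That mirrors spec
// v0.11.x, where the matching *head* attestations are a subset of the matching *target*
// attestations.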
@ -265,7 +299,7 @@ impl ValidatorStatuses {
|
||||
/// Returns `true` if the attestation's FFG target is equal to the hash of the `state`'s first
|
||||
/// beacon block in the given `epoch`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
fn target_matches_epoch_start_block<T: EthSpec>(
|
||||
a: &PendingAttestation<T>,
|
||||
state: &BeaconState<T>,
|
||||
@ -280,7 +314,7 @@ fn target_matches_epoch_start_block<T: EthSpec>(
|
||||
/// Returns `true` if a `PendingAttestation` and `BeaconState` share the same beacon block hash for
|
||||
/// the current slot of the `PendingAttestation`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
fn has_common_beacon_block_root<T: EthSpec>(
|
||||
a: &PendingAttestation<T>,
|
||||
state: &BeaconState<T>,
|
||||
|
@ -13,7 +13,7 @@ pub enum Error {
|
||||
/// `state_root` is `None`, the root of `state` will be computed using a cached tree hash.
|
||||
/// Providing the `state_root` makes this function several orders of magnitude faster.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn per_slot_processing<T: EthSpec>(
|
||||
state: &mut BeaconState<T>,
|
||||
state_root: Option<Hash256>,
|
||||
|
@ -62,7 +62,14 @@ impl<T: EthSpec> BlockBuilder<T> {
|
||||
|
||||
let proposer_keypair = &keypairs[proposer_index];
|
||||
|
||||
builder.set_randao_reveal(&proposer_keypair.sk, &state.fork, spec);
|
||||
builder.set_proposer_index(proposer_index as u64);
|
||||
|
||||
builder.set_randao_reveal(
|
||||
&proposer_keypair.sk,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
);
|
||||
|
||||
let parent_root = state.latest_block_header.canonical_root();
|
||||
builder.set_parent_root(parent_root);
|
||||
@ -79,6 +86,7 @@ impl<T: EthSpec> BlockBuilder<T> {
|
||||
validator_index,
|
||||
&keypairs[validator_index as usize].sk,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
);
|
||||
}
|
||||
@ -106,6 +114,7 @@ impl<T: EthSpec> BlockBuilder<T> {
|
||||
&attesters,
|
||||
&secret_keys,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
);
|
||||
}
|
||||
@ -161,9 +170,12 @@ impl<T: EthSpec> BlockBuilder<T> {
|
||||
// Set the eth1 data to be different from the state.
|
||||
self.block_builder.block.body.eth1_data.block_hash = Hash256::from_slice(&[42; 32]);
|
||||
|
||||
let block = self
|
||||
.block_builder
|
||||
.build(&proposer_keypair.sk, &state.fork, spec);
|
||||
let block = self.block_builder.build(
|
||||
&proposer_keypair.sk,
|
||||
&state.fork,
|
||||
state.genesis_validators_root,
|
||||
spec,
|
||||
);
|
||||
|
||||
(block, state)
|
||||
}
|
||||
|
@ -90,19 +90,6 @@ fn all_benches(c: &mut Criterion) {
|
||||
.sample_size(10),
|
||||
);
|
||||
|
||||
let inner_state = state.clone();
|
||||
c.bench(
|
||||
&format!("{}_validators", validator_count),
|
||||
Benchmark::new("clone_without_caches/beacon_state", move |b| {
|
||||
b.iter_batched_ref(
|
||||
|| inner_state.clone(),
|
||||
|state| black_box(state.clone_without_caches()),
|
||||
criterion::BatchSize::SmallInput,
|
||||
)
|
||||
})
|
||||
.sample_size(10),
|
||||
);
|
||||
|
||||
let inner_state = state.clone();
|
||||
c.bench(
|
||||
&format!("{}_validators", validator_count),
|
||||
|
@ -1,6 +1,6 @@
|
||||
use super::{
|
||||
Attestation, ChainSpec, Domain, EthSpec, Fork, PublicKey, SecretKey, SelectionProof, Signature,
|
||||
SignedRoot,
|
||||
Attestation, ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, SecretKey, SelectionProof,
|
||||
Signature, SignedRoot,
|
||||
};
|
||||
use crate::test_utils::TestRandom;
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
@ -31,10 +31,17 @@ impl<T: EthSpec> AggregateAndProof<T> {
|
||||
aggregate: Attestation<T>,
|
||||
secret_key: &SecretKey,
|
||||
fork: &Fork,
|
||||
genesis_validators_root: Hash256,
|
||||
spec: &ChainSpec,
|
||||
) -> Self {
|
||||
let selection_proof =
|
||||
SelectionProof::new::<T>(aggregate.data.slot, secret_key, fork, spec).into();
|
||||
let selection_proof = SelectionProof::new::<T>(
|
||||
aggregate.data.slot,
|
||||
secret_key,
|
||||
fork,
|
||||
genesis_validators_root,
|
||||
spec,
|
||||
)
|
||||
.into();
|
||||
|
||||
Self {
|
||||
aggregator_index,
|
||||
@ -48,10 +55,16 @@ impl<T: EthSpec> AggregateAndProof<T> {
|
||||
&self,
|
||||
validator_pubkey: &PublicKey,
|
||||
fork: &Fork,
|
||||
genesis_validators_root: Hash256,
|
||||
spec: &ChainSpec,
|
||||
) -> bool {
|
||||
let target_epoch = self.aggregate.data.slot.epoch(T::slots_per_epoch());
|
||||
let domain = spec.get_domain(target_epoch, Domain::SelectionProof, fork);
|
||||
let domain = spec.get_domain(
|
||||
target_epoch,
|
||||
Domain::SelectionProof,
|
||||
fork,
|
||||
genesis_validators_root,
|
||||
);
|
||||
let message = self.aggregate.data.slot.signing_root(domain);
|
||||
self.selection_proof
|
||||
.verify(message.as_bytes(), validator_pubkey)
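// Hypothetical caller-side sketch (the verifying method's name is cut off by this hunk, so
// `is_valid_selection_proof` is an assumption): checking a gossiped aggregate now requires
// threading the state's `genesis_validators_root` through alongside the fork.
fn check_selection_proof<T: EthSpec>(
    signed: &AggregateAndProof<T>,
    aggregator_pubkey: &PublicKey,
    fork: &Fork,
    genesis_validators_root: Hash256,
    spec: &ChainSpec,
) -> bool {
    signed.is_valid_selection_proof(aggregator_pubkey, fork, genesis_validators_root, spec)
}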
@ -2,7 +2,7 @@ use super::{
|
||||
AggregateSignature, AttestationData, BitList, ChainSpec, Domain, EthSpec, Fork, SecretKey,
|
||||
Signature, SignedRoot, SubnetId,
|
||||
};
|
||||
use crate::test_utils::TestRandom;
|
||||
use crate::{test_utils::TestRandom, Hash256};
|
||||
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use ssz_derive::{Decode, Encode};
|
||||
@ -17,7 +17,7 @@ pub enum Error {
|
||||
|
||||
/// Details an attestation that can be slashable.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
|
||||
#[serde(bound = "T: EthSpec")]
|
||||
pub struct Attestation<T: EthSpec> {
|
||||
@ -53,6 +53,7 @@ impl<T: EthSpec> Attestation<T> {
|
||||
secret_key: &SecretKey,
|
||||
committee_position: usize,
|
||||
fork: &Fork,
|
||||
genesis_validators_root: Hash256,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<(), Error> {
|
||||
if self
|
||||
@ -66,7 +67,12 @@ impl<T: EthSpec> Attestation<T> {
|
||||
.set(committee_position, true)
|
||||
.map_err(Error::SszTypesError)?;
|
||||
|
||||
let domain = spec.get_domain(self.data.target.epoch, Domain::BeaconAttester, fork);
|
||||
let domain = spec.get_domain(
|
||||
self.data.target.epoch,
|
||||
Domain::BeaconAttester,
|
||||
fork,
|
||||
genesis_validators_root,
|
||||
);
|
||||
let message = self.data.signing_root(domain);
|
||||
|
||||
self.signature
|
||||
|
@ -8,7 +8,7 @@ use tree_hash_derive::TreeHash;
|
||||
|
||||
/// The data upon which an attestation is based.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
#[derive(
|
||||
Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash, Encode, Decode, TreeHash, TestRandom,
|
||||
)]
|
||||
|
@ -7,7 +7,7 @@ use tree_hash_derive::TreeHash;
|
||||
|
||||
/// Two conflicting attestations.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
|
||||
#[serde(bound = "T: EthSpec")]
|
||||
pub struct AttesterSlashing<T: EthSpec> {
|
||||
|
@ -10,11 +10,12 @@ use tree_hash_derive::TreeHash;
|
||||
|
||||
/// A block of the `BeaconChain`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
|
||||
#[serde(bound = "T: EthSpec")]
|
||||
pub struct BeaconBlock<T: EthSpec> {
|
||||
pub slot: Slot,
|
||||
pub proposer_index: u64,
|
||||
pub parent_root: Hash256,
|
||||
pub state_root: Hash256,
|
||||
pub body: BeaconBlockBody<T>,
|
||||
@ -25,10 +26,11 @@ impl<T: EthSpec> SignedRoot for BeaconBlock<T> {}
|
||||
impl<T: EthSpec> BeaconBlock<T> {
|
||||
/// Returns an empty block to be used during genesis.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn empty(spec: &ChainSpec) -> Self {
|
||||
BeaconBlock {
|
||||
slot: spec.genesis_slot,
|
||||
proposer_index: 0,
|
||||
parent_root: Hash256::zero(),
|
||||
state_root: Hash256::zero(),
|
||||
body: BeaconBlockBody {
|
||||
@ -55,7 +57,7 @@ impl<T: EthSpec> BeaconBlock<T> {
|
||||
|
||||
/// Returns the `tree_hash_root` of the block.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn canonical_root(&self) -> Hash256 {
|
||||
Hash256::from_slice(&self.tree_hash_root()[..])
|
||||
}
|
||||
@ -67,10 +69,11 @@ impl<T: EthSpec> BeaconBlock<T> {
|
||||
///
|
||||
/// Note: performs a full tree-hash of `self.body`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn block_header(&self) -> BeaconBlockHeader {
|
||||
BeaconBlockHeader {
|
||||
slot: self.slot,
|
||||
proposer_index: self.proposer_index,
|
||||
parent_root: self.parent_root,
|
||||
state_root: self.state_root,
|
||||
body_root: Hash256::from_slice(&self.body.tree_hash_root()[..]),
|
||||
@ -79,7 +82,7 @@ impl<T: EthSpec> BeaconBlock<T> {
|
||||
|
||||
/// Returns a "temporary" header, where the `state_root` is `Hash256::zero()`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn temporary_block_header(&self) -> BeaconBlockHeader {
|
||||
BeaconBlockHeader {
|
||||
state_root: Hash256::zero(),
|
||||
@ -92,9 +95,15 @@ impl<T: EthSpec> BeaconBlock<T> {
|
||||
self,
|
||||
secret_key: &SecretKey,
|
||||
fork: &Fork,
|
||||
genesis_validators_root: Hash256,
|
||||
spec: &ChainSpec,
|
||||
) -> SignedBeaconBlock<T> {
|
||||
let domain = spec.get_domain(self.epoch(), Domain::BeaconProposer, fork);
|
||||
let domain = spec.get_domain(
|
||||
self.epoch(),
|
||||
Domain::BeaconProposer,
|
||||
fork,
|
||||
genesis_validators_root,
|
||||
);
|
||||
let message = self.signing_root(domain);
|
||||
let signature = Signature::new(message.as_bytes(), secret_key);
|
||||
SignedBeaconBlock {
|
||||
|
@ -10,7 +10,7 @@ use tree_hash_derive::TreeHash;
|
||||
|
||||
/// The body of a `BeaconChain` block, containing operations.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
|
||||
#[serde(bound = "T: EthSpec")]
|
||||
pub struct BeaconBlockBody<T: EthSpec> {
|
||||
|
@ -9,10 +9,11 @@ use tree_hash_derive::TreeHash;
|
||||
|
||||
/// A header of a `BeaconBlock`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
|
||||
pub struct BeaconBlockHeader {
|
||||
pub slot: Slot,
|
||||
pub proposer_index: u64,
|
||||
pub parent_root: Hash256,
|
||||
pub state_root: Hash256,
|
||||
pub body_root: Hash256,
|
||||
@ -23,17 +24,18 @@ impl SignedRoot for BeaconBlockHeader {}
|
||||
impl BeaconBlockHeader {
|
||||
/// Returns the `tree_hash_root` of the header.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn canonical_root(&self) -> Hash256 {
|
||||
Hash256::from_slice(&self.tree_hash_root()[..])
|
||||
}
|
||||
|
||||
/// Given a `body`, consumes `self` and returns a complete `BeaconBlock`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn into_block<T: EthSpec>(self, body: BeaconBlockBody<T>) -> BeaconBlock<T> {
|
||||
BeaconBlock {
|
||||
slot: self.slot,
|
||||
proposer_index: self.proposer_index,
|
||||
parent_root: self.parent_root,
|
||||
state_root: self.state_root,
|
||||
body,
|
||||
@ -45,10 +47,11 @@ impl BeaconBlockHeader {
|
||||
self,
|
||||
secret_key: &SecretKey,
|
||||
fork: &Fork,
|
||||
genesis_validators_root: Hash256,
|
||||
spec: &ChainSpec,
|
||||
) -> SignedBeaconBlockHeader {
|
||||
let epoch = self.slot.epoch(E::slots_per_epoch());
|
||||
let domain = spec.get_domain(epoch, Domain::BeaconProposer, fork);
|
||||
let domain = spec.get_domain(epoch, Domain::BeaconProposer, fork, genesis_validators_root);
|
||||
let message = self.signing_root(domain);
|
||||
let signature = Signature::new(message.as_bytes(), secret_key);
|
||||
SignedBeaconBlockHeader {
|
||||
|
@ -72,6 +72,10 @@ pub enum Error {
|
||||
InvalidValidatorPubkey(ssz::DecodeError),
|
||||
ValidatorRegistryShrunk,
|
||||
TreeHashCacheInconsistent,
|
||||
InvalidDepositState {
|
||||
deposit_count: u64,
|
||||
deposit_index: u64,
|
||||
},
|
||||
}
|
||||
|
||||
/// Control whether an epoch-indexed field can be indexed at the next epoch or not.
|
||||
@ -92,7 +96,7 @@ impl AllowNextEpoch {
|
||||
|
||||
/// The state of the `BeaconChain` at some slot.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
#[derive(
|
||||
Debug,
|
||||
PartialEq,
|
||||
@ -112,6 +116,7 @@ where
|
||||
{
|
||||
// Versioning
|
||||
pub genesis_time: u64,
|
||||
pub genesis_validators_root: Hash256,
|
||||
pub slot: Slot,
|
||||
pub fork: Fork,
|
||||
|
||||
@ -183,11 +188,12 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
///
|
||||
/// Not a complete genesis state, see `initialize_beacon_state_from_eth1`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn new(genesis_time: u64, eth1_data: Eth1Data, spec: &ChainSpec) -> Self {
|
||||
BeaconState {
|
||||
// Versioning
|
||||
genesis_time,
|
||||
genesis_validators_root: Hash256::zero(), // Set later.
|
||||
slot: spec.genesis_slot,
|
||||
fork: Fork {
|
||||
previous_version: spec.genesis_fork_version,
|
||||
@ -240,7 +246,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Returns the `tree_hash_root` of the state.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn canonical_root(&self) -> Hash256 {
|
||||
Hash256::from_slice(&self.tree_hash_root()[..])
|
||||
}
|
||||
@ -269,7 +275,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// The epoch corresponding to `self.slot`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn current_epoch(&self) -> Epoch {
|
||||
self.slot.epoch(T::slots_per_epoch())
|
||||
}
|
||||
@ -278,7 +284,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
///
|
||||
/// If the current epoch is the genesis epoch, the genesis_epoch is returned.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn previous_epoch(&self) -> Epoch {
|
||||
let current_epoch = self.current_epoch();
|
||||
if current_epoch > T::genesis_epoch() {
|
||||
@ -290,7 +296,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// The epoch following `self.current_epoch()`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn next_epoch(&self) -> Epoch {
|
||||
self.current_epoch() + 1
|
||||
}
|
||||
@ -299,7 +305,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
///
|
||||
/// Makes use of the committee cache and will fail if no cache exists for the slot's epoch.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_committee_count_at_slot(&self, slot: Slot) -> Result<u64, Error> {
|
||||
let cache = self.committee_cache_at_slot(slot)?;
|
||||
Ok(cache.committees_per_slot() as u64)
|
||||
@ -307,7 +313,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Compute the number of committees in an entire epoch.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_epoch_committee_count(&self, relative_epoch: RelativeEpoch) -> Result<u64, Error> {
|
||||
let cache = self.committee_cache(relative_epoch)?;
|
||||
Ok(cache.epoch_committee_count() as u64)
|
||||
@ -331,7 +337,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
///
|
||||
/// Does not utilize the cache, performs a full iteration over the validator registry.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_active_validator_indices(&self, epoch: Epoch) -> Vec<usize> {
|
||||
// FIXME(sproul): put a bounds check on here based on the maximum lookahead
|
||||
get_active_validator_indices(&self.validators, epoch)
|
||||
@ -352,7 +358,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
///
|
||||
/// Utilises the committee cache.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_beacon_committee(
|
||||
&self,
|
||||
slot: Slot,
|
||||
@ -371,7 +377,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
///
|
||||
/// Utilises the committee cache.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_beacon_committees_at_slot(&self, slot: Slot) -> Result<Vec<BeaconCommittee>, Error> {
|
||||
let cache = self.committee_cache_at_slot(slot)?;
|
||||
cache.get_beacon_committees_at_slot(slot)
|
||||
@ -381,7 +387,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
///
|
||||
/// Utilises the committee cache.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_beacon_committees_at_epoch(
|
||||
&self,
|
||||
relative_epoch: RelativeEpoch,
|
||||
@ -392,7 +398,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Compute the proposer (not necessarily for the Beacon chain) from a list of indices.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
// NOTE: be sure to test this bad boy.
|
||||
pub fn compute_proposer_index(
|
||||
&self,
|
||||
@ -455,7 +461,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Returns the beacon proposer index for the `slot` in the given `relative_epoch`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_beacon_proposer_index(&self, slot: Slot, spec: &ChainSpec) -> Result<usize, Error> {
|
||||
let epoch = slot.epoch(T::slots_per_epoch());
|
||||
let seed = self.get_beacon_proposer_seed(slot, spec)?;
|
||||
@ -466,7 +472,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Compute the seed to use for the beacon proposer selection at the given `slot`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
fn get_beacon_proposer_seed(&self, slot: Slot, spec: &ChainSpec) -> Result<Vec<u8>, Error> {
|
||||
let epoch = slot.epoch(T::slots_per_epoch());
|
||||
let mut preimage = self
|
||||
@ -481,7 +487,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
///
|
||||
/// It needs filling in on all slots where there isn't a skip.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_latest_block_root(&self, current_state_root: Hash256) -> Hash256 {
|
||||
if self.latest_block_header.state_root.is_zero() {
|
||||
let mut latest_block_header = self.latest_block_header.clone();
|
||||
@ -494,7 +500,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Safely obtains the index for latest block roots, given some `slot`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
fn get_latest_block_roots_index(&self, slot: Slot) -> Result<usize, Error> {
|
||||
if (slot < self.slot) && (self.slot <= slot + self.block_roots.len() as u64) {
|
||||
Ok(slot.as_usize() % self.block_roots.len())
|
||||
@ -505,7 +511,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Return the block root at a recent `slot`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_block_root(&self, slot: Slot) -> Result<&Hash256, BeaconStateError> {
|
||||
let i = self.get_latest_block_roots_index(slot)?;
|
||||
Ok(&self.block_roots[i])
|
||||
@ -513,7 +519,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Return the block root at a recent `epoch`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
// NOTE: the spec calls this get_block_root
|
||||
pub fn get_block_root_at_epoch(&self, epoch: Epoch) -> Result<&Hash256, BeaconStateError> {
|
||||
self.get_block_root(epoch.start_slot(T::slots_per_epoch()))
|
||||
@ -521,7 +527,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Sets the block root for some given slot.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn set_block_root(
|
||||
&mut self,
|
||||
slot: Slot,
|
||||
@ -539,7 +545,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Safely obtains the index for `randao_mixes`
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
fn get_randao_mix_index(
|
||||
&self,
|
||||
epoch: Epoch,
|
||||
@ -561,7 +567,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
///
|
||||
/// See `Self::get_randao_mix`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn update_randao_mix(&mut self, epoch: Epoch, signature: &Signature) -> Result<(), Error> {
|
||||
let i = epoch.as_usize() % T::EpochsPerHistoricalVector::to_usize();
|
||||
|
||||
@ -574,7 +580,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Return the randao mix at a recent ``epoch``.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_randao_mix(&self, epoch: Epoch) -> Result<&Hash256, Error> {
|
||||
let i = self.get_randao_mix_index(epoch, AllowNextEpoch::False)?;
|
||||
Ok(&self.randao_mixes[i])
|
||||
@ -582,7 +588,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Set the randao mix at a recent ``epoch``.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn set_randao_mix(&mut self, epoch: Epoch, mix: Hash256) -> Result<(), Error> {
|
||||
let i = self.get_randao_mix_index(epoch, AllowNextEpoch::True)?;
|
||||
self.randao_mixes[i] = mix;
|
||||
@ -591,7 +597,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Safely obtains the index for latest state roots, given some `slot`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
fn get_latest_state_roots_index(&self, slot: Slot) -> Result<usize, Error> {
|
||||
if (slot < self.slot) && (self.slot <= slot + Slot::from(self.state_roots.len())) {
|
||||
Ok(slot.as_usize() % self.state_roots.len())
|
||||
@ -602,7 +608,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Gets the state root for some slot.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_state_root(&self, slot: Slot) -> Result<&Hash256, Error> {
|
||||
let i = self.get_latest_state_roots_index(slot)?;
|
||||
Ok(&self.state_roots[i])
|
||||
@ -610,7 +616,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Gets the oldest (earliest slot) state root.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_oldest_state_root(&self) -> Result<&Hash256, Error> {
|
||||
let i =
|
||||
self.get_latest_state_roots_index(self.slot - Slot::from(self.state_roots.len()))?;
|
||||
@ -619,7 +625,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Gets the oldest (earliest slot) block root.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_oldest_block_root(&self) -> Result<&Hash256, Error> {
|
||||
let i = self.get_latest_block_roots_index(self.slot - self.block_roots.len() as u64)?;
|
||||
Ok(&self.block_roots[i])
|
||||
@ -627,7 +633,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Sets the latest state root for slot.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn set_state_root(&mut self, slot: Slot, state_root: Hash256) -> Result<(), Error> {
|
||||
let i = self.get_latest_state_roots_index(slot)?;
|
||||
self.state_roots[i] = state_root;
|
||||
@ -636,7 +642,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Safely obtain the index for `slashings`, given some `epoch`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
fn get_slashings_index(
|
||||
&self,
|
||||
epoch: Epoch,
|
||||
@ -656,14 +662,14 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Get a reference to the entire `slashings` vector.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_all_slashings(&self) -> &[u64] {
|
||||
&self.slashings
|
||||
}
|
||||
|
||||
/// Get the total slashed balances for some epoch.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_slashings(&self, epoch: Epoch) -> Result<u64, Error> {
|
||||
let i = self.get_slashings_index(epoch, AllowNextEpoch::False)?;
|
||||
Ok(self.slashings[i])
|
||||
@ -671,7 +677,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Set the total slashed balances for some epoch.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn set_slashings(&mut self, epoch: Epoch, value: u64) -> Result<(), Error> {
|
||||
let i = self.get_slashings_index(epoch, AllowNextEpoch::True)?;
|
||||
self.slashings[i] = value;
|
||||
@ -680,7 +686,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Get the attestations from the current or previous epoch.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_matching_source_attestations(
|
||||
&self,
|
||||
epoch: Epoch,
|
||||
@ -696,7 +702,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Generate a seed for the given `epoch`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_seed(
|
||||
&self,
|
||||
epoch: Epoch,
|
||||
@ -727,7 +733,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Return the effective balance (also known as "balance at stake") for a validator with the given ``index``.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_effective_balance(
|
||||
&self,
|
||||
validator_index: usize,
|
||||
@ -741,7 +747,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Return the epoch at which an activation or exit triggered in ``epoch`` takes effect.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn compute_activation_exit_epoch(&self, epoch: Epoch, spec: &ChainSpec) -> Epoch {
|
||||
epoch + 1 + spec.max_seed_lookahead
|
||||
}
|
||||
@ -750,7 +756,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
///
|
||||
/// Uses the epoch cache, and will error if it isn't initialized.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_churn_limit(&self, spec: &ChainSpec) -> Result<u64, Error> {
|
||||
Ok(std::cmp::max(
|
||||
spec.min_per_epoch_churn_limit,
|
||||
@ -765,7 +771,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
///
|
||||
/// Note: Utilizes the cache and will fail if the appropriate cache is not initialized.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_attestation_duties(
|
||||
&self,
|
||||
validator_index: usize,
|
||||
@ -778,7 +784,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
|
||||
/// Return the combined effective balance of an array of validators.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_total_balance(
|
||||
&self,
|
||||
validator_indices: &[usize],
|
||||
@ -790,6 +796,19 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
})
|
||||
}
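// Hedged sketch (not from this diff; the body of `get_total_balance` is truncated in this
// hunk): per spec v0.11.x the total is the sum of the validators' effective balances,
// floored at one EFFECTIVE_BALANCE_INCREMENT so that callers never divide by zero.
fn total_balance_sketch(effective_balances: &[u64], effective_balance_increment: u64) -> u64 {
    std::cmp::max(
        effective_balance_increment,
        effective_balances.iter().sum::<u64>(),
    )
}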
/// Get the number of outstanding deposits.
|
||||
///
|
||||
/// Returns `Err` if the state is invalid.
|
||||
pub fn get_outstanding_deposit_len(&self) -> Result<u64, Error> {
|
||||
self.eth1_data
|
||||
.deposit_count
|
||||
.checked_sub(self.eth1_deposit_index)
|
||||
.ok_or_else(|| Error::InvalidDepositState {
|
||||
deposit_count: self.eth1_data.deposit_count,
|
||||
deposit_index: self.eth1_deposit_index,
|
||||
})
|
||||
}
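// Hedged usage sketch (not from this diff): during block production the number of deposits
// a block is expected to include is the lesser of the per-block maximum and the
// outstanding count returned above. `max_deposits_per_block` is a stand-in for the
// MAX_DEPOSITS constant.
fn expected_deposits_in_block<T: EthSpec>(
    state: &BeaconState<T>,
    max_deposits_per_block: u64,
) -> Result<u64, Error> {
    Ok(std::cmp::min(
        max_deposits_per_block,
        state.get_outstanding_deposit_len()?,
    ))
}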
/// Build all the caches, if they need to be built.
|
||||
pub fn build_all_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> {
|
||||
self.build_all_committee_caches(spec)?;
|
||||
@ -950,7 +969,26 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
if let Some(mut cache) = cache {
|
||||
// Note: we return early if the tree hash fails, leaving `self.tree_hash_cache` as
|
||||
// None. There's no need to keep a cache that fails.
|
||||
let root = cache.recalculate_tree_hash_root(self)?;
|
||||
let root = cache.recalculate_tree_hash_root(&self)?;
|
||||
self.tree_hash_cache = Some(cache);
|
||||
Ok(root)
|
||||
} else {
|
||||
Err(Error::TreeHashCacheNotInitialized)
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute the tree hash root of the validators using the tree hash cache.
|
||||
///
|
||||
/// Initialize the tree hash cache if it isn't already initialized.
|
||||
pub fn update_validators_tree_hash_cache(&mut self) -> Result<Hash256, Error> {
|
||||
self.initialize_tree_hash_cache();
|
||||
|
||||
let cache = self.tree_hash_cache.take();
|
||||
|
||||
if let Some(mut cache) = cache {
|
||||
// Note: we return early if the tree hash fails, leaving `self.tree_hash_cache` as
|
||||
// None. There's no need to keep a cache that fails.
|
||||
let root = cache.recalculate_validators_tree_hash_root(&self.validators)?;
|
||||
self.tree_hash_cache = Some(cache);
|
||||
Ok(root)
|
||||
} else {
|
||||
@ -967,6 +1005,7 @@ impl<T: EthSpec> BeaconState<T> {
|
||||
pub fn clone_with(&self, config: CloneConfig) -> Self {
|
||||
BeaconState {
|
||||
genesis_time: self.genesis_time,
|
||||
genesis_validators_root: self.genesis_validators_root,
|
||||
slot: self.slot,
|
||||
fork: self.fork.clone(),
|
||||
latest_block_header: self.latest_block_header.clone(),
|
||||
|
@ -22,7 +22,7 @@ pub struct CommitteeCache {
|
||||
impl CommitteeCache {
|
||||
/// Return a new, fully initialized cache.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn initialized<T: EthSpec>(
|
||||
state: &BeaconState<T>,
|
||||
epoch: Epoch,
|
||||
@ -87,7 +87,7 @@ impl CommitteeCache {
|
||||
///
|
||||
/// Always returns `&[]` for a non-initialized epoch.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn active_validator_indices(&self) -> &[usize] {
|
||||
&self.shuffling
|
||||
}
|
||||
@ -96,7 +96,7 @@ impl CommitteeCache {
|
||||
///
|
||||
/// Always returns `&[]` for a non-initialized epoch.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn shuffling(&self) -> &[usize] {
|
||||
&self.shuffling
|
||||
}
|
||||
@ -202,7 +202,7 @@ impl CommitteeCache {
|
||||
///
|
||||
/// Always returns `usize::default()` for a non-initialized epoch.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn active_validator_count(&self) -> usize {
|
||||
self.shuffling.len()
|
||||
}
|
||||
@ -211,7 +211,7 @@ impl CommitteeCache {
|
||||
///
|
||||
/// Always returns `usize::default()` for a non-initialized epoch.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn epoch_committee_count(&self) -> usize {
|
||||
self.committees_per_slot as usize * self.slots_per_epoch as usize
|
||||
}
|
||||
@ -223,7 +223,7 @@ impl CommitteeCache {
|
||||
|
||||
/// Returns a slice of `self.shuffling` that represents the `index`'th committee in the epoch.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
fn compute_committee(&self, index: usize) -> Option<&[usize]> {
|
||||
Some(&self.shuffling[self.compute_committee_range(index)?])
|
||||
}
|
||||
@ -234,7 +234,7 @@ impl CommitteeCache {
|
||||
///
|
||||
/// Will also return `None` if the index is out of bounds.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
fn compute_committee_range(&self, index: usize) -> Option<Range<usize>> {
|
||||
let count = self.epoch_committee_count();
|
||||
if count == 0 || index >= count {
|
||||
@ -261,7 +261,7 @@ impl CommitteeCache {
|
||||
/// Returns a list of all `validators` indices where the validator is active at the given
|
||||
/// `epoch`.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_active_validator_indices(validators: &[Validator], epoch: Epoch) -> Vec<usize> {
|
||||
let mut active = Vec::with_capacity(validators.len());
|
||||
|
||||
|
@ -365,3 +365,44 @@ mod committees {
|
||||
committee_consistency_test_suite::<MinimalEthSpec>(RelativeEpoch::Next);
|
||||
}
|
||||
}
|
||||
|
||||
mod get_outstanding_deposit_len {
|
||||
use super::*;
|
||||
use crate::test_utils::TestingBeaconStateBuilder;
|
||||
use crate::MinimalEthSpec;
|
||||
|
||||
fn state() -> BeaconState<MinimalEthSpec> {
|
||||
let spec = MinimalEthSpec::default_spec();
|
||||
let builder: TestingBeaconStateBuilder<MinimalEthSpec> =
|
||||
TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(16, &spec);
|
||||
let (state, _keypairs) = builder.build();
|
||||
|
||||
state
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn returns_ok() {
|
||||
let mut state = state();
|
||||
assert_eq!(state.get_outstanding_deposit_len(), Ok(0));
|
||||
|
||||
state.eth1_data.deposit_count = 17;
|
||||
state.eth1_deposit_index = 16;
|
||||
assert_eq!(state.get_outstanding_deposit_len(), Ok(1));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn returns_err_if_the_state_is_invalid() {
|
||||
let mut state = state();
|
||||
// The state is invalid, deposit count is lower than deposit index.
|
||||
state.eth1_data.deposit_count = 16;
|
||||
state.eth1_deposit_index = 17;
|
||||
|
||||
assert_eq!(
|
||||
state.get_outstanding_deposit_len(),
|
||||
Err(BeaconStateError::InvalidDepositState {
|
||||
deposit_count: 16,
|
||||
deposit_index: 17,
|
||||
})
|
||||
);
|
||||
}
|
||||
}
|
||||
|
@ -82,6 +82,7 @@ impl BeaconTreeHashCache {
|
||||
let mut hasher = MerkleHasher::with_leaves(NUM_BEACON_STATE_HASHING_FIELDS);
|
||||
|
||||
hasher.write(state.genesis_time.tree_hash_root().as_bytes())?;
|
||||
hasher.write(state.genesis_validators_root.tree_hash_root().as_bytes())?;
|
||||
hasher.write(state.slot.tree_hash_root().as_bytes())?;
|
||||
hasher.write(state.fork.tree_hash_root().as_bytes())?;
|
||||
hasher.write(state.latest_block_header.tree_hash_root().as_bytes())?;
|
||||
@ -153,6 +154,14 @@ impl BeaconTreeHashCache {
|
||||
|
||||
hasher.finish().map_err(Into::into)
|
||||
}
|
||||
|
||||
/// Updates the cache and provides the root of the given `validators`.
|
||||
pub fn recalculate_validators_tree_hash_root(
|
||||
&mut self,
|
||||
validators: &[Validator],
|
||||
) -> Result<Hash256, Error> {
|
||||
self.validators.recalculate_tree_hash_root(validators)
|
||||
}
|
||||
}
|
||||
|
||||
/// A specialized cache for computing the tree hash root of `state.validators`.
|
||||
|
@ -11,7 +11,8 @@ use utils::{
|
||||
|
||||
/// Each of the BLS signature domains.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
#[derive(Debug, PartialEq, Clone, Copy)]
|
||||
pub enum Domain {
|
||||
BeaconProposer,
|
||||
BeaconAttester,
|
||||
@ -24,7 +25,7 @@ pub enum Domain {
|
||||
|
||||
/// Holds all the "constants" for a BeaconChain.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct ChainSpec {
|
||||
@ -47,6 +48,9 @@ pub struct ChainSpec {
|
||||
pub shuffle_round_count: u8,
|
||||
pub min_genesis_active_validator_count: u64,
|
||||
pub min_genesis_time: u64,
|
||||
pub hysteresis_quotient: u64,
|
||||
pub hysteresis_downward_multiplier: u64,
|
||||
pub hysteresis_upward_multiplier: u64,
|
||||
|
||||
/*
|
||||
* Gwei values
|
||||
@ -128,16 +132,12 @@ impl ChainSpec {
|
||||
///
|
||||
/// Presently, we don't have any forks so we just ignore the slot. In the future this function
|
||||
/// may return something different based upon the slot.
|
||||
pub fn enr_fork_id(&self, _slot: Slot) -> EnrForkId {
|
||||
// TODO: set this to something sensible once v0.11.0 is ready.
|
||||
let genesis_validators_root = Hash256::zero();
|
||||
|
||||
pub fn enr_fork_id(&self, _slot: Slot, genesis_validators_root: Hash256) -> EnrForkId {
|
||||
EnrForkId {
|
||||
fork_digest: Self::compute_fork_digest(
|
||||
self.genesis_fork_version,
|
||||
genesis_validators_root,
|
||||
)
|
||||
.to_le_bytes(),
|
||||
),
|
||||
next_fork_version: self.genesis_fork_version,
|
||||
next_fork_epoch: self.far_future_epoch,
|
||||
}
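// Hedged usage sketch (not from this diff): callers now supply the state's real
// `genesis_validators_root` rather than relying on the old hard-coded `Hash256::zero()`
// placeholder. `state` here is assumed to be a `BeaconState` with its usual fields.
fn current_enr_fork_id<T: EthSpec>(state: &BeaconState<T>, spec: &ChainSpec) -> EnrForkId {
    spec.enr_fork_id(state.slot, state.genesis_validators_root)
}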
@ -153,7 +153,7 @@ impl ChainSpec {
|
||||
|
||||
/// Get the domain number, unmodified by the fork.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn get_domain_constant(&self, domain: Domain) -> u32 {
|
||||
match domain {
|
||||
Domain::BeaconProposer => self.domain_beacon_proposer,
|
||||
@ -166,12 +166,18 @@ impl ChainSpec {
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the domain number that represents the fork meta and signature domain.
|
||||
/// Get the domain that represents the fork meta and signature domain.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
pub fn get_domain(&self, epoch: Epoch, domain: Domain, fork: &Fork) -> u64 {
|
||||
/// Spec v0.11.1
|
||||
pub fn get_domain(
|
||||
&self,
|
||||
epoch: Epoch,
|
||||
domain: Domain,
|
||||
fork: &Fork,
|
||||
genesis_validators_root: Hash256,
|
||||
) -> Hash256 {
|
||||
let fork_version = fork.get_fork_version(epoch);
|
||||
self.compute_domain(domain, fork_version)
|
||||
self.compute_domain(domain, fork_version, genesis_validators_root)
|
||||
}
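// Hedged sketch (not from this diff) of the fork-version selection that `get_domain`
// relies on: `fork.get_fork_version(epoch)` is expected to return `previous_version`
// strictly before `fork.epoch` and `current_version` from `fork.epoch` onwards, which is
// what the updated `test_domain` at the bottom of this file exercises.
fn fork_version_for(fork: &Fork, epoch: Epoch) -> [u8; 4] {
    if epoch < fork.epoch {
        fork.previous_version
    } else {
        fork.current_version
    }
}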
/// Get the domain for a deposit signature.
|
||||
@ -179,16 +185,16 @@ impl ChainSpec {
|
||||
/// Deposits are valid across forks, thus the deposit domain is computed
|
||||
/// with the genesis fork version.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
pub fn get_deposit_domain(&self) -> u64 {
|
||||
self.compute_domain(Domain::Deposit, self.genesis_fork_version)
|
||||
/// Spec v0.11.1
|
||||
pub fn get_deposit_domain(&self) -> Hash256 {
|
||||
self.compute_domain(Domain::Deposit, self.genesis_fork_version, Hash256::zero())
|
||||
}
|
||||
|
||||
/// Return the 32-byte fork data root for the `current_version` and `genesis_validators_root`.
|
||||
///
|
||||
/// This is used primarily in signature domains to avoid collisions across forks/chains.
|
||||
///
|
||||
/// Spec v0.11.0
|
||||
/// Spec v0.11.1
|
||||
pub fn compute_fork_data_root(
|
||||
current_version: [u8; 4],
|
||||
genesis_validators_root: Hash256,
|
||||
@ -204,33 +210,39 @@ impl ChainSpec {
///
/// This is a digest primarily used for domain separation on the p2p layer.
/// 4-bytes suffices for practical separation of forks/chains.
pub fn compute_fork_digest(current_version: [u8; 4], genesis_validators_root: Hash256) -> u32 {
    let fork_data_root = Self::compute_fork_data_root(current_version, genesis_validators_root);

    let mut bytes = [0; 4];
    bytes.copy_from_slice(&fork_data_root[0..4]);

    u32::from_le_bytes(bytes)
pub fn compute_fork_digest(
    current_version: [u8; 4],
    genesis_validators_root: Hash256,
) -> [u8; 4] {
    let mut result = [0; 4];
    let root = Self::compute_fork_data_root(current_version, genesis_validators_root);
    result.copy_from_slice(&root.as_bytes()[0..4]);
    result
}
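// Hedged sketch (not from this diff): with the new return type the digest is simply the
// first four bytes of the fork data root, carried as raw bytes instead of a `u32`. The
// inputs below are illustrative; the zero root is the placeholder used before genesis.
fn example_fork_digest() -> [u8; 4] {
    let genesis_fork_version = [0x00, 0x00, 0x00, 0x01]; // minimal-spec value from this diff
    ChainSpec::compute_fork_digest(genesis_fork_version, Hash256::zero())
}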
/// Compute a domain by applying the given `fork_version`.
///
/// Spec v0.10.1
pub fn compute_domain(&self, domain: Domain, fork_version: [u8; 4]) -> u64 {
/// Spec v0.11.1
pub fn compute_domain(
    &self,
    domain: Domain,
    fork_version: [u8; 4],
    genesis_validators_root: Hash256,
) -> Hash256 {
    let domain_constant = self.get_domain_constant(domain);

    let mut bytes: Vec<u8> = int_to_bytes4(domain_constant);
    bytes.append(&mut fork_version.to_vec());
    let mut domain = [0; 32];
    domain[0..4].copy_from_slice(&int_to_bytes4(domain_constant));
    domain[4..].copy_from_slice(
        &Self::compute_fork_data_root(fork_version, genesis_validators_root)[..28],
    );

    let mut fork_and_domain = [0; 8];
    fork_and_domain.copy_from_slice(&bytes);

    u64::from_le_bytes(fork_and_domain)
    Hash256::from(domain)
}
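// Layout sketch (derived from the code above, written as a standalone function over raw
// bytes): the 32-byte domain is the 4-byte little-endian domain constant followed by the
// first 28 bytes of `compute_fork_data_root(fork_version, genesis_validators_root)`.
fn domain_layout_sketch(domain_constant_le: [u8; 4], fork_data_root: [u8; 32]) -> [u8; 32] {
    let mut domain = [0u8; 32];
    domain[0..4].copy_from_slice(&domain_constant_le);
    domain[4..].copy_from_slice(&fork_data_root[..28]);
    domain
}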
/// Returns a `ChainSpec` compatible with the Ethereum Foundation specification.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn mainnet() -> Self {
|
||||
Self {
|
||||
/*
|
||||
@ -251,6 +263,9 @@ impl ChainSpec {
|
||||
shuffle_round_count: 90,
|
||||
min_genesis_active_validator_count: 16_384,
|
||||
min_genesis_time: 1_578_009_600, // Jan 3, 2020
|
||||
hysteresis_quotient: 4,
|
||||
hysteresis_downward_multiplier: 1,
|
||||
hysteresis_upward_multiplier: 5,
|
||||
|
||||
/*
|
||||
* Gwei values
|
||||
@ -325,7 +340,7 @@ impl ChainSpec {
|
||||
|
||||
/// Ethereum Foundation minimal spec, as defined in the eth2.0-specs repo.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn minimal() -> Self {
|
||||
// Note: bootnodes to be updated when static nodes exist.
|
||||
let boot_nodes = vec![];
|
||||
@ -337,6 +352,7 @@ impl ChainSpec {
|
||||
min_genesis_active_validator_count: 64,
|
||||
eth1_follow_distance: 16,
|
||||
genesis_fork_version: [0x00, 0x00, 0x00, 0x01],
|
||||
persistent_committee_period: 128,
|
||||
min_genesis_delay: 300,
|
||||
milliseconds_per_slot: 6_000,
|
||||
network_id: 2, // lighthouse testnet network id
|
||||
@ -371,7 +387,6 @@ impl Default for ChainSpec {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use int_to_bytes::int_to_bytes8;
|
||||
|
||||
#[test]
|
||||
fn test_mainnet_spec_can_be_constructed() {
|
||||
@ -379,19 +394,27 @@ mod tests {
|
||||
}
|
||||
|
||||
fn test_domain(domain_type: Domain, raw_domain: u32, spec: &ChainSpec) {
|
||||
let previous_version = [0, 0, 0, 1];
|
||||
let current_version = [0, 0, 0, 2];
|
||||
let genesis_validators_root = Hash256::from_low_u64_le(77);
|
||||
let fork_epoch = Epoch::new(1024);
|
||||
let fork = Fork {
|
||||
previous_version: spec.genesis_fork_version,
|
||||
current_version: spec.genesis_fork_version,
|
||||
epoch: MinimalEthSpec::genesis_epoch(),
|
||||
previous_version,
|
||||
current_version,
|
||||
epoch: fork_epoch,
|
||||
};
|
||||
let epoch = Epoch::new(0);
|
||||
|
||||
let domain = spec.get_domain(epoch, domain_type, &fork);
|
||||
for (epoch, version) in vec![
|
||||
(fork_epoch - 1, previous_version),
|
||||
(fork_epoch, current_version),
|
||||
(fork_epoch + 1, current_version),
|
||||
] {
|
||||
let domain1 = spec.get_domain(epoch, domain_type, &fork, genesis_validators_root);
|
||||
let domain2 = spec.compute_domain(domain_type, version, genesis_validators_root);
|
||||
|
||||
let mut expected = int_to_bytes4(raw_domain);
|
||||
expected.append(&mut fork.get_fork_version(epoch).to_vec());
|
||||
|
||||
assert_eq!(int_to_bytes8(domain), expected);
|
||||
assert_eq!(domain1, domain2);
|
||||
assert_eq!(&domain1.as_bytes()[0..4], &int_to_bytes4(raw_domain)[..]);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -403,18 +426,25 @@ mod tests {
|
||||
test_domain(Domain::Randao, spec.domain_randao, &spec);
|
||||
test_domain(Domain::Deposit, spec.domain_deposit, &spec);
|
||||
test_domain(Domain::VoluntaryExit, spec.domain_voluntary_exit, &spec);
|
||||
test_domain(Domain::SelectionProof, spec.domain_selection_proof, &spec);
|
||||
test_domain(
|
||||
Domain::AggregateAndProof,
|
||||
spec.domain_aggregate_and_proof,
|
||||
&spec,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// Union of a ChainSpec struct and an EthSpec struct that holds constants used for the configs
|
||||
/// from the Ethereum 2 specs repo (https://github.com/ethereum/eth2.0-specs/tree/dev/configs)
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Doesn't include fields of the YAML that we don't need yet (e.g. Phase 1 stuff).
|
||||
///
|
||||
/// Spec v0.11.1
|
||||
// Yaml Config is declared here in order to access domain fields of ChainSpec which are private.
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
|
||||
#[serde(rename_all = "UPPERCASE")]
|
||||
#[serde(default)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub struct YamlConfig {
|
||||
// ChainSpec
|
||||
far_future_epoch: u64,
|
||||
@ -432,6 +462,9 @@ pub struct YamlConfig {
|
||||
max_effective_balance: u64,
|
||||
ejection_balance: u64,
|
||||
effective_balance_increment: u64,
|
||||
hysteresis_quotient: u64,
|
||||
hysteresis_downward_multiplier: u64,
|
||||
hysteresis_upward_multiplier: u64,
|
||||
genesis_slot: u64,
|
||||
#[serde(
|
||||
serialize_with = "fork_to_hex_str",
|
||||
@ -483,12 +516,22 @@ pub struct YamlConfig {
|
||||
deserialize_with = "u32_from_hex_str",
|
||||
serialize_with = "u32_to_hex_str"
|
||||
)]
|
||||
domain_selection_proof: u32,
|
||||
#[serde(
|
||||
deserialize_with = "u32_from_hex_str",
|
||||
serialize_with = "u32_to_hex_str"
|
||||
)]
|
||||
domain_aggregate_and_proof: u32,
|
||||
#[serde(
|
||||
deserialize_with = "u32_from_hex_str",
|
||||
serialize_with = "u32_to_hex_str"
|
||||
)]
|
||||
// EthSpec
|
||||
justification_bits_length: u32,
|
||||
max_validators_per_committee: u32,
|
||||
genesis_epoch: Epoch,
|
||||
slots_per_epoch: u64,
|
||||
slots_per_eth1_voting_period: usize,
|
||||
epochs_per_eth1_voting_period: u64,
|
||||
slots_per_historical_root: usize,
|
||||
epochs_per_historical_vector: usize,
|
||||
epochs_per_slashings_vector: usize,
|
||||
@ -506,34 +549,6 @@ pub struct YamlConfig {
|
||||
random_subnets_per_validator: u64,
|
||||
epochs_per_random_subnet_subscription: u64,
|
||||
seconds_per_eth1_block: u64,
|
||||
|
||||
// Deposit Contract (unused)
|
||||
#[serde(skip_serializing)]
|
||||
deposit_contract_address: String,
|
||||
|
||||
// Phase 1
|
||||
#[serde(skip_serializing)]
|
||||
epochs_per_custody_period: u32,
|
||||
#[serde(skip_serializing)]
|
||||
custody_period_to_randao_padding: u32,
|
||||
#[serde(skip_serializing)]
|
||||
shard_slots_per_beacon_slot: u32,
|
||||
#[serde(skip_serializing)]
|
||||
epochs_per_shard_period: u32,
|
||||
#[serde(skip_serializing)]
|
||||
phase_1_fork_epoch: u32,
|
||||
#[serde(skip_serializing)]
|
||||
phase_1_fork_slot: u32,
|
||||
#[serde(skip_serializing)]
|
||||
domain_custody_bit_challenge: u32,
|
||||
#[serde(skip_serializing)]
|
||||
domain_shard_proposer: u32,
|
||||
#[serde(skip_serializing)]
|
||||
domain_shard_attester: u32,
|
||||
#[serde(skip_serializing)]
|
||||
max_epochs_per_crosslink: u64,
|
||||
#[serde(skip_serializing)]
|
||||
early_derived_secret_penalty_max_future_epochs: u32,
|
||||
}
|
||||
|
||||
impl Default for YamlConfig {
|
||||
@ -543,7 +558,7 @@ impl Default for YamlConfig {
|
||||
}
|
||||
}
|
||||
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
impl YamlConfig {
|
||||
pub fn from_spec<T: EthSpec>(spec: &ChainSpec) -> Self {
|
||||
Self {
|
||||
@ -563,6 +578,9 @@ impl YamlConfig {
|
||||
max_effective_balance: spec.max_effective_balance,
|
||||
ejection_balance: spec.ejection_balance,
|
||||
effective_balance_increment: spec.effective_balance_increment,
|
||||
hysteresis_quotient: spec.hysteresis_quotient,
|
||||
hysteresis_downward_multiplier: spec.hysteresis_downward_multiplier,
|
||||
hysteresis_upward_multiplier: spec.hysteresis_upward_multiplier,
|
||||
genesis_slot: spec.genesis_slot.into(),
|
||||
bls_withdrawal_prefix: spec.bls_withdrawal_prefix_byte,
|
||||
seconds_per_slot: spec.milliseconds_per_slot / 1000,
|
||||
@ -584,13 +602,15 @@ impl YamlConfig {
|
||||
domain_randao: spec.domain_randao,
|
||||
domain_deposit: spec.domain_deposit,
|
||||
domain_voluntary_exit: spec.domain_voluntary_exit,
|
||||
domain_selection_proof: spec.domain_selection_proof,
|
||||
domain_aggregate_and_proof: spec.domain_aggregate_and_proof,
|
||||
|
||||
// EthSpec
|
||||
justification_bits_length: T::JustificationBitsLength::to_u32(),
|
||||
max_validators_per_committee: T::MaxValidatorsPerCommittee::to_u32(),
|
||||
genesis_epoch: T::genesis_epoch(),
|
||||
slots_per_epoch: T::slots_per_epoch(),
|
||||
slots_per_eth1_voting_period: T::slots_per_eth1_voting_period(),
|
||||
epochs_per_eth1_voting_period: T::EpochsPerEth1VotingPeriod::to_u64(),
|
||||
slots_per_historical_root: T::slots_per_historical_root(),
|
||||
epochs_per_historical_vector: T::epochs_per_historical_vector(),
|
||||
epochs_per_slashings_vector: T::EpochsPerSlashingsVector::to_usize(),
|
||||
@ -608,22 +628,6 @@ impl YamlConfig {
|
||||
random_subnets_per_validator: 0,
|
||||
epochs_per_random_subnet_subscription: 0,
|
||||
seconds_per_eth1_block: spec.seconds_per_eth1_block,
|
||||
|
||||
// Deposit Contract (unused)
|
||||
deposit_contract_address: String::new(),
|
||||
|
||||
// Phase 1
|
||||
epochs_per_custody_period: 0,
|
||||
custody_period_to_randao_padding: 0,
|
||||
shard_slots_per_beacon_slot: 0,
|
||||
epochs_per_shard_period: 0,
|
||||
phase_1_fork_epoch: 0,
|
||||
phase_1_fork_slot: 0,
|
||||
domain_custody_bit_challenge: 0,
|
||||
domain_shard_proposer: 0,
|
||||
domain_shard_attester: 0,
|
||||
max_epochs_per_crosslink: 0,
|
||||
early_derived_secret_penalty_max_future_epochs: 0,
|
||||
}
|
||||
}
|
||||
|
||||
@ -640,7 +644,7 @@ impl YamlConfig {
|
||||
|| self.max_validators_per_committee != T::MaxValidatorsPerCommittee::to_u32()
|
||||
|| self.genesis_epoch != T::genesis_epoch()
|
||||
|| self.slots_per_epoch != T::slots_per_epoch()
|
||||
|| self.slots_per_eth1_voting_period != T::slots_per_eth1_voting_period()
|
||||
|| self.epochs_per_eth1_voting_period != T::EpochsPerEth1VotingPeriod::to_u64()
|
||||
|| self.slots_per_historical_root != T::slots_per_historical_root()
|
||||
|| self.epochs_per_historical_vector != T::epochs_per_historical_vector()
|
||||
|| self.epochs_per_slashings_vector != T::EpochsPerSlashingsVector::to_usize()
|
||||
@ -669,6 +673,9 @@ impl YamlConfig {
|
||||
min_deposit_amount: self.min_deposit_amount,
|
||||
min_genesis_delay: self.min_genesis_delay,
|
||||
max_effective_balance: self.max_effective_balance,
|
||||
hysteresis_quotient: self.hysteresis_quotient,
|
||||
hysteresis_downward_multiplier: self.hysteresis_downward_multiplier,
|
||||
hysteresis_upward_multiplier: self.hysteresis_upward_multiplier,
|
||||
ejection_balance: self.ejection_balance,
|
||||
effective_balance_increment: self.effective_balance_increment,
|
||||
genesis_slot: Slot::from(self.genesis_slot),
|
||||
@ -688,6 +695,7 @@ impl YamlConfig {
|
||||
inactivity_penalty_quotient: self.inactivity_penalty_quotient,
|
||||
min_slashing_penalty_quotient: self.min_slashing_penalty_quotient,
|
||||
domain_beacon_proposer: self.domain_beacon_proposer,
|
||||
domain_beacon_attester: self.domain_beacon_attester,
|
||||
domain_randao: self.domain_randao,
|
||||
domain_deposit: self.domain_deposit,
|
||||
domain_voluntary_exit: self.domain_voluntary_exit,
|
||||
|
@ -7,7 +7,7 @@ use tree_hash_derive::TreeHash;
|
||||
|
||||
/// Casper FFG checkpoint, used in attestations.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
#[derive(
|
||||
Debug,
|
||||
Clone,
|
||||
|
@ -11,7 +11,7 @@ pub const DEPOSIT_TREE_DEPTH: usize = 32;
|
||||
|
||||
/// A deposit to potentially become a beacon chain validator.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
|
||||
pub struct Deposit {
|
||||
pub proof: FixedVector<Hash256, U33>,
|
||||
|
@ -9,7 +9,7 @@ use tree_hash_derive::TreeHash;
|
||||
|
||||
/// The data supplied by the user to the deposit contract.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
|
||||
pub struct DepositData {
|
||||
pub pubkey: PublicKeyBytes,
|
||||
@ -21,7 +21,7 @@ pub struct DepositData {
|
||||
impl DepositData {
|
||||
/// Create a `DepositMessage` corresponding to this `DepositData`, for signature verification.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn as_deposit_message(&self) -> DepositMessage {
|
||||
DepositMessage {
|
||||
pubkey: self.pubkey.clone(),
|
||||
@ -32,7 +32,7 @@ impl DepositData {
|
||||
|
||||
/// Generate the signature for a given DepositData details.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
pub fn create_signature(&self, secret_key: &SecretKey, spec: &ChainSpec) -> SignatureBytes {
|
||||
let domain = spec.get_deposit_domain();
|
||||
let msg = self.as_deposit_message().signing_root(domain);
|
||||
|
@ -9,7 +9,7 @@ use tree_hash_derive::TreeHash;
|
||||
|
||||
/// The data supplied by the user to the deposit contract.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
|
||||
pub struct DepositMessage {
|
||||
pub pubkey: PublicKeyBytes,
|
||||
|
@ -8,7 +8,7 @@ use tree_hash_derive::TreeHash;
|
||||
|
||||
/// Contains data obtained from the Eth1 chain.
|
||||
///
|
||||
/// Spec v0.10.1
|
||||
/// Spec v0.11.1
|
||||
#[derive(
|
||||
Debug,
|
||||
PartialEq,
|
||||
|
@@ -1,7 +1,7 @@
use crate::*;
use serde_derive::{Deserialize, Serialize};
use ssz_types::typenum::{
-    Unsigned, U0, U1, U1024, U1099511627776, U128, U16, U16777216, U2048, U32, U4, U4096, U64,
+    Unsigned, U0, U1, U1024, U1099511627776, U128, U16, U16777216, U2, U2048, U32, U4, U4096, U64,
    U65536, U8, U8192,
};
use std::fmt::Debug;
@@ -21,7 +21,7 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq {
     * Time parameters
     */
    type SlotsPerEpoch: Unsigned + Clone + Sync + Send + Debug + PartialEq;
-    type SlotsPerEth1VotingPeriod: Unsigned + Clone + Sync + Send + Debug + PartialEq;
+    type EpochsPerEth1VotingPeriod: Unsigned + Clone + Sync + Send + Debug + PartialEq;
    type SlotsPerHistoricalRoot: Unsigned + Clone + Sync + Send + Debug + PartialEq;
    /*
     * State list lengths
@@ -44,9 +44,13 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq {
    /// The length of the `{previous,current}_epoch_attestations` lists.
    ///
    /// Must be set to `MaxAttestations * SlotsPerEpoch`
-    // NOTE: we could safely instantiate this by using type-level arithmetic, but doing
+    // NOTE: we could safely instantiate these by using type-level arithmetic, but doing
    // so adds ~25s to the time required to type-check this crate
    type MaxPendingAttestations: Unsigned + Clone + Sync + Send + Debug + PartialEq;
+    /// The length of `eth1_data_votes`.
+    ///
+    /// Must be set to `EpochsPerEth1VotingPeriod * SlotsPerEpoch`
+    type SlotsPerEth1VotingPeriod: Unsigned + Clone + Sync + Send + Debug + PartialEq;

    fn default_spec() -> ChainSpec;
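The NOTE above refers to deriving these lengths with type-level multiplication instead of writing them out by hand; the crate avoids it because it slows type-checking. A sketch of what that would look like, using the `typenum` crate directly (it is re-exported via `ssz_types::typenum` in this codebase); mainnet values are assumed for illustration:

```rust
// Sketch: derived lengths computed at the type level with typenum's Prod alias.
use typenum::{Prod, Unsigned, U128, U32};

// Primitive parameters (mainnet values).
type SlotsPerEpoch = U32;
type MaxAttestations = U128;
type EpochsPerEth1VotingPeriod = U32;

// Derived parameters, instantiated by multiplication instead of by hand.
type MaxPendingAttestations = Prod<MaxAttestations, SlotsPerEpoch>;
type SlotsPerEth1VotingPeriod = Prod<EpochsPerEth1VotingPeriod, SlotsPerEpoch>;

fn main() {
    assert_eq!(MaxPendingAttestations::to_u64(), 4096);
    assert_eq!(SlotsPerEth1VotingPeriod::to_u64(), 1024);
}
```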
@@ -59,7 +63,7 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq {
    /// Note: the number of committees per slot is constant in each epoch, and depends only on
    /// the `active_validator_count` during the slot's epoch.
    ///
-    /// Spec v0.10.1
+    /// Spec v0.11.1
    fn get_committee_count_per_slot(active_validator_count: usize, spec: &ChainSpec) -> usize {
        let slots_per_epoch = Self::SlotsPerEpoch::to_usize();
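The body of this function is truncated in the hunk; for reference, the eth2 spec's committee-count formula clamps `validators / slots_per_epoch / target_committee_size` between 1 and a per-slot maximum. A standalone sketch (the constants are mainnet spec values assumed here, not taken from the diff):

```rust
// Sketch of the spec's committee-count-per-slot formula.
const MAX_COMMITTEES_PER_SLOT: usize = 64;
const TARGET_COMMITTEE_SIZE: usize = 128;

fn committee_count_per_slot(active_validator_count: usize, slots_per_epoch: usize) -> usize {
    std::cmp::max(
        1,
        std::cmp::min(
            MAX_COMMITTEES_PER_SLOT,
            active_validator_count / slots_per_epoch / TARGET_COMMITTEE_SIZE,
        ),
    )
}

fn main() {
    // 100k validators at 32 slots per epoch yields 24 committees per slot.
    assert_eq!(committee_count_per_slot(100_000, 32), 24);
    // Tiny validator sets still get at least one committee.
    assert_eq!(committee_count_per_slot(100, 32), 1);
}
```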
@@ -83,28 +87,28 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq {

    /// Returns the `SLOTS_PER_EPOCH` constant for this specification.
    ///
-    /// Spec v0.10.1
+    /// Spec v0.11.1
    fn slots_per_epoch() -> u64 {
        Self::SlotsPerEpoch::to_u64()
    }

    /// Returns the `SLOTS_PER_HISTORICAL_ROOT` constant for this specification.
    ///
-    /// Spec v0.10.1
+    /// Spec v0.11.1
    fn slots_per_historical_root() -> usize {
        Self::SlotsPerHistoricalRoot::to_usize()
    }

    /// Returns the `EPOCHS_PER_HISTORICAL_VECTOR` constant for this specification.
    ///
-    /// Spec v0.10.1
+    /// Spec v0.11.1
    fn epochs_per_historical_vector() -> usize {
        Self::EpochsPerHistoricalVector::to_usize()
    }

    /// Returns the `SLOTS_PER_ETH1_VOTING_PERIOD` constant for this specification.
    ///
-    /// Spec v0.10.1
+    /// Spec v0.11.1
    fn slots_per_eth1_voting_period() -> usize {
        Self::SlotsPerEth1VotingPeriod::to_usize()
    }
@@ -120,7 +124,7 @@ macro_rules! params_from_eth_spec {

/// Ethereum Foundation specifications.
///
-/// Spec v0.10.1
+/// Spec v0.11.1
#[derive(Clone, PartialEq, Debug, Default, Serialize, Deserialize)]
pub struct MainnetEthSpec;
@@ -130,7 +134,7 @@ impl EthSpec for MainnetEthSpec {
    type MaxValidatorsPerCommittee = U2048;
    type GenesisEpoch = U0;
    type SlotsPerEpoch = U32;
-    type SlotsPerEth1VotingPeriod = U1024;
+    type EpochsPerEth1VotingPeriod = U32;
    type SlotsPerHistoricalRoot = U8192;
    type EpochsPerHistoricalVector = U65536;
    type EpochsPerSlashingsVector = U8192;
@@ -142,6 +146,7 @@ impl EthSpec for MainnetEthSpec {
    type MaxDeposits = U16;
    type MaxVoluntaryExits = U16;
    type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch
+    type SlotsPerEth1VotingPeriod = U1024; // 32 epochs * 32 slots per epoch

    fn default_spec() -> ChainSpec {
        ChainSpec::mainnet()
@@ -152,17 +157,18 @@ pub type FoundationBeaconState = BeaconState<MainnetEthSpec>;

/// Ethereum Foundation minimal spec, as defined in the eth2.0-specs repo.
///
-/// Spec v0.10.1
+/// Spec v0.11.1
#[derive(Clone, PartialEq, Debug, Default, Serialize, Deserialize)]
pub struct MinimalEthSpec;

impl EthSpec for MinimalEthSpec {
    type SlotsPerEpoch = U8;
-    type SlotsPerEth1VotingPeriod = U16;
+    type EpochsPerEth1VotingPeriod = U2;
    type SlotsPerHistoricalRoot = U64;
    type EpochsPerHistoricalVector = U64;
    type EpochsPerSlashingsVector = U64;
    type MaxPendingAttestations = U1024; // 128 max attestations * 8 slots per epoch
+    type SlotsPerEth1VotingPeriod = U16; // 2 epochs * 8 slots per epoch

    params_from_eth_spec!(MainnetEthSpec {
        JustificationBitsLength,
@@ -191,11 +197,12 @@ pub struct InteropEthSpec

impl EthSpec for InteropEthSpec {
    type SlotsPerEpoch = U8;
+    type EpochsPerEth1VotingPeriod = U2;
    type SlotsPerHistoricalRoot = U64;
-    type SlotsPerEth1VotingPeriod = U16;
    type EpochsPerHistoricalVector = U64;
    type EpochsPerSlashingsVector = U64;
    type MaxPendingAttestations = U1024; // 128 max attestations * 8 slots per epoch
+    type SlotsPerEth1VotingPeriod = U16; // 2 epochs * 8 slots per epoch

    params_from_eth_spec!(MainnetEthSpec {
        JustificationBitsLength,
@@ -9,7 +9,7 @@ use tree_hash_derive::TreeHash;

/// Specifies a fork of the `BeaconChain`, to prevent replay attacks.
///
-/// Spec v0.10.1
+/// Spec v0.11.1
#[derive(
    Debug, Clone, PartialEq, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom,
)]
@@ -30,7 +30,7 @@ pub struct Fork {
impl Fork {
    /// Return the fork version of the given ``epoch``.
    ///
-    /// Spec v0.10.1
+    /// Spec v0.11.1
    pub fn get_fork_version(&self, epoch: Epoch) -> [u8; 4] {
        if epoch < self.epoch {
            return self.previous_version;
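The fork-version selection shown above is a simple threshold on the fork epoch. A self-contained sketch with plain types standing in for Lighthouse's `Epoch` and version newtypes:

```rust
// Sketch: epochs before the fork epoch use the previous version; epochs at or
// after it use the current version.
struct Fork {
    previous_version: [u8; 4],
    current_version: [u8; 4],
    epoch: u64,
}

impl Fork {
    fn get_fork_version(&self, epoch: u64) -> [u8; 4] {
        if epoch < self.epoch {
            self.previous_version
        } else {
            self.current_version
        }
    }
}

fn main() {
    let fork = Fork {
        previous_version: [0, 0, 0, 0],
        current_version: [1, 0, 0, 0],
        epoch: 100,
    };
    assert_eq!(fork.get_fork_version(99), [0, 0, 0, 0]);
    assert_eq!(fork.get_fork_version(100), [1, 0, 0, 0]);
}
```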
@@ -9,7 +9,7 @@ use tree_hash_derive::TreeHash;

/// Specifies a fork of the `BeaconChain`, to prevent replay attacks.
///
-/// Spec v0.11.0
+/// Spec v0.11.1
#[derive(
    Debug, Clone, PartialEq, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom,
)]
@@ -9,7 +9,7 @@ use tree_hash_derive::TreeHash;

/// Historical block and state roots.
///
-/// Spec v0.10.1
+/// Spec v0.11.1
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
pub struct HistoricalBatch<T: EthSpec> {
    pub block_roots: FixedVector<Hash256, T::SlotsPerHistoricalRoot>,
@@ -8,7 +8,7 @@ use tree_hash_derive::TreeHash;
///
/// To be included in an `AttesterSlashing`.
///
-/// Spec v0.10.1
+/// Spec v0.11.1
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
#[serde(bound = "T: EthSpec")]
pub struct IndexedAttestation<T: EthSpec> {
@@ -21,14 +21,14 @@ pub struct IndexedAttestation<T: EthSpec> {
impl<T: EthSpec> IndexedAttestation<T> {
    /// Check if ``attestation_data_1`` and ``attestation_data_2`` have the same target.
    ///
-    /// Spec v0.10.1
+    /// Spec v0.11.1
    pub fn is_double_vote(&self, other: &Self) -> bool {
        self.data.target.epoch == other.data.target.epoch && self.data != other.data
    }

    /// Check if ``attestation_data_1`` surrounds ``attestation_data_2``.
    ///
-    /// Spec v0.10.1
+    /// Spec v0.11.1
    pub fn is_surround_vote(&self, other: &Self) -> bool {
        self.data.source.epoch < other.data.source.epoch
            && other.data.target.epoch < self.data.target.epoch
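The two slashing conditions above are pure epoch comparisons. A minimal sketch over bare (source, target) epoch pairs instead of full `IndexedAttestation`s (the real double-vote check compares the whole attestation data, not just the pair):

```rust
// Sketch of the FFG slashing conditions.
#[derive(PartialEq)]
struct VotePair {
    source_epoch: u64,
    target_epoch: u64,
}

// Double vote: two distinct attestations with the same target epoch.
fn is_double_vote(a: &VotePair, b: &VotePair) -> bool {
    a.target_epoch == b.target_epoch && a != b
}

// Surround vote: `a` strictly surrounds `b` (earlier source, later target).
fn is_surround_vote(a: &VotePair, b: &VotePair) -> bool {
    a.source_epoch < b.source_epoch && b.target_epoch < a.target_epoch
}

fn main() {
    let a = VotePair { source_epoch: 1, target_epoch: 5 };
    let b = VotePair { source_epoch: 2, target_epoch: 4 };
    assert!(is_surround_vote(&a, &b));
    assert!(!is_double_vote(&a, &b));
}
```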
@@ -67,6 +67,7 @@ pub use crate::deposit_message::DepositMessage;
pub use crate::enr_fork_id::EnrForkId;
pub use crate::eth1_data::Eth1Data;
pub use crate::fork::Fork;
+pub use crate::fork_data::ForkData;
pub use crate::free_attestation::FreeAttestation;
pub use crate::historical_batch::HistoricalBatch;
pub use crate::indexed_attestation::IndexedAttestation;
@@ -83,7 +84,6 @@ pub use crate::slot_epoch::{Epoch, Slot, FAR_FUTURE_EPOCH};
pub use crate::subnet_id::SubnetId;
pub use crate::validator::Validator;
pub use crate::voluntary_exit::VoluntaryExit;
-pub use fork_data::ForkData;

pub type CommitteeIndex = u64;
pub type Hash256 = H256;
@@ -8,7 +8,7 @@ use tree_hash_derive::TreeHash;

/// An attestation that has been included in the state but not yet fully processed.
///
-/// Spec v0.10.1
+/// Spec v0.11.1
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
pub struct PendingAttestation<T: EthSpec> {
    pub aggregation_bits: BitList<T::MaxValidatorsPerCommittee>,
@@ -8,10 +8,9 @@ use tree_hash_derive::TreeHash;

/// Two conflicting proposals from the same proposer (validator).
///
-/// Spec v0.10.1
+/// Spec v0.11.1
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
pub struct ProposerSlashing {
-    pub proposer_index: u64,
    pub signed_header_1: SignedBeaconBlockHeader,
    pub signed_header_2: SignedBeaconBlockHeader,
}
@@ -9,7 +9,7 @@ pub enum Error {
/// Defines the epochs relative to some epoch. Most useful when referring to the committees prior
/// to and following some epoch.
///
-/// Spec v0.10.1
+/// Spec v0.11.1
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum RelativeEpoch {
    /// The prior epoch.
@@ -23,7 +23,7 @@ pub enum RelativeEpoch {
impl RelativeEpoch {
    /// Returns the `epoch` that `self` refers to, with respect to the `base` epoch.
    ///
-    /// Spec v0.10.1
+    /// Spec v0.11.1
    pub fn into_epoch(self, base: Epoch) -> Epoch {
        match self {
            // Due to saturating nature of epoch, check for current first.
@@ -40,7 +40,7 @@ impl RelativeEpoch {
    /// - `EpochTooLow` when `other` is more than 1 prior to `base`.
    /// - `EpochTooHigh` when `other` is more than 1 after `base`.
    ///
-    /// Spec v0.10.1
+    /// Spec v0.11.1
    pub fn from_epoch(base: Epoch, other: Epoch) -> Result<Self, Error> {
        // Due to saturating nature of epoch, check for current first.
        if other == base {
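Both conversions above are short case analyses. A self-contained sketch with plain `u64` epochs and a simplified error type:

```rust
// Sketch of the RelativeEpoch conversions.
#[derive(Debug, PartialEq, Clone, Copy)]
enum RelativeEpoch {
    Previous,
    Current,
    Next,
}

#[derive(Debug, PartialEq)]
enum Error {
    EpochTooLow,
    EpochTooHigh,
}

impl RelativeEpoch {
    fn into_epoch(self, base: u64) -> u64 {
        match self {
            // Check `Current` first, mirroring the saturating-epoch caveat in
            // the real code (a saturating `base - 1` could equal `base`).
            RelativeEpoch::Current => base,
            RelativeEpoch::Previous => base.saturating_sub(1),
            RelativeEpoch::Next => base + 1,
        }
    }

    fn from_epoch(base: u64, other: u64) -> Result<Self, Error> {
        if other == base {
            Ok(RelativeEpoch::Current)
        } else if other + 1 == base {
            Ok(RelativeEpoch::Previous)
        } else if other == base + 1 {
            Ok(RelativeEpoch::Next)
        } else if other < base {
            Err(Error::EpochTooLow)
        } else {
            Err(Error::EpochTooHigh)
        }
    }
}

fn main() {
    assert_eq!(RelativeEpoch::from_epoch(10, 9), Ok(RelativeEpoch::Previous));
    assert_eq!(RelativeEpoch::Previous.into_epoch(10), 9);
    assert_eq!(RelativeEpoch::from_epoch(10, 12), Err(Error::EpochTooHigh));
}
```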
@@ -1,4 +1,4 @@
-use crate::{ChainSpec, Domain, EthSpec, Fork, SecretKey, Signature, SignedRoot, Slot};
+use crate::{ChainSpec, Domain, EthSpec, Fork, Hash256, SecretKey, Signature, SignedRoot, Slot};
use std::convert::TryInto;
use tree_hash::TreeHash;

@@ -10,12 +10,14 @@ impl SelectionProof {
        slot: Slot,
        secret_key: &SecretKey,
        fork: &Fork,
+        genesis_validators_root: Hash256,
        spec: &ChainSpec,
    ) -> Self {
        let domain = spec.get_domain(
            slot.epoch(T::slots_per_epoch()),
            Domain::SelectionProof,
            fork,
+            genesis_validators_root,
        );
        let message = slot.signing_root(domain);
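The reason `genesis_validators_root` now threads through every `get_domain` call: since spec v0.11.x a domain is the 4-byte domain type followed by the first 28 bytes of a fork-data root derived from the fork version and the genesis validators root. A hedged sketch of that layout, with `sha2` standing in for SSZ tree hashing (not Lighthouse's actual `ChainSpec::get_domain`):

```rust
// Sketch: domain = domain_type (4 bytes) ++ fork_data_root[..28].
use sha2::{Digest, Sha256};

fn compute_fork_data_root(current_version: [u8; 4], genesis_validators_root: [u8; 32]) -> [u8; 32] {
    // Stand-in for hash_tree_root(ForkData { current_version, genesis_validators_root }).
    let mut h = Sha256::new();
    h.update(&current_version);
    h.update(&genesis_validators_root);
    h.finalize().into()
}

fn compute_domain(
    domain_type: [u8; 4],
    fork_version: [u8; 4],
    genesis_validators_root: [u8; 32],
) -> [u8; 32] {
    let fork_data_root = compute_fork_data_root(fork_version, genesis_validators_root);
    let mut domain = [0u8; 32];
    domain[..4].copy_from_slice(&domain_type);
    domain[4..].copy_from_slice(&fork_data_root[..28]);
    domain
}

fn main() {
    // Illustrative domain type bytes; real domain types are defined by the spec.
    let d = compute_domain([5, 0, 0, 0], [0, 0, 0, 1], [0xaa; 32]);
    assert_eq!(&d[..4], &[5, 0, 0, 0]);
}
```

Binding the domain to the genesis validators root prevents signatures from being replayed across chains that share a fork version but have different genesis states.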
@@ -1,6 +1,6 @@
use super::{
-    AggregateAndProof, Attestation, ChainSpec, Domain, EthSpec, Fork, PublicKey, SecretKey,
-    Signature, SignedRoot,
+    AggregateAndProof, Attestation, ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey,
+    SecretKey, Signature, SignedRoot,
};
use crate::test_utils::TestRandom;
use serde_derive::{Deserialize, Serialize};
@@ -29,13 +29,25 @@ impl<T: EthSpec> SignedAggregateAndProof<T> {
        aggregate: Attestation<T>,
        secret_key: &SecretKey,
        fork: &Fork,
+        genesis_validators_root: Hash256,
        spec: &ChainSpec,
    ) -> Self {
-        let message =
-            AggregateAndProof::from_aggregate(aggregator_index, aggregate, secret_key, fork, spec);
+        let message = AggregateAndProof::from_aggregate(
+            aggregator_index,
+            aggregate,
+            secret_key,
+            fork,
+            genesis_validators_root,
+            spec,
+        );

        let target_epoch = message.aggregate.data.slot.epoch(T::slots_per_epoch());
-        let domain = spec.get_domain(target_epoch, Domain::AggregateAndProof, fork);
+        let domain = spec.get_domain(
+            target_epoch,
+            Domain::AggregateAndProof,
+            fork,
+            genesis_validators_root,
+        );
        let signing_message = message.signing_root(domain);

        SignedAggregateAndProof {
@@ -49,20 +61,35 @@ impl<T: EthSpec> SignedAggregateAndProof<T> {
        &self,
        validator_pubkey: &PublicKey,
        fork: &Fork,
+        genesis_validators_root: Hash256,
        spec: &ChainSpec,
    ) -> bool {
        let target_epoch = self.message.aggregate.data.slot.epoch(T::slots_per_epoch());
-        let domain = spec.get_domain(target_epoch, Domain::AggregateAndProof, fork);
+        let domain = spec.get_domain(
+            target_epoch,
+            Domain::AggregateAndProof,
+            fork,
+            genesis_validators_root,
+        );
        let message = self.message.signing_root(domain);
        self.signature.verify(message.as_bytes(), validator_pubkey)
    }

    /// Verifies the signature of the `AggregateAndProof` as well the underlying selection_proof in
    /// the contained `AggregateAndProof`.
-    pub fn is_valid(&self, validator_pubkey: &PublicKey, fork: &Fork, spec: &ChainSpec) -> bool {
-        self.is_valid_signature(validator_pubkey, fork, spec)
-            && self
-                .message
-                .is_valid_selection_proof(validator_pubkey, fork, spec)
+    pub fn is_valid(
+        &self,
+        validator_pubkey: &PublicKey,
+        fork: &Fork,
+        genesis_validators_root: Hash256,
+        spec: &ChainSpec,
+    ) -> bool {
+        self.is_valid_signature(validator_pubkey, fork, genesis_validators_root, spec)
+            && self.message.is_valid_selection_proof(
+                validator_pubkey,
+                fork,
+                genesis_validators_root,
+                spec,
+            )
    }
}
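Structurally, `is_valid` above is a conjunction: the outer signature must cover the whole `AggregateAndProof`, and the embedded selection proof must cover the slot. A rough sketch of that shape, with BLS abstracted behind a `verify(signature, message)` closure and deliberately simplified message bytes (the real code signs SSZ signing roots, as sketched earlier):

```rust
// Sketch: two-layer validity check for a signed aggregate.
struct AggregateAndProof {
    aggregator_index: u64,
    slot: u64,
    selection_proof: Vec<u8>,
}

struct SignedAggregateAndProof {
    message: AggregateAndProof,
    signature: Vec<u8>,
}

fn is_valid<F>(signed: &SignedAggregateAndProof, verify: F) -> bool
where
    // `verify(signature, message_bytes)` stands in for BLS verification
    // against the aggregator's public key.
    F: Fn(&[u8], &[u8]) -> bool,
{
    // Outer check: signature over the serialized AggregateAndProof.
    let outer_message = [
        signed.message.aggregator_index.to_le_bytes().as_slice(),
        signed.message.slot.to_le_bytes().as_slice(),
        signed.message.selection_proof.as_slice(),
    ]
    .concat();
    // Inner check: selection proof over the slot alone.
    let inner_message = signed.message.slot.to_le_bytes();

    verify(&signed.signature, &outer_message)
        && verify(&signed.message.selection_proof, &inner_message)
}

fn main() {
    let signed = SignedAggregateAndProof {
        message: AggregateAndProof {
            aggregator_index: 1,
            slot: 42,
            selection_proof: vec![0; 96],
        },
        signature: vec![0; 96],
    };
    // Accept-everything verifier, purely for demonstration.
    assert!(is_valid(&signed, |_sig, _msg| true));
}
```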
@@ -8,7 +8,7 @@ use tree_hash::TreeHash;

/// A `BeaconBlock` and a signature from its proposer.
///
-/// Spec v0.10.1
+/// Spec v0.11.1
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TestRandom)]
#[serde(bound = "E: EthSpec")]
pub struct SignedBeaconBlock<E: EthSpec> {
@@ -34,7 +34,7 @@ impl<E: EthSpec> SignedBeaconBlock<E> {

    /// Returns the `tree_hash_root` of the block.
    ///
-    /// Spec v0.10.1
+    /// Spec v0.11.1
    pub fn canonical_root(&self) -> Hash256 {
        Hash256::from_slice(&self.message.tree_hash_root()[..])
    }
@@ -8,7 +8,7 @@ use tree_hash_derive::TreeHash;

/// A `BeaconBlockHeader` and a signature from its proposer.
///
-/// Spec v0.10.1
+/// Spec v0.11.1
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
pub struct SignedBeaconBlockHeader {
    pub message: BeaconBlockHeader,
@@ -8,7 +8,7 @@ use tree_hash_derive::TreeHash;

/// An exit voluntarily submitted by a validator who wishes to withdraw.
///
-/// Spec v0.10.1
+/// Spec v0.11.1
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
pub struct SignedVoluntaryExit {
    pub message: VoluntaryExit,
@@ -9,11 +9,11 @@ use tree_hash_derive::TreeHash;
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
pub struct SigningRoot {
    pub object_root: Hash256,
-    pub domain: u64,
+    pub domain: Hash256,
}

pub trait SignedRoot: TreeHash {
-    fn signing_root(&self, domain: u64) -> Hash256 {
+    fn signing_root(&self, domain: Hash256) -> Hash256 {
        SigningRoot {
            object_root: self.tree_hash_root(),
            domain,
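This hunk is the core of the v0.11 signing change: the domain mixed into every signing root is now a full 32-byte value rather than a `u64`. A hedged sketch of the combination, using `sha2` as a stand-in for the SSZ tree hash of the two-field `SigningRoot` container:

```rust
// Sketch: a signing root commits to both the object root and a 32-byte domain.
use sha2::{Digest, Sha256};

type Hash256 = [u8; 32];

fn signing_root(object_root: Hash256, domain: Hash256) -> Hash256 {
    let mut h = Sha256::new();
    h.update(&object_root);
    h.update(&domain);
    h.finalize().into()
}

fn main() {
    let root = signing_root([1; 32], [2; 32]);
    assert_eq!(root.len(), 32);
}
```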
Some files were not shown because too many files have changed in this diff.