diff --git a/.gitignore b/.gitignore
index 570bb6cdf..d6b4306ef 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,4 +6,4 @@ target/
 flamegraph.svg
 perf.data*
 *.tar.gz
-bin/
+/bin
diff --git a/Cargo.lock b/Cargo.lock
index 535511c43..17184a10f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2,14 +2,15 @@
 # It is not intended for manual editing.
 [[package]]
 name = "account_manager"
-version = "0.2.13"
+version = "0.3.0"
 dependencies = [
  "account_utils",
  "bls",
  "clap",
  "clap_utils",
  "deposit_contract",
- "dirs",
+ "directory",
+ "dirs 3.0.1",
  "environment",
  "eth2_keystore",
  "eth2_ssz",
@@ -17,11 +18,12 @@ dependencies = [
  "eth2_testnet_config",
  "eth2_wallet",
  "eth2_wallet_manager",
- "futures 0.3.5",
+ "futures 0.3.6",
  "hex 0.4.2",
  "libc",
  "rand 0.7.3",
  "rayon",
+ "slashing_protection",
  "slog",
  "slog-async",
  "slog-term",
@@ -36,6 +38,7 @@ dependencies = [
 name = "account_utils"
 version = "0.1.0"
 dependencies = [
+ "directory",
  "eth2_keystore",
  "eth2_wallet",
  "rand 0.7.3",
@@ -86,19 +89,30 @@ version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f7001367fde4c768a19d1029f0a8be5abd9308e1119846d5bd9ad26297b8faf5"
 dependencies = [
- "aes-soft",
- "aesni",
- "block-cipher",
+ "aes-soft 0.4.0",
+ "aesni 0.7.0",
+ "block-cipher 0.7.1",
+]
+
+[[package]]
+name = "aes"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dd2bc6d3f370b5666245ff421e231cba4353df936e26986d2918e61a8fd6aef6"
+dependencies = [
+ "aes-soft 0.5.0",
+ "aesni 0.8.0",
+ "block-cipher 0.8.0",
 ]
 
 [[package]]
 name = "aes-ctr"
-version = "0.4.0"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "92e60aeefd2a0243bd53a42e92444e039f67c3d7f0382c9813577696e7c10bf3"
+checksum = "64c3b03608ea1c077228520a167cca2514dc7cd8100a81b30a2b38be985234e5"
 dependencies = [
- "aes-soft",
- "aesni",
+ "aes-soft 0.5.0",
+ "aesni 0.9.0",
  "ctr",
  "stream-cipher",
 ]
@@ -110,8 +124,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "86f5007801316299f922a6198d1d09a0bae95786815d066d5880d13f7c45ead1"
 dependencies = [
  "aead",
- "aes",
- "block-cipher",
+ "aes 0.4.0",
+ "block-cipher 0.7.1",
+ "ghash",
+ "subtle 2.3.0",
+]
+
+[[package]]
+name = "aes-gcm"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0301c9e9c443494d970a07885e8cf3e587bae8356a1d5abd0999068413f7205f"
+dependencies = [
+ "aead",
+ "aes 0.5.0",
+ "block-cipher 0.8.0",
  "ghash",
  "subtle 2.3.0",
 ]
@@ -122,30 +149,64 @@ version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4925647ee64e5056cf231608957ce7c81e12d6d6e316b9ce1404778cc1d35fa7"
 dependencies = [
- "block-cipher",
+ "block-cipher 0.7.1",
  "byteorder",
  "opaque-debug 0.2.3",
 ]
 
+[[package]]
+name = "aes-soft"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "63dd91889c49327ad7ef3b500fd1109dbd3c509a03db0d4a9ce413b79f575cb6"
+dependencies = [
+ "block-cipher 0.8.0",
+ "byteorder",
+ "opaque-debug 0.3.0",
+]
+
 [[package]]
 name = "aesni"
 version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d050d39b0b7688b3a3254394c3e30a9d66c41dcf9b05b0e2dbdc623f6505d264"
 dependencies = [
- "block-cipher",
+ "block-cipher 0.7.1",
  "opaque-debug 0.2.3",
+]
+
+[[package]]
+name = "aesni"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0a6fe808308bb07d393e2ea47780043ec47683fcf19cf5efc8ca51c50cc8c68a"
+dependencies = [
+ "block-cipher 0.8.0",
+ "opaque-debug 0.3.0",
+]
+
+[[package]]
+name = "aesni"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b6a4d655ae633a96d0acaf0fd7e76aafb8ca5732739bba37aac6f882c8fce656"
+dependencies = [
+ "block-cipher 0.8.0",
+ "opaque-debug 0.3.0",
  "stream-cipher",
 ]
 
 [[package]]
 name = "ahash"
-version = "0.2.18"
+version = "0.3.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6f33b5018f120946c1dcf279194f238a9f146725593ead1c08fa47ff22b0b5d3"
-dependencies = [
- "const-random",
-]
+checksum = "e8fd72866655d1904d6b0997d0b07ba561047d070fbe29de039031c641b61217"
+
+[[package]]
+name = "ahash"
+version = "0.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0adac150c2dd5a9c864d054e07bda5e6bc010cd10036ea5f17e82a2f5867f735"
 
 [[package]]
 name = "aho-corasick"
@@ -176,9 +237,9 @@ dependencies = [
 
 [[package]]
 name = "anyhow"
-version = "1.0.32"
+version = "1.0.33"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6b602bfe940d21c130f3895acd65221e8a61270debe89d628b9cb4e3ccb8569b"
+checksum = "a1fd36ffbb1fb7c834eac128ea8d0e310c5aeb635548f9d58861e1308d46e71c"
 
 [[package]]
 name = "arbitrary"
@@ -226,29 +287,27 @@ dependencies = [
  "syn",
 ]
 
-[[package]]
-name = "assert_matches"
-version = "1.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7deb0a829ca7bcfaf5da70b073a8d128619259a7be8216a355e23f00763059e5"
-
 [[package]]
 name = "async-tls"
-version = "0.8.0"
+version = "0.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "df097e3f506bec0e1a24f06bb3c962c228f36671de841ff579cb99f371772634"
+checksum = "d85a97c4a0ecce878efd3f945f119c78a646d8975340bca0398f9bb05c30cc52"
 dependencies = [
- "futures 0.3.5",
+ "futures-core",
+ "futures-io",
  "rustls",
  "webpki",
- "webpki-roots 0.19.0",
+ "webpki-roots",
 ]
 
 [[package]]
 name = "atomic"
-version = "0.4.6"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64f46ca51dca4837f1520754d1c8c36636356b81553d928dc9c177025369a06e"
+checksum = "c3410529e8288c463bedb5930f82833bc0c90e5d2fe639a56582a4d09220b281"
+dependencies = [
+ "autocfg 1.0.1",
+]
 
 [[package]]
 name = "atomic-option"
@@ -292,14 +351,14 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
 
 [[package]]
 name = "backtrace"
-version = "0.3.50"
+version = "0.3.51"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "46254cf2fdcdf1badb5934448c1bcbe046a56537b3987d96c51a7afc5d03f293"
+checksum = "ec1931848a574faa8f7c71a12ea00453ff5effbb5f51afe7f77d7a48cace6ac1"
 dependencies = [
  "addr2line",
  "cfg-if",
  "libc",
- "miniz_oxide",
+ "miniz_oxide 0.4.3",
  "object",
  "rustc-demangle",
 ]
@@ -326,11 +385,17 @@ version = "0.12.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff"
 
+[[package]]
+name = "base64"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
+
 [[package]]
 name = "beacon_chain"
 version = "0.2.0"
 dependencies = [
- "bitvec",
+ "bitvec 0.19.3",
  "bls",
  "bus",
  "derivative",
@@ -343,7 +408,7 @@ dependencies = [
  "eth2_ssz_types",
  "exit-future",
  "fork_choice",
- "futures 0.3.5",
+ "futures 0.3.6",
  "genesis",
  "int_to_bytes",
  "integer-sqrt",
@@ -373,6 +438,7 @@ dependencies = [
  "smallvec 1.4.2",
  "state_processing",
  "store",
+ "task_executor",
  "tempfile",
  "tokio 0.2.22",
  "tree_hash",
@@ -382,21 +448,22 @@ dependencies = [
 
 [[package]]
 name = "beacon_node"
-version = "0.2.13"
+version = "0.3.0"
 dependencies = [
  "beacon_chain",
  "clap",
  "clap_utils",
  "client",
  "ctrlc",
- "dirs",
+ "directory",
+ "dirs 3.0.1",
  "environment",
  "eth2_config",
  "eth2_libp2p",
  "eth2_ssz",
  "eth2_testnet_config",
  "exit-future",
- "futures 0.3.5",
+ "futures 0.3.6",
  "genesis",
  "hex 0.4.2",
  "hyper 0.13.8",
@@ -409,10 +476,21 @@ dependencies = [
  "slog-async",
  "slog-term",
  "store",
+ "task_executor",
  "tokio 0.2.22",
  "types",
 ]
 
+[[package]]
+name = "bincode"
+version = "1.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f30d3a39baa26f9651f17b375061f3233dde33424a8b72b0dbe93a68a0bc896d"
+dependencies = [
+ "byteorder",
+ "serde",
+]
+
 [[package]]
 name = "bitflags"
 version = "0.9.1"
@@ -432,7 +510,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "41262f11d771fd4a61aa3ce019fca363b4b6c282fca9da2a31186d3965a47a5c"
 dependencies = [
  "either",
- "radium",
+ "radium 0.3.0",
+]
+
+[[package]]
+name = "bitvec"
+version = "0.19.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "11593270830d9b037fbead730bb0c05ef6fbf6be55537a1e8e5892edef7e1f03"
+dependencies = [
+ "funty",
+ "radium 0.5.3",
+ "tap",
+ "wyz",
 ]
 
 [[package]]
@@ -501,6 +591,15 @@ dependencies = [
  "generic-array 0.14.4",
 ]
 
+[[package]]
+name = "block-cipher"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f337a3e6da609650eb74e02bc9fac7b735049f7623ab12f2e4c719316fcc7e80"
+dependencies = [
+ "generic-array 0.14.4",
+]
+
 [[package]]
 name = "block-padding"
 version = "0.1.5"
@@ -525,12 +624,12 @@ dependencies = [
  "eth2_hashing",
  "eth2_ssz",
  "ethereum-types",
- "hex 0.3.2",
+ "hex 0.4.2",
  "milagro_bls",
  "rand 0.7.3",
  "serde",
  "serde_derive",
- "serde_hex",
+ "serde_utils",
  "tree_hash",
  "zeroize",
 ]
@@ -547,14 +646,14 @@ dependencies = [
 
 [[package]]
 name = "boot_node"
-version = "0.2.13"
+version = "0.3.0"
 dependencies = [
  "beacon_node",
  "clap",
  "eth2_libp2p",
  "eth2_ssz",
  "eth2_testnet_config",
- "futures 0.3.5",
+ "futures 0.3.6",
  "hex 0.4.2",
  "log 0.4.11",
  "logging",
@@ -586,6 +685,16 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "buf_redux"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f"
+dependencies = [
+ "memchr",
+ "safemem",
+]
+
 [[package]]
 name = "bumpalo"
 version = "3.4.0"
@@ -704,9 +813,9 @@ checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
 
 [[package]]
 name = "chacha20"
-version = "0.4.3"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "086c0f07ac275808b7bf9a39f2fd013aae1498be83632814c8c4e0bd53f2dc58"
+checksum = "244fbce0d47e97e8ef2f63b81d5e05882cb518c68531eb33194990d7b7e85845"
 dependencies = [
  "stream-cipher",
  "zeroize",
 ]
@@ -714,9 +823,9 @@ dependencies = [
 
 [[package]]
 name = "chacha20poly1305"
-version = "0.5.1"
+version = "0.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "18b0c90556d8e3fec7cf18d84a2f53d27b21288f2fe481b830fadcf809e48205"
+checksum = "9bf18d374d66df0c05cdddd528a7db98f78c28e2519b120855c4f84c5027b1f5"
 dependencies = [
  "aead",
  "chacha20",
@@ -727,9 +836,9 @@ dependencies = [
 
 [[package]]
 name = "chrono"
-version = "0.4.18"
+version = "0.4.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d021fddb7bd3e734370acfa4a83f34095571d8570c039f1420d77540f68d5772"
+checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73"
 dependencies = [
  "libc",
  "num-integer",
@@ -758,7 +867,7 @@ name = "clap_utils"
 version = "0.1.0"
 dependencies = [
  "clap",
- "dirs",
+ "dirs 3.0.1",
  "eth2_ssz",
  "eth2_testnet_config",
  "hex 0.4.2",
@@ -771,22 +880,24 @@ version = "0.2.0"
 dependencies = [
  "beacon_chain",
  "bus",
- "dirs",
+ "directory",
+ "dirs 3.0.1",
  "environment",
  "error-chain",
  "eth1",
  "eth2_config",
  "eth2_libp2p",
  "eth2_ssz",
- "futures 0.3.5",
+ "futures 0.3.6",
  "genesis",
+ "http_api",
+ "http_metrics",
  "lazy_static",
  "lighthouse_metrics",
  "network",
  "parking_lot 0.11.0",
  "prometheus",
  "reqwest",
- "rest_api",
  "serde",
  "serde_derive",
  "serde_yaml",
@@ -795,6 +906,7 @@ dependencies = [
  "sloggers",
  "slot_clock",
  "store",
+ "task_executor",
  "time 0.2.22",
  "timer",
  "tokio 0.2.22",
@@ -868,26 +980,6 @@ dependencies = [
  "wasm-bindgen",
 ]
 
-[[package]]
-name = "const-random"
-version = "0.1.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f1af9ac737b2dd2d577701e59fd09ba34822f6f2ebdb30a7647405d9e55e16a"
-dependencies = [
- "const-random-macro",
- "proc-macro-hack",
-]
-
-[[package]]
-name = "const-random-macro"
-version = "0.1.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "25e4c606eb459dd29f7c57b2e0879f2b6f14ee130918c2b78ccb58a9624e6c7a"
-dependencies = [
- "getrandom",
- "proc-macro-hack",
-]
-
 [[package]]
 name = "const_fn"
 version = "0.4.2"
@@ -1065,6 +1157,16 @@ dependencies = [
  "subtle 2.3.0",
 ]
 
+[[package]]
+name = "crypto-mac"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "58bcd97a54c7ca5ce2f6eb16f6bede5b0ab5f0055fedc17d2f0b4466e21671ca"
+dependencies = [
+ "generic-array 0.14.4",
+ "subtle 2.3.0",
+]
+
 [[package]]
 name = "csv"
 version = "1.1.3"
@@ -1089,9 +1191,9 @@ dependencies = [
 
 [[package]]
 name = "ctr"
-version = "0.4.0"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a3592740fd55aaf61dd72df96756bd0d11e6037b89dcf30ae2e1895b267692be"
+checksum = "cc03dee3a2843ac6eb4b5fb39cfcf4cb034d078555d1f4a0afbed418b822f3c2"
 dependencies = [
  "stream-cipher",
 ]
@@ -1106,19 +1208,6 @@ dependencies = [
  "winapi 0.3.9",
 ]
 
-[[package]]
-name = "curve25519-dalek"
-version = "2.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5d85653f070353a16313d0046f173f70d1aadd5b42600a14de626f0dfb3473a5"
-dependencies = [
- "byteorder",
- "digest 0.8.1",
- "rand_core 0.5.1",
- "subtle 2.3.0",
- "zeroize",
-]
-
 [[package]]
 name = "curve25519-dalek"
 version = "3.0.0"
@@ -1202,9 +1291,9 @@ dependencies = [
 
 [[package]]
 name = "derive_more"
-version = "0.99.10"
+version = "0.99.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1dcfabdab475c16a93d669dddfc393027803e347d09663f524447f642fbb84ba"
+checksum = "41cb0e6161ad61ed084a36ba71fbba9e3ac5aee3606fb607fe08da6acbcf3d8c"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -1229,6 +1318,16 @@ dependencies = [
  "generic-array 0.14.4",
 ]
 
+[[package]]
+name = "directory"
+version = "0.1.0"
+dependencies = [
+ "clap",
+ "clap_utils",
+ "dirs 3.0.1",
+ "eth2_testnet_config",
+]
+
 [[package]]
 name = "dirs"
 version = "2.0.2"
@@ -1239,6 +1338,15 @@ dependencies = [
  "dirs-sys",
 ]
 
+[[package]]
+name = "dirs"
+version = "3.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "142995ed02755914747cc6ca76fc7e4583cd18578746716d0508ea6ed558b9ff"
+dependencies = [
+ "dirs-sys",
+]
+
 [[package]]
 name = "dirs-sys"
 version = "0.3.5"
@@ -1258,16 +1366,16 @@ checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0"
 
 [[package]]
 name = "discv5"
-version = "0.1.0-alpha.12"
+version = "0.1.0-alpha.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "65a5e4a22a4c1d7142f54ac068b8c6252610ed0ebf00264f39eccee7f88fe4b9"
+checksum = "051e80f35af336a84e3960df036eea52366daee461f0b6ee2feee15c6d101718"
 dependencies = [
- "aes-gcm",
+ "aes-gcm 0.6.0",
  "arrayvec",
  "digest 0.8.1",
  "enr",
  "fnv",
- "futures 0.3.5",
+ "futures 0.3.6",
  "hex 0.4.2",
  "hkdf",
  "lazy_static",
@@ -1308,7 +1416,7 @@ version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d"
 dependencies = [
- "curve25519-dalek 3.0.0",
+ "curve25519-dalek",
  "ed25519",
  "rand 0.7.3",
  "serde",
@@ -1356,9 +1464,9 @@ dependencies = [
 
 [[package]]
 name = "enr"
-version = "0.1.3"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a3137b4854534673ea350751670c6fe53920394a328ba9ce4d9acabd4f60a586"
+checksum = "7867d4637e09af7576d9399d02c45784daf264fe6bb0713496a53c51a9154e21"
 dependencies = [
  "base64 0.12.3",
  "bs58",
@@ -1369,7 +1477,7 @@ dependencies = [
  "rand 0.7.3",
  "rlp",
  "serde",
- "tiny-keccak 2.0.2",
+ "sha3",
  "zeroize",
 ]
 
@@ -1391,13 +1499,10 @@ name = "environment"
 version = "0.1.2"
 dependencies = [
  "ctrlc",
- "discv5",
  "eth2_config",
  "eth2_testnet_config",
  "exit-future",
- "futures 0.3.5",
- "lazy_static",
- "lighthouse_metrics",
+ "futures 0.3.6",
  "logging",
  "parking_lot 0.11.0",
  "slog",
@@ -1405,6 +1510,7 @@ dependencies = [
  "slog-json",
  "slog-term",
  "sloggers",
+ "task_executor",
  "tokio 0.2.22",
  "types",
 ]
@@ -1428,7 +1534,7 @@ dependencies = [
  "eth2_hashing",
  "eth2_ssz",
  "eth2_ssz_derive",
- "futures 0.3.5",
+ "futures 0.3.6",
  "hex 0.4.2",
  "lazy_static",
  "libflate",
@@ -1441,6 +1547,7 @@ dependencies = [
  "slog",
  "sloggers",
  "state_processing",
+ "task_executor",
  "tokio 0.2.22",
  "toml",
  "tree_hash",
@@ -1453,13 +1560,35 @@ name = "eth1_test_rig"
 version = "0.2.0"
 dependencies = [
  "deposit_contract",
- "futures 0.3.5",
+ "futures 0.3.6",
  "serde_json",
  "tokio 0.2.22",
  "types",
  "web3",
 ]
 
+[[package]]
+name = "eth2"
+version = "0.1.0"
+dependencies = [
+ "account_utils",
+ "bytes 0.5.6",
+ "eth2_keystore",
+ "eth2_libp2p",
+ "hex 0.4.2",
+ "libsecp256k1",
+ "procinfo",
+ "proto_array",
+ "psutil",
+ "reqwest",
+ "ring",
+ "serde",
+ "serde_json",
+ "serde_utils",
+ "types",
+ "zeroize",
+]
+
 [[package]]
 name = "eth2_config"
 version = "0.2.0"
@@ -1485,7 +1614,7 @@ dependencies = [
 name = "eth2_interop_keypairs"
 version = "0.2.0"
 dependencies = [
- "base64 0.12.3",
+ "base64 0.13.0",
  "bls",
  "eth2_hashing",
  "hex 0.4.2",
@@ -1517,10 +1646,10 @@ dependencies = [
  "eth2_key_derivation",
  "eth2_ssz",
  "hex 0.4.2",
- "hmac 0.8.1",
- "pbkdf2 0.4.0",
+ "hmac 0.9.0",
+ "pbkdf2 0.5.0",
  "rand 0.7.3",
- "scrypt",
+ "scrypt 0.4.1",
  "serde",
  "serde_json",
  "serde_repr",
@@ -1534,17 +1663,17 @@ dependencies = [
 name = "eth2_libp2p"
 version = "0.2.0"
 dependencies = [
- "base64 0.12.3",
- "dirs",
+ "base64 0.13.0",
+ "directory",
+ "dirs 3.0.1",
  "discv5",
- "environment",
  "error-chain",
  "eth2_ssz",
  "eth2_ssz_derive",
  "eth2_ssz_types",
  "exit-future",
  "fnv",
- "futures 0.3.5",
+ "futures 0.3.6",
  "hashset_delay",
  "hex 0.4.2",
  "lazy_static",
@@ -1560,10 +1689,10 @@ dependencies = [
  "sha2 0.9.1",
  "slog",
  "slog-async",
- "slog-stdlog",
  "slog-term",
  "smallvec 1.4.2",
  "snap",
+ "task_executor",
  "tempdir",
  "tiny-keccak 2.0.2",
  "tokio 0.2.22",
@@ -1599,7 +1728,7 @@ dependencies = [
  "eth2_ssz",
  "serde",
  "serde_derive",
- "serde_hex",
+ "serde_utils",
  "tree_hash",
  "tree_hash_derive",
  "typenum",
@@ -1691,7 +1820,7 @@ version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e43f2f1833d64e33f15592464d6fdd70f349dda7b1a53088eb83cd94014008c5"
 dependencies = [
- "futures 0.3.5",
+ "futures 0.3.6",
 ]
 
 [[package]]
@@ -1755,15 +1884,15 @@ checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d"
 
 [[package]]
 name = "flate2"
-version = "1.0.17"
+version = "1.0.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "766d0e77a2c1502169d4a93ff3b8c15a71fd946cd0126309752104e5f3c46d94"
+checksum = "2cfff41391129e0a856d6d822600b8d71179d46879e310417eb9c762eb178b42"
 dependencies = [
  "cfg-if",
  "crc32fast",
  "libc",
  "libz-sys",
- "miniz_oxide",
+ "miniz_oxide 0.3.7",
 ]
 
 [[package]]
@@ -1826,16 +1955,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7"
 
 [[package]]
-name = "futures"
-version = "0.1.29"
+name = "funty"
+version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1b980f2816d6ee8673b6517b52cb0e808a180efc92e5c19d02cdda79066703ef"
+checksum = "0ba62103ce691c2fd80fbae2213dfdda9ce60804973ac6b6e97de818ea7f52c8"
 
 [[package]]
 name = "futures"
-version = "0.3.5"
+version = "0.1.30"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e05b85ec287aac0dc34db7d4a569323df697f9c55b99b15d6b4ef8cde49f613"
+checksum = "4c7e4c2612746b0df8fed4ce0c69156021b704c9aefa360311c04e6e9e002eed"
+
+[[package]]
+name = "futures"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5d8e3078b7b2a8a671cb7a3d17b4760e4181ea243227776ba83fd043b4ca034e"
 dependencies = [
  "futures-channel",
  "futures-core",
@@ -1848,9 +1983,9 @@ dependencies = [
 
 [[package]]
 name = "futures-channel"
-version = "0.3.5"
+version = "0.3.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f366ad74c28cca6ba456d95e6422883cfb4b252a83bed929c83abfdbbf2967d5"
+checksum = "a7a4d35f7401e948629c9c3d6638fb9bf94e0b2121e96c3b428cc4e631f3eb74"
 dependencies = [
  "futures-core",
  "futures-sink",
 ]
 
 [[package]]
 name = "futures-core"
-version = "0.3.5"
+version = "0.3.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "59f5fff90fd5d971f936ad674802482ba441b6f09ba5e15fd8b39145582ca399"
+checksum = "d674eaa0056896d5ada519900dbf97ead2e46a7b6621e8160d79e2f2e1e2784b"
 
 [[package]]
 name = "futures-cpupool"
@@ -1868,15 +2003,15 @@ version = "0.1.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4"
 dependencies = [
- "futures 0.1.29",
+ "futures 0.1.30",
  "num_cpus",
 ]
 
 [[package]]
 name = "futures-executor"
-version = "0.3.5"
+version = "0.3.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "10d6bb888be1153d3abeb9006b11b02cf5e9b209fda28693c31ae1e4e012e314"
+checksum = "cc709ca1da6f66143b8c9bec8e6260181869893714e9b5a490b169b0414144ab"
 dependencies = [
  "futures-core",
  "futures-task",
@@ -1886,15 +2021,15 @@ dependencies = [
 
 [[package]]
 name = "futures-io"
-version = "0.3.5"
+version = "0.3.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "de27142b013a8e869c14957e6d2edeef89e97c289e69d042ee3a49acd8b51789"
+checksum = "5fc94b64bb39543b4e432f1790b6bf18e3ee3b74653c5449f63310e9a74b123c"
 
 [[package]]
 name = "futures-macro"
-version = "0.3.5"
+version = "0.3.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d0b5a30a4328ab5473878237c447333c093297bded83a4983d10f4deea240d39"
+checksum = "f57ed14da4603b2554682e9f2ff3c65d7567b53188db96cb71538217fc64581b"
 dependencies = [
  "proc-macro-hack",
  "proc-macro2",
@@ -1904,15 +2039,15 @@ dependencies = [
 
 [[package]]
 name = "futures-sink"
-version = "0.3.5"
+version = "0.3.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3f2032893cb734c7a05d85ce0cc8b8c4075278e93b24b66f9de99d6eb0fa8acc"
+checksum = "0d8764258ed64ebc5d9ed185cf86a95db5cac810269c5d20ececb32e0088abbd"
 
 [[package]]
 name = "futures-task"
-version = "0.3.5"
+version = "0.3.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bdb66b5f09e22019b1ab0830f7785bcea8e7a42148683f99214f73f8ec21a626"
+checksum = "4dd26820a9f3637f1302da8bceba3ff33adbe53464b54ca24d4e2d4f1db30f94"
 dependencies = [
  "once_cell",
 ]
@@ -1925,11 +2060,11 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c"
 
 [[package]]
 name = "futures-util"
-version = "0.3.5"
+version = "0.3.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8764574ff08b701a084482c3c7031349104b07ac897393010494beaa18ce32c6"
+checksum = "8a894a0acddba51a2d49a6f4263b1e64b8c579ece8af50fa86503d52cd1eea34"
 dependencies = [
- "futures 0.1.29",
+ "futures 0.1.30",
  "futures-channel",
  "futures-core",
  "futures-io",
@@ -1951,7 +2086,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ce54d63f8b0c75023ed920d46fd71d0cbbb830b0ee012726b5b4f506fb6dea5b"
 dependencies = [
  "bytes 0.5.6",
- "futures 0.3.5",
+ "futures 0.3.6",
  "memchr",
  "pin-project",
 ]
@@ -1991,7 +2126,7 @@ dependencies = [
  "eth2_hashing",
  "eth2_ssz",
  "exit-future",
- "futures 0.3.5",
+ "futures 0.3.6",
  "int_to_bytes",
  "merkle_proof",
  "parking_lot 0.11.0",
@@ -2090,7 +2225,7 @@ dependencies = [
  "byteorder",
  "bytes 0.4.12",
  "fnv",
- "futures 0.1.29",
+ "futures 0.1.30",
  "http 0.1.21",
  "indexmap",
  "log 0.4.11",
@@ -2126,12 +2261,12 @@ checksum = "d36fab90f82edc3c747f9d438e06cf0a491055896f2a279638bb5beed6c40177"
 
 [[package]]
 name = "hashbrown"
-version = "0.6.3"
+version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e6073d0ca812575946eb5f35ff68dbe519907b25c42530389ff946dc84c6ead"
+checksum = "e91b62f79061a0bc2e046024cb7ba44b08419ed238ecbd9adbd787434b9e8c25"
 dependencies = [
- "ahash",
- "autocfg 0.1.7",
+ "ahash 0.3.8",
+ "autocfg 1.0.1",
 ]
 
 [[package]]
@@ -2139,15 +2274,52 @@ name = "hashbrown"
 version = "0.9.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04"
+dependencies = [
+ "ahash 0.4.5",
+]
+
+[[package]]
+name = "hashlink"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d99cf782f0dc4372d26846bec3de7804ceb5df083c2d4462c0b8d2330e894fa8"
+dependencies = [
+ "hashbrown 0.9.1",
+]
 
 [[package]]
 name = "hashset_delay"
 version = "0.2.0"
 dependencies = [
- "futures 0.3.5",
+ "futures 0.3.6",
  "tokio 0.2.22",
 ]
 
+[[package]]
+name = "headers"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ed18eb2459bf1a09ad2d6b1547840c3e5e62882fa09b9a6a20b1de8e3228848f"
+dependencies = [
+ "base64 0.12.3",
+ "bitflags 1.2.1",
+ "bytes 0.5.6",
+ "headers-core",
+ "http 0.2.1",
+ "mime 0.3.16",
+ "sha-1 0.8.2",
+ "time 0.1.44",
+]
+
+[[package]]
+name = "headers-core"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429"
+dependencies = [
+ "http 0.2.1",
+]
+
 [[package]]
 name = "heck"
 version = "0.3.1"
@@ -2159,9 +2331,9 @@ dependencies = [
 
 [[package]]
 name = "hermit-abi"
-version = "0.1.16"
+version = "0.1.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4c30f6d0bc6b00693347368a67d41b58f2fb851215ff1da49e90fe2c5c667151"
+checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8"
 dependencies = [
  "libc",
 ]
@@ -2214,6 +2386,16 @@ dependencies = [
  "digest 0.9.0",
 ]
 
+[[package]]
+name = "hmac"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "deae6d9dbb35ec2c502d62b8f7b1c000a0822c3b0794ba36b3149c0a1c840dff"
+dependencies = [
+ "crypto-mac 0.9.1",
+ "digest 0.9.0",
+]
+
 [[package]]
 name = "hmac-drbg"
 version = "0.2.0"
@@ -2254,7 +2436,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6741c859c1b2463a423a1dbce98d418e6c3c3fc720fb0d45528657320920292d"
 dependencies = [
  "bytes 0.4.12",
- "futures 0.1.29",
+ "futures 0.1.30",
  "http 0.1.21",
  "tokio-buf",
 ]
@@ -2269,6 +2451,58 @@ dependencies = [
  "http 0.2.1",
 ]
 
+[[package]]
+name = "http_api"
+version = "0.1.0"
+dependencies = [
+ "beacon_chain",
+ "discv5",
+ "environment",
+ "eth1",
+ "eth2",
+ "eth2_libp2p",
+ "fork_choice",
+ "hex 0.4.2",
+ "lazy_static",
+ "lighthouse_metrics",
+ "lighthouse_version",
+ "network",
+ "parking_lot 0.11.0",
+ "serde",
+ "slog",
+ "slot_clock",
+ "state_processing",
+ "store",
+ "tokio 0.2.22",
+ "tree_hash",
+ "types",
+ "warp",
+ "warp_utils",
+]
+
+[[package]]
+name = "http_metrics"
+version = "0.1.0"
+dependencies = [
+ "beacon_chain",
+ "environment",
+ "eth2",
+ "eth2_libp2p",
+ "lazy_static",
+ "lighthouse_metrics",
+ "lighthouse_version",
+ "prometheus",
+ "reqwest",
+ "serde",
+ "slog",
+ "slot_clock",
+ "store",
+ "tokio 0.2.22",
+ "types",
+ "warp",
+ "warp_utils",
+]
+
 [[package]]
 name = "httparse"
 version = "1.3.4"
@@ -2316,7 +2550,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9dbe6ed1438e1f8ad955a4701e9a944938e9519f6888d12d8558b645e247d5f6"
 dependencies = [
  "bytes 0.4.12",
- "futures 0.1.29",
+ "futures 0.1.30",
  "futures-cpupool",
  "h2 0.1.26",
  "http 0.1.21",
@@ -2370,7 +2604,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3a800d6aa50af4b5850b2b0f659625ce9504df908e9733b635720483be26174f"
 dependencies = [
  "bytes 0.4.12",
- "futures 0.1.29",
+ "futures 0.1.30",
  "hyper 0.12.35",
  "native-tls",
  "tokio-io",
@@ -2461,6 +2695,15 @@ dependencies = [
  "hashbrown 0.9.1",
 ]
 
+[[package]]
+name = "input_buffer"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "19a8a95243d5a0398cae618ec29477c6e3cb631152be5c19481f80bc71559754"
+dependencies = [
+ "bytes 0.5.6",
+]
+
 [[package]]
 name = "instant"
 version = "0.1.7"
@@ -2542,7 +2785,7 @@ version = "14.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a0747307121ffb9703afd93afbd0fb4f854c38fb873f2c8b90e0e902f27c7b62"
 dependencies = [
- "futures 0.1.29",
+ "futures 0.1.30",
  "log 0.4.11",
  "serde",
  "serde_derive",
@@ -2588,19 +2831,20 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
 
 [[package]]
 name = "lcli"
-version = "0.2.13"
+version = "0.3.0"
 dependencies = [
  "bls",
  "clap",
  "clap_utils",
  "deposit_contract",
- "dirs",
+ "directory",
+ "dirs 3.0.1",
  "environment",
  "eth2_keystore",
  "eth2_libp2p",
  "eth2_ssz",
  "eth2_testnet_config",
- "futures 0.3.5",
+ "futures 0.3.6",
  "genesis",
  "hex 0.4.2",
  "lighthouse_version",
@@ -2631,9 +2875,9 @@ dependencies = [
 
 [[package]]
 name = "leveldb-sys"
-version = "2.0.7"
+version = "2.0.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "76c44b9b785ca705d58190ebd432a4e7edb900eadf236ff966d7d1307e482e87"
+checksum = "618aee5ba3d32cb8456420a9a454aa71c1af5b3e9c7a2ec20a0f3cbbe47246cb"
 dependencies = [
  "cmake",
  "libc",
@@ -2642,9 +2886,9 @@ dependencies = [
 
 [[package]]
 name = "libc"
-version = "0.2.77"
+version = "0.2.79"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f2f96b10ec2560088a8e76961b00d47107b3a625fecb76dedb29ee7ccbf98235"
+checksum = "2448f6066e80e3bfc792e9c98bf705b4b0fc6e8ef5b43e5889aff0eaa9c58743"
 
 [[package]]
 name = "libflate"
@@ -2672,14 +2916,14 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a"
 
 [[package]]
 name = "libp2p"
-version = "0.25.0"
-source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f"
+version = "0.29.0"
+source = "git+https://github.com/sigp/rust-libp2p?rev=5a9f0819af3990cfefad528e957297af596399b4#5a9f0819af3990cfefad528e957297af596399b4"
 dependencies = [
  "atomic",
  "bytes 0.5.6",
- "futures 0.3.5",
+ "futures 0.3.6",
  "lazy_static",
- "libp2p-core 0.21.0",
+ "libp2p-core 0.22.2",
  "libp2p-core-derive",
  "libp2p-dns",
  "libp2p-gossipsub",
@@ -2690,46 +2934,13 @@ dependencies = [
  "libp2p-tcp",
  "libp2p-websocket",
  "multihash",
- "parity-multiaddr 0.9.1",
- "parking_lot 0.10.2",
+ "parity-multiaddr 0.9.3",
+ "parking_lot 0.11.0",
  "pin-project",
  "smallvec 1.4.2",
  "wasm-timer",
 ]
 
-[[package]]
-name = "libp2p-core"
-version = "0.21.0"
-source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f"
-dependencies = [
- "asn1_der",
- "bs58",
- "ed25519-dalek",
- "either",
- "fnv",
- "futures 0.3.5",
- "futures-timer",
- "lazy_static",
- "libsecp256k1",
- "log 0.4.11",
- "multihash",
- "multistream-select 0.8.2 (git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f)",
- "parity-multiaddr 0.9.1",
- "parking_lot 0.10.2",
- "pin-project",
- "prost",
- "prost-build",
- "rand 0.7.3",
- "ring",
- "rw-stream-sink",
- "sha2 0.8.2",
- "smallvec 1.4.2",
- "thiserror",
- "unsigned-varint 0.4.0",
- "void",
- "zeroize",
-]
-
 [[package]]
 name = "libp2p-core"
 version = "0.22.1"
@@ -2741,13 +2952,13 @@ dependencies = [
  "ed25519-dalek",
  "either",
  "fnv",
- "futures 0.3.5",
+ "futures 0.3.6",
  "futures-timer",
  "lazy_static",
  "libsecp256k1",
  "log 0.4.11",
  "multihash",
- "multistream-select 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "multistream-select 0.8.2",
  "parity-multiaddr 0.9.2",
  "parking_lot 0.10.2",
  "pin-project",
@@ -2764,10 +2975,43 @@ dependencies = [
  "zeroize",
 ]
 
+[[package]]
+name = "libp2p-core"
+version = "0.22.2"
+source = "git+https://github.com/sigp/rust-libp2p?rev=5a9f0819af3990cfefad528e957297af596399b4#5a9f0819af3990cfefad528e957297af596399b4"
+dependencies = [
+ "asn1_der",
+ "bs58",
+ "ed25519-dalek",
+ "either",
+ "fnv",
+ "futures 0.3.6",
+ "futures-timer",
+ "lazy_static",
+ "libsecp256k1",
+ "log 0.4.11",
+ "multihash",
+ "multistream-select 0.8.3",
+ "parity-multiaddr 0.9.3",
+ "parking_lot 0.11.0",
+ "pin-project",
+ "prost",
+ "prost-build",
+ "rand 0.7.3",
+ "ring",
+ "rw-stream-sink",
+ "sha2 0.9.1",
+ "smallvec 1.4.2",
+ "thiserror",
+ "unsigned-varint 0.5.1",
+ "void",
+ "zeroize",
+]
+
 [[package]]
 name = "libp2p-core-derive"
 version = "0.20.2"
-source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f"
+source = "git+https://github.com/sigp/rust-libp2p?rev=5a9f0819af3990cfefad528e957297af596399b4#5a9f0819af3990cfefad528e957297af596399b4"
 dependencies = [
  "quote",
  "syn",
@@ -2775,27 +3019,27 @@ dependencies = [
 
 [[package]]
 name = "libp2p-dns"
-version = "0.21.0"
-source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f"
+version = "0.22.0"
+source = "git+https://github.com/sigp/rust-libp2p?rev=5a9f0819af3990cfefad528e957297af596399b4#5a9f0819af3990cfefad528e957297af596399b4"
 dependencies = [
- "futures 0.3.5",
- "libp2p-core 0.21.0",
+ "futures 0.3.6",
+ "libp2p-core 0.22.2",
  "log 0.4.11",
 ]
 
 [[package]]
 name = "libp2p-gossipsub"
-version = "0.22.0"
-source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f"
+version = "0.22.1"
+source = "git+https://github.com/sigp/rust-libp2p?rev=5a9f0819af3990cfefad528e957297af596399b4#5a9f0819af3990cfefad528e957297af596399b4"
 dependencies = [
  "base64 0.12.3",
  "byteorder",
  "bytes 0.5.6",
  "fnv",
- "futures 0.3.5",
+ "futures 0.3.6",
  "futures_codec",
  "hex_fmt",
- "libp2p-core 0.21.0",
+ "libp2p-core 0.22.2",
  "libp2p-swarm",
  "log 0.4.11",
  "prost",
@@ -2810,10 +3054,10 @@ dependencies = [
 [[package]]
 name = "libp2p-identify"
 version = "0.22.0"
-source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f"
+source = "git+https://github.com/sigp/rust-libp2p?rev=5a9f0819af3990cfefad528e957297af596399b4#5a9f0819af3990cfefad528e957297af596399b4"
 dependencies = [
- "futures 0.3.5",
- "libp2p-core 0.21.0",
+ "futures 0.3.6",
+ "libp2p-core 0.22.2",
  "libp2p-swarm",
  "log 0.4.11",
  "prost",
@@ -2824,34 +3068,34 @@ dependencies = [
 
 [[package]]
 name = "libp2p-mplex"
-version = "0.21.0"
-source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f"
+version = "0.23.0"
+source = "git+https://github.com/sigp/rust-libp2p?rev=5a9f0819af3990cfefad528e957297af596399b4#5a9f0819af3990cfefad528e957297af596399b4"
 dependencies = [
  "bytes 0.5.6",
  "fnv",
- "futures 0.3.5",
+ "futures 0.3.6",
  "futures_codec",
- "libp2p-core 0.21.0",
+ "libp2p-core 0.22.2",
  "log 0.4.11",
- "parking_lot 0.10.2",
- "unsigned-varint 0.4.0",
+ "parking_lot 0.11.0",
+ "unsigned-varint 0.5.1",
 ]
 
 [[package]]
 name = "libp2p-noise"
-version = "0.23.0"
-source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f"
+version = "0.24.1"
+source = "git+https://github.com/sigp/rust-libp2p?rev=5a9f0819af3990cfefad528e957297af596399b4#5a9f0819af3990cfefad528e957297af596399b4"
 dependencies = [
  "bytes 0.5.6",
- "curve25519-dalek 2.1.0",
- "futures 0.3.5",
+ "curve25519-dalek",
+ "futures 0.3.6",
  "lazy_static",
- "libp2p-core 0.21.0",
+ "libp2p-core 0.22.2",
  "log 0.4.11",
  "prost",
  "prost-build",
  "rand 0.7.3",
- "sha2 0.8.2",
+ "sha2 0.9.1",
  "snow",
  "static_assertions",
  "x25519-dalek",
@@ -2861,11 +3105,11 @@ dependencies = [
 [[package]]
 name = "libp2p-swarm"
 version = "0.22.0"
-source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f"
+source = "git+https://github.com/sigp/rust-libp2p?rev=5a9f0819af3990cfefad528e957297af596399b4#5a9f0819af3990cfefad528e957297af596399b4"
 dependencies = [
  "either",
- "futures 0.3.5",
- "libp2p-core 0.21.0",
+ "futures 0.3.6",
+ "libp2p-core 0.22.2",
  "log 0.4.11",
  "rand 0.7.3",
  "smallvec 1.4.2",
@@ -2875,14 +3119,14 @@ dependencies = [
 
 [[package]]
 name = "libp2p-tcp"
-version = "0.21.0"
-source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f"
+version = "0.22.0"
+source = "git+https://github.com/sigp/rust-libp2p?rev=5a9f0819af3990cfefad528e957297af596399b4#5a9f0819af3990cfefad528e957297af596399b4"
 dependencies = [
- "futures 0.3.5",
+ "futures 0.3.6",
  "futures-timer",
  "get_if_addrs",
  "ipnet",
- "libp2p-core 0.21.0",
+ "libp2p-core 0.22.2",
  "log 0.4.11",
  "socket2",
  "tokio 0.2.22",
@@ -2890,13 +3134,13 @@ dependencies = [
 
 [[package]]
 name = "libp2p-websocket"
-version = "0.22.0"
-source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f"
+version = "0.23.1"
+source = "git+https://github.com/sigp/rust-libp2p?rev=5a9f0819af3990cfefad528e957297af596399b4#5a9f0819af3990cfefad528e957297af596399b4"
 dependencies = [
  "async-tls",
  "either",
- "futures 0.3.5",
- "libp2p-core 0.21.0",
+ "futures 0.3.6",
+ "libp2p-core 0.22.2",
  "log 0.4.11",
  "quicksink",
  "rustls",
@@ -2904,7 +3148,7 @@ dependencies = [
  "soketto",
  "url 2.1.1",
  "webpki",
- "webpki-roots 0.18.0",
+ "webpki-roots",
 ]
 
 [[package]]
@@ -2925,9 +3169,9 @@ dependencies = [
 
 [[package]]
 name = "libsqlite3-sys"
-version = "0.18.0"
+version = "0.20.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e704a02bcaecd4a08b93a23f6be59d0bd79cd161e0963e9499165a0a35df7bd"
+checksum = "64d31059f22935e6c31830db5249ba2b7ecd54fd73a9909286f0a67aa55c2fbd"
 dependencies = [
  "cc",
  "pkg-config",
@@ -2941,13 +3185,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655"
 dependencies = [
  "cc",
+ "libc",
  "pkg-config",
  "vcpkg",
 ]
 
 [[package]]
 name = "lighthouse"
-version = "0.2.13"
+version = "0.3.0"
 dependencies = [
  "account_manager",
  "account_utils",
@@ -2956,12 +3201,14 @@ dependencies = [
  "boot_node",
  "clap",
  "clap_utils",
+ "directory",
  "env_logger",
  "environment",
  "eth2_testnet_config",
- "futures 0.3.5",
+ "futures 0.3.6",
  "lighthouse_version",
  "logging",
+ "slashing_protection",
  "slog",
  "slog-async",
  "slog-term",
@@ -3043,20 +3290,11 @@ dependencies = [
 
 [[package]]
 name = "lru"
-version = "0.5.3"
+version = "0.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "35c456c123957de3a220cd03786e0d86aa542a88b46029973b542f426da6ef34"
+checksum = "111b945ac72ec09eb7bc62a0fbdc3cc6e80555a7245f52a69d3921a75b53b153"
 dependencies = [
- "hashbrown 0.6.3",
-]
-
-[[package]]
-name = "lru-cache"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c"
-dependencies = [
- "linked-hash-map",
+ "hashbrown 0.8.2",
 ]
 
 [[package]]
@@ -3165,9 +3403,18 @@ dependencies = [
 
 [[package]]
 name = "miniz_oxide"
-version = "0.4.2"
+version = "0.3.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c60c0dfe32c10b43a144bad8fc83538c52f58302c92300ea7ec7bf7b38d5a7b9"
+checksum = "791daaae1ed6889560f8c4359194f56648355540573244a5448a83ba1ecc7435"
+dependencies = [
+ "adler32",
+]
+
+[[package]]
+name = "miniz_oxide"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d"
 dependencies = [
  "adler",
  "autocfg 1.0.1",
@@ -3271,16 +3518,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333"
 
 [[package]]
-name = "multistream-select"
-version = "0.8.2"
-source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f"
+name = "multipart"
+version = "0.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8209c33c951f07387a8497841122fc6f712165e3f9bda3e6be4645b58188f676"
 dependencies = [
- "bytes 0.5.6",
- "futures 0.3.5",
+ "buf_redux",
+ "httparse",
  "log 0.4.11",
- "pin-project",
- "smallvec 1.4.2",
- "unsigned-varint 0.4.0",
+ "mime 0.3.16",
+ "mime_guess",
+ "quick-error",
+ "rand 0.6.5",
+ "safemem",
+ "tempfile",
+ "twoway",
 ]
 
 [[package]]
@@ -3290,13 +3542,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c9157e87afbc2ef0d84cc0345423d715f445edde00141c93721c162de35a05e5"
 dependencies = [
  "bytes 0.5.6",
- "futures 0.3.5",
+ "futures 0.3.6",
  "log 0.4.11",
  "pin-project",
  "smallvec 1.4.2",
  "unsigned-varint 0.4.0",
 ]
 
+[[package]]
+name = "multistream-select"
+version = "0.8.3"
+source = "git+https://github.com/sigp/rust-libp2p?rev=5a9f0819af3990cfefad528e957297af596399b4#5a9f0819af3990cfefad528e957297af596399b4"
+dependencies = [
+ "bytes 0.5.6",
+ "futures 0.3.6",
+ "log 0.4.11",
+ "pin-project",
+ "smallvec 1.4.2",
+ "unsigned-varint 0.5.1",
+]
+
 [[package]]
 name = "native-tls"
 version = "0.2.4"
@@ -3331,14 +3596,13 @@ name = "network"
 version = "0.2.0"
 dependencies = [
  "beacon_chain",
- "environment",
  "error-chain",
  "eth2_libp2p",
  "eth2_ssz",
  "eth2_ssz_types",
  "exit-future",
  "fnv",
- "futures 0.3.5",
+ "futures 0.3.6",
  "genesis",
  "get_if_addrs",
  "hashset_delay",
@@ -3352,7 +3616,6 @@ dependencies = [
  "num_cpus",
  "parking_lot 0.11.0",
  "rand 0.7.3",
- "rest_types",
  "rlp",
  "slog",
  "sloggers",
@@ -3360,6 +3623,7 @@ dependencies = [
  "smallvec 1.4.2",
  "state_processing",
  "store",
+ "task_executor",
  "tempfile",
  "tokio 0.2.22",
  "tree_hash",
@@ -3385,10 +3649,10 @@ version = "0.2.0"
 dependencies = [
  "beacon_node",
  "environment",
+ "eth2",
  "eth2_config",
- "futures 0.3.5",
+ "futures 0.3.6",
  "genesis",
- "remote_beacon_node",
  "reqwest",
  "serde",
  "tempdir",
@@ -3566,23 +3830,6 @@ dependencies = [
  "types",
 ]
 
-[[package]]
-name = "parity-multiaddr"
-version = "0.9.1"
-source = "git+https://github.com/sigp/rust-libp2p?rev=03f998022ce2f566a6c6e6c4206bc0ce4d45109f#03f998022ce2f566a6c6e6c4206bc0ce4d45109f"
-dependencies = [
- "arrayref",
- "bs58",
- "byteorder",
- "data-encoding",
- "multihash",
- "percent-encoding 2.1.0",
- "serde",
- "static_assertions",
- "unsigned-varint 0.4.0",
- "url 2.1.1",
-]
-
 [[package]]
 name = "parity-multiaddr"
 version = "0.9.2"
@@ -3601,6 +3848,23 @@ dependencies = [
  "url 2.1.1",
 ]
 
+[[package]]
+name = "parity-multiaddr"
+version = "0.9.3"
+source = "git+https://github.com/sigp/rust-libp2p?rev=5a9f0819af3990cfefad528e957297af596399b4#5a9f0819af3990cfefad528e957297af596399b4"
+dependencies = [
+ "arrayref",
+ "bs58",
+ "byteorder",
+ "data-encoding",
+ "multihash",
+ "percent-encoding 2.1.0",
+ "serde",
+ "static_assertions",
+ "unsigned-varint 0.5.1",
+ "url 2.1.1",
+]
+
 [[package]]
 name = "parity-scale-codec"
 version = "1.3.5"
@@ -3608,7 +3872,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7c740e5fbcb6847058b40ac7e5574766c6388f585e184d769910fe0d3a2ca861"
 dependencies = [
  "arrayvec",
- "bitvec",
+ "bitvec 0.17.4",
  "byte-slice-cast",
  "serde",
 ]
@@ -3708,6 +3972,15 @@ dependencies = [
  "crypto-mac 0.8.0",
 ]
 
+[[package]]
+name = "pbkdf2"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7170d73bf11f39b4ce1809aabc95bf5c33564cdc16fc3200ddda17a5f6e5e48b"
+dependencies = [
+ "crypto-mac 0.9.1",
+]
+
 [[package]]
 name = "percent-encoding"
 version = "1.0.1"
@@ -3732,18 +4005,18 @@ dependencies = [
 
 [[package]]
 name = "pin-project"
-version = "0.4.24"
+version = "0.4.26"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f48fad7cfbff853437be7cf54d7b993af21f53be7f0988cbfe4a51535aa77205"
+checksum = "13fbdfd6bdee3dc9be46452f86af4a4072975899cf8592466668620bebfbcc17"
 dependencies = [
  "pin-project-internal",
 ]
 
 [[package]]
 name = "pin-project-internal"
-version = "0.4.24"
+version = "0.4.26"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "24c6d293bdd3ca5a1697997854c6cf7855e43fb6a0ba1c47af57a5bcafd158ae"
+checksum = "c82fb1329f632c3552cf352d14427d57a511b1cf41db93b3a7d77906a82dcc8e"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -3752,9 +4025,9 @@ dependencies = [
 
 [[package]]
 name = "pin-project-lite"
-version = "0.1.9"
+version = "0.1.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4fe74897791e156a0cd8cce0db31b9b2198e67877316bf3086c3acd187f719f0"
+checksum = "e555d9e657502182ac97b539fb3dae8b79cda19e3e4f8ffb5e8de4f18df93c95"
 
 [[package]]
 name = "pin-utils"
@@ -3786,26 +4059,20 @@ dependencies = [
  "web-sys",
 ]
 
-[[package]]
-name = "podio"
-version = "0.1.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b18befed8bc2b61abc79a457295e7e838417326da1586050b919414073977f19"
-
 [[package]]
 name = "poly1305"
-version = "0.6.0"
+version = "0.6.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9b42192ab143ed7619bf888a7f9c6733a9a2153b218e2cd557cfdb52fbf9bb1"
+checksum = "22ce46de8e53ee414ca4d02bfefac75d8c12fba948b76622a40b4be34dfce980"
 dependencies = [
  "universal-hash",
 ]
 
 [[package]]
 name = "polyval"
-version = "0.4.0"
+version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9a50142b55ab3ed0e9f68dfb3709f1d90d29da24e91033f28b96330643107dc"
+checksum = "a5884790f1ce3553ad55fec37b5aaac5882e0e845a2612df744d6c85c9bf046c"
 dependencies = [
  "cfg-if",
  "universal-hash",
@@ -3844,9 +4111,9 @@ checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a"
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.23"
+version = "1.0.24"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "51ef7cd2518ead700af67bf9d1a658d90b6037d77110fd9c0445429d0ba1c6c9"
+checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71"
 dependencies = [
  "unicode-xid",
 ]
@@ -3865,15 +4132,16 @@ dependencies = [
 
 [[package]]
 name = "prometheus"
-version = "0.9.0"
+version = "0.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd0ced56dee39a6e960c15c74dc48849d614586db2eaada6497477af7c7811cd"
+checksum = "30d70cf4412832bcac9cffe27906f4a66e450d323525e977168c70d1b36120ae"
 dependencies = [
  "cfg-if",
  "fnv",
  "lazy_static",
+ "parking_lot 0.11.0",
  "protobuf",
- "spin",
+ "regex",
  "thiserror",
 ]
 
@@ -3942,9 +4210,9 @@ dependencies = [
 
 [[package]]
 name = "protobuf"
-version = "2.17.0"
+version = "2.18.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cb14183cc7f213ee2410067e1ceeadba2a7478a59432ff0747a335202798b1e2"
+checksum = "6d147edb77bcccbfc81fabffdc7bd50c13e103b15ca1e27515fe40de69a5776b"
 
 [[package]]
 name = "psutil"
@@ -4027,9 +4295,9 @@ dependencies = [
 
 [[package]]
 name = "r2d2_sqlite"
-version = "0.16.0"
+version = "0.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ed60ebe88b27ac28c0563bc0fbeaecd302ff53e3a01e5ddc2ec9f4e6c707d929"
+checksum = "227ab35ff4cbb01fa76da8f062590fe677b93c8d9e8415eb5fa981f2c1dba9d8"
 dependencies = [
  "r2d2",
  "rusqlite",
@@ -4041,6 +4309,12 @@ version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac"
 
+[[package]]
+name = "radium"
+version = "0.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "941ba9d78d8e2f7ce474c015eea4d9c6d25b6a3327f9832ee29a4de27f91bbb8"
+
 [[package]]
 name = "rand"
 version = "0.4.6"
@@ -4067,6 +4341,25 @@ dependencies = [
  "winapi 0.3.9",
 ]
 
+[[package]]
+name = "rand"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca"
+dependencies = [
+ "autocfg 0.1.7",
+ "libc",
+ "rand_chacha 0.1.1",
+ "rand_core 0.4.2",
+ "rand_hc 0.1.0",
+ "rand_isaac",
+ "rand_jitter",
+ "rand_os",
+ "rand_pcg",
+ "rand_xorshift 0.1.1",
+ "winapi 0.3.9",
+]
+
 [[package]]
 name = "rand"
 version = "0.7.3"
@@ -4075,9 +4368,19 @@ checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
 dependencies = [
  "getrandom",
  "libc",
- "rand_chacha",
+ "rand_chacha 0.2.2",
  "rand_core 0.5.1",
- "rand_hc",
+ "rand_hc 0.2.0",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef"
+dependencies = [
+ "autocfg 0.1.7",
+ "rand_core 0.3.1",
 ]
 
 [[package]]
@@ -4114,6 +4417,15 @@ dependencies = [
  "getrandom",
 ]
 
+[[package]]
+name = "rand_hc"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4"
+dependencies = [
+ "rand_core 0.3.1",
+]
+
 [[package]]
 name = "rand_hc"
 version = "0.2.0"
@@ -4123,6 +4435,59 @@ dependencies = [
  "rand_core 0.5.1",
 ]
 
+[[package]]
+name = "rand_isaac"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08"
+dependencies = [
+ "rand_core 0.3.1",
+]
+
+[[package]]
+name = "rand_jitter"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b"
+dependencies = [
+ "libc",
+ "rand_core 0.4.2",
+ "winapi 0.3.9",
+]
+
+[[package]]
+name = "rand_os"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071"
+dependencies = [
+ "cloudabi 0.0.3",
+ "fuchsia-cprng",
+ "libc",
+ "rand_core 0.4.2",
+ "rdrand",
+ "winapi 0.3.9",
+]
+
+[[package]]
+name = "rand_pcg"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44"
+dependencies = [
+ "autocfg 0.1.7",
+ "rand_core 0.4.2",
+]
+
+[[package]]
+name = "rand_xorshift"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c"
+dependencies = [
+ "rand_core 0.3.1",
+]
+
 [[package]]
 name = "rand_xorshift"
 version = "0.2.0"
@@ -4134,9 +4499,9 @@ dependencies = [
 
 [[package]]
 name = "rayon"
-version = "1.4.0"
+version = "1.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cfd016f0c045ad38b5251be2c9c0ab806917f82da4d36b2a327e5166adad9270"
+checksum = "dcf6960dc9a5b4ee8d3e4c5787b4a112a8818e0290a42ff664ad60692fdf2032"
 dependencies = [
  "autocfg 1.0.1",
  "crossbeam-deque",
@@ -4210,24 +4575,6 @@ version = "0.6.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8"
 
-[[package]]
-name = "remote_beacon_node"
-version = "0.2.0"
-dependencies = [
- "eth2_config",
- "eth2_ssz",
- "futures 0.3.5",
- "hex 0.4.2",
- "operation_pool",
- "proto_array",
- "reqwest",
- "rest_types",
- "serde",
- "serde_json",
- "types",
- "url 2.1.1",
-]
-
 [[package]]
 name = "remove_dir_all"
 version = "0.5.3"
@@ -4273,73 +4620,6 @@ dependencies = [
  "winreg",
 ]
 
-[[package]]
-name = "rest_api"
-version = "0.2.0"
-dependencies = [
- "assert_matches",
- "beacon_chain",
- "bls",
- "bus",
- "environment",
- "eth2_config",
- "eth2_libp2p",
- "eth2_ssz",
- "eth2_ssz_derive",
- "futures 0.3.5",
- "hex 0.4.2",
- "http 0.2.1",
- "hyper 0.13.8",
- "itertools 0.9.0",
- "lazy_static",
- "lighthouse_metrics",
- "lighthouse_version",
- "network",
- "node_test_rig",
- "operation_pool",
- "parking_lot 0.11.0",
- "remote_beacon_node",
- "rest_types",
- "serde",
- "serde_json",
- "serde_yaml",
- "slog",
- "slog-async",
- "slog-term",
- "slot_clock",
- "state_processing",
- "store",
- "tokio 0.2.22",
- "tree_hash",
- "types",
- "uhttp_sse",
- "url 2.1.1",
-]
-
-[[package]]
-name = "rest_types"
-version = "0.2.0"
-dependencies = [
- "beacon_chain",
- "bls",
- "environment",
- "eth2_hashing",
- "eth2_ssz",
- "eth2_ssz_derive",
- "hyper 0.13.8",
- "procinfo",
- "psutil",
- "rayon",
- "serde",
- "serde_json",
- "serde_yaml",
- "state_processing",
- "store",
- "tokio 0.2.22",
- "tree_hash",
- "types",
-]
-
 [[package]]
 name = "ring"
 version = "0.16.12"
@@ -4363,18 +4643,18 @@ checksum = "cabe4fa914dec5870285fa7f71f602645da47c486e68486d2b4ceb4a343e90ac"
 
 [[package]]
 name = "rlp"
-version = "0.4.5"
+version = "0.4.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4a7d3f9bed94764eac15b8f14af59fac420c236adaff743b7bcc88e265cb4345"
+checksum = "1190dcc8c3a512f1eef5d09bb8c84c7f39e1054e174d1795482e18f5272f2e73"
 dependencies = [
  "rustc-hex",
 ]
 
 [[package]]
 name = "rpassword"
-version = "4.0.5"
+version = "5.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "99371657d3c8e4d816fb6221db98fa408242b0b53bac08f8676a41f8554fe99f"
+checksum = "d755237fc0f99d98641540e66abac8bc46a0652f19148ac9e21de2da06b326c9"
dependencies = [
 "libc",
 "winapi 0.3.9",
]

[[package]]
name = "rusqlite"
-version = "0.23.1"
+version = "0.24.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "45d0fd62e1df63d254714e6cb40d0a0e82e7a1623e7a27f679d851af092ae58b"
+checksum = "7e3d4791ab5517217f51216a84a688b53c1ebf7988736469c538d02f46ddba68"
 dependencies = [
  "bitflags 1.2.1",
  "fallible-iterator",
  "fallible-streaming-iterator",
+ "hashlink",
  "libsqlite3-sys",
- "lru-cache",
  "memchr",
  "smallvec 1.4.2",
- "time 0.1.44",
 ]
 
 [[package]]
@@ -4410,9 +4689,9 @@ dependencies = [
 
 [[package]]
 name = "rustc-demangle"
-version = "0.1.16"
+version = "0.1.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783"
+checksum = "b2610b7f643d18c87dff3b489950269617e6601a51f1f05aa5daefee36f64f0b"
 
 [[package]]
 name = "rustc-hash"
@@ -4454,7 +4733,7 @@ version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020"
 dependencies = [
- "futures 0.3.5",
+ "futures 0.3.6",
  "pin-project",
  "static_assertions",
 ]
@@ -4532,6 +4811,17 @@ dependencies = [
  "sha2 0.9.1",
 ]
 
+[[package]]
+name = "scrypt"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3437654bbbe34054a268b3859fe41f871215069b39f0aef78808d85c37100696"
+dependencies = [
+ "hmac 0.9.0",
+ "pbkdf2 0.5.0",
+ "sha2 0.9.1",
+]
+
 [[package]]
 name = "sct"
 version = "0.6.0"
@@ -4628,19 +4918,11 @@ dependencies = [
  "syn",
 ]
 
-[[package]]
-name = "serde_hex"
-version = "0.2.0"
-dependencies = [
- "hex 0.4.2",
- "serde",
-]
-
 [[package]]
 name = "serde_json"
-version = "1.0.57"
+version = "1.0.58"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "164eacbdb13512ec2745fb09d51fd5b22b0d65ed294a1dcf7285a360c80a675c"
+checksum = "a230ea9107ca2220eea9d46de97eddcb04cd00e92d13dda78e478dd33fa82bd4"
 dependencies = [
  "itoa",
  "ryu",
@@ -4674,6 +4956,7 @@ dependencies = [
 name = "serde_utils"
 version = "0.1.0"
 dependencies = [
+ "hex 0.4.2",
  "serde",
  "serde_derive",
  "serde_json",
@@ -4777,9 +5060,9 @@ checksum = "29f060a7d147e33490ec10da418795238fd7545bba241504d6b31a409f2e6210"
 
 [[package]]
 name = "simple_logger"
-version = "1.9.0"
+version = "1.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "13a53ed2efd04911c8280f2da7bf9abd350c931b86bc7f9f2386fbafbf525ff9"
+checksum = "b36ca4371e647131759047d7a0ac5e41e11fd540e0a49c9e158b1b94193081a1"
 dependencies = [
  "atty",
  "chrono",
@@ -4796,7 +5079,7 @@ dependencies = [
  "env_logger",
  "eth1",
  "eth1_test_rig",
- "futures 0.3.5",
+ "futures 0.3.6",
  "node_test_rig",
  "parking_lot 0.11.0",
  "rayon",
@@ -4826,6 +5109,10 @@ dependencies = [
  "r2d2_sqlite",
  "rayon",
  "rusqlite",
+ "serde",
+ "serde_derive",
+ "serde_json",
+ "serde_utils",
  "tempfile",
  "tree_hash",
  "types",
@@ -4959,11 +5246,11 @@ checksum = "da73c8f77aebc0e40c300b93f0a5f1bece7a248a36eee287d4e095f35c7b7d6e"
 
 [[package]]
 name = "snow"
-version = "0.7.1"
+version = "0.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32bf8474159a95551661246cda4976e89356999e3cbfef36f493dacc3fae1e8e"
+checksum = "795dd7aeeee24468e5a32661f6d27f7b5cbed802031b2d7640c7b10f8fb2dd50"
 dependencies = [
- "aes-gcm",
+ "aes-gcm 0.7.0",
  "blake2",
  "chacha20poly1305",
  "rand 0.7.3",
@@ -4996,7 +5283,7 @@ dependencies = [
  "base64 0.12.3",
  "bytes 0.5.6",
  "flate2",
- "futures 0.3.5",
+ "futures 0.3.6",
  "httparse",
  "log 0.4.11",
  "rand 0.7.3",
@@ -5011,9 +5298,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"
 
 [[package]]
 name = "standback"
-version = "0.2.10"
+version = "0.2.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "33a71ea1ea5f8747d1af1979bfb7e65c3a025a70609f04ceb78425bc5adad8e6"
+checksum = "f4e0831040d2cf2bdfd51b844be71885783d489898a192f254ae25d57cce725c"
 dependencies = [
  "version_check 0.9.2",
 ]
@@ -5136,11 +5423,11 @@ dependencies = [
 
 [[package]]
 name = "stream-cipher"
-version = "0.4.1"
+version = "0.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "09f8ed9974042b8c3672ff3030a69fcc03b74c47c3d1ecb7755e8a3626011e88"
+checksum = "c80e15f898d8d8f25db24c253ea615cc14acf418ff307822995814e7d42cfa89"
 dependencies = [
- "block-cipher",
+ "block-cipher 0.8.0",
  "generic-array 0.14.4",
 ]
 
@@ -5209,12 +5496,30 @@ version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60"
 
+[[package]]
+name = "tap"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "36474e732d1affd3a6ed582781b3683df3d0563714c59c39591e8ff707cf078e"
+
 [[package]]
 name = "target_info"
 version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c63f48baada5c52e65a29eef93ab4f8982681b67f9e8d29c7b05abcfec2b9ffe"
 
+[[package]]
+name = "task_executor"
+version = "0.1.0"
+dependencies = [
+ "exit-future",
+ "futures 0.3.6",
+ "lazy_static",
+ "lighthouse_metrics",
+ "slog",
+ "tokio 0.2.22",
+]
+
 [[package]]
 name = "tempdir"
 version = "0.3.7"
@@ -5245,7 +5550,7 @@ version = "0.6.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c0863a3345e70f61d613eab32ee046ccd1bcc5f9105fe402c61fcd0c13eeb8b5"
 dependencies = [
- "dirs",
+ "dirs 2.0.2",
  "winapi 0.3.9",
 ]
 
@@ -5277,18 +5582,18 @@ dependencies = [
 
 [[package]]
 name = "thiserror"
-version = "1.0.20"
+version = "1.0.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7dfdd070ccd8ccb78f4ad66bf1982dc37f620ef696c6b5028fe2ed83dd3d0d08"
+checksum = "318234ffa22e0920fe9a40d7b8369b5f649d490980cf7aadcf1eb91594869b42"
 dependencies = [
  "thiserror-impl",
 ]
 
 [[package]]
 name = "thiserror-impl"
-version = "1.0.20"
+version = "1.0.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bd80fc12f73063ac132ac92aceea36734f04a1d93c1240c6944e23a3b8841793"
+checksum = "cae2447b6282786c3493999f40a9be2a6ad20cb8bd268b0a0dbf5a065535c0ab"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -5367,11 +5672,11 @@ name = "timer"
 version = "0.2.0"
 dependencies = [
  "beacon_chain",
- "environment",
- "futures 0.3.5",
+ "futures 0.3.6",
  "parking_lot 0.11.0",
  "slog",
  "slot_clock",
+ "task_executor",
  "tokio 0.2.22",
  "types",
 ]
@@ -5433,7 +5738,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6"
 dependencies = [
  "bytes 0.4.12",
- "futures 0.1.29",
+ "futures 0.1.30",
  "mio",
  "num_cpus",
  "tokio-codec",
@@ -5482,7 +5787,7 @@ checksum = "8fb220f46c53859a4b7ec083e41dec9778ff0b1851c0942b211edb89e0ccdc46"
 dependencies = [
  "bytes 0.4.12",
  "either",
- "futures 0.1.29",
+ "futures 0.1.30",
 ]
 
 [[package]]
@@ -5492,7 +5797,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "25b2998660ba0e70d18684de5d06b70b70a3a747469af9dea7618cc59e75976b"
 dependencies = [
  "bytes 0.4.12",
- "futures 0.1.29",
+ "futures 0.1.30",
  "tokio-io",
 ]
 
@@ -5503,7 +5808,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "aeeffbbb94209023feaef3c196a41cbcdafa06b4a6f893f68779bb5e53796f71"
 dependencies = [
  "bytes 0.4.12",
- "futures 0.1.29",
+ "futures 0.1.30",
  "iovec",
  "log 0.4.11",
  "mio",
@@ -5521,7 +5826,7 @@ version = "0.1.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b1de0e32a83f131e002238d7ccde18211c0a5397f60cbfffcb112868c2e0e20e"
 dependencies = [
- "futures 0.1.29",
+ "futures 0.1.30",
  "tokio-executor",
 ]
 
@@ -5532,7 +5837,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fb2d1b8f4548dbf5e1f7818512e9c406860678f29c300cdf0ebac72d1a3a1671"
 dependencies = [
  "crossbeam-utils",
- "futures 0.1.29",
+ "futures 0.1.30",
 ]
 
 [[package]]
@@ -5541,7 +5846,7 @@ version = "0.1.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "297a1206e0ca6302a0eed35b700d292b275256f596e2f3fea7729d5e629b6ff4"
 dependencies = [
- "futures 0.1.29",
+ "futures 0.1.30",
  "tokio-io",
  "tokio-threadpool",
 ]
@@ -5553,7 +5858,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "57fc868aae093479e3131e3d165c93b1c7474109d13c90ec0dda2a1bbfff0674"
 dependencies = [
  "bytes 0.4.12",
- "futures 0.1.29",
+ "futures 0.1.30",
  "log 0.4.11",
 ]
 
@@ -5585,7 +5890,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "09bc590ec4ba8ba87652da2068d150dcada2cfa2e07faae270a5e0409aa51351"
 dependencies = [
  "crossbeam-utils",
- "futures 0.1.29",
+ "futures 0.1.30",
  "lazy_static",
  "log 0.4.11",
  "mio",
@@ -5604,7 +5909,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "edfe50152bc8164fcc456dab7891fa9bf8beaf01c5ee7e1dd43a397c3cf87dee"
 dependencies = [
  "fnv",
- "futures 0.1.29",
+ "futures 0.1.30",
 ]
 
 [[package]]
@@ -5614,7 +5919,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "98df18ed66e3b72e742f185882a9e201892407957e45fbff8da17ae7a7c51f72"
 dependencies = [
  "bytes 0.4.12",
- "futures 0.1.29",
+ "futures 0.1.30",
  "iovec",
  "mio",
  "tokio-io",
@@ -5630,7 +5935,7 @@ dependencies = [
  "crossbeam-deque",
  "crossbeam-queue",
  "crossbeam-utils",
- "futures 0.1.29",
+ "futures 0.1.30",
  "lazy_static",
  "log 0.4.11",
  "num_cpus",
@@ -5644,7 +5949,7 @@ version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6131e780037787ff1b3f8aad9da83bca02438b72277850dd6ad0d455e0e20efc"
 dependencies = [
- "futures 0.1.29",
+ "futures 0.1.30",
  "slab 0.3.0",
 ]
 
@@ -5655,7 +5960,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "93044f2d313c95ff1cb7809ce9a7a05735b012288a888b62d4434fd58c94f296"
 dependencies = [
  "crossbeam-utils",
- "futures 0.1.29",
+ "futures 0.1.30",
  "slab 0.4.2",
  "tokio-executor",
 ]
@@ -5666,7 +5971,7 @@ version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "354b8cd83825b3c20217a9dc174d6a0c67441a2fae5c41bcb1ea6679f6ae0f7c"
 dependencies = [
- "futures 0.1.29",
+ "futures 0.1.30",
  "native-tls",
  "tokio-io",
 ]
@@ -5681,6 +5986,19 @@ dependencies = [
  "tokio 0.2.22",
 ]
 
+[[package]]
+name = "tokio-tungstenite"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d9e878ad426ca286e4dcae09cbd4e1973a7f8987d97570e2469703dd7f5720c"
+dependencies = [
+ "futures-util",
+ "log 0.4.11",
+ "pin-project",
+ "tokio 0.2.22",
+ "tungstenite",
+]
+
 [[package]]
 name = "tokio-udp"
 version = "0.1.6"
@@ -5688,7 +6006,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e2a0b10e610b39c38b031a2fcab08e4b82f16ece36504988dcbd81dbba650d82"
 dependencies = [
  "bytes 0.4.12",
- "futures 0.1.29",
+ "futures 0.1.30",
  "log 0.4.11",
  "mio",
  "tokio-codec",
@@ -5703,7 +6021,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "65ae5d255ce739e8537221ed2942e0445f4b3b813daebac1c0050ddaaa3587f9"
 dependencies = [
  "bytes 0.4.12",
- "futures 0.1.29",
+ "futures 0.1.30",
  "iovec",
  "libc",
  "log 0.3.9",
@@ -5720,7 +6038,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ab57a4ac4111c8c9dbcf70779f6fc8bc35ae4b2454809febac840ad19bd7e4e0"
 dependencies = [
  "bytes 0.4.12",
- "futures 0.1.29",
+ "futures 0.1.30",
  "iovec",
  "libc",
  "log 0.4.11",
@@ -5782,6 +6100,16 @@ dependencies = [
  "lazy_static",
 ]
 
+[[package]]
+name = "tracing-futures"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ab7bb6f14721aa00656086e9335d363c5c8747bae02ebe32ea2c7dece5689b4c"
+dependencies = [
+ "pin-project",
+ "tracing",
+]
+
 [[package]]
 name = "trackable"
 version = "1.0.0"
@@ -5835,6 +6163,34 @@ version = "0.2.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642"
 
+[[package]]
+name = "tungstenite"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0308d80d86700c5878b9ef6321f020f29b1bb9d5ff3cab25e75e23f3a492a23"
+dependencies = [
+ "base64 0.12.3",
+ "byteorder",
+ "bytes 0.5.6",
+ "http 0.2.1",
+ "httparse",
+ "input_buffer",
+ "log 0.4.11",
+ "rand 0.7.3",
+ "sha-1 0.9.1",
+ "url 2.1.1",
+ "utf-8",
+]
+
+[[package]]
+name = "twoway"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1"
+dependencies = [
+ "memchr",
+]
+
 [[package]]
 name = "typeable"
 version = "0.1.2"
@@ -5858,7 +6214,6 @@ dependencies = [
  "compare_fields_derive",
  "criterion",
  "derivative",
- "dirs",
  "eth2_hashing",
  "eth2_interop_keypairs",
  "eth2_ssz",
@@ -5870,13 +6225,15 @@ dependencies = [
  "log 0.4.11",
  "merkle_proof",
  "rand 0.7.3",
- "rand_xorshift",
+ "rand_xorshift 0.2.0",
  "rayon",
+ "regex",
  "rusqlite",
  "safe_arith",
  "serde",
  "serde_derive",
  "serde_json",
+ "serde_utils",
  "serde_yaml",
  "slog",
  "swap_or_not_shuffle",
@@ -5886,12 +6243,6 @@ dependencies = [
  "tree_hash_derive",
 ]
 
-[[package]]
-name = "uhttp_sse"
-version = "0.5.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c6ff93345ba2206230b1bb1aa3ece1a63dd9443b7531024575d16a0680a59444"
-
 [[package]]
 name = "uint"
 version = "0.8.5"
@@ -5989,10 +6340,6 @@ name = "unsigned-varint"
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "669d776983b692a906c881fcd0cfb34271a48e197e4d6cb8df32b05bfc3d3fa5"
-dependencies = [
- "bytes 0.5.6",
- "futures_codec",
-]
 
 [[package]]
 name = "unsigned-varint"
@@ -6032,6 +6379,18 @@ dependencies = [
  "percent-encoding 2.1.0",
 ]
 
+[[package]]
+name = "urlencoding"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9232eb53352b4442e40d7900465dfc534e8cb2dc8f18656fcb2ac16112b5593" + +[[package]] +name = "utf-8" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05e42f7c18b8f902290b009cde6d651262f956c98bc51bca4cd1d511c9cd85c7" + [[package]] name = "uuid" version = "0.8.1" @@ -6044,32 +6403,40 @@ dependencies = [ [[package]] name = "validator_client" -version = "0.2.13" +version = "0.3.0" dependencies = [ "account_utils", + "bincode", "bls", "clap", "clap_utils", "deposit_contract", - "dirs", + "directory", + "dirs 3.0.1", "environment", + "eth2", "eth2_config", "eth2_interop_keypairs", "eth2_keystore", "eth2_ssz", "eth2_ssz_derive", "exit-future", - "futures 0.3.5", + "futures 0.3.6", "hex 0.4.2", + "hyper 0.13.8", "libc", + "libsecp256k1", + "lighthouse_version", "logging", "parking_lot 0.11.0", + "rand 0.7.3", "rayon", - "remote_beacon_node", - "rest_types", + "ring", + "scrypt 0.3.1", "serde", "serde_derive", "serde_json", + "serde_utils", "serde_yaml", "slashing_protection", "slog", @@ -6077,10 +6444,13 @@ dependencies = [ "slog-term", "slot_clock", "tempdir", + "tempfile", "tokio 0.2.22", "tree_hash", "types", "validator_dir", + "warp", + "warp_utils", ] [[package]] @@ -6146,7 +6516,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230" dependencies = [ - "futures 0.1.29", + "futures 0.1.30", "log 0.4.11", "try-lock", ] @@ -6161,6 +6531,48 @@ dependencies = [ "try-lock", ] +[[package]] +name = "warp" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f41be6df54c97904af01aa23e613d4521eed7ab23537cede692d4058f6449407" +dependencies = [ + "bytes 0.5.6", + "futures 0.3.6", + "headers", + "http 0.2.1", + "hyper 0.13.8", + "log 0.4.11", + "mime 0.3.16", + "mime_guess", + "multipart", + "pin-project", + "scoped-tls 1.0.0", + "serde", + "serde_json", + "serde_urlencoded", + "tokio 0.2.22", + "tokio-tungstenite", + "tower-service", + "tracing", + "tracing-futures", + "urlencoding", +] + +[[package]] +name = "warp_utils" +version = "0.1.0" +dependencies = [ + "beacon_chain", + "eth2", + "safe_arith", + "serde", + "state_processing", + "tokio 0.2.22", + "types", + "warp", +] + [[package]] name = "wasi" version = "0.9.0+wasi-snapshot-preview1" @@ -6271,7 +6683,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f" dependencies = [ - "futures 0.3.5", + "futures 0.3.6", "js-sys", "parking_lot 0.11.0", "pin-utils", @@ -6301,7 +6713,7 @@ dependencies = [ "derive_more", "ethabi", "ethereum-types", - "futures 0.1.29", + "futures 0.1.30", "hyper 0.12.35", "hyper-tls 0.3.2", "jsonrpc-core", @@ -6335,18 +6747,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.18.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91cd5736df7f12a964a5067a12c62fa38e1bd8080aff1f80bc29be7c80d19ab4" -dependencies = [ - "webpki", -] - -[[package]] -name = "webpki-roots" -version = "0.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8eff4b7516a57307f9349c64bf34caa34b940b66fed4b2fb3136cb7386e5739" +checksum = "0f20dea7535251981a9670857150d571846545088359b28e4951d350bdaf179f" dependencies = [ "webpki", ] @@ -6361,7 +6764,7 @@ dependencies = [ "bitflags 
0.9.1", "byteorder", "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.30", "hyper 0.10.16", "native-tls", "rand 0.5.6", @@ -6377,12 +6780,11 @@ dependencies = [ name = "websocket_server" version = "0.2.0" dependencies = [ - "environment", - "futures 0.3.5", + "futures 0.3.6", "serde", "serde_derive", - "serde_json", "slog", + "task_executor", "tokio 0.2.22", "types", "ws", @@ -6478,12 +6880,18 @@ dependencies = [ ] [[package]] -name = "x25519-dalek" -version = "0.6.0" +name = "wyz" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "637ff90c9540fa3073bb577e65033069e4bae7c79d49d74aa3ffdf5342a53217" +checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" + +[[package]] +name = "x25519-dalek" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc614d95359fd7afc321b66d2107ede58b246b844cf5d8a0adcca413e439f088" dependencies = [ - "curve25519-dalek 2.1.0", + "curve25519-dalek", "rand_core 0.5.1", "zeroize", ] @@ -6535,13 +6943,14 @@ dependencies = [ [[package]] name = "zip" -version = "0.5.6" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58287c28d78507f5f91f2a4cf1e8310e2c76fd4c6932f93ac60fd1ceb402db7d" +checksum = "543adf038106b64cfca4711c82c917d785e3540e04f7996554488f988ec43124" dependencies = [ + "byteorder", "bzip2", "crc32fast", "flate2", - "podio", + "thiserror", "time 0.1.44", ] diff --git a/Cargo.toml b/Cargo.toml index 92fb5bccf..d15b23be6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,8 +7,9 @@ members = [ "beacon_node/client", "beacon_node/eth1", "beacon_node/eth2_libp2p", + "beacon_node/http_api", + "beacon_node/http_metrics", "beacon_node/network", - "beacon_node/rest_api", "beacon_node/store", "beacon_node/timer", "beacon_node/websocket_server", @@ -20,6 +21,8 @@ members = [ "common/compare_fields", "common/compare_fields_derive", "common/deposit_contract", + "common/directory", + "common/eth2", "common/eth2_config", "common/eth2_interop_keypairs", "common/eth2_testnet_config", @@ -29,10 +32,10 @@ members = [ "common/lighthouse_version", "common/logging", "common/lru_cache", - "common/remote_beacon_node", - "common/rest_types", "common/slot_clock", "common/test_random_derive", + "common/warp_utils", + "common/task_executor", "common/validator_dir", "consensus/cached_tree_hash", @@ -43,7 +46,6 @@ members = [ "consensus/ssz", "consensus/ssz_derive", "consensus/ssz_types", - "consensus/serde_hex", "consensus/serde_utils", "consensus/state_processing", "consensus/swap_or_not_shuffle", diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index a225af081..0eefabe46 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -1,33 +1,35 @@ [package] name = "account_manager" -version = "0.2.13" +version = "0.3.0" authors = ["Paul Hauner ", "Luke Anderson "] edition = "2018" [dependencies] bls = { path = "../crypto/bls" } -clap = "2.33.0" +clap = "2.33.3" slog = "2.5.2" -slog-term = "2.5.0" +slog-term = "2.6.0" slog-async = "2.5.0" types = { path = "../consensus/types" } state_processing = { path = "../consensus/state_processing" } -dirs = "2.0.2" +dirs = "3.0.1" environment = { path = "../lighthouse/environment" } deposit_contract = { path = "../common/deposit_contract" } -libc = "0.2.65" +libc = "0.2.79" eth2_ssz = "0.1.2" eth2_ssz_derive = "0.1.0" hex = "0.4.2" -rayon = "1.3.0" +rayon = "1.4.1" eth2_testnet_config = { path = "../common/eth2_testnet_config" } web3 = "0.11.0" futures = { version 
= "0.3.5", features = ["compat"] } clap_utils = { path = "../common/clap_utils" } +directory = { path = "../common/directory" } eth2_wallet = { path = "../crypto/eth2_wallet" } eth2_wallet_manager = { path = "../common/eth2_wallet_manager" } -rand = "0.7.2" +rand = "0.7.3" validator_dir = { path = "../common/validator_dir" } tokio = { version = "0.2.22", features = ["full"] } eth2_keystore = { path = "../crypto/eth2_keystore" } account_utils = { path = "../common/account_utils" } +slashing_protection = { path = "../validator_client/slashing_protection" } diff --git a/account_manager/src/common.rs b/account_manager/src/common.rs index 030092036..2b9c93fb1 100644 --- a/account_manager/src/common.rs +++ b/account_manager/src/common.rs @@ -1,10 +1,8 @@ use account_utils::PlainText; use account_utils::{read_input_from_user, strip_off_newlines}; -use clap::ArgMatches; use eth2_wallet::bip39::{Language, Mnemonic}; use std::fs; -use std::fs::create_dir_all; -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use std::str::from_utf8; use std::thread::sleep; use std::time::Duration; @@ -12,26 +10,6 @@ use std::time::Duration; pub const MNEMONIC_PROMPT: &str = "Enter the mnemonic phrase:"; pub const WALLET_NAME_PROMPT: &str = "Enter wallet name:"; -pub fn ensure_dir_exists>(path: P) -> Result<(), String> { - let path = path.as_ref(); - - if !path.exists() { - create_dir_all(path).map_err(|e| format!("Unable to create {:?}: {:?}", path, e))?; - } - - Ok(()) -} - -pub fn base_wallet_dir(matches: &ArgMatches, arg: &'static str) -> Result { - clap_utils::parse_path_with_default_in_home_dir( - matches, - arg, - PathBuf::new().join(".lighthouse").join("wallets"), - ) -} - -/// Reads in a mnemonic from the user. If the file path is provided, read from it. Otherwise, read -/// from an interactive prompt using tty, unless the `--stdin-inputs` flag is provided. 
pub fn read_mnemonic_from_cli( mnemonic_path: Option, stdin_inputs: bool, diff --git a/account_manager/src/lib.rs b/account_manager/src/lib.rs index 5300693dc..829756778 100644 --- a/account_manager/src/lib.rs +++ b/account_manager/src/lib.rs @@ -10,7 +10,7 @@ use types::EthSpec; pub const CMD: &str = "account_manager"; pub const SECRETS_DIR_FLAG: &str = "secrets-dir"; pub const VALIDATOR_DIR_FLAG: &str = "validator-dir"; -pub const BASE_DIR_FLAG: &str = "base-dir"; +pub const WALLETS_DIR_FLAG: &str = "wallets-dir"; pub fn cli_app<'a, 'b>() -> App<'a, 'b> { App::new(CMD) diff --git a/account_manager/src/validator/create.rs b/account_manager/src/validator/create.rs index 948942978..863cbf1c0 100644 --- a/account_manager/src/validator/create.rs +++ b/account_manager/src/validator/create.rs @@ -1,12 +1,16 @@ use crate::common::read_wallet_name_from_cli; use crate::wallet::create::STDIN_INPUTS_FLAG; -use crate::{common::ensure_dir_exists, SECRETS_DIR_FLAG, VALIDATOR_DIR_FLAG}; +use crate::{SECRETS_DIR_FLAG, WALLETS_DIR_FLAG}; use account_utils::{ random_password, read_password_from_user, strip_off_newlines, validator_definitions, PlainText, }; use clap::{App, Arg, ArgMatches}; +use directory::{ + ensure_dir_exists, parse_path_or_default_with_flag, DEFAULT_SECRET_DIR, DEFAULT_WALLET_DIR, +}; use environment::Environment; use eth2_wallet_manager::WalletManager; +use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; use std::ffi::OsStr; use std::fs; use std::path::{Path, PathBuf}; @@ -14,7 +18,6 @@ use types::EthSpec; use validator_dir::Builder as ValidatorDirBuilder; pub const CMD: &str = "create"; -pub const BASE_DIR_FLAG: &str = "base-dir"; pub const WALLET_NAME_FLAG: &str = "wallet-name"; pub const WALLET_PASSWORD_FLAG: &str = "wallet-password"; pub const DEPOSIT_GWEI_FLAG: &str = "deposit-gwei"; @@ -44,14 +47,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true), ) .arg( - Arg::with_name(VALIDATOR_DIR_FLAG) - .long(VALIDATOR_DIR_FLAG) - .value_name("VALIDATOR_DIRECTORY") - .help( - "The path where the validator directories will be created. \ - Defaults to ~/.lighthouse/validators", - ) - .takes_value(true), + Arg::with_name(WALLETS_DIR_FLAG) + .long(WALLETS_DIR_FLAG) + .value_name(WALLETS_DIR_FLAG) + .help("A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/{testnet}/wallets") + .takes_value(true) + .conflicts_with("datadir"), ) .arg( Arg::with_name(SECRETS_DIR_FLAG) @@ -59,8 +60,9 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .value_name("SECRETS_DIR") .help( "The path where the validator keystore passwords will be stored. \ - Defaults to ~/.lighthouse/secrets", + Defaults to ~/.lighthouse/{testnet}/secrets", ) + .conflicts_with("datadir") .takes_value(true), ) .arg( @@ -111,23 +113,25 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { pub fn cli_run( matches: &ArgMatches, mut env: Environment, - wallet_base_dir: PathBuf, + validator_dir: PathBuf, ) -> Result<(), String> { let spec = env.core_context().eth2_config.spec; let name: Option = clap_utils::parse_optional(matches, WALLET_NAME_FLAG)?; let stdin_inputs = matches.is_present(STDIN_INPUTS_FLAG); + let wallet_base_dir = if matches.value_of("datadir").is_some() { + let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; + path.join(DEFAULT_WALLET_DIR) + } else { + parse_path_or_default_with_flag(matches, WALLETS_DIR_FLAG, DEFAULT_WALLET_DIR)? 
+ }; + let secrets_dir = if matches.value_of("datadir").is_some() { + let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; + path.join(DEFAULT_SECRET_DIR) + } else { + parse_path_or_default_with_flag(matches, SECRETS_DIR_FLAG, DEFAULT_SECRET_DIR)? + }; - let validator_dir = clap_utils::parse_path_with_default_in_home_dir( - matches, - VALIDATOR_DIR_FLAG, - PathBuf::new().join(".lighthouse").join("validators"), - )?; - let secrets_dir = clap_utils::parse_path_with_default_in_home_dir( - matches, - SECRETS_DIR_FLAG, - PathBuf::new().join(".lighthouse").join("secrets"), - )?; let deposit_gwei = clap_utils::parse_optional(matches, DEPOSIT_GWEI_FLAG)? .unwrap_or_else(|| spec.max_effective_balance); let count: Option = clap_utils::parse_optional(matches, COUNT_FLAG)?; @@ -136,6 +140,9 @@ pub fn cli_run( ensure_dir_exists(&validator_dir)?; ensure_dir_exists(&secrets_dir)?; + eprintln!("secrets-dir path {:?}", secrets_dir); + eprintln!("wallets-dir path {:?}", wallet_base_dir); + let starting_validator_count = existing_validator_count(&validator_dir)?; let n = match (count, at_most) { @@ -166,12 +173,22 @@ pub fn cli_run( let wallet_password = read_wallet_password_from_cli(wallet_password_path, stdin_inputs)?; let mgr = WalletManager::open(&wallet_base_dir) - .map_err(|e| format!("Unable to open --{}: {:?}", BASE_DIR_FLAG, e))?; + .map_err(|e| format!("Unable to open --{}: {:?}", WALLETS_DIR_FLAG, e))?; let mut wallet = mgr .wallet_by_name(&wallet_name) .map_err(|e| format!("Unable to open wallet: {:?}", e))?; + let slashing_protection_path = validator_dir.join(SLASHING_PROTECTION_FILENAME); + let slashing_protection = + SlashingDatabase::open_or_create(&slashing_protection_path).map_err(|e| { + format!( + "Unable to open or create slashing protection database at {}: {:?}", + slashing_protection_path.display(), + e + ) + })?; + for i in 0..n { let voting_password = random_password(); let withdrawal_password = random_password(); @@ -184,9 +201,25 @@ pub fn cli_run( ) .map_err(|e| format!("Unable to create validator keys: {:?}", e))?; - let voting_pubkey = keystores.voting.pubkey().to_string(); + let voting_pubkey = keystores.voting.public_key().ok_or_else(|| { + format!( + "Keystore public key is invalid: {}", + keystores.voting.pubkey() + ) + })?; - ValidatorDirBuilder::new(validator_dir.clone(), secrets_dir.clone()) + slashing_protection + .register_validator(&voting_pubkey) + .map_err(|e| { + format!( + "Error registering validator {}: {:?}", + voting_pubkey.to_hex_string(), + e + ) + })?; + + ValidatorDirBuilder::new(validator_dir.clone()) + .password_dir(secrets_dir.clone()) .voting_keystore(keystores.voting, voting_password.as_bytes()) .withdrawal_keystore(keystores.withdrawal, withdrawal_password.as_bytes()) .create_eth1_tx_data(deposit_gwei, &spec) @@ -194,7 +227,7 @@ pub fn cli_run( .build() .map_err(|e| format!("Unable to build validator directory: {:?}", e))?; - println!("{}/{}\t0x{}", i + 1, n, voting_pubkey); + println!("{}/{}\t{}", i + 1, n, voting_pubkey.to_hex_string()); } Ok(()) @@ -202,14 +235,18 @@ pub fn cli_run( /// Returns the number of validators that exist in the given `validator_dir`. /// -/// This function just assumes all files and directories, excluding the validator definitions YAML, -/// are validator directories, making it likely to return a higher number than accurate -/// but never a lower one. 
+/// This function just assumes all files and directories, excluding the validator definitions YAML +/// and slashing protection database are validator directories, making it likely to return a higher +/// number than accurate but never a lower one. fn existing_validator_count>(validator_dir: P) -> Result { fs::read_dir(validator_dir.as_ref()) .map(|iter| { iter.filter_map(|e| e.ok()) - .filter(|e| e.file_name() != OsStr::new(validator_definitions::CONFIG_FILENAME)) + .filter(|e| { + e.file_name() != OsStr::new(validator_definitions::CONFIG_FILENAME) + && e.file_name() + != OsStr::new(slashing_protection::SLASHING_PROTECTION_FILENAME) + }) .count() }) .map_err(|e| format!("Unable to read {:?}: {}", validator_dir.as_ref(), e)) diff --git a/account_manager/src/validator/deposit.rs b/account_manager/src/validator/deposit.rs index 0e508cfd2..233e7634e 100644 --- a/account_manager/src/validator/deposit.rs +++ b/account_manager/src/validator/deposit.rs @@ -46,16 +46,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { The deposit contract address will be determined by the --testnet-dir flag on the \ primary Lighthouse binary.", ) - .arg( - Arg::with_name(VALIDATOR_DIR_FLAG) - .long(VALIDATOR_DIR_FLAG) - .value_name("VALIDATOR_DIRECTORY") - .help( - "The path to the validator client data directory. \ - Defaults to ~/.lighthouse/validators", - ) - .takes_value(true), - ) .arg( Arg::with_name(VALIDATOR_FLAG) .long(VALIDATOR_FLAG) @@ -209,14 +199,10 @@ where pub fn cli_run( matches: &ArgMatches<'_>, mut env: Environment, + validator_dir: PathBuf, ) -> Result<(), String> { let log = env.core_context().log().clone(); - let data_dir = clap_utils::parse_path_with_default_in_home_dir( - matches, - VALIDATOR_DIR_FLAG, - PathBuf::new().join(".lighthouse").join("validators"), - )?; let validator: String = clap_utils::parse_required(matches, VALIDATOR_FLAG)?; let eth1_ipc_path: Option = clap_utils::parse_optional(matches, ETH1_IPC_FLAG)?; let eth1_http_url: Option = clap_utils::parse_optional(matches, ETH1_HTTP_FLAG)?; @@ -225,7 +211,7 @@ pub fn cli_run( let confirmation_batch_size: usize = clap_utils::parse_required(matches, CONFIRMATION_BATCH_SIZE_FLAG)?; - let manager = ValidatorManager::open(&data_dir) + let manager = ValidatorManager::open(&validator_dir) .map_err(|e| format!("Unable to read --{}: {:?}", VALIDATOR_DIR_FLAG, e))?; let validators = match validator.as_ref() { diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs index 5216b3d9c..8b4a216e8 100644 --- a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -1,5 +1,4 @@ use crate::wallet::create::STDIN_INPUTS_FLAG; -use crate::{common::ensure_dir_exists, VALIDATOR_DIR_FLAG}; use account_utils::{ eth2_keystore::Keystore, read_password_from_user, @@ -10,6 +9,7 @@ use account_utils::{ ZeroizeString, }; use clap::{App, Arg, ArgMatches}; +use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; use std::fs; use std::path::PathBuf; use std::thread::sleep; @@ -55,16 +55,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .required_unless(KEYSTORE_FLAG) .takes_value(true), ) - .arg( - Arg::with_name(VALIDATOR_DIR_FLAG) - .long(VALIDATOR_DIR_FLAG) - .value_name("VALIDATOR_DIRECTORY") - .help( - "The path where the validator directories will be created. 
\ - Defaults to ~/.lighthouse/validators", - ) - .takes_value(true), - ) .arg( Arg::with_name(STDIN_INPUTS_FLAG) .long(STDIN_INPUTS_FLAG) @@ -77,22 +67,25 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) } -pub fn cli_run(matches: &ArgMatches) -> Result<(), String> { +pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), String> { let keystore: Option = clap_utils::parse_optional(matches, KEYSTORE_FLAG)?; let keystores_dir: Option = clap_utils::parse_optional(matches, DIR_FLAG)?; - let validator_dir = clap_utils::parse_path_with_default_in_home_dir( - matches, - VALIDATOR_DIR_FLAG, - PathBuf::new().join(".lighthouse").join("validators"), - )?; let stdin_inputs = matches.is_present(STDIN_INPUTS_FLAG); let reuse_password = matches.is_present(REUSE_PASSWORD_FLAG); - ensure_dir_exists(&validator_dir)?; - let mut defs = ValidatorDefinitions::open_or_create(&validator_dir) .map_err(|e| format!("Unable to open {}: {:?}", CONFIG_FILENAME, e))?; + let slashing_protection_path = validator_dir.join(SLASHING_PROTECTION_FILENAME); + let slashing_protection = + SlashingDatabase::open_or_create(&slashing_protection_path).map_err(|e| { + format!( + "Unable to open or create slashing protection database at {}: {:?}", + slashing_protection_path.display(), + e + ) + })?; + // Collect the paths for the keystores that should be imported. let keystore_paths = match (keystore, keystores_dir) { (Some(keystore), None) => vec![keystore], @@ -123,6 +116,7 @@ pub fn cli_run(matches: &ArgMatches) -> Result<(), String> { // // - Obtain the keystore password, if the user desires. // - Copy the keystore into the `validator_dir`. + // - Register the voting key with the slashing protection database. // - Add the keystore to the validator definitions file. // // Skip keystores that already exist, but exit early if any operation fails. @@ -203,6 +197,20 @@ pub fn cli_run(matches: &ArgMatches) -> Result<(), String> { fs::copy(&src_keystore, &dest_keystore) .map_err(|e| format!("Unable to copy keystore: {:?}", e))?; + // Register with slashing protection. + let voting_pubkey = keystore + .public_key() + .ok_or_else(|| format!("Keystore public key is invalid: {}", keystore.pubkey()))?; + slashing_protection + .register_validator(&voting_pubkey) + .map_err(|e| { + format!( + "Error registering validator {}: {:?}", + voting_pubkey.to_hex_string(), + e + ) + })?; + eprintln!("Successfully imported keystore."); num_imported_keystores += 1; diff --git a/account_manager/src/validator/list.rs b/account_manager/src/validator/list.rs index 148564303..dd97de156 100644 --- a/account_manager/src/validator/list.rs +++ b/account_manager/src/validator/list.rs @@ -1,38 +1,21 @@ use crate::VALIDATOR_DIR_FLAG; -use clap::{App, Arg, ArgMatches}; +use clap::App; use std::path::PathBuf; use validator_dir::Manager as ValidatorManager; pub const CMD: &str = "list"; pub fn cli_app<'a, 'b>() -> App<'a, 'b> { - App::new(CMD) - .arg( - Arg::with_name(VALIDATOR_DIR_FLAG) - .long(VALIDATOR_DIR_FLAG) - .value_name("VALIDATOR_DIRECTORY") - .help( - "The path to search for validator directories. 
\ - Defaults to ~/.lighthouse/validators", - ) - .takes_value(true), - ) - .about("Lists the names of all validators.") + App::new(CMD).about("Lists the names of all validators.") } -pub fn cli_run(matches: &ArgMatches<'_>) -> Result<(), String> { - let data_dir = clap_utils::parse_path_with_default_in_home_dir( - matches, - VALIDATOR_DIR_FLAG, - PathBuf::new().join(".lighthouse").join("validators"), - )?; - - let mgr = ValidatorManager::open(&data_dir) +pub fn cli_run(validator_dir: PathBuf) -> Result<(), String> { + let mgr = ValidatorManager::open(&validator_dir) .map_err(|e| format!("Unable to read --{}: {:?}", VALIDATOR_DIR_FLAG, e))?; for (name, _path) in mgr .directory_names() - .map_err(|e| format!("Unable to list wallets: {:?}", e))? + .map_err(|e| format!("Unable to list validators: {:?}", e))? { println!("{}", name) } diff --git a/account_manager/src/validator/mod.rs b/account_manager/src/validator/mod.rs index 84ad6df39..042a35ccd 100644 --- a/account_manager/src/validator/mod.rs +++ b/account_manager/src/validator/mod.rs @@ -3,10 +3,13 @@ pub mod deposit; pub mod import; pub mod list; pub mod recover; +pub mod slashing_protection; -use crate::common::base_wallet_dir; +use crate::VALIDATOR_DIR_FLAG; use clap::{App, Arg, ArgMatches}; +use directory::{parse_path_or_default_with_flag, DEFAULT_VALIDATOR_DIR}; use environment::Environment; +use std::path::PathBuf; use types::EthSpec; pub const CMD: &str = "validator"; @@ -15,28 +18,42 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { App::new(CMD) .about("Provides commands for managing Eth2 validators.") .arg( - Arg::with_name("base-dir") - .long("base-dir") - .value_name("BASE_DIRECTORY") - .help("A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/wallets") - .takes_value(true), + Arg::with_name(VALIDATOR_DIR_FLAG) + .long(VALIDATOR_DIR_FLAG) + .value_name("VALIDATOR_DIRECTORY") + .help( + "The path to search for validator directories. \ + Defaults to ~/.lighthouse/{testnet}/validators", + ) + .takes_value(true) + .conflicts_with("datadir"), ) .subcommand(create::cli_app()) .subcommand(deposit::cli_app()) .subcommand(import::cli_app()) .subcommand(list::cli_app()) .subcommand(recover::cli_app()) + .subcommand(slashing_protection::cli_app()) } pub fn cli_run(matches: &ArgMatches, env: Environment) -> Result<(), String> { - let base_wallet_dir = base_wallet_dir(matches, "base-dir")?; + let validator_base_dir = if matches.value_of("datadir").is_some() { + let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; + path.join(DEFAULT_VALIDATOR_DIR) + } else { + parse_path_or_default_with_flag(matches, VALIDATOR_DIR_FLAG, DEFAULT_VALIDATOR_DIR)? 
+ }; + eprintln!("validator-dir path: {:?}", validator_base_dir); match matches.subcommand() { - (create::CMD, Some(matches)) => create::cli_run::(matches, env, base_wallet_dir), - (deposit::CMD, Some(matches)) => deposit::cli_run::(matches, env), - (import::CMD, Some(matches)) => import::cli_run(matches), - (list::CMD, Some(matches)) => list::cli_run(matches), - (recover::CMD, Some(matches)) => recover::cli_run(matches), + (create::CMD, Some(matches)) => create::cli_run::(matches, env, validator_base_dir), + (deposit::CMD, Some(matches)) => deposit::cli_run::(matches, env, validator_base_dir), + (import::CMD, Some(matches)) => import::cli_run(matches, validator_base_dir), + (list::CMD, Some(_)) => list::cli_run(validator_base_dir), + (recover::CMD, Some(matches)) => recover::cli_run(matches, validator_base_dir), + (slashing_protection::CMD, Some(matches)) => { + slashing_protection::cli_run(matches, env, validator_base_dir) + } (unknown, _) => Err(format!( "{} does not have a {} command. See --help", CMD, unknown diff --git a/account_manager/src/validator/recover.rs b/account_manager/src/validator/recover.rs index 376c21645..ecea0efbd 100644 --- a/account_manager/src/validator/recover.rs +++ b/account_manager/src/validator/recover.rs @@ -1,11 +1,13 @@ use super::create::STORE_WITHDRAW_FLAG; -use crate::common::{ensure_dir_exists, read_mnemonic_from_cli}; +use crate::common::read_mnemonic_from_cli; use crate::validator::create::COUNT_FLAG; use crate::wallet::create::STDIN_INPUTS_FLAG; -use crate::{SECRETS_DIR_FLAG, VALIDATOR_DIR_FLAG}; +use crate::SECRETS_DIR_FLAG; use account_utils::eth2_keystore::{keypair_from_secret, Keystore, KeystoreBuilder}; use account_utils::random_password; use clap::{App, Arg, ArgMatches}; +use directory::ensure_dir_exists; +use directory::{parse_path_or_default_with_flag, DEFAULT_SECRET_DIR}; use eth2_wallet::bip39::Seed; use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType, ValidatorKeystores}; use std::path::PathBuf; @@ -48,23 +50,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .takes_value(true) ) - .arg( - Arg::with_name(VALIDATOR_DIR_FLAG) - .long(VALIDATOR_DIR_FLAG) - .value_name("VALIDATOR_DIRECTORY") - .help( - "The path where the validator directories will be created. \ - Defaults to ~/.lighthouse/validators", - ) - .takes_value(true), - ) .arg( Arg::with_name(SECRETS_DIR_FLAG) .long(SECRETS_DIR_FLAG) .value_name("SECRETS_DIR") .help( "The path where the validator keystore passwords will be stored. \ - Defaults to ~/.lighthouse/secrets", + Defaults to ~/.lighthouse/{testnet}/secrets", ) .takes_value(true), ) @@ -84,17 +76,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) } -pub fn cli_run(matches: &ArgMatches) -> Result<(), String> { - let validator_dir = clap_utils::parse_path_with_default_in_home_dir( - matches, - VALIDATOR_DIR_FLAG, - PathBuf::new().join(".lighthouse").join("validators"), - )?; - let secrets_dir = clap_utils::parse_path_with_default_in_home_dir( - matches, - SECRETS_DIR_FLAG, - PathBuf::new().join(".lighthouse").join("secrets"), - )?; +pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), String> { + let secrets_dir = if matches.value_of("datadir").is_some() { + let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; + path.join(DEFAULT_SECRET_DIR) + } else { + parse_path_or_default_with_flag(matches, SECRETS_DIR_FLAG, DEFAULT_SECRET_DIR)? 
+ }; let first_index: u32 = clap_utils::parse_required(matches, FIRST_INDEX_FLAG)?; let count: u32 = clap_utils::parse_required(matches, COUNT_FLAG)?; let mnemonic_path: Option = clap_utils::parse_optional(matches, MNEMONIC_FLAG)?; @@ -136,7 +124,8 @@ pub fn cli_run(matches: &ArgMatches) -> Result<(), String> { let voting_pubkey = keystores.voting.pubkey().to_string(); - ValidatorDirBuilder::new(validator_dir.clone(), secrets_dir.clone()) + ValidatorDirBuilder::new(validator_dir.clone()) + .password_dir(secrets_dir.clone()) .voting_keystore(keystores.voting, voting_password.as_bytes()) .withdrawal_keystore(keystores.withdrawal, withdrawal_password.as_bytes()) .store_withdrawal_keystore(matches.is_present(STORE_WITHDRAW_FLAG)) diff --git a/account_manager/src/validator/slashing_protection.rs b/account_manager/src/validator/slashing_protection.rs new file mode 100644 index 000000000..53a7edd51 --- /dev/null +++ b/account_manager/src/validator/slashing_protection.rs @@ -0,0 +1,137 @@ +use clap::{App, Arg, ArgMatches}; +use environment::Environment; +use slashing_protection::{ + interchange::Interchange, SlashingDatabase, SLASHING_PROTECTION_FILENAME, +}; +use std::fs::File; +use std::path::PathBuf; +use types::EthSpec; + +pub const CMD: &str = "slashing-protection"; +pub const IMPORT_CMD: &str = "import"; +pub const EXPORT_CMD: &str = "export"; + +pub const IMPORT_FILE_ARG: &str = "IMPORT-FILE"; +pub const EXPORT_FILE_ARG: &str = "EXPORT-FILE"; + +pub fn cli_app<'a, 'b>() -> App<'a, 'b> { + App::new(CMD) + .about("Import or export slashing protection data to or from another client") + .subcommand( + App::new(IMPORT_CMD) + .about("Import an interchange file") + .arg( + Arg::with_name(IMPORT_FILE_ARG) + .takes_value(true) + .value_name("FILE") + .help("The slashing protection interchange file to import (.json)"), + ), + ) + .subcommand( + App::new(EXPORT_CMD) + .about("Export an interchange file") + .arg( + Arg::with_name(EXPORT_FILE_ARG) + .takes_value(true) + .value_name("FILE") + .help("The filename to export the interchange file to"), + ), + ) +} + +pub fn cli_run( + matches: &ArgMatches<'_>, + env: Environment, + validator_base_dir: PathBuf, +) -> Result<(), String> { + let slashing_protection_db_path = validator_base_dir.join(SLASHING_PROTECTION_FILENAME); + + let genesis_validators_root = env + .testnet + .and_then(|testnet_config| { + Some( + testnet_config + .genesis_state + .as_ref()? + .genesis_validators_root, + ) + }) + .ok_or_else(|| { + "Unable to get genesis validators root from testnet config, has genesis occurred?" 
+ })?; + + match matches.subcommand() { + (IMPORT_CMD, Some(matches)) => { + let import_filename: PathBuf = clap_utils::parse_required(&matches, IMPORT_FILE_ARG)?; + let import_file = File::open(&import_filename).map_err(|e| { + format!( + "Unable to open import file at {}: {:?}", + import_filename.display(), + e + ) + })?; + + let interchange = Interchange::from_json_reader(&import_file) + .map_err(|e| format!("Error parsing file for import: {:?}", e))?; + + let slashing_protection_database = + SlashingDatabase::open_or_create(&slashing_protection_db_path).map_err(|e| { + format!( + "Unable to open database at {}: {:?}", + slashing_protection_db_path.display(), + e + ) + })?; + + slashing_protection_database + .import_interchange_info(&interchange, genesis_validators_root) + .map_err(|e| { + format!( + "Error during import, no data imported: {:?}\n\ + IT IS NOT SAFE TO START VALIDATING", + e + ) + })?; + + eprintln!("Import completed successfully"); + + Ok(()) + } + (EXPORT_CMD, Some(matches)) => { + let export_filename: PathBuf = clap_utils::parse_required(&matches, EXPORT_FILE_ARG)?; + + if !slashing_protection_db_path.exists() { + return Err(format!( + "No slashing protection database exists at: {}", + slashing_protection_db_path.display() + )); + } + + let slashing_protection_database = SlashingDatabase::open(&slashing_protection_db_path) + .map_err(|e| { + format!( + "Unable to open database at {}: {:?}", + slashing_protection_db_path.display(), + e + ) + })?; + + let interchange = slashing_protection_database + .export_interchange_info(genesis_validators_root) + .map_err(|e| format!("Error during export: {:?}", e))?; + + let output_file = File::create(export_filename) + .map_err(|e| format!("Error creating output file: {:?}", e))?; + + interchange + .write_to(&output_file) + .map_err(|e| format!("Error writing output file: {:?}", e))?; + + eprintln!("Export completed successfully"); + + Ok(()) + } + ("", _) => Err("No subcommand provided, see --help for options".to_string()), + (command, _) => Err(format!("No such subcommand `{}`", command)), + } +} diff --git a/account_manager/src/wallet/create.rs b/account_manager/src/wallet/create.rs index 744910247..1332dad44 100644 --- a/account_manager/src/wallet/create.rs +++ b/account_manager/src/wallet/create.rs @@ -1,5 +1,5 @@ use crate::common::read_wallet_name_from_cli; -use crate::BASE_DIR_FLAG; +use crate::WALLETS_DIR_FLAG; use account_utils::{ is_password_sufficiently_complex, random_password, read_password_from_user, strip_off_newlines, }; @@ -102,7 +102,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) } -pub fn cli_run(matches: &ArgMatches, base_dir: PathBuf) -> Result<(), String> { +pub fn cli_run(matches: &ArgMatches, wallet_base_dir: PathBuf) -> Result<(), String> { let mnemonic_output_path: Option = clap_utils::parse_optional(matches, MNEMONIC_FLAG)?; // Create a new random mnemonic. 
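Note on the new `slashing-protection` subcommands above: import and export are thin wrappers around the interchange API. A minimal round-trip sketch, using only calls that appear verbatim in this diff (`from_json_reader`, `open_or_create`, `import_interchange_info`, `export_interchange_info`, `write_to`); the file names and the `genesis_validators_root` parameter are illustrative, not part of the change:

use slashing_protection::{
    interchange::Interchange, SlashingDatabase, SLASHING_PROTECTION_FILENAME,
};
use std::fs::File;
use std::path::Path;
use types::Hash256;

fn interchange_round_trip(genesis_validators_root: Hash256) -> Result<(), String> {
    // Import: parse the interchange JSON and load it into the database.
    let import_file = File::open("import.json").map_err(|e| format!("{:?}", e))?;
    let interchange =
        Interchange::from_json_reader(&import_file).map_err(|e| format!("{:?}", e))?;
    let db = SlashingDatabase::open_or_create(Path::new(SLASHING_PROTECTION_FILENAME))
        .map_err(|e| format!("{:?}", e))?;
    db.import_interchange_info(&interchange, genesis_validators_root)
        .map_err(|e| format!("{:?}", e))?;

    // Export: dump the database back out as an interchange file.
    let exported = db
        .export_interchange_info(genesis_validators_root)
        .map_err(|e| format!("{:?}", e))?;
    let output = File::create("export.json").map_err(|e| format!("{:?}", e))?;
    exported.write_to(&output).map_err(|e| format!("{:?}", e))
}

The loud failure path in the CLI ("IT IS NOT SAFE TO START VALIDATING") reflects that a failed or partial import leaves the database without the history needed to block slashable messages.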
@@ -114,7 +114,7 @@ pub fn cli_run(matches: &ArgMatches, base_dir: PathBuf) -> Result<(), String> { Language::English, ); - let wallet = create_wallet_from_mnemonic(matches, &base_dir.as_path(), &mnemonic)?; + let wallet = create_wallet_from_mnemonic(matches, &wallet_base_dir.as_path(), &mnemonic)?; if let Some(path) = mnemonic_output_path { create_with_600_perms(&path, mnemonic.phrase().as_bytes()) @@ -147,7 +147,7 @@ pub fn cli_run(matches: &ArgMatches, base_dir: PathBuf) -> Result<(), String> { pub fn create_wallet_from_mnemonic( matches: &ArgMatches, - base_dir: &Path, + wallet_base_dir: &Path, mnemonic: &Mnemonic, ) -> Result { let name: Option = clap_utils::parse_optional(matches, NAME_FLAG)?; @@ -160,8 +160,8 @@ pub fn create_wallet_from_mnemonic( unknown => return Err(format!("--{} {} is not supported", TYPE_FLAG, unknown)), }; - let mgr = WalletManager::open(&base_dir) - .map_err(|e| format!("Unable to open --{}: {:?}", BASE_DIR_FLAG, e))?; + let mgr = WalletManager::open(&wallet_base_dir) + .map_err(|e| format!("Unable to open --{}: {:?}", WALLETS_DIR_FLAG, e))?; let wallet_password: PlainText = match wallet_password_path { Some(path) => { diff --git a/account_manager/src/wallet/list.rs b/account_manager/src/wallet/list.rs index 85096dc5f..5b671b1dc 100644 --- a/account_manager/src/wallet/list.rs +++ b/account_manager/src/wallet/list.rs @@ -1,4 +1,4 @@ -use crate::BASE_DIR_FLAG; +use crate::WALLETS_DIR_FLAG; use clap::App; use eth2_wallet_manager::WalletManager; use std::path::PathBuf; @@ -9,9 +9,9 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { App::new(CMD).about("Lists the names of all wallets.") } -pub fn cli_run(base_dir: PathBuf) -> Result<(), String> { - let mgr = WalletManager::open(&base_dir) - .map_err(|e| format!("Unable to open --{}: {:?}", BASE_DIR_FLAG, e))?; +pub fn cli_run(wallet_base_dir: PathBuf) -> Result<(), String> { + let mgr = WalletManager::open(&wallet_base_dir) + .map_err(|e| format!("Unable to open --{}: {:?}", WALLETS_DIR_FLAG, e))?; for (name, _uuid) in mgr .wallets() diff --git a/account_manager/src/wallet/mod.rs b/account_manager/src/wallet/mod.rs index e8315b77a..4ab957ecb 100644 --- a/account_manager/src/wallet/mod.rs +++ b/account_manager/src/wallet/mod.rs @@ -2,11 +2,10 @@ pub mod create; pub mod list; pub mod recover; -use crate::{ - common::{base_wallet_dir, ensure_dir_exists}, - BASE_DIR_FLAG, -}; +use crate::WALLETS_DIR_FLAG; use clap::{App, Arg, ArgMatches}; +use directory::{ensure_dir_exists, parse_path_or_default_with_flag, DEFAULT_WALLET_DIR}; +use std::path::PathBuf; pub const CMD: &str = "wallet"; @@ -14,11 +13,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { App::new(CMD) .about("Manage wallets, from which validator keys can be derived.") .arg( - Arg::with_name(BASE_DIR_FLAG) - .long(BASE_DIR_FLAG) - .value_name("BASE_DIRECTORY") - .help("A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/wallets") - .takes_value(true), + Arg::with_name(WALLETS_DIR_FLAG) + .long(WALLETS_DIR_FLAG) + .value_name("WALLETS_DIRECTORY") + .help("A path containing Eth2 EIP-2386 wallets. 
Defaults to ~/.lighthouse/{testnet}/wallets") + .takes_value(true) + .conflicts_with("datadir"), ) .subcommand(create::cli_app()) .subcommand(list::cli_app()) @@ -26,13 +26,20 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { } pub fn cli_run(matches: &ArgMatches) -> Result<(), String> { - let base_dir = base_wallet_dir(matches, BASE_DIR_FLAG)?; - ensure_dir_exists(&base_dir)?; + let wallet_base_dir = if matches.value_of("datadir").is_some() { + let path: PathBuf = clap_utils::parse_required(matches, "datadir")?; + path.join(DEFAULT_WALLET_DIR) + } else { + parse_path_or_default_with_flag(matches, WALLETS_DIR_FLAG, DEFAULT_WALLET_DIR)? + }; + ensure_dir_exists(&wallet_base_dir)?; + + eprintln!("wallet-dir path: {:?}", wallet_base_dir); match matches.subcommand() { - (create::CMD, Some(matches)) => create::cli_run(matches, base_dir), - (list::CMD, Some(_)) => list::cli_run(base_dir), - (recover::CMD, Some(matches)) => recover::cli_run(matches, base_dir), + (create::CMD, Some(matches)) => create::cli_run(matches, wallet_base_dir), + (list::CMD, Some(_)) => list::cli_run(wallet_base_dir), + (recover::CMD, Some(matches)) => recover::cli_run(matches, wallet_base_dir), (unknown, _) => Err(format!( "{} does not have a {} command. See --help", CMD, unknown diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index bce3c402e..83b25831b 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "0.2.13" +version = "0.3.0" authors = ["Paul Hauner ", "Age Manning { pub struct VerifiedUnaggregatedAttestation { attestation: Attestation, indexed_attestation: IndexedAttestation, + subnet_id: SubnetId, } /// Custom `Clone` implementation is to avoid the restrictive trait bounds applied by the usual derive @@ -276,6 +274,7 @@ impl Clone for VerifiedUnaggregatedAttestation { Self { attestation: self.attestation.clone(), indexed_attestation: self.indexed_attestation.clone(), + subnet_id: self.subnet_id, } } } @@ -437,6 +436,11 @@ impl VerifiedAggregatedAttestation { pub fn attestation(&self) -> &Attestation { &self.signed_aggregate.message.aggregate } + + /// Returns the underlying `signed_aggregate`. + pub fn aggregate(&self) -> &SignedAggregateAndProof { + &self.signed_aggregate + } } impl VerifiedUnaggregatedAttestation { @@ -447,7 +451,7 @@ impl VerifiedUnaggregatedAttestation { /// verify that it was received on the correct subnet. pub fn verify( attestation: Attestation, - subnet_id: SubnetId, + subnet_id: Option, chain: &BeaconChain, ) -> Result { let attestation_epoch = attestation.data.slot.epoch(T::EthSpec::slots_per_epoch()); @@ -493,13 +497,15 @@ impl VerifiedUnaggregatedAttestation { ) .map_err(BeaconChainError::from)?; - // Ensure the attestation is from the correct subnet. - if subnet_id != expected_subnet_id { - return Err(Error::InvalidSubnetId { - received: subnet_id, - expected: expected_subnet_id, - }); - } + // If a subnet was specified, ensure that subnet is correct. + if let Some(subnet_id) = subnet_id { + if subnet_id != expected_subnet_id { + return Err(Error::InvalidSubnetId { + received: subnet_id, + expected: expected_subnet_id, + }); + } + }; let validator_index = *indexed_attestation .attesting_indices @@ -544,6 +550,7 @@ impl VerifiedUnaggregatedAttestation { Ok(Self { attestation, indexed_attestation, + subnet_id: expected_subnet_id, }) } @@ -552,6 +559,11 @@ impl VerifiedUnaggregatedAttestation { chain.add_to_naive_aggregation_pool(self) } + /// Returns the correct subnet for the attestation. 
+ pub fn subnet_id(&self) -> SubnetId { + self.subnet_id + } + /// Returns the wrapped `attestation`. pub fn attestation(&self) -> &Attestation { &self.attestation @@ -567,6 +579,7 @@ impl VerifiedUnaggregatedAttestation { } /// Returns `Ok(())` if the `attestation.data.beacon_block_root` is known to this chain. +/// You can use this `shuffling_id` to read from the shuffling cache. /// /// The block root may not be known for two reasons: /// @@ -595,6 +608,7 @@ fn verify_head_block_is_known( }); } } + Ok(block) } else { Err(Error::UnknownHeadBlock { @@ -801,7 +815,7 @@ type CommitteesPerSlot = u64; /// Returns the `indexed_attestation` and committee count per slot for the `attestation` using the /// public keys cached in the `chain`. -pub fn obtain_indexed_attestation_and_committees_per_slot( +fn obtain_indexed_attestation_and_committees_per_slot( chain: &BeaconChain, attestation: &Attestation, ) -> Result<(IndexedAttestation, CommitteesPerSlot), Error> { @@ -821,8 +835,8 @@ pub fn obtain_indexed_attestation_and_committees_per_slot( /// /// If the committee for `attestation` isn't found in the `shuffling_cache`, we will read a state /// from disk and then update the `shuffling_cache`. -pub fn map_attestation_committee<'a, T, F, R>( - chain: &'a BeaconChain, +fn map_attestation_committee( + chain: &BeaconChain, attestation: &Attestation, map_fn: F, ) -> Result @@ -840,104 +854,23 @@ where // processing an attestation that does not include our latest finalized block in its chain. // // We do not delay consideration for later, we simply drop the attestation. - let target_block = chain - .fork_choice - .read() - .get_block(&target.root) - .ok_or_else(|| Error::UnknownTargetRoot(target.root))?; - - // Obtain the shuffling cache, timing how long we wait. - let cache_wait_timer = - metrics::start_timer(&metrics::ATTESTATION_PROCESSING_SHUFFLING_CACHE_WAIT_TIMES); - - let mut shuffling_cache = chain - .shuffling_cache - .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .ok_or_else(|| BeaconChainError::AttestationCacheLockTimeout)?; - - metrics::stop_timer(cache_wait_timer); - - if let Some(committee_cache) = shuffling_cache.get(attestation_epoch, target.root) { - let committees_per_slot = committee_cache.committees_per_slot(); - committee_cache - .get_beacon_committee(attestation.data.slot, attestation.data.index) - .map(|committee| map_fn((committee, committees_per_slot))) - .unwrap_or_else(|| { - Err(Error::NoCommitteeForSlotAndIndex { - slot: attestation.data.slot, - index: attestation.data.index, - }) - }) - } else { - // Drop the shuffling cache to avoid holding the lock for any longer than - // required. - drop(shuffling_cache); - - debug!( - chain.log, - "Attestation processing cache miss"; - "attn_epoch" => attestation_epoch.as_u64(), - "target_block_epoch" => target_block.slot.epoch(T::EthSpec::slots_per_epoch()).as_u64(), - ); - - let state_read_timer = - metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_READ_TIMES); - - let mut state = chain - .store - .get_inconsistent_state_for_attestation_verification_only( - &target_block.state_root, - Some(target_block.slot), - ) - .map_err(BeaconChainError::from)? 
- .ok_or_else(|| BeaconChainError::MissingBeaconState(target_block.state_root))?; - - metrics::stop_timer(state_read_timer); - let state_skip_timer = - metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_SKIP_TIMES); - - while state.current_epoch() + 1 < attestation_epoch { - // Here we tell `per_slot_processing` to skip hashing the state and just - // use the zero hash instead. - // - // The state roots are not useful for the shuffling, so there's no need to - // compute them. - per_slot_processing(&mut state, Some(Hash256::zero()), &chain.spec) - .map_err(BeaconChainError::from)?; - } - - metrics::stop_timer(state_skip_timer); - let committee_building_timer = - metrics::start_timer(&metrics::ATTESTATION_PROCESSING_COMMITTEE_BUILDING_TIMES); - - let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), attestation_epoch) - .map_err(BeaconChainError::IncorrectStateForAttestation)?; - - state - .build_committee_cache(relative_epoch, &chain.spec) - .map_err(BeaconChainError::from)?; - - let committee_cache = state - .committee_cache(relative_epoch) - .map_err(BeaconChainError::from)?; - - chain - .shuffling_cache - .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .ok_or_else(|| BeaconChainError::AttestationCacheLockTimeout)? - .insert(attestation_epoch, target.root, committee_cache); - - metrics::stop_timer(committee_building_timer); - - let committees_per_slot = committee_cache.committees_per_slot(); - committee_cache - .get_beacon_committee(attestation.data.slot, attestation.data.index) - .map(|committee| map_fn((committee, committees_per_slot))) - .unwrap_or_else(|| { - Err(Error::NoCommitteeForSlotAndIndex { - slot: attestation.data.slot, - index: attestation.data.index, - }) - }) + if !chain.fork_choice.read().contains_block(&target.root) { + return Err(Error::UnknownTargetRoot(target.root)); } + + chain + .with_committee_cache(target.root, attestation_epoch, |committee_cache| { + let committees_per_slot = committee_cache.committees_per_slot(); + + Ok(committee_cache + .get_beacon_committee(attestation.data.slot, attestation.data.index) + .map(|committee| map_fn((committee, committees_per_slot))) + .unwrap_or_else(|| { + Err(Error::NoCommitteeForSlotAndIndex { + slot: attestation.data.slot, + index: attestation.data.index, + }) + })) + }) + .map_err(BeaconChainError::from)? } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 63aaada9c..1af92df7f 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -20,7 +20,7 @@ use crate::observed_block_producers::ObservedBlockProducers; use crate::observed_operations::{ObservationOutcome, ObservedOperations}; use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::persisted_fork_choice::PersistedForkChoice; -use crate::shuffling_cache::ShufflingCache; +use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; use crate::snapshot_cache::SnapshotCache; use crate::timeout_rw_lock::TimeoutRwLock; use crate::validator_pubkey_cache::ValidatorPubkeyCache; @@ -32,7 +32,6 @@ use futures::channel::mpsc::Sender; use itertools::process_results; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::RwLock; -use regex::bytes::Regex; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use state_processing::{ @@ -68,10 +67,11 @@ pub const ATTESTATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1); /// validator pubkey cache. 
pub const VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1); -pub const BEACON_CHAIN_DB_KEY: [u8; 32] = [0; 32]; -pub const OP_POOL_DB_KEY: [u8; 32] = [0; 32]; -pub const ETH1_CACHE_DB_KEY: [u8; 32] = [0; 32]; -pub const FORK_CHOICE_DB_KEY: [u8; 32] = [0; 32]; +// These keys are all zero because they get stored in different columns, see `DBColumn` type. +pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::zero(); +pub const OP_POOL_DB_KEY: Hash256 = Hash256::zero(); +pub const ETH1_CACHE_DB_KEY: Hash256 = Hash256::zero(); +pub const FORK_CHOICE_DB_KEY: Hash256 = Hash256::zero(); /// The result of a chain segment processing. pub enum ChainSegmentResult { @@ -202,6 +202,8 @@ pub struct BeaconChain { pub(crate) canonical_head: TimeoutRwLock>, /// The root of the genesis block. pub genesis_block_root: Hash256, + /// The root of the genesis state. + pub genesis_state_root: Hash256, /// The root of the list of genesis validators, used during syncing. pub genesis_validators_root: Hash256, @@ -263,7 +265,7 @@ impl BeaconChain { let fork_choice = self.fork_choice.read(); self.store.put_item( - &Hash256::from_slice(&FORK_CHOICE_DB_KEY), + &FORK_CHOICE_DB_KEY, &PersistedForkChoice { fork_choice: fork_choice.to_persisted(), fork_choice_store: fork_choice.fc_store().to_persisted(), @@ -275,8 +277,7 @@ impl BeaconChain { metrics::stop_timer(fork_choice_timer); let head_timer = metrics::start_timer(&metrics::PERSIST_HEAD); - self.store - .put_item(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY), &persisted_head)?; + self.store.put_item(&BEACON_CHAIN_DB_KEY, &persisted_head)?; metrics::stop_timer(head_timer); @@ -293,7 +294,7 @@ impl BeaconChain { let _timer = metrics::start_timer(&metrics::PERSIST_OP_POOL); self.store.put_item( - &Hash256::from_slice(&OP_POOL_DB_KEY), + &OP_POOL_DB_KEY, &PersistedOperationPool::from_operation_pool(&self.op_pool), )?; @@ -305,10 +306,8 @@ impl BeaconChain { let _timer = metrics::start_timer(&metrics::PERSIST_OP_POOL); if let Some(eth1_chain) = self.eth1_chain.as_ref() { - self.store.put_item( - &Hash256::from_slice(Ð1_CACHE_DB_KEY), - ð1_chain.as_ssz_container(), - )?; + self.store + .put_item(Ð1_CACHE_DB_KEY, ð1_chain.as_ssz_container())?; } Ok(()) @@ -463,6 +462,30 @@ impl BeaconChain { } } + /// Returns the block at the given slot, if any. Only returns blocks in the canonical chain. + /// + /// ## Errors + /// + /// May return a database error. + pub fn state_root_at_slot(&self, slot: Slot) -> Result, Error> { + process_results(self.rev_iter_state_roots()?, |mut iter| { + iter.find(|(_, this_slot)| *this_slot == slot) + .map(|(root, _)| root) + }) + } + + /// Returns the block root at the given slot, if any. Only returns roots in the canonical chain. + /// + /// ## Errors + /// + /// May return a database error. + pub fn block_root_at_slot(&self, slot: Slot) -> Result, Error> { + process_results(self.rev_iter_block_roots()?, |mut iter| { + iter.find(|(_, this_slot)| *this_slot == slot) + .map(|(root, _)| root) + }) + } + /// Returns the block at the given root, if any. /// /// ## Errors @@ -510,6 +533,30 @@ impl BeaconChain { f(&head_lock) } + /// Returns the beacon block root at the head of the canonical chain. + /// + /// See `Self::head` for more information. + pub fn head_beacon_block_root(&self) -> Result { + self.with_head(|s| Ok(s.beacon_block_root)) + } + + /// Returns the beacon block at the head of the canonical chain. + /// + /// See `Self::head` for more information. 
+ pub fn head_beacon_block(&self) -> Result, Error> { + self.with_head(|s| Ok(s.beacon_block.clone())) + } + + /// Returns the beacon state at the head of the canonical chain. + /// + /// See `Self::head` for more information. + pub fn head_beacon_state(&self) -> Result, Error> { + self.with_head(|s| { + Ok(s.beacon_state + .clone_with(CloneConfig::committee_caches_only())) + }) + } + /// Returns info representing the head block and state. /// /// A summarized version of `Self::head` that involves less cloning. @@ -743,46 +790,20 @@ impl BeaconChain { .map_err(Into::into) } - /// Returns the attestation slot and committee index for a given validator index. + /// Returns the attestation duties for a given validator index. /// /// Information is read from the current state, so only information from the present and prior /// epoch is available. - pub fn validator_attestation_slot_and_index( + pub fn validator_attestation_duty( &self, validator_index: usize, epoch: Epoch, - ) -> Result, Error> { - let as_epoch = |slot: Slot| slot.epoch(T::EthSpec::slots_per_epoch()); - let head_state = &self.head()?.beacon_state; + ) -> Result, Error> { + let head_block_root = self.head_beacon_block_root()?; - let mut state = if epoch == as_epoch(head_state.slot) { - self.head()?.beacon_state - } else { - // The block proposer shuffling is not affected by the state roots, so we don't need to - // calculate them. - self.state_at_slot( - epoch.start_slot(T::EthSpec::slots_per_epoch()), - StateSkipConfig::WithoutStateRoots, - )? - }; - - state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; - - if as_epoch(state.slot) != epoch { - return Err(Error::InvariantViolated(format!( - "Epochs in consistent in attestation duties lookup: state: {}, requested: {}", - as_epoch(state.slot), - epoch - ))); - } - - if let Some(attestation_duty) = - state.get_attestation_duties(validator_index, RelativeEpoch::Current)? - { - Ok(Some((attestation_duty.slot, attestation_duty.index))) - } else { - Ok(None) - } + self.with_committee_cache(head_block_root, epoch, |committee_cache| { + Ok(committee_cache.get_attestation_duties(validator_index)) + }) } /// Returns an aggregated `Attestation`, if any, that has a matching `attestation.data`. @@ -791,11 +812,22 @@ impl BeaconChain { pub fn get_aggregated_attestation( &self, data: &AttestationData, - ) -> Result>, Error> { + ) -> Option> { + self.naive_aggregation_pool.read().get(data) + } + + /// Returns an aggregated `Attestation`, if any, that has a matching + /// `attestation.data.tree_hash_root()`. + /// + /// The attestation will be obtained from `self.naive_aggregation_pool`. + pub fn get_aggregated_attestation_by_slot_and_root( + &self, + slot: Slot, + attestation_data_root: &Hash256, + ) -> Option> { self.naive_aggregation_pool .read() - .get(data) - .map_err(Into::into) + .get_by_slot_and_root(slot, attestation_data_root) } /// Produce an unaggregated `Attestation` that is valid for the given `slot` and `index`. 
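The new `get_aggregated_attestation_by_slot_and_root` accessor above, together with the `naive_aggregation_pool` changes later in this diff, moves lookups from full `AttestationData` equality to a `(slot, data root)` key. A toy sketch of that two-level keying follows; the `String` standing in for the aggregate `Attestation` and the byte array standing in for `Hash256` are illustrative only:

```rust
use std::collections::HashMap;

// Stands in for Hash256, i.e. the value of `data.tree_hash_root()`.
type DataRoot = [u8; 32];

#[derive(Default)]
struct Pool {
    // Outer key: slot; inner key: tree hash root of the attestation data.
    maps: HashMap<u64, HashMap<DataRoot, String>>,
}

impl Pool {
    fn insert(&mut self, slot: u64, root: DataRoot, aggregate: String) {
        self.maps.entry(slot).or_default().insert(root, aggregate);
    }

    // Mirrors `get_by_slot_and_root`: resolve the slot map first, then the root.
    fn get_by_slot_and_root(&self, slot: u64, root: &DataRoot) -> Option<String> {
        self.maps.get(&slot).and_then(|map| map.get(root).cloned())
    }
}

fn main() {
    let mut pool = Pool::default();
    pool.insert(1, [0; 32], "aggregate".to_string());
    assert!(pool.get_by_slot_and_root(1, &[0; 32]).is_some());
    assert!(pool.get_by_slot_and_root(2, &[0; 32]).is_none());
}
```

Keying by root suits callers that identify an attestation by its slot and data root rather than by shipping the whole `AttestationData` back to the pool.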
@@ -922,7 +954,7 @@ impl BeaconChain { pub fn verify_unaggregated_attestation_for_gossip( &self, attestation: Attestation, - subnet_id: SubnetId, + subnet_id: Option, ) -> Result, AttestationError> { metrics::inc_counter(&metrics::UNAGGREGATED_ATTESTATION_PROCESSING_REQUESTS); let _timer = @@ -1344,11 +1376,7 @@ impl BeaconChain { block: SignedBeaconBlock, ) -> Result, BlockError> { let slot = block.message.slot; - #[allow(clippy::invalid_regex)] - let re = Regex::new("\\p{C}").expect("regex is valid"); - let graffiti_string = - String::from_utf8_lossy(&re.replace_all(&block.message.body.graffiti[..], &b""[..])) - .to_string(); + let graffiti_string = block.message.body.graffiti.as_utf8_lossy(); match GossipVerifiedBlock::new(block, self) { Ok(verified) => { @@ -1473,8 +1501,7 @@ impl BeaconChain { ) -> Result> { let signed_block = fully_verified_block.block; let block_root = fully_verified_block.block_root; - let state = fully_verified_block.state; - let parent_block = fully_verified_block.parent_block; + let mut state = fully_verified_block.state; let current_slot = self.slot()?; let mut ops = fully_verified_block.intermediate_states; @@ -1506,29 +1533,25 @@ impl BeaconChain { .ok_or_else(|| Error::ValidatorPubkeyCacheLockTimeout)? .import_new_pubkeys(&state)?; - // If the imported block is in the previous or current epochs (according to the - // wall-clock), check to see if this is the first block of the epoch. If so, add the - // committee to the shuffling cache. - if state.current_epoch() + 1 >= self.epoch()? - && parent_block.slot().epoch(T::EthSpec::slots_per_epoch()) != state.current_epoch() - { - let mut shuffling_cache = self + // For the current and next epoch of this state, ensure we have the shuffling from this + // block in our cache. + for relative_epoch in &[RelativeEpoch::Current, RelativeEpoch::Next] { + let shuffling_id = ShufflingId::new(block_root, &state, *relative_epoch)?; + + let shuffling_is_cached = self .shuffling_cache - .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) - .ok_or_else(|| Error::AttestationCacheLockTimeout)?; + .try_read_for(ATTESTATION_CACHE_LOCK_TIMEOUT) + .ok_or_else(|| Error::AttestationCacheLockTimeout)? + .contains(&shuffling_id); - let committee_cache = state.committee_cache(RelativeEpoch::Current)?; - - let epoch_start_slot = state - .current_epoch() - .start_slot(T::EthSpec::slots_per_epoch()); - let target_root = if state.slot == epoch_start_slot { - block_root - } else { - *state.get_block_root(epoch_start_slot)? - }; - - shuffling_cache.insert(state.current_epoch(), target_root, committee_cache); + if !shuffling_is_cached { + state.build_committee_cache(*relative_epoch, &self.spec)?; + let committee_cache = state.committee_cache(*relative_epoch)?; + self.shuffling_cache + .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) + .ok_or_else(|| Error::AttestationCacheLockTimeout)? + .insert(shuffling_id, committee_cache); + } } let mut fork_choice = self.fork_choice.write(); @@ -2102,6 +2125,129 @@ impl BeaconChain { Ok(()) } + /// Runs the `map_fn` with the committee cache for `shuffling_epoch` from the chain with head + /// `head_block_root`. + /// + /// It's not necessary that `head_block_root` matches our current view of the chain, it can be + /// any block that is: + /// + /// - Known to us. + /// - The finalized block or a descendant of the finalized block. + /// + /// It would be quite common for attestation verification operations to use a `head_block_root` + /// that differs from our view of the head. 
+ /// + /// ## Important + /// + /// This function is **not** suitable for determining proposer duties. + /// + /// ## Notes + /// + /// This function exists in this odd "map" pattern because efficiently obtaining a committee + /// can be complex. It might involve reading straight from the `beacon_chain.shuffling_cache` + /// or it might involve reading it from a state from the DB. Due to the complexities of + /// `RwLock`s on the shuffling cache, a simple `Cow` isn't suitable here. + /// + /// If the committee for `(head_block_root, shuffling_epoch)` isn't found in the + /// `shuffling_cache`, we will read a state from disk and then update the `shuffling_cache`. + pub(crate) fn with_committee_cache( + &self, + head_block_root: Hash256, + shuffling_epoch: Epoch, + map_fn: F, + ) -> Result + where + F: Fn(&CommitteeCache) -> Result, + { + let head_block = self + .fork_choice + .read() + .get_block(&head_block_root) + .ok_or_else(|| Error::MissingBeaconBlock(head_block_root))?; + + let shuffling_id = BlockShufflingIds { + current: head_block.current_epoch_shuffling_id.clone(), + next: head_block.next_epoch_shuffling_id.clone(), + block_root: head_block.root, + } + .id_for_epoch(shuffling_epoch) + .ok_or_else(|| Error::InvalidShufflingId { + shuffling_epoch, + head_block_epoch: head_block.slot.epoch(T::EthSpec::slots_per_epoch()), + })?; + + // Obtain the shuffling cache, timing how long we wait. + let cache_wait_timer = + metrics::start_timer(&metrics::ATTESTATION_PROCESSING_SHUFFLING_CACHE_WAIT_TIMES); + + let mut shuffling_cache = self + .shuffling_cache + .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) + .ok_or_else(|| Error::AttestationCacheLockTimeout)?; + + metrics::stop_timer(cache_wait_timer); + + if let Some(committee_cache) = shuffling_cache.get(&shuffling_id) { + map_fn(committee_cache) + } else { + // Drop the shuffling cache to avoid holding the lock for any longer than + // required. + drop(shuffling_cache); + + debug!( + self.log, + "Committee cache miss"; + "shuffling_epoch" => shuffling_epoch.as_u64(), + "head_block_root" => head_block_root.to_string(), + ); + + let state_read_timer = + metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_READ_TIMES); + + let mut state = self + .store + .get_inconsistent_state_for_attestation_verification_only( + &head_block.state_root, + Some(head_block.slot), + )? + .ok_or_else(|| Error::MissingBeaconState(head_block.state_root))?; + + metrics::stop_timer(state_read_timer); + let state_skip_timer = + metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_SKIP_TIMES); + + while state.current_epoch() + 1 < shuffling_epoch { + // Here we tell `per_slot_processing` to skip hashing the state and just + // use the zero hash instead. + // + // The state roots are not useful for the shuffling, so there's no need to + // compute them. + per_slot_processing(&mut state, Some(Hash256::zero()), &self.spec) + .map_err(Error::from)?; + } + + metrics::stop_timer(state_skip_timer); + let committee_building_timer = + metrics::start_timer(&metrics::ATTESTATION_PROCESSING_COMMITTEE_BUILDING_TIMES); + + let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), shuffling_epoch) + .map_err(Error::IncorrectStateForAttestation)?; + + state.build_committee_cache(relative_epoch, &self.spec)?; + + let committee_cache = state.committee_cache(relative_epoch)?; + + self.shuffling_cache + .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) + .ok_or_else(|| Error::AttestationCacheLockTimeout)? 
+ .insert(shuffling_id, committee_cache); + + metrics::stop_timer(committee_building_timer); + + map_fn(&committee_cache) + } + } + /// Returns `true` if the given block root has not been processed. pub fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result { Ok(!self diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index f83f0d6cb..a251ced2d 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -232,7 +232,7 @@ where .ok_or_else(|| "get_persisted_eth1_backend requires a store.".to_string())?; store - .get_item::(&Hash256::from_slice(&ETH1_CACHE_DB_KEY)) + .get_item::(&ETH1_CACHE_DB_KEY) .map_err(|e| format!("DB error whilst reading eth1 cache: {:?}", e)) } @@ -244,7 +244,7 @@ where .ok_or_else(|| "store_contains_beacon_chain requires a store.".to_string())?; Ok(store - .get_item::(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY)) + .get_item::(&BEACON_CHAIN_DB_KEY) .map_err(|e| format!("DB error when reading persisted beacon chain: {:?}", e))? .is_some()) } @@ -275,7 +275,7 @@ where .ok_or_else(|| "resume_from_db requires a store.".to_string())?; let chain = store - .get_item::(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY)) + .get_item::(&BEACON_CHAIN_DB_KEY) .map_err(|e| format!("DB error when reading persisted beacon chain: {:?}", e))? .ok_or_else(|| { "No persisted beacon chain found in store. Try purging the beacon chain database." @@ -283,7 +283,7 @@ where })?; let persisted_fork_choice = store - .get_item::(&Hash256::from_slice(&FORK_CHOICE_DB_KEY)) + .get_item::(&FORK_CHOICE_DB_KEY) .map_err(|e| format!("DB error when reading persisted fork choice: {:?}", e))? .ok_or_else(|| "No persisted fork choice present in database.".to_string())?; @@ -310,7 +310,7 @@ where self.op_pool = Some( store - .get_item::>(&Hash256::from_slice(&OP_POOL_DB_KEY)) + .get_item::>(&OP_POOL_DB_KEY) .map_err(|e| format!("DB error whilst reading persisted op pool: {:?}", e))?
.map(PersistedOperationPool::into_operation_pool) .unwrap_or_else(OperationPool::new), @@ -377,8 +377,13 @@ where let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis); - let fork_choice = ForkChoice::from_genesis(fc_store, &genesis.beacon_block.message) - .map_err(|e| format!("Unable to build initialize ForkChoice: {:?}", e))?; + let fork_choice = ForkChoice::from_genesis( + fc_store, + genesis.beacon_block_root, + &genesis.beacon_block.message, + &genesis.beacon_state, + ) + .map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?; self.fork_choice = Some(fork_choice); self.genesis_time = Some(genesis.beacon_state.genesis_time); @@ -570,6 +575,7 @@ where observed_attester_slashings: <_>::default(), eth1_chain: self.eth1_chain, genesis_validators_root: canonical_head.beacon_state.genesis_validators_root, + genesis_state_root: canonical_head.beacon_state_root, canonical_head: TimeoutRwLock::new(canonical_head.clone()), genesis_block_root, fork_choice: RwLock::new(fork_choice), diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 869250d62..4153ac702 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -84,6 +84,10 @@ pub enum BeaconChainError { ObservedBlockProducersError(ObservedBlockProducersError), PruningError(PruningError), ArithError(ArithError), + InvalidShufflingId { + shuffling_epoch: Epoch, + head_block_epoch: Epoch, + }, WeakSubjectivtyVerificationFailure, WeakSubjectivtyShutdownError(TrySendError<&'static str>), } diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index b2477c3b1..ea455064e 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -1,5 +1,4 @@ use crate::metrics; -use environment::TaskExecutor; use eth1::{Config as Eth1Config, Eth1Block, Service as HttpService}; use eth2_hashing::hash; use slog::{debug, error, trace, Logger}; @@ -11,6 +10,7 @@ use std::collections::HashMap; use std::iter::DoubleEndedIterator; use std::marker::PhantomData; use store::{DBColumn, Error as StoreError, StoreItem}; +use task_executor::TaskExecutor; use types::{ BeaconState, BeaconStateError, ChainSpec, Deposit, Eth1Data, EthSpec, Hash256, Slot, Unsigned, DEPOSIT_TREE_DEPTH, diff --git a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs index c561141a1..247f613a9 100644 --- a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs +++ b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs @@ -1,7 +1,9 @@ use crate::metrics; use std::collections::HashMap; -use types::{Attestation, AttestationData, EthSpec, Slot}; +use tree_hash::TreeHash; +use types::{Attestation, AttestationData, EthSpec, Hash256, Slot}; +type AttestationDataRoot = Hash256; /// The number of slots that will be stored in the pool. /// /// For example, if `SLOTS_RETAINED == 3` and the pool is pruned at slot `6`, then all attestations @@ -53,7 +55,7 @@ pub enum Error { /// A collection of `Attestation` objects, keyed by their `attestation.data`. Enforces that all /// `attestation` are from the same slot.
struct AggregatedAttestationMap { - map: HashMap>, + map: HashMap>, } impl AggregatedAttestationMap { @@ -87,7 +89,9 @@ impl AggregatedAttestationMap { return Err(Error::MoreThanOneAggregationBitSet(set_bits.len())); } - if let Some(existing_attestation) = self.map.get_mut(&a.data) { + let attestation_data_root = a.data.tree_hash_root(); + + if let Some(existing_attestation) = self.map.get_mut(&attestation_data_root) { if existing_attestation .aggregation_bits .get(committee_index) @@ -107,7 +111,7 @@ impl AggregatedAttestationMap { )); } - self.map.insert(a.data.clone(), a.clone()); + self.map.insert(attestation_data_root, a.clone()); Ok(InsertOutcome::NewAttestationData { committee_index }) } } @@ -115,8 +119,13 @@ impl AggregatedAttestationMap { /// Returns an aggregated `Attestation` with the given `data`, if any. /// /// The given `a.data.slot` must match the slot that `self` was initialized with. - pub fn get(&self, data: &AttestationData) -> Result>, Error> { - Ok(self.map.get(data).cloned()) + pub fn get(&self, data: &AttestationData) -> Option> { + self.map.get(&data.tree_hash_root()).cloned() + } + + /// Returns an aggregated `Attestation` with the given `root`, if any. + pub fn get_by_root(&self, root: &AttestationDataRoot) -> Option<&Attestation> { + self.map.get(root) } /// Iterate all attestations in `self`. @@ -220,12 +229,19 @@ impl NaiveAggregationPool { } /// Returns an aggregated `Attestation` with the given `data`, if any. - pub fn get(&self, data: &AttestationData) -> Result>, Error> { + pub fn get(&self, data: &AttestationData) -> Option> { + self.maps.get(&data.slot).and_then(|map| map.get(data)) + } + + /// Returns an aggregated `Attestation` matching the given `slot` and attestation data `root`, if any. + pub fn get_by_slot_and_root( + &self, + slot: Slot, + root: &AttestationDataRoot, + ) -> Option> { self.maps - .iter() - .find(|(slot, _map)| **slot == data.slot) - .map(|(_slot, map)| map.get(data)) - .unwrap_or_else(|| Ok(None)) + .get(&slot) + .and_then(|map| map.get_by_root(root).cloned()) } /// Iterate all attestations in all slots of `self`. @@ -338,8 +354,7 @@ mod tests { let retrieved = pool .get(&a.data) - .expect("should not error while getting attestation") - .expect("should get an attestation"); + .expect("should not error while getting attestation"); assert_eq!( retrieved, a, "retrieved attestation should equal the one inserted" ); @@ -378,8 +393,7 @@ let retrieved = pool .get(&a_0.data) - .expect("should not error while getting attestation") - .expect("should get an attestation"); + .expect("should not error while getting attestation"); let mut a_01 = a_0.clone(); a_01.aggregate(&a_1); @@ -408,8 +422,7 @@ assert_eq!( pool.get(&a_0.data) - .expect("should not error while getting attestation") - .expect("should get an attestation"), + .expect("should not error while getting attestation"), retrieved, "should not have aggregated different attestation data" ); diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index d8b6e8706..b76adf05e 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -1,6 +1,6 @@ use crate::metrics; use lru::LruCache; -use types::{beacon_state::CommitteeCache, Epoch, Hash256}; +use types::{beacon_state::CommitteeCache, Epoch, Hash256, ShufflingId}; /// The size of the LRU cache that stores committee caches for quicker verification.
/// @@ -14,7 +14,7 @@ const CACHE_SIZE: usize = 16; /// It has been named `ShufflingCache` because `CommitteeCacheCache` is a bit weird and looks like /// a find/replace error. pub struct ShufflingCache { - cache: LruCache<(Epoch, Hash256), CommitteeCache>, + cache: LruCache, } impl ShufflingCache { @@ -24,8 +24,8 @@ impl ShufflingCache { } } - pub fn get(&mut self, epoch: Epoch, root: Hash256) -> Option<&CommitteeCache> { - let opt = self.cache.get(&(epoch, root)); + pub fn get(&mut self, key: &ShufflingId) -> Option<&CommitteeCache> { + let opt = self.cache.get(key); if opt.is_some() { metrics::inc_counter(&metrics::SHUFFLING_CACHE_HITS); @@ -36,11 +36,37 @@ impl ShufflingCache { opt } - pub fn insert(&mut self, epoch: Epoch, root: Hash256, committee_cache: &CommitteeCache) { - let key = (epoch, root); + pub fn contains(&self, key: &ShufflingId) -> bool { + self.cache.contains(key) + } + pub fn insert(&mut self, key: ShufflingId, committee_cache: &CommitteeCache) { if !self.cache.contains(&key) { self.cache.put(key, committee_cache.clone()); } } } + +/// Contains the shuffling IDs for a beacon block. +pub struct BlockShufflingIds { + pub current: ShufflingId, + pub next: ShufflingId, + pub block_root: Hash256, +} + +impl BlockShufflingIds { + /// Returns the shuffling ID for the given epoch. + /// + /// Returns `None` if `epoch` is prior to `self.current.shuffling_epoch`. + pub fn id_for_epoch(&self, epoch: Epoch) -> Option { + if epoch == self.current.shuffling_epoch { + Some(self.current.clone()) + } else if epoch == self.next.shuffling_epoch { + Some(self.next.clone()) + } else if epoch > self.next.shuffling_epoch { + Some(ShufflingId::from_components(epoch, self.block_root)) + } else { + None + } + } +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index abf233027..17dff57d1 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -27,9 +27,11 @@ use store::{config::StoreConfig, BlockReplay, HotColdDB, ItemStore, LevelDB, Mem use tempfile::{tempdir, TempDir}; use tree_hash::TreeHash; use types::{ - AggregateSignature, Attestation, BeaconState, BeaconStateHash, ChainSpec, Domain, Epoch, - EthSpec, Hash256, Keypair, SelectionProof, SignedAggregateAndProof, SignedBeaconBlock, - SignedBeaconBlockHash, SignedRoot, Slot, SubnetId, + AggregateSignature, Attestation, AttestationData, AttesterSlashing, BeaconState, + BeaconStateHash, ChainSpec, Checkpoint, Domain, Epoch, EthSpec, Hash256, IndexedAttestation, + Keypair, ProposerSlashing, SelectionProof, SignedAggregateAndProof, SignedBeaconBlock, + SignedBeaconBlockHash, SignedRoot, SignedVoluntaryExit, Slot, SubnetId, VariableList, + VoluntaryExit, }; pub use types::test_utils::generate_deterministic_keypairs; @@ -131,7 +133,7 @@ impl BeaconChainHarness> { let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter); let drain = slog_term::FullFormat::new(decorator).build(); - let debug_level = slog::LevelFilter::new(drain, slog::Level::Debug); + let debug_level = slog::LevelFilter::new(drain, slog::Level::Critical); let log = slog::Logger::root(std::sync::Mutex::new(debug_level).fuse(), o!()); let config = StoreConfig::default(); @@ -216,7 +218,7 @@ impl BeaconChainHarness> { let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter); let drain = slog_term::FullFormat::new(decorator).build(); - let debug_level = slog::LevelFilter::new(drain, slog::Level::Debug); + let debug_level = 
slog::LevelFilter::new(drain, slog::Level::Critical); let log = slog::Logger::root(std::sync::Mutex::new(debug_level).fuse(), o!()); let (shutdown_tx, shutdown_receiver) = futures::channel::mpsc::channel(1); @@ -265,7 +267,7 @@ impl BeaconChainHarness> { let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter); let drain = slog_term::FullFormat::new(decorator).build(); - let debug_level = slog::LevelFilter::new(drain, slog::Level::Debug); + let debug_level = slog::LevelFilter::new(drain, slog::Level::Critical); let log = slog::Logger::root(std::sync::Mutex::new(debug_level).fuse(), o!()); let (shutdown_tx, shutdown_receiver) = futures::channel::mpsc::channel(1); @@ -430,7 +432,7 @@ where // If we produce two blocks for the same slot, they hash up to the same value and // BeaconChain errors out with `BlockIsAlreadyKnown`. Vary the graffiti so that we produce // different blocks each time. - self.chain.set_graffiti(self.rng.gen::<[u8; 32]>()); + self.chain.set_graffiti(self.rng.gen::<[u8; 32]>().into()); let randao_reveal = { let epoch = slot.epoch(E::slots_per_epoch()); @@ -475,8 +477,8 @@ where let committee_count = state.get_committee_count_at_slot(state.slot).unwrap(); state - .get_beacon_committees_at_slot(state.slot) - .unwrap() + .get_beacon_committees_at_slot(attestation_slot) + .expect("should get committees") .iter() .map(|bc| { bc.committee @@ -603,7 +605,6 @@ where let aggregate = self .chain .get_aggregated_attestation(&attestation.data) - .unwrap() .unwrap_or_else(|| { committee_attestations.iter().skip(1).fold(attestation.clone(), |mut agg, (att, _)| { agg.aggregate(att); @@ -634,6 +635,94 @@ where .collect() } + pub fn make_attester_slashing(&self, validator_indices: Vec) -> AttesterSlashing { + let mut attestation_1 = IndexedAttestation { + attesting_indices: VariableList::new(validator_indices).unwrap(), + data: AttestationData { + slot: Slot::new(0), + index: 0, + beacon_block_root: Hash256::zero(), + target: Checkpoint { + root: Hash256::zero(), + epoch: Epoch::new(0), + }, + source: Checkpoint { + root: Hash256::zero(), + epoch: Epoch::new(0), + }, + }, + signature: AggregateSignature::infinity(), + }; + + let mut attestation_2 = attestation_1.clone(); + attestation_2.data.index += 1; + + for attestation in &mut [&mut attestation_1, &mut attestation_2] { + for &i in &attestation.attesting_indices { + let sk = &self.validators_keypairs[i as usize].sk; + + let fork = self.chain.head_info().unwrap().fork; + let genesis_validators_root = self.chain.genesis_validators_root; + + let domain = self.chain.spec.get_domain( + attestation.data.target.epoch, + Domain::BeaconAttester, + &fork, + genesis_validators_root, + ); + let message = attestation.data.signing_root(domain); + + attestation.signature.add_assign(&sk.sign(message)); + } + } + + AttesterSlashing { + attestation_1, + attestation_2, + } + } + + pub fn make_proposer_slashing(&self, validator_index: u64) -> ProposerSlashing { + let mut block_header_1 = self + .chain + .head_beacon_block() + .unwrap() + .message + .block_header(); + block_header_1.proposer_index = validator_index; + + let mut block_header_2 = block_header_1.clone(); + block_header_2.state_root = Hash256::zero(); + + let sk = &self.validators_keypairs[validator_index as usize].sk; + let fork = self.chain.head_info().unwrap().fork; + let genesis_validators_root = self.chain.genesis_validators_root; + + let mut signed_block_headers = vec![block_header_1, block_header_2] + .into_iter() + .map(|block_header| { + block_header.sign::(&sk, 
&fork, genesis_validators_root, &self.chain.spec) + }) + .collect::>(); + + ProposerSlashing { + signed_header_2: signed_block_headers.remove(1), + signed_header_1: signed_block_headers.remove(0), + } + } + + pub fn make_voluntary_exit(&self, validator_index: u64, epoch: Epoch) -> SignedVoluntaryExit { + let sk = &self.validators_keypairs[validator_index as usize].sk; + let fork = self.chain.head_info().unwrap().fork; + let genesis_validators_root = self.chain.genesis_validators_root; + + VoluntaryExit { + epoch, + validator_index, + } + .sign(sk, &fork, genesis_validators_root, &self.chain.spec) + } + pub fn process_block(&self, slot: Slot, block: SignedBeaconBlock) -> SignedBeaconBlockHash { assert_eq!(self.chain.slot().unwrap(), slot); let block_hash: SignedBeaconBlockHash = self.chain.process_block(block).unwrap().into(); @@ -656,7 +745,10 @@ where for (unaggregated_attestations, maybe_signed_aggregate) in attestations.into_iter() { for (attestation, subnet_id) in unaggregated_attestations { self.chain - .verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id) + .verify_unaggregated_attestation_for_gossip( + attestation.clone(), + Some(subnet_id), + ) .unwrap() .add_to_pool(&self.chain) .unwrap(); diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 8202fee0e..4a8a071cc 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -585,7 +585,7 @@ fn unaggregated_gossip_verification() { matches!( harness .chain - .verify_unaggregated_attestation_for_gossip($attn_getter, $subnet_getter) + .verify_unaggregated_attestation_for_gossip($attn_getter, Some($subnet_getter)) .err() .expect(&format!( "{} should error during verify_unaggregated_attestation_for_gossip", @@ -852,7 +852,7 @@ fn unaggregated_gossip_verification() { harness .chain - .verify_unaggregated_attestation_for_gossip(valid_attestation.clone(), subnet_id) + .verify_unaggregated_attestation_for_gossip(valid_attestation.clone(), Some(subnet_id)) .expect("valid attestation should be verified"); /* @@ -941,6 +941,6 @@ fn attestation_that_skips_epochs() { harness .chain - .verify_unaggregated_attestation_for_gossip(attestation, subnet_id) + .verify_unaggregated_attestation_for_gossip(attestation, Some(subnet_id)) .expect("should gossip verify attestation that skips slots"); } diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index caa2f9d6c..e9006a626 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -326,7 +326,7 @@ fn epoch_boundary_state_attestation_processing() { let res = harness .chain - .verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id); + .verify_unaggregated_attestation_for_gossip(attestation.clone(), Some(subnet_id)); let current_slot = harness.chain.slot().expect("should get slot"); let expected_attestation_slot = attestation.data.slot; diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 12f1c4364..cd8b56478 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -357,11 +357,10 @@ fn roundtrip_operation_pool() { .persist_op_pool() .expect("should persist op pool"); - let key = Hash256::from_slice(&OP_POOL_DB_KEY); let restored_op_pool = harness .chain .store - .get_item::>(&key) + 
.get_item::>(&OP_POOL_DB_KEY) .expect("should read db") .expect("should find op pool") .into_operation_pool(); @@ -463,7 +462,7 @@ fn attestations_with_increasing_slots() { for (attestation, subnet_id) in attestations.into_iter().flatten() { let res = harness .chain - .verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id); + .verify_unaggregated_attestation_for_gossip(attestation.clone(), Some(subnet_id)); let current_slot = harness.chain.slot().expect("should get slot"); let expected_attestation_slot = attestation.data.slot; diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 8af3bccf4..8114761aa 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Sigma Prime "] edition = "2018" [dev-dependencies] -sloggers = "1.0.0" +sloggers = "1.0.1" toml = "0.5.6" [dependencies] @@ -14,30 +14,33 @@ store = { path = "../store" } network = { path = "../network" } timer = { path = "../timer" } eth2_libp2p = { path = "../eth2_libp2p" } -rest_api = { path = "../rest_api" } parking_lot = "0.11.0" websocket_server = { path = "../websocket_server" } -prometheus = "0.9.0" +prometheus = "0.10.0" types = { path = "../../consensus/types" } -tree_hash = "0.1.0" +tree_hash = "0.1.1" eth2_config = { path = "../../common/eth2_config" } slot_clock = { path = "../../common/slot_clock" } -serde = "1.0.110" -serde_derive = "1.0.110" -error-chain = "0.12.2" -serde_yaml = "0.8.11" +serde = "1.0.116" +serde_derive = "1.0.116" +error-chain = "0.12.4" +serde_yaml = "0.8.13" slog = { version = "2.5.2", features = ["max_level_trace"] } slog-async = "2.5.0" tokio = "0.2.22" -dirs = "2.0.2" +dirs = "3.0.1" futures = "0.3.5" -reqwest = { version = "0.10.4", features = ["native-tls-vendored"] } +reqwest = { version = "0.10.8", features = ["native-tls-vendored"] } url = "2.1.1" eth1 = { path = "../eth1" } genesis = { path = "../genesis" } +task_executor = { path = "../../common/task_executor" } environment = { path = "../../lighthouse/environment" } eth2_ssz = "0.1.2" lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } -time = "0.2.16" +time = "0.2.22" bus = "2.2.3" +directory = {path = "../../common/directory"} +http_api = { path = "../http_api" } +http_metrics = { path = "../http_metrics" } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 3c8026f5b..97d68f407 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -13,15 +13,14 @@ use beacon_chain::{ use bus::Bus; use environment::RuntimeContext; use eth1::{Config as Eth1Config, Service as Eth1Service}; -use eth2_config::Eth2Config; use eth2_libp2p::NetworkGlobals; use genesis::{interop_genesis_state, Eth1GenesisService}; use network::{NetworkConfig, NetworkMessage, NetworkService}; use parking_lot::Mutex; -use slog::info; +use slog::{debug, info}; use ssz::Decode; use std::net::SocketAddr; -use std::path::Path; +use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; use timer::spawn_timer; @@ -61,7 +60,10 @@ pub struct ClientBuilder { event_handler: Option, network_globals: Option>>, network_send: Option>>, - http_listen_addr: Option, + db_path: Option, + freezer_db_path: Option, + http_api_config: http_api::Config, + http_metrics_config: http_metrics::Config, websocket_listen_addr: Option, eth_spec_instance: T::EthSpec, } @@ -103,7 +105,10 @@ where event_handler: None, network_globals: None, network_send: None, - http_listen_addr: None, + 
db_path: None, + freezer_db_path: None, + http_api_config: <_>::default(), + http_metrics_config: <_>::default(), websocket_listen_addr: None, eth_spec_instance, } @@ -280,55 +285,16 @@ where Ok(self) } - /// Immediately starts the beacon node REST API http server. - pub fn http_server( - mut self, - client_config: &ClientConfig, - eth2_config: &Eth2Config, - events: Arc>>, - ) -> Result { - let beacon_chain = self - .beacon_chain - .clone() - .ok_or_else(|| "http_server requires a beacon chain")?; - let context = self - .runtime_context - .as_ref() - .ok_or_else(|| "http_server requires a runtime_context")? - .service_context("http".into()); - let network_globals = self - .network_globals - .clone() - .ok_or_else(|| "http_server requires a libp2p network")?; - let network_send = self - .network_send - .clone() - .ok_or_else(|| "http_server requires a libp2p network sender")?; + /// Provides configuration for the HTTP API. + pub fn http_api_config(mut self, config: http_api::Config) -> Self { + self.http_api_config = config; + self + } - let network_info = rest_api::NetworkInfo { - network_globals, - network_chan: network_send, - }; - - let listening_addr = rest_api::start_server( - context.executor, - &client_config.rest_api, - beacon_chain, - network_info, - client_config - .create_db_path() - .map_err(|_| "unable to read data dir")?, - client_config - .create_freezer_db_path() - .map_err(|_| "unable to read freezer DB dir")?, - eth2_config.clone(), - events, - ) - .map_err(|e| format!("Failed to start HTTP API: {:?}", e))?; - - self.http_listen_addr = Some(listening_addr); - - Ok(self) + /// Provides configuration for the HTTP server that serves Prometheus metrics. + pub fn http_metrics_config(mut self, config: http_metrics::Config) -> Self { + self.http_metrics_config = config; + self } /// Immediately starts the service that periodically logs information each slot. @@ -367,25 +333,85 @@ where /// specified. /// /// If type inference errors are being raised, see the comment on the definition of `Self`. 
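In the hunk that follows, `build` changes from returning `Client<…>` directly to returning `Result<Client<…>, String>`, and the HTTP API and metrics servers are started only when their configs enable them. A reduced sketch of that shape is below; the names, the `String` stand-in for the beacon chain handle, and the placeholder socket address are illustrative, not the real Lighthouse types:

```rust
use std::net::SocketAddr;

#[derive(Default)]
struct ClientBuilder {
    http_api_enabled: bool,
    beacon_chain: Option<String>, // stands in for the real beacon chain handle
}

struct Client {
    http_api_listen_addr: Option<SocketAddr>,
}

impl ClientBuilder {
    // Setters take and return `self` so calls chain, mirroring the diff.
    fn http_api_enabled(mut self, enabled: bool) -> Self {
        self.http_api_enabled = enabled;
        self
    }

    fn beacon_chain(mut self, chain: String) -> Self {
        self.beacon_chain = Some(chain);
        self
    }

    // `build` validates required parts, starts only the enabled optional
    // services and reports failures as `Err(String)` instead of panicking.
    fn build(self) -> Result<Client, String> {
        let _chain = self
            .beacon_chain
            .ok_or_else(|| "build requires a beacon chain".to_string())?;

        let http_api_listen_addr = if self.http_api_enabled {
            // The real build starts the server and records its bound address.
            "127.0.0.1:5052".parse().ok()
        } else {
            None
        };

        Ok(Client { http_api_listen_addr })
    }
}

fn main() {
    let client = ClientBuilder::default()
        .beacon_chain("chain".into())
        .http_api_enabled(true)
        .build()
        .unwrap();
    assert!(client.http_api_listen_addr.is_some());
}
```

Surfacing missing components as `Err(String)` matches the builder's existing `ok_or_else` style elsewhere in this file.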
+ #[allow(clippy::type_complexity)] pub fn build( self, - ) -> Client< - Witness< - TStoreMigrator, - TSlotClock, - TEth1Backend, - TEthSpec, - TEventHandler, - THotStore, - TColdStore, + ) -> Result< + Client< + Witness< + TStoreMigrator, + TSlotClock, + TEth1Backend, + TEthSpec, + TEventHandler, + THotStore, + TColdStore, + >, >, + String, > { - Client { + let runtime_context = self + .runtime_context + .as_ref() + .ok_or_else(|| "build requires a runtime context".to_string())?; + let log = runtime_context.log().clone(); + + let http_api_listen_addr = if self.http_api_config.enabled { + let ctx = Arc::new(http_api::Context { + config: self.http_api_config.clone(), + chain: self.beacon_chain.clone(), + network_tx: self.network_send.clone(), + network_globals: self.network_globals.clone(), + log: log.clone(), + }); + + let exit = runtime_context.executor.exit(); + + let (listen_addr, server) = http_api::serve(ctx, exit) + .map_err(|e| format!("Unable to start HTTP API server: {:?}", e))?; + + runtime_context + .clone() + .executor + .spawn_without_exit(async move { server.await }, "http-api"); + + Some(listen_addr) + } else { + info!(log, "HTTP server is disabled"); + None + }; + + let http_metrics_listen_addr = if self.http_metrics_config.enabled { + let ctx = Arc::new(http_metrics::Context { + config: self.http_metrics_config.clone(), + chain: self.beacon_chain.clone(), + db_path: self.db_path.clone(), + freezer_db_path: self.freezer_db_path.clone(), + log: log.clone(), + }); + + let exit = runtime_context.executor.exit(); + + let (listen_addr, server) = http_metrics::serve(ctx, exit) + .map_err(|e| format!("Unable to start HTTP metrics server: {:?}", e))?; + + runtime_context + .executor + .spawn_without_exit(async move { server.await }, "http-metrics"); + + Some(listen_addr) + } else { + debug!(log, "Metrics server is disabled"); + None + }; + + Ok(Client { beacon_chain: self.beacon_chain, network_globals: self.network_globals, - http_listen_addr: self.http_listen_addr, + http_api_listen_addr, + http_metrics_listen_addr, websocket_listen_addr: self.websocket_listen_addr, - } + }) } } @@ -527,6 +553,9 @@ where .clone() .ok_or_else(|| "disk_store requires a chain spec".to_string())?; + self.db_path = Some(hot_path.into()); + self.freezer_db_path = Some(cold_path.into()); + let store = HotColdDB::open(hot_path, cold_path, config, spec, context.log().clone()) .map_err(|e| format!("Unable to open database: {:?}", e))?; self.store = Some(Arc::new(store)); diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 19088e785..0cf90d6b4 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,11 +1,10 @@ +use directory::DEFAULT_ROOT_DIR; use network::NetworkConfig; use serde_derive::{Deserialize, Serialize}; use std::fs; use std::path::PathBuf; use types::Graffiti; -pub const DEFAULT_DATADIR: &str = ".lighthouse"; - /// The number initial validators when starting the `Minimal`.
const TESTNET_SPEC_CONSTANTS: &str = "minimal"; @@ -63,16 +62,17 @@ pub struct Config { pub genesis: ClientGenesis, pub store: store::StoreConfig, pub network: network::NetworkConfig, - pub rest_api: rest_api::Config, pub chain: beacon_chain::ChainConfig, pub websocket_server: websocket_server::Config, pub eth1: eth1::Config, + pub http_api: http_api::Config, + pub http_metrics: http_metrics::Config, } impl Default for Config { fn default() -> Self { Self { - data_dir: PathBuf::from(DEFAULT_DATADIR), + data_dir: PathBuf::from(DEFAULT_ROOT_DIR), db_name: "chain_db".to_string(), freezer_db_path: None, log_file: PathBuf::from(""), @@ -80,7 +80,6 @@ impl Default for Config { store: <_>::default(), network: NetworkConfig::default(), chain: <_>::default(), - rest_api: <_>::default(), websocket_server: <_>::default(), spec_constants: TESTNET_SPEC_CONSTANTS.into(), dummy_eth1_backend: false, @@ -88,6 +87,8 @@ impl Default for Config { eth1: <_>::default(), disabled_forks: Vec::new(), graffiti: Graffiti::default(), + http_api: <_>::default(), + http_metrics: <_>::default(), } } } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index da670ff13..6b721aee9 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -23,7 +23,10 @@ pub use eth2_config::Eth2Config; pub struct Client { beacon_chain: Option>>, network_globals: Option>>, - http_listen_addr: Option, + /// Listen address for the standard eth2.0 API, if the service was started. + http_api_listen_addr: Option, + /// Listen address for the HTTP server which serves Prometheus metrics. + http_metrics_listen_addr: Option, websocket_listen_addr: Option, } @@ -33,9 +36,14 @@ impl Client { self.beacon_chain.clone() } - /// Returns the address of the client's HTTP API server, if it was started. - pub fn http_listen_addr(&self) -> Option { - self.http_listen_addr + /// Returns the address of the client's standard eth2.0 API server, if it was started. + pub fn http_api_listen_addr(&self) -> Option { + self.http_api_listen_addr + } + + /// Returns the address of the client's HTTP Prometheus metrics server, if it was started. + pub fn http_metrics_listen_addr(&self) -> Option { + self.http_metrics_listen_addr } /// Returns the address of the client's WebSocket API server, if it was started. diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index f82c9971d..523779687 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -22,7 +22,7 @@ const SPEEDO_OBSERVATIONS: usize = 4; /// Spawns a notifier service which periodically logs information about the node. 
pub fn spawn_notifier( - executor: environment::TaskExecutor, + executor: task_executor::TaskExecutor, beacon_chain: Arc>, network: Arc>, milliseconds_per_slot: u64, diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 37eea2412..602c3e90c 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -8,25 +8,26 @@ edition = "2018" eth1_test_rig = { path = "../../testing/eth1_test_rig" } toml = "0.5.6" web3 = "0.11.0" -sloggers = "1.0.0" +sloggers = "1.0.1" +environment = { path = "../../lighthouse/environment" } [dependencies] -reqwest = { version = "0.10.4", features = ["native-tls-vendored"] } +reqwest = { version = "0.10.8", features = ["native-tls-vendored"] } futures = { version = "0.3.5", features = ["compat"] } -serde_json = "1.0.52" -serde = { version = "1.0.110", features = ["derive"] } +serde_json = "1.0.58" +serde = { version = "1.0.116", features = ["derive"] } hex = "0.4.2" types = { path = "../../consensus/types"} merkle_proof = { path = "../../consensus/merkle_proof"} eth2_ssz = "0.1.2" eth2_ssz_derive = "0.1.0" -tree_hash = "0.1.0" +tree_hash = "0.1.1" eth2_hashing = "0.1.0" parking_lot = "0.11.0" slog = "2.5.2" tokio = { version = "0.2.22", features = ["full"] } state_processing = { path = "../../consensus/state_processing" } -libflate = "1.0.0" +libflate = "1.0.2" lighthouse_metrics = { path = "../../common/lighthouse_metrics"} lazy_static = "1.4.0" -environment = { path = "../../lighthouse/environment" } +task_executor = { path = "../../common/task_executor" } diff --git a/beacon_node/eth1/src/http.rs b/beacon_node/eth1/src/http.rs index 6dffdaa7c..e8f7d23a0 100644 --- a/beacon_node/eth1/src/http.rs +++ b/beacon_node/eth1/src/http.rs @@ -39,19 +39,34 @@ pub enum Eth1NetworkId { Custom(u64), } +impl Into for Eth1NetworkId { + fn into(self) -> u64 { + match self { + Eth1NetworkId::Mainnet => 1, + Eth1NetworkId::Goerli => 5, + Eth1NetworkId::Custom(id) => id, + } + } +} + +impl From for Eth1NetworkId { + fn from(id: u64) -> Self { + let into = |x: Eth1NetworkId| -> u64 { x.into() }; + match id { + id if id == into(Eth1NetworkId::Mainnet) => Eth1NetworkId::Mainnet, + id if id == into(Eth1NetworkId::Goerli) => Eth1NetworkId::Goerli, + id => Eth1NetworkId::Custom(id), + } + } +} + impl FromStr for Eth1NetworkId { type Err = String; fn from_str(s: &str) -> Result { - match s { - "1" => Ok(Eth1NetworkId::Mainnet), - "5" => Ok(Eth1NetworkId::Goerli), - custom => { - let network_id = u64::from_str_radix(custom, 10) - .map_err(|e| format!("Failed to parse eth1 network id {}", e))?; - Ok(Eth1NetworkId::Custom(network_id)) - } - } + u64::from_str_radix(s, 10) + .map(Into::into) + .map_err(|e| format!("Failed to parse eth1 network id {}", e)) } } diff --git a/beacon_node/eth1/src/lib.rs b/beacon_node/eth1/src/lib.rs index f5f018bd1..a7aba85a2 100644 --- a/beacon_node/eth1/src/lib.rs +++ b/beacon_node/eth1/src/lib.rs @@ -13,4 +13,6 @@ pub use block_cache::{BlockCache, Eth1Block}; pub use deposit_cache::DepositCache; pub use deposit_log::DepositLog; pub use inner::SszEth1Cache; -pub use service::{BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Service}; +pub use service::{ + BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Service, DEFAULT_NETWORK_ID, +}; diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index ee203b645..6b6d65855 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -345,7 +345,7 @@ impl Service { /// - Err(_) if there is an error. 
/// /// Emits logs for debugging and errors. - pub fn auto_update(self, handle: environment::TaskExecutor) { + pub fn auto_update(self, handle: task_executor::TaskExecutor) { let update_interval = Duration::from_millis(self.config().auto_update_interval_millis); let mut interval = interval_at(Instant::now(), update_interval); diff --git a/beacon_node/eth2_libp2p/Cargo.toml b/beacon_node/eth2_libp2p/Cargo.toml index e09573917..82cf8cf79 100644 --- a/beacon_node/eth2_libp2p/Cargo.toml +++ b/beacon_node/eth2_libp2p/Cargo.toml @@ -5,50 +5,50 @@ authors = ["Sigma Prime "] edition = "2018" [dependencies] -hex = "0.4.2" +discv5 = { version = "0.1.0-alpha.13", features = ["libp2p"] } types = { path = "../../consensus/types" } hashset_delay = { path = "../../common/hashset_delay" } eth2_ssz_types = { path = "../../consensus/ssz_types" } -serde = { version = "1.0.110", features = ["derive"] } -serde_derive = "1.0.110" +serde = { version = "1.0.116", features = ["derive"] } +serde_derive = "1.0.116" eth2_ssz = "0.1.2" eth2_ssz_derive = "0.1.0" slog = { version = "2.5.2", features = ["max_level_trace"] } lighthouse_version = { path = "../../common/lighthouse_version" } tokio = { version = "0.2.22", features = ["time", "macros"] } futures = "0.3.5" -error-chain = "0.12.2" -dirs = "2.0.2" +error-chain = "0.12.4" +dirs = "3.0.1" fnv = "1.0.7" unsigned-varint = { git = "https://github.com/sigp/unsigned-varint", branch = "latest-codecs", features = ["codec"] } lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } -smallvec = "1.4.1" -lru = "0.5.1" +smallvec = "1.4.2" +lru = "0.6.0" parking_lot = "0.11.0" sha2 = "0.9.1" -base64 = "0.12.1" -snap = "1.0.0" +base64 = "0.13.0" +snap = "1.0.1" void = "1.0.2" +hex = "0.4.2" tokio-io-timeout = "0.4.0" tokio-util = { version = "0.3.1", features = ["codec", "compat"] } -discv5 = { version = "0.1.0-alpha.12", features = ["libp2p"] } tiny-keccak = "2.0.2" -environment = { path = "../../lighthouse/environment" } +task_executor = { path = "../../common/task_executor" } rand = "0.7.3" +directory = { path = "../../common/directory" } regex = "1.3.9" [dependencies.libp2p] #version = "0.23.0" git = "https://github.com/sigp/rust-libp2p" -rev = "03f998022ce2f566a6c6e6c4206bc0ce4d45109f" +rev = "5a9f0819af3990cfefad528e957297af596399b4" default-features = false features = ["websocket", "identify", "mplex", "noise", "gossipsub", "dns", "tcp-tokio"] [dev-dependencies] tokio = { version = "0.2.22", features = ["full"] } -slog-stdlog = "4.0.0" -slog-term = "2.5.0" +slog-term = "2.6.0" slog-async = "2.5.0" tempdir = "0.3.7" exit-future = "0.2.0" diff --git a/beacon_node/eth2_libp2p/src/behaviour/handler/delegate.rs b/beacon_node/eth2_libp2p/src/behaviour/handler/delegate.rs index 3ab8dcbec..452686a5c 100644 --- a/beacon_node/eth2_libp2p/src/behaviour/handler/delegate.rs +++ b/beacon_node/eth2_libp2p/src/behaviour/handler/delegate.rs @@ -54,8 +54,6 @@ impl DelegatingHandler { } } -// TODO: this can all be created with macros - /// Wrapper around the `ProtocolsHandler::InEvent` types of the handlers. /// Simply delegated to the corresponding behaviour's handler. 
#[derive(Debug, Clone)] @@ -115,7 +113,6 @@ pub type DelegateOutProto = EitherUpgrade< >, >; -// TODO: prob make this an enum pub type DelegateOutInfo = EitherOutput< ::OutboundOpenInfo, EitherOutput< @@ -216,7 +213,6 @@ impl ProtocolsHandler for DelegatingHandler { >::Error, >, ) { - // TODO: find how to clean up match info { // Gossipsub EitherOutput::First(info) => match error { diff --git a/beacon_node/eth2_libp2p/src/behaviour/handler/mod.rs b/beacon_node/eth2_libp2p/src/behaviour/handler/mod.rs index 605870d0f..538c122cc 100644 --- a/beacon_node/eth2_libp2p/src/behaviour/handler/mod.rs +++ b/beacon_node/eth2_libp2p/src/behaviour/handler/mod.rs @@ -41,15 +41,9 @@ pub enum BehaviourHandlerIn { Shutdown(Option<(RequestId, RPCRequest)>), } -pub enum BehaviourHandlerOut { - Delegate(Box>), - // TODO: replace custom with events to send - Custom, -} - impl ProtocolsHandler for BehaviourHandler { type InEvent = BehaviourHandlerIn; - type OutEvent = BehaviourHandlerOut; + type OutEvent = DelegateOut; type Error = DelegateError; type InboundProtocol = DelegateInProto; type OutboundProtocol = DelegateOutProto; @@ -122,9 +116,7 @@ impl ProtocolsHandler for BehaviourHandler { match self.delegate.poll(cx) { Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => { - return Poll::Ready(ProtocolsHandlerEvent::Custom( - BehaviourHandlerOut::Delegate(Box::new(event)), - )) + return Poll::Ready(ProtocolsHandlerEvent::Custom(event)) } Poll::Ready(ProtocolsHandlerEvent::Close(err)) => { return Poll::Ready(ProtocolsHandlerEvent::Close(err)) diff --git a/beacon_node/eth2_libp2p/src/behaviour/mod.rs b/beacon_node/eth2_libp2p/src/behaviour/mod.rs index 143b59f4f..4c3f8e05c 100644 --- a/beacon_node/eth2_libp2p/src/behaviour/mod.rs +++ b/beacon_node/eth2_libp2p/src/behaviour/mod.rs @@ -5,7 +5,7 @@ use crate::types::{GossipEncoding, GossipKind, GossipTopic, SubnetDiscovery}; use crate::Eth2Enr; use crate::{error, metrics, Enr, NetworkConfig, NetworkGlobals, PubsubMessage, TopicHash}; use futures::prelude::*; -use handler::{BehaviourHandler, BehaviourHandlerIn, BehaviourHandlerOut, DelegateIn, DelegateOut}; +use handler::{BehaviourHandler, BehaviourHandlerIn, DelegateIn, DelegateOut}; use libp2p::{ core::{ connection::{ConnectedPoint, ConnectionId, ListenerId}, @@ -102,7 +102,7 @@ pub struct Behaviour { /// The Eth2 RPC specified in the wire-0 protocol. eth2_rpc: RPC, /// Keep regular connection to peers and disconnect if absent. - // TODO: Using id for initial interop. This will be removed by mainnet. + // NOTE: The id protocol is used for initial interop. This will be removed by mainnet. /// Provides IP addresses and peer information. identify: Identify, /// The peer manager that keeps track of peer's reputation and status. @@ -203,9 +203,6 @@ impl Behaviour { self.enr_fork_id.fork_digest, ); - // TODO: Implement scoring - // let topic: Topic = gossip_topic.into(); - // self.gossipsub.set_topic_params(t.hash(), TopicScoreParams::default()); self.subscribe(gossip_topic) } @@ -227,12 +224,6 @@ impl Behaviour { GossipEncoding::default(), self.enr_fork_id.fork_digest, ); - // TODO: Implement scoring - /* - let t: Topic = topic.clone().into(); - self.gossipsub - .set_topic_params(t.hash(), TopicScoreParams::default()); - */ self.subscribe(topic) } @@ -591,7 +582,6 @@ impl Behaviour { } => { if matches!(error, RPCError::HandlerRejected) { // this peer's request got canceled - // TODO: cancel processing for this request } // Inform the peer manager of the error. 
// An inbound error here means we sent an error to the peer, or the stream @@ -621,11 +611,8 @@ impl Behaviour { RPCRequest::MetaData(_) => { // send the requested meta-data self.send_meta_data_response((handler_id, id), peer_id); - // TODO: inform the peer manager? } RPCRequest::Goodbye(reason) => { - // let the peer manager know this peer is in the process of disconnecting - self.peer_manager._disconnecting_peer(&peer_id); // queue for disconnection without a goodbye message debug!( self.log, "Peer sent Goodbye"; @@ -975,17 +962,11 @@ impl NetworkBehaviour for Behaviour { return; } + // Events coming from the handler, redirected to each behaviour match event { - // Events comming from the handler, redirected to each behaviour - BehaviourHandlerOut::Delegate(delegate) => match *delegate { - DelegateOut::Gossipsub(ev) => self.gossipsub.inject_event(peer_id, conn_id, ev), - DelegateOut::RPC(ev) => self.eth2_rpc.inject_event(peer_id, conn_id, ev), - DelegateOut::Identify(ev) => self.identify.inject_event(peer_id, conn_id, *ev), - }, - /* Custom events sent BY the handler */ - BehaviourHandlerOut::Custom => { - // TODO: implement - } + DelegateOut::Gossipsub(ev) => self.gossipsub.inject_event(peer_id, conn_id, ev), + DelegateOut::RPC(ev) => self.eth2_rpc.inject_event(peer_id, conn_id, ev), + DelegateOut::Identify(ev) => self.identify.inject_event(peer_id, conn_id, *ev), } } @@ -1003,7 +984,6 @@ impl NetworkBehaviour for Behaviour { self.waker = Some(cx.waker().clone()); } - // TODO: move where it's less distracting macro_rules! poll_behaviour { /* $behaviour: The sub-behaviour being polled. * $on_event_fn: Function to call if we get an event from the sub-behaviour. diff --git a/beacon_node/eth2_libp2p/src/config.rs b/beacon_node/eth2_libp2p/src/config.rs index 3d6c726f4..93e0a423c 100644 --- a/beacon_node/eth2_libp2p/src/config.rs +++ b/beacon_node/eth2_libp2p/src/config.rs @@ -1,5 +1,8 @@ use crate::types::GossipKind; use crate::{Enr, PeerIdSerialized}; +use directory::{ + DEFAULT_BEACON_NODE_DIR, DEFAULT_HARDCODED_TESTNET, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR, +}; use discv5::{Discv5Config, Discv5ConfigBuilder}; use libp2p::gossipsub::{ GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage, MessageId, ValidationMode, @@ -77,9 +80,14 @@ pub struct Config { impl Default for Config { /// Generate a default network configuration. fn default() -> Self { - let mut network_dir = dirs::home_dir().unwrap_or_else(|| PathBuf::from(".")); - network_dir.push(".lighthouse"); - network_dir.push("network"); + // WARNING: this directory default should always be overridden with parameters + // from the CLI for specific networks. + let network_dir = dirs::home_dir() + .unwrap_or_else(|| PathBuf::from(".")) + .join(DEFAULT_ROOT_DIR) + .join(DEFAULT_HARDCODED_TESTNET) + .join(DEFAULT_BEACON_NODE_DIR) + .join(DEFAULT_NETWORK_DIR); // The function used to generate a gossipsub message id // We use the first 8 bytes of SHA256(data) for content addressing @@ -115,6 +123,7 @@ impl Default for Config { .request_retries(1) .enr_peer_update_min(10) .query_parallelism(5) + .disable_report_discovered_peers() .query_timeout(Duration::from_secs(30)) .query_peer_timeout(Duration::from_secs(2)) .ip_limit() // limits /24 IP's in buckets.
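Earlier in this diff (beacon_node/eth1/src/http.rs), `FromStr` for `Eth1NetworkId` was rewritten to parse a `u64` and reuse the integer conversions, so the network-id mapping lives in one place. A compilable sketch of that round-trip is below, written here with `From` impls (which provide `Into` for free); the enum name is illustrative:

```rust
use std::str::FromStr;

#[derive(Debug, Clone, PartialEq)]
enum NetworkId {
    Mainnet,
    Goerli,
    Custom(u64),
}

impl From<NetworkId> for u64 {
    fn from(id: NetworkId) -> u64 {
        match id {
            NetworkId::Mainnet => 1,
            NetworkId::Goerli => 5,
            NetworkId::Custom(n) => n,
        }
    }
}

impl From<u64> for NetworkId {
    fn from(n: u64) -> Self {
        match n {
            1 => NetworkId::Mainnet,
            5 => NetworkId::Goerli,
            n => NetworkId::Custom(n),
        }
    }
}

impl FromStr for NetworkId {
    type Err = String;

    // Parse the integer form and reuse `From<u64>`, keeping one mapping.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        s.parse::<u64>()
            .map(Into::into)
            .map_err(|e| format!("Failed to parse eth1 network id {}", e))
    }
}

fn main() {
    assert_eq!("1".parse::<NetworkId>(), Ok(NetworkId::Mainnet));
    assert_eq!("1337".parse::<NetworkId>(), Ok(NetworkId::Custom(1337)));
    assert_eq!(u64::from(NetworkId::Goerli), 5);
}
```

With this arrangement, adding a new named network only touches the two `From` impls rather than every parsing site.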
diff --git a/beacon_node/eth2_libp2p/src/discovery/enr.rs b/beacon_node/eth2_libp2p/src/discovery/enr.rs index 6af9f21fb..ffe671dcf 100644 --- a/beacon_node/eth2_libp2p/src/discovery/enr.rs +++ b/beacon_node/eth2_libp2p/src/discovery/enr.rs @@ -129,7 +129,6 @@ pub fn create_enr_builder_from_config(config: &NetworkConfig) -> EnrB builder.udp(udp_port); } // we always give it our listening tcp port - // TODO: Add uPnP support to map udp and tcp ports let tcp_port = config.enr_tcp_port.unwrap_or_else(|| config.libp2p_port); builder.tcp(tcp_port).tcp(config.libp2p_port); builder @@ -144,12 +143,12 @@ pub fn build_enr( let mut builder = create_enr_builder_from_config(config); // set the `eth2` field on our ENR - builder.add_value(ETH2_ENR_KEY.into(), enr_fork_id.as_ssz_bytes()); + builder.add_value(ETH2_ENR_KEY, &enr_fork_id.as_ssz_bytes()); // set the "attnets" field on our ENR let bitfield = BitVector::::new(); - builder.add_value(BITFIELD_ENR_KEY.into(), bitfield.as_ssz_bytes()); + builder.add_value(BITFIELD_ENR_KEY, &bitfield.as_ssz_bytes()); builder .build(enr_key) diff --git a/beacon_node/eth2_libp2p/src/discovery/mod.rs b/beacon_node/eth2_libp2p/src/discovery/mod.rs index b065265da..8c29d844c 100644 --- a/beacon_node/eth2_libp2p/src/discovery/mod.rs +++ b/beacon_node/eth2_libp2p/src/discovery/mod.rs @@ -365,7 +365,7 @@ impl Discovery { /// If the external address needs to be modified, use `update_enr_udp_socket. pub fn update_enr_tcp_port(&mut self, port: u16) -> Result<(), String> { self.discv5 - .enr_insert("tcp", port.to_be_bytes().into()) + .enr_insert("tcp", &port.to_be_bytes()) .map_err(|e| format!("{:?}", e))?; // replace the global version @@ -383,18 +383,18 @@ impl Discovery { match socket_addr { SocketAddr::V4(socket) => { self.discv5 - .enr_insert("ip", socket.ip().octets().into()) + .enr_insert("ip", &socket.ip().octets()) .map_err(|e| format!("{:?}", e))?; self.discv5 - .enr_insert("udp", socket.port().to_be_bytes().into()) + .enr_insert("udp", &socket.port().to_be_bytes()) .map_err(|e| format!("{:?}", e))?; } SocketAddr::V6(socket) => { self.discv5 - .enr_insert("ip6", socket.ip().octets().into()) + .enr_insert("ip6", &socket.ip().octets()) .map_err(|e| format!("{:?}", e))?; self.discv5 - .enr_insert("udp6", socket.port().to_be_bytes().into()) + .enr_insert("udp6", &socket.port().to_be_bytes()) .map_err(|e| format!("{:?}", e))?; } } @@ -439,7 +439,7 @@ impl Discovery { // insert the bitfield into the ENR record self.discv5 - .enr_insert(BITFIELD_ENR_KEY, current_bitfield.as_ssz_bytes()) + .enr_insert(BITFIELD_ENR_KEY, &current_bitfield.as_ssz_bytes()) .map_err(|e| format!("{:?}", e))?; // replace the global version @@ -468,7 +468,7 @@ impl Discovery { let _ = self .discv5 - .enr_insert(ETH2_ENR_KEY, enr_fork_id.as_ssz_bytes()) + .enr_insert(ETH2_ENR_KEY, &enr_fork_id.as_ssz_bytes()) .map_err(|e| { warn!( self.log, @@ -858,7 +858,10 @@ impl Discovery { // Still awaiting the event stream, poll it if let Poll::Ready(event_stream) = fut.poll_unpin(cx) { match event_stream { - Ok(stream) => self.event_stream = EventStream::Present(stream), + Ok(stream) => { + debug!(self.log, "Discv5 event stream ready"); + self.event_stream = EventStream::Present(stream); + } Err(e) => { slog::crit!(self.log, "Discv5 event stream failed"; "error" => e.to_string()); self.event_stream = EventStream::InActive; diff --git a/beacon_node/eth2_libp2p/src/peer_manager/mod.rs b/beacon_node/eth2_libp2p/src/peer_manager/mod.rs index d528d7e69..d03480d21 100644 ---
a/beacon_node/eth2_libp2p/src/peer_manager/mod.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/mod.rs @@ -147,8 +147,7 @@ impl PeerManager { /// /// If the peer doesn't exist, log a warning and insert defaults. pub fn report_peer(&mut self, peer_id: &PeerId, action: PeerAction) { - // TODO: Remove duplicate code - This is duplicated in the update_peer_scores() - // function. + // NOTE: This is duplicated in the update_peer_scores() function and could be improved. // Variables to update the PeerDb if required. let mut ban_peer = None; @@ -179,7 +178,6 @@ impl PeerManager { GoodbyeReason::BadScore, )); } - // TODO: Update the peer manager to inform that the peer is disconnecting. } ScoreState::Healthy => { debug!(self.log, "Peer transitioned to healthy state"; "peer_id" => peer_id.to_string(), "score" => info.score().to_string(), "past_state" => previous_state.to_string()); @@ -322,15 +320,6 @@ impl PeerManager { self.connect_peer(peer_id, ConnectingType::OutgoingConnected { multiaddr }) } - /// Updates the database informing that a peer is being disconnected. - pub fn _disconnecting_peer(&mut self, _peer_id: &PeerId) -> bool { - // TODO: implement - // This informs the database that we are in the process of disconnecting the - // peer. Currently this state only exists for a short period of time before we force the - // disconnection. - true - } - /// Reports if a peer is banned or not. /// /// This is used to determine if we should accept incoming connections. @@ -408,10 +397,7 @@ impl PeerManager { // Not supporting a protocol shouldn't be considered a malicious action, but // it is an action that in some cases will make the peer unfit to continue // communicating. - // TODO: To avoid punishing a peer repeatedly for not supporting a protocol, this - // information could be stored and used to prevent sending requests for the given - // protocol to this peer. Similarly, to avoid blacklisting a peer for a protocol - // forever, if stored this information should expire. + match protocol { Protocol::Ping => PeerAction::Fatal, Protocol::BlocksByRange => return, @@ -445,7 +431,6 @@ impl PeerManager { /// A ping request has been received. // NOTE: The behaviour responds with a PONG automatically - // TODO: Update last seen pub fn ping_request(&mut self, peer_id: &PeerId, seq: u64) { if let Some(peer_info) = self.network_globals.peers.read().peer_info(peer_id) { // received a ping @@ -475,7 +460,6 @@ impl PeerManager { } /// A PONG has been returned from a peer. - // TODO: Update last seen pub fn pong_response(&mut self, peer_id: &PeerId, seq: u64) { if let Some(peer_info) = self.network_globals.peers.read().peer_info(peer_id) { // received a pong @@ -501,7 +485,6 @@ impl PeerManager { } /// Received a metadata response from a peer.
- // TODO: Update last seen pub fn meta_data_response(&mut self, peer_id: &PeerId, meta_data: MetaData) { if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) { if let Some(known_meta_data) = &peer_info.meta_data { @@ -597,7 +580,7 @@ impl PeerManager { let connected_or_dialing = self.network_globals.connected_or_dialing_peers(); for (peer_id, min_ttl) in results { // we attempt a connection if this peer is a subnet peer or if the max peer count - // is not yet filled (including dialling peers) + // is not yet filled (including dialing peers) if (min_ttl.is_some() || connected_or_dialing + to_dial_peers.len() < self.max_peers) && !self .network_globals @@ -610,7 +593,6 @@ impl PeerManager { .read() .is_banned_or_disconnected(&peer_id) { - // TODO: Update output // This should be updated with the peer dialing. In fact created once the peer is // dialed if let Some(min_ttl) = min_ttl { @@ -699,58 +681,6 @@ impl PeerManager { // Update scores info.score_update(); - /* TODO: Implement logic about connection lifetimes - match info.connection_status { - Connected { .. } => { - // Connected peers gain reputation by sending useful messages - } - Disconnected { since } | Banned { since } => { - // For disconnected peers, lower their reputation by 1 for every hour they - // stay disconnected. This helps us slowly forget disconnected peers. - // In the same way, slowly allow banned peers back again. - let dc_hours = now - .checked_duration_since(since) - .unwrap_or_else(|| Duration::from_secs(0)) - .as_secs() - / 3600; - let last_dc_hours = self - ._last_updated - .checked_duration_since(since) - .unwrap_or_else(|| Duration::from_secs(0)) - .as_secs() - / 3600; - if dc_hours > last_dc_hours { - // this should be 1 most of the time - let rep_dif = (dc_hours - last_dc_hours) - .try_into() - .unwrap_or(Rep::max_value()); - - info.reputation = if info.connection_status.is_banned() { - info.reputation.saturating_add(rep_dif) - } else { - info.reputation.saturating_sub(rep_dif) - }; - } - } - Dialing { since } => { - // A peer shouldn't be dialing for more than 2 minutes - if since.elapsed().as_secs() > 120 { - warn!(self.log,"Peer has been dialing for too long"; "peer_id" => id.to_string()); - // TODO: decide how to handle this - } - } - Unknown => {} //TODO: Handle this case - } - // Check if the peer gets banned or unbanned and if it should be disconnected - if info.reputation < _MIN_REP_BEFORE_BAN && !info.connection_status.is_banned() { - // This peer gets banned. Check if we should request disconnection - ban_queue.push(id.clone()); - } else if info.reputation >= _MIN_REP_BEFORE_BAN && info.connection_status.is_banned() { - // This peer gets unbanned - unban_queue.push(id.clone()); - } - */ - // handle score transitions if previous_state != info.score_state() { match info.score_state() { @@ -774,7 +704,6 @@ impl PeerManager { GoodbyeReason::BadScore, )); } - // TODO: Update peer manager to report that it's disconnecting. } ScoreState::Healthy => { debug!(self.log, "Peer transitioned to healthy state"; "peer_id" => peer_id.to_string(), "score" => info.score().to_string(), "past_state" => previous_state.to_string()); @@ -838,9 +767,6 @@ impl PeerManager { /// /// NOTE: Discovery will only add a new query if one isn't already queued. fn heartbeat(&mut self) { - // TODO: Provide a back-off time for discovery queries. I.e Queue many initially, then only - // perform discoveries over a larger fixed interval. Perhaps one every 6 heartbeats. 
This - // is achievable with a leaky bucket let peer_count = self.network_globals.connected_or_dialing_peers(); if peer_count < self.target_peers { // If we need more peers, queue a discovery lookup. diff --git a/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs b/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs index 0f8774f7c..184baa5b2 100644 --- a/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs +++ b/beacon_node/eth2_libp2p/src/peer_manager/peerdb.rs @@ -130,7 +130,6 @@ impl PeerDB { } /// Returns a mutable reference to a peer's info if known. - /// TODO: make pub(super) to ensure that peer management is unified pub fn peer_info_mut(&mut self, peer_id: &PeerId) -> Option<&mut PeerInfo> { self.peers.get_mut(peer_id) } diff --git a/beacon_node/eth2_libp2p/src/rpc/handler.rs b/beacon_node/eth2_libp2p/src/rpc/handler.rs index a4b18b03b..93f26eed8 100644 --- a/beacon_node/eth2_libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2_libp2p/src/rpc/handler.rs @@ -25,8 +25,6 @@ use std::{ use tokio::time::{delay_queue, delay_until, Delay, DelayQueue, Instant as TInstant}; use types::EthSpec; -//TODO: Implement check_timeout() on the substream types - /// The time (in seconds) before a substream that is awaiting a response from the user times out. pub const RESPONSE_TIMEOUT: u64 = 10; @@ -163,8 +161,6 @@ struct OutboundInfo { /// Info over the protocol this substream is handling. proto: Protocol, /// Number of chunks to be seen from the peer's response. - // TODO: removing the option could allow clossing the streams after the number of - // expected responses is met for all protocols. remaining_chunks: Option, /// `RequestId` as given by the application that sent the request. req_id: RequestId, diff --git a/beacon_node/eth2_libp2p/src/service.rs b/beacon_node/eth2_libp2p/src/service.rs index 52286c05d..ece4cd1d5 100644 --- a/beacon_node/eth2_libp2p/src/service.rs +++ b/beacon_node/eth2_libp2p/src/service.rs @@ -59,7 +59,7 @@ pub struct Service { impl Service { pub async fn new( - executor: environment::TaskExecutor, + executor: task_executor::TaskExecutor, config: &NetworkConfig, enr_fork_id: EnrForkId, log: &slog::Logger, @@ -109,7 +109,7 @@ impl Service { Behaviour::new(&local_keypair, config, network_globals.clone(), &log).await?; // use the executor for libp2p - struct Executor(environment::TaskExecutor); + struct Executor(task_executor::TaskExecutor); impl libp2p::core::Executor for Executor { fn exec(&self, f: Pin + Send>>) { self.0.spawn(f, "libp2p"); diff --git a/beacon_node/eth2_libp2p/tests/common/mod.rs b/beacon_node/eth2_libp2p/tests/common/mod.rs index dc81cdb68..916f2f841 100644 --- a/beacon_node/eth2_libp2p/tests/common/mod.rs +++ b/beacon_node/eth2_libp2p/tests/common/mod.rs @@ -99,7 +99,7 @@ pub async fn build_libp2p_instance(boot_nodes: Vec, log: slog::Logger) -> L let (signal, exit) = exit_future::signal(); let (shutdown_tx, _) = futures::channel::mpsc::channel(1); - let executor = environment::TaskExecutor::new( + let executor = task_executor::TaskExecutor::new( tokio::runtime::Handle::current(), exit, log.clone(), diff --git a/beacon_node/genesis/Cargo.toml b/beacon_node/genesis/Cargo.toml index 9510a840d..2150754ea 100644 --- a/beacon_node/genesis/Cargo.toml +++ b/beacon_node/genesis/Cargo.toml @@ -12,16 +12,16 @@ futures = "0.3.5" types = { path = "../../consensus/types"} environment = { path = "../../lighthouse/environment"} eth1 = { path = "../eth1"} -rayon = "1.3.0" +rayon = "1.4.1" state_processing = { path = "../../consensus/state_processing" } merkle_proof = { path = 
"../../consensus/merkle_proof" } eth2_ssz = "0.1.2" eth2_hashing = "0.1.0" -tree_hash = "0.1.0" +tree_hash = "0.1.1" tokio = { version = "0.2.22", features = ["full"] } parking_lot = "0.11.0" slog = "2.5.2" exit-future = "0.2.0" -serde = "1.0.110" -serde_derive = "1.0.110" +serde = "1.0.116" +serde_derive = "1.0.116" int_to_bytes = { path = "../../consensus/int_to_bytes" } diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml new file mode 100644 index 000000000..af2ebfeca --- /dev/null +++ b/beacon_node/http_api/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "http_api" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dependencies] +warp = "0.2.5" +serde = { version = "1.0.116", features = ["derive"] } +tokio = { version = "0.2.22", features = ["macros"] } +parking_lot = "0.11.0" +types = { path = "../../consensus/types" } +hex = "0.4.2" +beacon_chain = { path = "../beacon_chain" } +eth2 = { path = "../../common/eth2", features = ["lighthouse"] } +slog = "2.5.2" +network = { path = "../network" } +eth2_libp2p = { path = "../eth2_libp2p" } +eth1 = { path = "../eth1" } +fork_choice = { path = "../../consensus/fork_choice" } +state_processing = { path = "../../consensus/state_processing" } +lighthouse_version = { path = "../../common/lighthouse_version" } +lighthouse_metrics = { path = "../../common/lighthouse_metrics" } +lazy_static = "1.4.0" +warp_utils = { path = "../../common/warp_utils" } +slot_clock = { path = "../../common/slot_clock" } + +[dev-dependencies] +store = { path = "../store" } +environment = { path = "../../lighthouse/environment" } +tree_hash = "0.1.1" +discv5 = { version = "0.1.0-alpha.13", features = ["libp2p"] } diff --git a/beacon_node/http_api/src/beacon_proposer_cache.rs b/beacon_node/http_api/src/beacon_proposer_cache.rs new file mode 100644 index 000000000..b062119e5 --- /dev/null +++ b/beacon_node/http_api/src/beacon_proposer_cache.rs @@ -0,0 +1,185 @@ +use crate::metrics; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::types::ProposerData; +use fork_choice::ProtoBlock; +use slot_clock::SlotClock; +use state_processing::per_slot_processing; +use types::{BeaconState, Epoch, EthSpec, Hash256, PublicKeyBytes}; + +/// This sets a maximum bound on the number of epochs to skip whilst instantiating the cache for +/// the first time. +const EPOCHS_TO_SKIP: u64 = 2; + +/// Caches the beacon block proposers for a given `epoch` and `epoch_boundary_root`. +/// +/// This cache is only able to contain a single set of proposers and is only +/// intended to cache the proposers for the current epoch according to the head +/// of the chain. A change in epoch or re-org to a different chain may cause a +/// cache miss and rebuild. +pub struct BeaconProposerCache { + epoch: Epoch, + decision_block_root: Hash256, + proposers: Vec, +} + +impl BeaconProposerCache { + /// Create a new cache for the current epoch of the `chain`. + pub fn new(chain: &BeaconChain) -> Result { + let head_root = chain.head_beacon_block_root()?; + let head_block = chain + .fork_choice + .read() + .get_block(&head_root) + .ok_or_else(|| BeaconChainError::MissingBeaconBlock(head_root))?; + + // If the head epoch is more than `EPOCHS_TO_SKIP` in the future, just build the cache at + // the epoch of the head. This prevents doing a massive amount of skip slots when starting + // a new database from genesis. 
+ let epoch = { + let epoch_now = chain + .epoch() + .unwrap_or_else(|_| chain.spec.genesis_slot.epoch(T::EthSpec::slots_per_epoch())); + let head_epoch = head_block.slot.epoch(T::EthSpec::slots_per_epoch()); + if epoch_now > head_epoch + EPOCHS_TO_SKIP { + head_epoch + } else { + epoch_now + } + }; + + Self::for_head_block(chain, epoch, head_root, head_block) + } + + /// Create a new cache that contains the shuffling for `current_epoch`, + /// assuming that `head_root` and `head_block` represents the most recent + /// canonical block. + fn for_head_block( + chain: &BeaconChain, + current_epoch: Epoch, + head_root: Hash256, + head_block: ProtoBlock, + ) -> Result { + let _timer = metrics::start_timer(&metrics::HTTP_API_BEACON_PROPOSER_CACHE_TIMES); + + let mut head_state = chain + .get_state(&head_block.state_root, Some(head_block.slot))? + .ok_or_else(|| BeaconChainError::MissingBeaconState(head_block.state_root))?; + + let decision_block_root = Self::decision_block_root(current_epoch, head_root, &head_state)?; + + // We *must* skip forward to the current epoch to obtain valid proposer + // duties. We cannot skip to the previous epoch, like we do with + // attester duties. + while head_state.current_epoch() < current_epoch { + // Skip slots until the current epoch, providing `Hash256::zero()` as the state root + // since we don't require it to be valid to identify producers. + per_slot_processing(&mut head_state, Some(Hash256::zero()), &chain.spec)?; + } + + let proposers = current_epoch + .slot_iter(T::EthSpec::slots_per_epoch()) + .map(|slot| { + head_state + .get_beacon_proposer_index(slot, &chain.spec) + .map_err(BeaconChainError::from) + .and_then(|i| { + let pubkey = chain + .validator_pubkey(i)? + .ok_or_else(|| BeaconChainError::ValidatorPubkeyCacheIncomplete(i))?; + + Ok(ProposerData { + pubkey: PublicKeyBytes::from(pubkey), + slot, + }) + }) + }) + .collect::>()?; + + Ok(Self { + epoch: current_epoch, + decision_block_root, + proposers, + }) + } + + /// Returns a block root which can be used to key the shuffling obtained from the following + /// parameters: + /// + /// - `shuffling_epoch`: the epoch for which the shuffling pertains. + /// - `head_block_root`: the block root at the head of the chain. + /// - `head_block_state`: the state of `head_block_root`. + pub fn decision_block_root( + shuffling_epoch: Epoch, + head_block_root: Hash256, + head_block_state: &BeaconState, + ) -> Result { + let decision_slot = shuffling_epoch + .start_slot(E::slots_per_epoch()) + .saturating_sub(1_u64); + + // If decision slot is equal to or ahead of the head, the block root is the head block root + if decision_slot >= head_block_state.slot { + Ok(head_block_root) + } else { + head_block_state + .get_block_root(decision_slot) + .map(|root| *root) + .map_err(Into::into) + } + } + + /// Return the proposers for the given `Epoch`. + /// + /// The cache may be rebuilt if: + /// + /// - The epoch has changed since the last cache build. + /// - There has been a re-org that crosses an epoch boundary. + pub fn get_proposers( + &mut self, + chain: &BeaconChain, + epoch: Epoch, + ) -> Result, warp::Rejection> { + let current_epoch = chain + .slot_clock + .now_or_genesis() + .ok_or_else(|| { + warp_utils::reject::custom_server_error("unable to read slot clock".to_string()) + })? + .epoch(T::EthSpec::slots_per_epoch()); + + // Disallow requests that are outside the current epoch. This ensures the cache doesn't get + // washed-out with old values. 
+ if current_epoch != epoch { + return Err(warp_utils::reject::custom_bad_request(format!( + "requested epoch is {} but only current epoch {} is allowed", + epoch, current_epoch + ))); + } + + let (head_block_root, head_decision_block_root) = chain + .with_head(|head| { + Self::decision_block_root(current_epoch, head.beacon_block_root, &head.beacon_state) + .map(|decision_root| (head.beacon_block_root, decision_root)) + }) + .map_err(warp_utils::reject::beacon_chain_error)?; + + let head_block = chain + .fork_choice + .read() + .get_block(&head_block_root) + .ok_or_else(|| BeaconChainError::MissingBeaconBlock(head_block_root)) + .map_err(warp_utils::reject::beacon_chain_error)?; + + // Rebuild the cache if this call causes a cache-miss. + if self.epoch != current_epoch || self.decision_block_root != head_decision_block_root { + metrics::inc_counter(&metrics::HTTP_API_BEACON_PROPOSER_CACHE_MISSES_TOTAL); + + *self = Self::for_head_block(chain, current_epoch, head_block_root, head_block) + .map_err(warp_utils::reject::beacon_chain_error)?; + } else { + metrics::inc_counter(&metrics::HTTP_API_BEACON_PROPOSER_CACHE_HITS_TOTAL); + } + + Ok(self.proposers.clone()) + } +} diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs new file mode 100644 index 000000000..5e358a2d6 --- /dev/null +++ b/beacon_node/http_api/src/block_id.rs @@ -0,0 +1,87 @@ +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::types::BlockId as CoreBlockId; +use std::str::FromStr; +use types::{Hash256, SignedBeaconBlock, Slot}; + +/// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a given +/// `BlockId`. +#[derive(Debug)] +pub struct BlockId(pub CoreBlockId); + +impl BlockId { + pub fn from_slot(slot: Slot) -> Self { + Self(CoreBlockId::Slot(slot)) + } + + pub fn from_root(root: Hash256) -> Self { + Self(CoreBlockId::Root(root)) + } + + /// Return the block root identified by `self`. + pub fn root( + &self, + chain: &BeaconChain, + ) -> Result { + match &self.0 { + CoreBlockId::Head => chain + .head_info() + .map(|head| head.block_root) + .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Genesis => Ok(chain.genesis_block_root), + CoreBlockId::Finalized => chain + .head_info() + .map(|head| head.finalized_checkpoint.root) + .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Justified => chain + .head_info() + .map(|head| head.current_justified_checkpoint.root) + .map_err(warp_utils::reject::beacon_chain_error), + CoreBlockId::Slot(slot) => chain + .block_root_at_slot(*slot) + .map_err(warp_utils::reject::beacon_chain_error) + .and_then(|root_opt| { + root_opt.ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "beacon block at slot {}", + slot + )) + }) + }), + CoreBlockId::Root(root) => Ok(*root), + } + } + + /// Return the `SignedBeaconBlock` identified by `self`. 
+ pub fn block( + &self, + chain: &BeaconChain, + ) -> Result, warp::Rejection> { + match &self.0 { + CoreBlockId::Head => chain + .head_beacon_block() + .map_err(warp_utils::reject::beacon_chain_error), + _ => { + let root = self.root(chain)?; + chain + .get_block(&root) + .map_err(warp_utils::reject::beacon_chain_error) + .and_then(|root_opt| { + root_opt.ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "beacon block with root {}", + root + )) + }) + }) + } + } + } +} + +impl FromStr for BlockId { + type Err = String; + + fn from_str(s: &str) -> Result { + CoreBlockId::from_str(s).map(Self) + } +} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs new file mode 100644 index 000000000..1b52cbd2c --- /dev/null +++ b/beacon_node/http_api/src/lib.rs @@ -0,0 +1,1730 @@ +//! This crate contains a HTTP server which serves the endpoints listed here: +//! +//! https://github.com/ethereum/eth2.0-APIs +//! +//! There are also some additional, non-standard endpoints behind the `/lighthouse/` path which are +//! used for development. + +mod beacon_proposer_cache; +mod block_id; +mod metrics; +mod state_id; +mod validator_inclusion; + +use beacon_chain::{ + observed_operations::ObservationOutcome, AttestationError as AttnError, BeaconChain, + BeaconChainError, BeaconChainTypes, +}; +use beacon_proposer_cache::BeaconProposerCache; +use block_id::BlockId; +use eth2::{ + types::{self as api_types, ValidatorId}, + StatusCode, +}; +use eth2_libp2p::{types::SyncState, NetworkGlobals, PubsubMessage}; +use lighthouse_version::version_with_platform; +use network::NetworkMessage; +use parking_lot::Mutex; +use serde::{Deserialize, Serialize}; +use slog::{crit, error, info, trace, warn, Logger}; +use slot_clock::SlotClock; +use state_id::StateId; +use state_processing::per_slot_processing; +use std::borrow::Cow; +use std::convert::TryInto; +use std::future::Future; +use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; +use std::sync::Arc; +use tokio::sync::mpsc::UnboundedSender; +use types::{ + Attestation, AttestationDuty, AttesterSlashing, CloneConfig, CommitteeCache, Epoch, EthSpec, + Hash256, ProposerSlashing, PublicKey, RelativeEpoch, SignedAggregateAndProof, + SignedBeaconBlock, SignedVoluntaryExit, Slot, YamlConfig, +}; +use warp::Filter; +use warp_utils::task::{blocking_json_task, blocking_task}; + +const API_PREFIX: &str = "eth"; +const API_VERSION: &str = "v1"; + +/// If the node is within this many epochs from the head, we declare it to be synced regardless of +/// the network sync state. +/// +/// This helps prevent attacks where nodes can convince us that we're syncing some non-existent +/// finalized head. +const SYNC_TOLERANCE_EPOCHS: u64 = 8; + +/// A wrapper around all the items required to spawn the HTTP server. +/// +/// The server will gracefully handle the case where any fields are `None`. +pub struct Context { + pub config: Config, + pub chain: Option>>, + pub network_tx: Option>>, + pub network_globals: Option>>, + pub log: Logger, +} + +/// Configuration for the HTTP server. 
+#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] +pub struct Config { + pub enabled: bool, + pub listen_addr: Ipv4Addr, + pub listen_port: u16, + pub allow_origin: Option<String>, +} + +impl Default for Config { + fn default() -> Self { + Self { + enabled: false, + listen_addr: Ipv4Addr::new(127, 0, 0, 1), + listen_port: 5052, + allow_origin: None, + } + } +} + +#[derive(Debug)] +pub enum Error { + Warp(warp::Error), + Other(String), +} + +impl From<warp::Error> for Error { + fn from(e: warp::Error) -> Self { + Error::Warp(e) + } +} + +impl From<String> for Error { + fn from(e: String) -> Self { + Error::Other(e) + } +} + +/// Creates a `warp` logging wrapper which we use to create `slog` logs. +pub fn slog_logging( + log: Logger, +) -> warp::filters::log::Log { + warp::log::custom(move |info| { + match info.status() { + status if status == StatusCode::OK || status == StatusCode::NOT_FOUND => { + trace!( + log, + "Processed HTTP API request"; + "elapsed" => format!("{:?}", info.elapsed()), + "status" => status.to_string(), + "path" => info.path(), + "method" => info.method().to_string(), + ); + } + status => { + warn!( + log, + "Error processing HTTP API request"; + "elapsed" => format!("{:?}", info.elapsed()), + "status" => status.to_string(), + "path" => info.path(), + "method" => info.method().to_string(), + ); + } + }; + }) +} + +/// Creates a `warp` logging wrapper which we use for Prometheus metrics (not necessarily logging, +/// per se). +pub fn prometheus_metrics() -> warp::filters::log::Log { + warp::log::custom(move |info| { + // Here we restrict the `info.path()` value to some predefined values. Without this, we end + // up with a new metric type each time someone includes something unique in the path (e.g., + // a block hash). + let path = { + let equals = |s: &'static str| -> Option<&'static str> { + if info.path() == format!("/{}/{}/{}", API_PREFIX, API_VERSION, s) { + Some(s) + } else { + None + } + }; + + let starts_with = |s: &'static str| -> Option<&'static str> { + if info + .path() + .starts_with(&format!("/{}/{}/{}", API_PREFIX, API_VERSION, s)) + { + Some(s) + } else { + None + } + }; + + equals("beacon/blocks") + .or_else(|| starts_with("validator/duties/attester")) + .or_else(|| starts_with("validator/duties/proposer")) + .or_else(|| starts_with("validator/attestation_data")) + .or_else(|| starts_with("validator/blocks")) + .or_else(|| starts_with("validator/aggregate_attestation")) + .or_else(|| starts_with("validator/aggregate_and_proofs")) + .or_else(|| starts_with("validator/beacon_committee_subscriptions")) + .or_else(|| starts_with("beacon/")) + .or_else(|| starts_with("config/")) + .or_else(|| starts_with("debug/")) + .or_else(|| starts_with("events/")) + .or_else(|| starts_with("node/")) + .or_else(|| starts_with("validator/")) + .unwrap_or("other") + }; + + metrics::inc_counter_vec(&metrics::HTTP_API_PATHS_TOTAL, &[path]); + metrics::inc_counter_vec( + &metrics::HTTP_API_STATUS_CODES_TOTAL, + &[&info.status().to_string()], + ); + metrics::observe_timer_vec(&metrics::HTTP_API_PATHS_TIMES, &[path], info.elapsed()); + }) +} + +/// Creates a server that will serve requests using information from `ctx`. +/// +/// The server will shut down gracefully when the `shutdown` future resolves. +/// +/// ## Returns +/// +/// This function will bind the server to the provided address and then return a tuple of: +/// +/// - `SocketAddr`: the address that the HTTP server will listen on. +/// - `Future`: the actual server future that will need to be awaited.
+/// +/// ## Errors +/// +/// Returns an error if the server is unable to bind or there is another error during +/// configuration. +pub fn serve( + ctx: Arc>, + shutdown: impl Future + Send + Sync + 'static, +) -> Result<(SocketAddr, impl Future), Error> { + let config = ctx.config.clone(); + let log = ctx.log.clone(); + let allow_origin = config.allow_origin.clone(); + + // Sanity check. + if !config.enabled { + crit!(log, "Cannot start disabled HTTP server"); + return Err(Error::Other( + "A disabled server should not be started".to_string(), + )); + } + + let eth1_v1 = warp::path(API_PREFIX).and(warp::path(API_VERSION)); + + // Instantiate the beacon proposer cache. + let beacon_proposer_cache = ctx + .chain + .as_ref() + .map(|chain| BeaconProposerCache::new(&chain)) + .transpose() + .map_err(|e| format!("Unable to initialize beacon proposer cache: {:?}", e))? + .map(Mutex::new) + .map(Arc::new); + + // Create a `warp` filter that provides access to the proposer cache. + let beacon_proposer_cache = || { + warp::any() + .map(move || beacon_proposer_cache.clone()) + .and_then(|beacon_proposer_cache| async move { + match beacon_proposer_cache { + Some(cache) => Ok(cache), + None => Err(warp_utils::reject::custom_not_found( + "Beacon proposer cache is not initialized.".to_string(), + )), + } + }) + }; + + // Create a `warp` filter that provides access to the network globals. + let inner_network_globals = ctx.network_globals.clone(); + let network_globals = warp::any() + .map(move || inner_network_globals.clone()) + .and_then(|network_globals| async move { + match network_globals { + Some(globals) => Ok(globals), + None => Err(warp_utils::reject::custom_not_found( + "network globals are not initialized.".to_string(), + )), + } + }); + + // Create a `warp` filter that provides access to the beacon chain. + let inner_ctx = ctx.clone(); + let chain_filter = + warp::any() + .map(move || inner_ctx.chain.clone()) + .and_then(|chain| async move { + match chain { + Some(chain) => Ok(chain), + None => Err(warp_utils::reject::custom_not_found( + "Beacon chain genesis has not yet been observed.".to_string(), + )), + } + }); + + // Create a `warp` filter that provides access to the network sender channel. + let inner_ctx = ctx.clone(); + let network_tx_filter = warp::any() + .map(move || inner_ctx.network_tx.clone()) + .and_then(|network_tx| async move { + match network_tx { + Some(network_tx) => Ok(network_tx), + None => Err(warp_utils::reject::custom_not_found( + "The networking stack has not yet started.".to_string(), + )), + } + }); + + // Create a `warp` filter that rejects request whilst the node is syncing. + let not_while_syncing_filter = warp::any() + .and(network_globals.clone()) + .and(chain_filter.clone()) + .and_then( + |network_globals: Arc>, chain: Arc>| async move { + match *network_globals.sync_state.read() { + SyncState::SyncingFinalized { head_slot, .. } => { + let current_slot = chain + .slot_clock + .now_or_genesis() + .ok_or_else(|| { + warp_utils::reject::custom_server_error( + "unable to read slot clock".to_string(), + ) + })?; + + let tolerance = SYNC_TOLERANCE_EPOCHS * T::EthSpec::slots_per_epoch(); + + if head_slot + tolerance >= current_slot { + Ok(()) + } else { + Err(warp_utils::reject::not_synced(format!( + "head slot is {}, current slot is {}", + head_slot, current_slot + ))) + } + } + SyncState::SyncingHead { .. 
} => Ok(()), + SyncState::Synced => Ok(()), + SyncState::Stalled => Err(warp_utils::reject::not_synced( + "sync is stalled".to_string(), + )), + } + }, + ) + .untuple_one(); + + // Create a `warp` filter that provides access to the logger. + let log_filter = warp::any().map(move || ctx.log.clone()); + + /* + * + * Start of HTTP method definitions. + * + */ + + // GET beacon/genesis + let get_beacon_genesis = eth1_v1 + .and(warp::path("beacon")) + .and(warp::path("genesis")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + chain + .head_info() + .map_err(warp_utils::reject::beacon_chain_error) + .map(|head| api_types::GenesisData { + genesis_time: head.genesis_time, + genesis_validators_root: head.genesis_validators_root, + genesis_fork_version: chain.spec.genesis_fork_version, + }) + .map(api_types::GenericResponse::from) + }) + }); + + /* + * beacon/states/{state_id} + */ + + let beacon_states_path = eth1_v1 + .and(warp::path("beacon")) + .and(warp::path("states")) + .and(warp::path::param::()) + .and(chain_filter.clone()); + + // GET beacon/states/{state_id}/root + let get_beacon_state_root = beacon_states_path + .clone() + .and(warp::path("root")) + .and(warp::path::end()) + .and_then(|state_id: StateId, chain: Arc>| { + blocking_json_task(move || { + state_id + .root(&chain) + .map(api_types::RootData::from) + .map(api_types::GenericResponse::from) + }) + }); + + // GET beacon/states/{state_id}/fork + let get_beacon_state_fork = beacon_states_path + .clone() + .and(warp::path("fork")) + .and(warp::path::end()) + .and_then(|state_id: StateId, chain: Arc>| { + blocking_json_task(move || state_id.fork(&chain).map(api_types::GenericResponse::from)) + }); + + // GET beacon/states/{state_id}/finality_checkpoints + let get_beacon_state_finality_checkpoints = beacon_states_path + .clone() + .and(warp::path("finality_checkpoints")) + .and(warp::path::end()) + .and_then(|state_id: StateId, chain: Arc>| { + blocking_json_task(move || { + state_id + .map_state(&chain, |state| { + Ok(api_types::FinalityCheckpointsData { + previous_justified: state.previous_justified_checkpoint, + current_justified: state.current_justified_checkpoint, + finalized: state.finalized_checkpoint, + }) + }) + .map(api_types::GenericResponse::from) + }) + }); + + // GET beacon/states/{state_id}/validators + let get_beacon_state_validators = beacon_states_path + .clone() + .and(warp::path("validators")) + .and(warp::path::end()) + .and_then(|state_id: StateId, chain: Arc>| { + blocking_json_task(move || { + state_id + .map_state(&chain, |state| { + let epoch = state.current_epoch(); + let finalized_epoch = state.finalized_checkpoint.epoch; + let far_future_epoch = chain.spec.far_future_epoch; + + Ok(state + .validators + .iter() + .zip(state.balances.iter()) + .enumerate() + .map(|(index, (validator, balance))| api_types::ValidatorData { + index: index as u64, + balance: *balance, + status: api_types::ValidatorStatus::from_validator( + Some(validator), + epoch, + finalized_epoch, + far_future_epoch, + ), + validator: validator.clone(), + }) + .collect::>()) + }) + .map(api_types::GenericResponse::from) + }) + }); + + // GET beacon/states/{state_id}/validators/{validator_id} + let get_beacon_state_validators_id = beacon_states_path + .clone() + .and(warp::path("validators")) + .and(warp::path::param::()) + .and(warp::path::end()) + .and_then( + |state_id: StateId, chain: Arc>, validator_id: ValidatorId| { + blocking_json_task(move || { + state_id + 
.map_state(&chain, |state| { + let index_opt = match &validator_id { + ValidatorId::PublicKey(pubkey) => { + state.validators.iter().position(|v| v.pubkey == *pubkey) + } + ValidatorId::Index(index) => Some(*index as usize), + }; + + index_opt + .and_then(|index| { + let validator = state.validators.get(index)?; + let balance = *state.balances.get(index)?; + let epoch = state.current_epoch(); + let finalized_epoch = state.finalized_checkpoint.epoch; + let far_future_epoch = chain.spec.far_future_epoch; + + Some(api_types::ValidatorData { + index: index as u64, + balance, + status: api_types::ValidatorStatus::from_validator( + Some(validator), + epoch, + finalized_epoch, + far_future_epoch, + ), + validator: validator.clone(), + }) + }) + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "unknown validator: {}", + validator_id + )) + }) + }) + .map(api_types::GenericResponse::from) + }) + }, + ); + + // GET beacon/states/{state_id}/committees/{epoch} + let get_beacon_state_committees = beacon_states_path + .clone() + .and(warp::path("committees")) + .and(warp::path::param::()) + .and(warp::query::()) + .and(warp::path::end()) + .and_then( + |state_id: StateId, + chain: Arc>, + epoch: Epoch, + query: api_types::CommitteesQuery| { + blocking_json_task(move || { + state_id.map_state(&chain, |state| { + let relative_epoch = + RelativeEpoch::from_epoch(state.current_epoch(), epoch).map_err( + |_| { + warp_utils::reject::custom_bad_request(format!( + "state is epoch {} and only previous, current and next epochs are supported", + state.current_epoch() + )) + }, + )?; + + let committee_cache = if state + .committee_cache_is_initialized(relative_epoch) + { + state.committee_cache(relative_epoch).map(Cow::Borrowed) + } else { + CommitteeCache::initialized(state, epoch, &chain.spec).map(Cow::Owned) + } + .map_err(BeaconChainError::BeaconStateError) + .map_err(warp_utils::reject::beacon_chain_error)?; + + // Use either the supplied slot or all slots in the epoch. + let slots = query.slot.map(|slot| vec![slot]).unwrap_or_else(|| { + epoch.slot_iter(T::EthSpec::slots_per_epoch()).collect() + }); + + // Use either the supplied committee index or all available indices. + let indices = query.index.map(|index| vec![index]).unwrap_or_else(|| { + (0..committee_cache.committees_per_slot()).collect() + }); + + let mut response = Vec::with_capacity(slots.len() * indices.len()); + + for slot in slots { + // It is not acceptable to query with a slot that is not within the + // specified epoch. + if slot.epoch(T::EthSpec::slots_per_epoch()) != epoch { + return Err(warp_utils::reject::custom_bad_request(format!( + "{} is not in epoch {}", + slot, epoch + ))); + } + + for &index in &indices { + let committee = committee_cache + .get_beacon_committee(slot, index) + .ok_or_else(|| { + warp_utils::reject::custom_bad_request(format!( + "committee index {} does not exist in epoch {}", + index, epoch + )) + })?; + + response.push(api_types::CommitteeData { + index, + slot, + validators: committee + .committee + .iter() + .map(|i| *i as u64) + .collect(), + }); + } + } + + Ok(api_types::GenericResponse::from(response)) + }) + }) + }, + ); + + // GET beacon/headers + // + // Note: this endpoint only returns information about blocks in the canonical chain. Given that + // there's a `canonical` flag on the response, I assume it should also return non-canonical + // things. 
Returning non-canonical things is hard for us since we don't already have a + // mechanism for arbitrary forwards block iteration, we only support iterating forwards along + // the canonical chain. + let get_beacon_headers = eth1_v1 + .and(warp::path("beacon")) + .and(warp::path("headers")) + .and(warp::query::()) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then( + |query: api_types::HeadersQuery, chain: Arc>| { + blocking_json_task(move || { + let (root, block) = match (query.slot, query.parent_root) { + // No query parameters, return the canonical head block. + (None, None) => chain + .head_beacon_block() + .map_err(warp_utils::reject::beacon_chain_error) + .map(|block| (block.canonical_root(), block))?, + // Only the parent root parameter, do a forwards-iterator lookup. + (None, Some(parent_root)) => { + let parent = BlockId::from_root(parent_root).block(&chain)?; + let (root, _slot) = chain + .forwards_iter_block_roots(parent.slot()) + .map_err(warp_utils::reject::beacon_chain_error)? + // Ignore any skip-slots immediately following the parent. + .find(|res| { + res.as_ref().map_or(false, |(root, _)| *root != parent_root) + }) + .transpose() + .map_err(warp_utils::reject::beacon_chain_error)? + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "child of block with root {}", + parent_root + )) + })?; + + BlockId::from_root(root) + .block(&chain) + .map(|block| (root, block))? + } + // Slot is supplied, search by slot and optionally filter by + // parent root. + (Some(slot), parent_root_opt) => { + let root = BlockId::from_slot(slot).root(&chain)?; + let block = BlockId::from_root(root).block(&chain)?; + + // If the parent root was supplied, check that it matches the block + // obtained via a slot lookup. + if let Some(parent_root) = parent_root_opt { + if block.parent_root() != parent_root { + return Err(warp_utils::reject::custom_not_found(format!( + "no canonical block at slot {} with parent root {}", + slot, parent_root + ))); + } + } + + (root, block) + } + }; + + let data = api_types::BlockHeaderData { + root, + canonical: true, + header: api_types::BlockHeaderAndSignature { + message: block.message.block_header(), + signature: block.signature.into(), + }, + }; + + Ok(api_types::GenericResponse::from(vec![data])) + }) + }, + ); + + // GET beacon/headers/{block_id} + let get_beacon_headers_block_id = eth1_v1 + .and(warp::path("beacon")) + .and(warp::path("headers")) + .and(warp::path::param::()) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|block_id: BlockId, chain: Arc>| { + blocking_json_task(move || { + let root = block_id.root(&chain)?; + let block = BlockId::from_root(root).block(&chain)?; + + let canonical = chain + .block_root_at_slot(block.slot()) + .map_err(warp_utils::reject::beacon_chain_error)? 
+ .map_or(false, |canonical| root == canonical); + + let data = api_types::BlockHeaderData { + root, + canonical, + header: api_types::BlockHeaderAndSignature { + message: block.message.block_header(), + signature: block.signature.into(), + }, + }; + + Ok(api_types::GenericResponse::from(data)) + }) + }); + + /* + * beacon/blocks + */ + + // POST beacon/blocks/{block_id} + let post_beacon_blocks = eth1_v1 + .and(warp::path("beacon")) + .and(warp::path("blocks")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .and(log_filter.clone()) + .and_then( + |block: SignedBeaconBlock, + chain: Arc>, + network_tx: UnboundedSender>, + log: Logger| { + blocking_json_task(move || { + // Send the block, regardless of whether or not it is valid. The API + // specification is very clear that this is the desired behaviour. + publish_pubsub_message( + &network_tx, + PubsubMessage::BeaconBlock(Box::new(block.clone())), + )?; + + match chain.process_block(block.clone()) { + Ok(root) => { + info!( + log, + "Valid block from HTTP API"; + "root" => format!("{}", root) + ); + + // Update the head since it's likely this block will become the new + // head. + chain + .fork_choice() + .map_err(warp_utils::reject::beacon_chain_error)?; + + Ok(()) + } + Err(e) => { + let msg = format!("{:?}", e); + error!( + log, + "Invalid block provided to HTTP API"; + "reason" => &msg + ); + Err(warp_utils::reject::broadcast_without_import(msg)) + } + } + }) + }, + ); + + let beacon_blocks_path = eth1_v1 + .and(warp::path("beacon")) + .and(warp::path("blocks")) + .and(warp::path::param::()) + .and(chain_filter.clone()); + + // GET beacon/blocks/{block_id} + let get_beacon_block = beacon_blocks_path.clone().and(warp::path::end()).and_then( + |block_id: BlockId, chain: Arc>| { + blocking_json_task(move || block_id.block(&chain).map(api_types::GenericResponse::from)) + }, + ); + + // GET beacon/blocks/{block_id}/root + let get_beacon_block_root = beacon_blocks_path + .clone() + .and(warp::path("root")) + .and(warp::path::end()) + .and_then(|block_id: BlockId, chain: Arc>| { + blocking_json_task(move || { + block_id + .root(&chain) + .map(api_types::RootData::from) + .map(api_types::GenericResponse::from) + }) + }); + + // GET beacon/blocks/{block_id}/attestations + let get_beacon_block_attestations = beacon_blocks_path + .clone() + .and(warp::path("attestations")) + .and(warp::path::end()) + .and_then(|block_id: BlockId, chain: Arc>| { + blocking_json_task(move || { + block_id + .block(&chain) + .map(|block| block.message.body.attestations) + .map(api_types::GenericResponse::from) + }) + }); + + /* + * beacon/pool + */ + + let beacon_pool_path = eth1_v1 + .and(warp::path("beacon")) + .and(warp::path("pool")) + .and(chain_filter.clone()); + + // POST beacon/pool/attestations + let post_beacon_pool_attestations = beacon_pool_path + .clone() + .and(warp::path("attestations")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(network_tx_filter.clone()) + .and_then( + |chain: Arc>, + attestation: Attestation, + network_tx: UnboundedSender>| { + blocking_json_task(move || { + let attestation = chain + .verify_unaggregated_attestation_for_gossip(attestation.clone(), None) + .map_err(|e| { + warp_utils::reject::object_invalid(format!( + "gossip verification failed: {:?}", + e + )) + })?; + + publish_pubsub_message( + &network_tx, + PubsubMessage::Attestation(Box::new(( + attestation.subnet_id(), + attestation.attestation().clone(), + ))), + )?; + + chain + 
.apply_attestation_to_fork_choice(&attestation) + .map_err(|e| { + warp_utils::reject::broadcast_without_import(format!( + "not applied to fork choice: {:?}", + e + )) + })?; + + chain + .add_to_naive_aggregation_pool(attestation) + .map_err(|e| { + warp_utils::reject::broadcast_without_import(format!( + "not applied to naive aggregation pool: {:?}", + e + )) + })?; + + Ok(()) + }) + }, + ); + + // GET beacon/pool/attestations + let get_beacon_pool_attestations = beacon_pool_path + .clone() + .and(warp::path("attestations")) + .and(warp::path::end()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + let mut attestations = chain.op_pool.get_all_attestations(); + attestations.extend(chain.naive_aggregation_pool.read().iter().cloned()); + Ok(api_types::GenericResponse::from(attestations)) + }) + }); + + // POST beacon/pool/attester_slashings + let post_beacon_pool_attester_slashings = beacon_pool_path + .clone() + .and(warp::path("attester_slashings")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(network_tx_filter.clone()) + .and_then( + |chain: Arc>, + slashing: AttesterSlashing, + network_tx: UnboundedSender>| { + blocking_json_task(move || { + let outcome = chain + .verify_attester_slashing_for_gossip(slashing.clone()) + .map_err(|e| { + warp_utils::reject::object_invalid(format!( + "gossip verification failed: {:?}", + e + )) + })?; + + if let ObservationOutcome::New(slashing) = outcome { + publish_pubsub_message( + &network_tx, + PubsubMessage::AttesterSlashing(Box::new( + slashing.clone().into_inner(), + )), + )?; + + chain + .import_attester_slashing(slashing) + .map_err(warp_utils::reject::beacon_chain_error)?; + } + + Ok(()) + }) + }, + ); + + // GET beacon/pool/attester_slashings + let get_beacon_pool_attester_slashings = beacon_pool_path + .clone() + .and(warp::path("attester_slashings")) + .and(warp::path::end()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + let attestations = chain.op_pool.get_all_attester_slashings(); + Ok(api_types::GenericResponse::from(attestations)) + }) + }); + + // POST beacon/pool/proposer_slashings + let post_beacon_pool_proposer_slashings = beacon_pool_path + .clone() + .and(warp::path("proposer_slashings")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(network_tx_filter.clone()) + .and_then( + |chain: Arc>, + slashing: ProposerSlashing, + network_tx: UnboundedSender>| { + blocking_json_task(move || { + let outcome = chain + .verify_proposer_slashing_for_gossip(slashing.clone()) + .map_err(|e| { + warp_utils::reject::object_invalid(format!( + "gossip verification failed: {:?}", + e + )) + })?; + + if let ObservationOutcome::New(slashing) = outcome { + publish_pubsub_message( + &network_tx, + PubsubMessage::ProposerSlashing(Box::new( + slashing.clone().into_inner(), + )), + )?; + + chain.import_proposer_slashing(slashing); + } + + Ok(()) + }) + }, + ); + + // GET beacon/pool/proposer_slashings + let get_beacon_pool_proposer_slashings = beacon_pool_path + .clone() + .and(warp::path("proposer_slashings")) + .and(warp::path::end()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + let attestations = chain.op_pool.get_all_proposer_slashings(); + Ok(api_types::GenericResponse::from(attestations)) + }) + }); + + // POST beacon/pool/voluntary_exits + let post_beacon_pool_voluntary_exits = beacon_pool_path + .clone() + .and(warp::path("voluntary_exits")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(network_tx_filter.clone()) + .and_then( + |chain: Arc>, + exit: SignedVoluntaryExit, 
+ network_tx: UnboundedSender>| { + blocking_json_task(move || { + let outcome = chain + .verify_voluntary_exit_for_gossip(exit.clone()) + .map_err(|e| { + warp_utils::reject::object_invalid(format!( + "gossip verification failed: {:?}", + e + )) + })?; + + if let ObservationOutcome::New(exit) = outcome { + publish_pubsub_message( + &network_tx, + PubsubMessage::VoluntaryExit(Box::new(exit.clone().into_inner())), + )?; + + chain.import_voluntary_exit(exit); + } + + Ok(()) + }) + }, + ); + + // GET beacon/pool/voluntary_exits + let get_beacon_pool_voluntary_exits = beacon_pool_path + .clone() + .and(warp::path("voluntary_exits")) + .and(warp::path::end()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + let attestations = chain.op_pool.get_all_voluntary_exits(); + Ok(api_types::GenericResponse::from(attestations)) + }) + }); + + /* + * config/fork_schedule + */ + + let config_path = eth1_v1.and(warp::path("config")); + + // GET config/fork_schedule + let get_config_fork_schedule = config_path + .clone() + .and(warp::path("fork_schedule")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + StateId::head() + .fork(&chain) + .map(|fork| api_types::GenericResponse::from(vec![fork])) + }) + }); + + // GET config/spec + let get_config_spec = config_path + .clone() + .and(warp::path("spec")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + Ok(api_types::GenericResponse::from(YamlConfig::from_spec::< + T::EthSpec, + >( + &chain.spec + ))) + }) + }); + + // GET config/deposit_contract + let get_config_deposit_contract = config_path + .clone() + .and(warp::path("deposit_contract")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + Ok(api_types::GenericResponse::from( + api_types::DepositContractData { + address: chain.spec.deposit_contract_address, + chain_id: eth1::DEFAULT_NETWORK_ID.into(), + }, + )) + }) + }); + + /* + * debug + */ + + // GET debug/beacon/states/{state_id} + let get_debug_beacon_states = eth1_v1 + .and(warp::path("debug")) + .and(warp::path("beacon")) + .and(warp::path("states")) + .and(warp::path::param::()) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|state_id: StateId, chain: Arc>| { + blocking_task(move || { + state_id.map_state(&chain, |state| { + Ok(warp::reply::json(&api_types::GenericResponseRef::from( + &state, + ))) + }) + }) + }); + + // GET debug/beacon/heads + let get_debug_beacon_heads = eth1_v1 + .and(warp::path("debug")) + .and(warp::path("beacon")) + .and(warp::path("heads")) + .and(warp::path::end()) + .and(chain_filter.clone()) + .and_then(|chain: Arc>| { + blocking_json_task(move || { + let heads = chain + .heads() + .into_iter() + .map(|(root, slot)| api_types::ChainHeadData { root, slot }) + .collect::>(); + Ok(api_types::GenericResponse::from(heads)) + }) + }); + + /* + * node + */ + + // GET node/identity + let get_node_identity = eth1_v1 + .and(warp::path("node")) + .and(warp::path("identity")) + .and(warp::path::end()) + .and(network_globals.clone()) + .and_then(|network_globals: Arc>| { + blocking_json_task(move || { + Ok(api_types::GenericResponse::from(api_types::IdentityData { + peer_id: network_globals.local_peer_id().to_base58(), + enr: network_globals.local_enr(), + p2p_addresses: network_globals.listen_multiaddrs(), + })) + }) + }); + + // GET node/version + let get_node_version = eth1_v1 + .and(warp::path("node")) + 
.and(warp::path("version")) + .and(warp::path::end()) + .and_then(|| { + blocking_json_task(move || { + Ok(api_types::GenericResponse::from(api_types::VersionData { + version: version_with_platform(), + })) + }) + }); + + // GET node/syncing + let get_node_syncing = eth1_v1 + .and(warp::path("node")) + .and(warp::path("syncing")) + .and(warp::path::end()) + .and(network_globals.clone()) + .and(chain_filter.clone()) + .and_then( + |network_globals: Arc>, chain: Arc>| { + blocking_json_task(move || { + let head_slot = chain + .head_info() + .map(|info| info.slot) + .map_err(warp_utils::reject::beacon_chain_error)?; + let current_slot = chain + .slot() + .map_err(warp_utils::reject::beacon_chain_error)?; + + // Taking advantage of saturating subtraction on slot. + let sync_distance = current_slot - head_slot; + + let syncing_data = api_types::SyncingData { + is_syncing: network_globals.sync_state.read().is_syncing(), + head_slot, + sync_distance, + }; + + Ok(api_types::GenericResponse::from(syncing_data)) + }) + }, + ); + + /* + * validator + */ + + // GET validator/duties/attester/{epoch} + let get_validator_duties_attester = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("duties")) + .and(warp::path("attester")) + .and(warp::path::param::()) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(warp::query::()) + .and(chain_filter.clone()) + .and_then( + |epoch: Epoch, query: api_types::ValidatorDutiesQuery, chain: Arc>| { + blocking_json_task(move || { + let current_epoch = chain + .epoch() + .map_err(warp_utils::reject::beacon_chain_error)?; + + if epoch > current_epoch + 1 { + return Err(warp_utils::reject::custom_bad_request(format!( + "request epoch {} is more than one epoch past the current epoch {}", + epoch, current_epoch + ))); + } + + let validator_count = StateId::head() + .map_state(&chain, |state| Ok(state.validators.len() as u64))?; + + let indices = query + .index + .as_ref() + .map(|index| index.0.clone()) + .map(Result::Ok) + .unwrap_or_else(|| { + Ok::<_, warp::Rejection>((0..validator_count).collect()) + })?; + + let pubkeys = indices + .into_iter() + .filter(|i| *i < validator_count as u64) + .map(|i| { + let pubkey = chain + .validator_pubkey(i as usize) + .map_err(warp_utils::reject::beacon_chain_error)? + .ok_or_else(|| { + warp_utils::reject::custom_bad_request(format!( + "unknown validator index {}", + i + )) + })?; + + Ok((i, pubkey)) + }) + .collect::, warp::Rejection>>()?; + + // Converts the internal Lighthouse `AttestationDuty` struct into an + // API-conforming `AttesterData` struct. + let convert = |validator_index: u64, + pubkey: PublicKey, + duty: AttestationDuty| + -> api_types::AttesterData { + api_types::AttesterData { + pubkey: pubkey.into(), + validator_index, + committees_at_slot: duty.committees_at_slot, + committee_index: duty.index, + committee_length: duty.committee_len as u64, + validator_committee_index: duty.committee_position as u64, + slot: duty.slot, + } + }; + + // Here we have two paths: + // + // ## Fast + // + // If the request epoch is the current epoch, use the cached beacon chain + // method. + // + // ## Slow + // + // If the request epoch is prior to the current epoch, load a beacon state from + // disk + // + // The idea is to stop historical requests from washing out the cache on the + // beacon chain, whilst allowing a VC to request duties quickly. + let duties = if epoch == current_epoch { + // Fast path. 
+ pubkeys + .into_iter() + // Exclude indices which do not represent a known public key and a + // validator duty. + .filter_map(|(i, pubkey)| { + Some( + chain + .validator_attestation_duty(i as usize, epoch) + .transpose()? + .map_err(warp_utils::reject::beacon_chain_error) + .map(|duty| convert(i, pubkey, duty)), + ) + }) + .collect::, warp::Rejection>>()? + } else { + // If the head state is equal to or earlier than the request epoch, use it. + let mut state = chain + .with_head(|head| { + if head.beacon_state.current_epoch() <= epoch { + Ok(Some( + head.beacon_state + .clone_with(CloneConfig::committee_caches_only()), + )) + } else { + Ok(None) + } + }) + .map_err(warp_utils::reject::beacon_chain_error)? + .map(Result::Ok) + .unwrap_or_else(|| { + StateId::slot(epoch.start_slot(T::EthSpec::slots_per_epoch())) + .state(&chain) + })?; + + // Only skip forward to the epoch prior to the request, since we have a + // one-epoch look-ahead on shuffling. + while state + .next_epoch() + .map_err(warp_utils::reject::beacon_state_error)? + < epoch + { + // Don't calculate state roots since they aren't required for calculating + // shuffling (achieved by providing Hash256::zero()). + per_slot_processing(&mut state, Some(Hash256::zero()), &chain.spec) + .map_err(warp_utils::reject::slot_processing_error)?; + } + + let relative_epoch = + RelativeEpoch::from_epoch(state.current_epoch(), epoch).map_err( + |e| { + warp_utils::reject::custom_server_error(format!( + "unable to obtain suitable state: {:?}", + e + )) + }, + )?; + + state + .build_committee_cache(relative_epoch, &chain.spec) + .map_err(warp_utils::reject::beacon_state_error)?; + pubkeys + .into_iter() + .filter_map(|(i, pubkey)| { + Some( + state + .get_attestation_duties(i as usize, relative_epoch) + .transpose()? + .map_err(warp_utils::reject::beacon_state_error) + .map(|duty| convert(i, pubkey, duty)), + ) + }) + .collect::, warp::Rejection>>()? 
+ }; + + Ok(api_types::GenericResponse::from(duties)) + }) + }, + ); + + // GET validator/duties/proposer/{epoch} + let get_validator_duties_proposer = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("duties")) + .and(warp::path("proposer")) + .and(warp::path::param::()) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(chain_filter.clone()) + .and(beacon_proposer_cache()) + .and_then( + |epoch: Epoch, + chain: Arc>, + beacon_proposer_cache: Arc>| { + blocking_json_task(move || { + beacon_proposer_cache + .lock() + .get_proposers(&chain, epoch) + .map(api_types::GenericResponse::from) + }) + }, + ); + + // GET validator/blocks/{slot} + let get_validator_blocks = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("blocks")) + .and(warp::path::param::()) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(warp::query::()) + .and(chain_filter.clone()) + .and_then( + |slot: Slot, query: api_types::ValidatorBlocksQuery, chain: Arc>| { + blocking_json_task(move || { + let randao_reveal = (&query.randao_reveal).try_into().map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "randao reveal is not valid BLS signature: {:?}", + e + )) + })?; + + chain + .produce_block(randao_reveal, slot, query.graffiti.map(Into::into)) + .map(|block_and_state| block_and_state.0) + .map(api_types::GenericResponse::from) + .map_err(warp_utils::reject::block_production_error) + }) + }, + ); + + // GET validator/attestation_data?slot,committee_index + let get_validator_attestation_data = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("attestation_data")) + .and(warp::path::end()) + .and(warp::query::()) + .and(not_while_syncing_filter.clone()) + .and(chain_filter.clone()) + .and_then( + |query: api_types::ValidatorAttestationDataQuery, chain: Arc>| { + blocking_json_task(move || { + chain + .produce_unaggregated_attestation(query.slot, query.committee_index) + .map(|attestation| attestation.data) + .map(api_types::GenericResponse::from) + .map_err(warp_utils::reject::beacon_chain_error) + }) + }, + ); + + // GET validator/aggregate_attestation?attestation_data_root,slot + let get_validator_aggregate_attestation = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("aggregate_attestation")) + .and(warp::path::end()) + .and(warp::query::()) + .and(not_while_syncing_filter.clone()) + .and(chain_filter.clone()) + .and_then( + |query: api_types::ValidatorAggregateAttestationQuery, chain: Arc>| { + blocking_json_task(move || { + chain + .get_aggregated_attestation_by_slot_and_root( + query.slot, + &query.attestation_data_root, + ) + .map(api_types::GenericResponse::from) + .ok_or_else(|| { + warp_utils::reject::custom_not_found( + "no matching aggregate found".to_string(), + ) + }) + }) + }, + ); + + // POST validator/aggregate_and_proofs + let post_validator_aggregate_and_proofs = eth1_v1 + .and(warp::path("validator")) + .and(warp::path("aggregate_and_proofs")) + .and(warp::path::end()) + .and(not_while_syncing_filter) + .and(chain_filter.clone()) + .and(warp::body::json()) + .and(network_tx_filter.clone()) + .and_then( + |chain: Arc>, + aggregate: SignedAggregateAndProof, + network_tx: UnboundedSender>| { + blocking_json_task(move || { + let aggregate = + match chain.verify_aggregated_attestation_for_gossip(aggregate.clone()) { + Ok(aggregate) => aggregate, + // If we already know the attestation, don't broadcast it or attempt to + // further verify it. Return success. 
+    // POST validator/aggregate_and_proofs
+    let post_validator_aggregate_and_proofs = eth1_v1
+        .and(warp::path("validator"))
+        .and(warp::path("aggregate_and_proofs"))
+        .and(warp::path::end())
+        .and(not_while_syncing_filter)
+        .and(chain_filter.clone())
+        .and(warp::body::json())
+        .and(network_tx_filter.clone())
+        .and_then(
+            |chain: Arc<BeaconChain<T>>,
+             aggregate: SignedAggregateAndProof<T::EthSpec>,
+             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| {
+                blocking_json_task(move || {
+                    let aggregate =
+                        match chain.verify_aggregated_attestation_for_gossip(aggregate.clone()) {
+                            Ok(aggregate) => aggregate,
+                            // If we already know the attestation, don't broadcast it or attempt to
+                            // further verify it. Return success.
+                            //
+                            // It's reasonably likely that two different validators produce
+                            // identical aggregates, especially if they're using the same beacon
+                            // node.
+                            Err(AttnError::AttestationAlreadyKnown(_)) => return Ok(()),
+                            Err(e) => {
+                                return Err(warp_utils::reject::object_invalid(format!(
+                                    "gossip verification failed: {:?}",
+                                    e
+                                )))
+                            }
+                        };
+
+                    publish_pubsub_message(
+                        &network_tx,
+                        PubsubMessage::AggregateAndProofAttestation(Box::new(
+                            aggregate.aggregate().clone(),
+                        )),
+                    )?;
+
+                    chain
+                        .apply_attestation_to_fork_choice(&aggregate)
+                        .map_err(|e| {
+                            warp_utils::reject::broadcast_without_import(format!(
+                                "not applied to fork choice: {:?}",
+                                e
+                            ))
+                        })?;
+
+                    chain.add_to_block_inclusion_pool(aggregate).map_err(|e| {
+                        warp_utils::reject::broadcast_without_import(format!(
+                            "not applied to block inclusion pool: {:?}",
+                            e
+                        ))
+                    })?;
+
+                    Ok(())
+                })
+            },
+        );
+
+    // POST validator/beacon_committee_subscriptions
+    let post_validator_beacon_committee_subscriptions = eth1_v1
+        .and(warp::path("validator"))
+        .and(warp::path("beacon_committee_subscriptions"))
+        .and(warp::path::end())
+        .and(warp::body::json())
+        .and(network_tx_filter)
+        .and_then(
+            |subscriptions: Vec<api_types::BeaconCommitteeSubscription>,
+             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| {
+                blocking_json_task(move || {
+                    for subscription in &subscriptions {
+                        let subscription = api_types::ValidatorSubscription {
+                            validator_index: subscription.validator_index,
+                            attestation_committee_index: subscription.committee_index,
+                            slot: subscription.slot,
+                            committee_count_at_slot: subscription.committees_at_slot,
+                            is_aggregator: subscription.is_aggregator,
+                        };
+
+                        publish_network_message(
+                            &network_tx,
+                            NetworkMessage::Subscribe {
+                                subscriptions: vec![subscription],
+                            },
+                        )?;
+                    }
+
+                    Ok(())
+                })
+            },
+        );
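+
+    // The `lighthouse/` routes below are Lighthouse-specific extensions; they
+    // are not part of the standard Eth2 beacon node HTTP API.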
+    // GET lighthouse/health
+    let get_lighthouse_health = warp::path("lighthouse")
+        .and(warp::path("health"))
+        .and(warp::path::end())
+        .and_then(|| {
+            blocking_json_task(move || {
+                eth2::lighthouse::Health::observe()
+                    .map(api_types::GenericResponse::from)
+                    .map_err(warp_utils::reject::custom_bad_request)
+            })
+        });
+
+    // GET lighthouse/syncing
+    let get_lighthouse_syncing = warp::path("lighthouse")
+        .and(warp::path("syncing"))
+        .and(warp::path::end())
+        .and(network_globals.clone())
+        .and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
+            blocking_json_task(move || {
+                Ok(api_types::GenericResponse::from(
+                    network_globals.sync_state(),
+                ))
+            })
+        });
+
+    // GET lighthouse/peers
+    let get_lighthouse_peers = warp::path("lighthouse")
+        .and(warp::path("peers"))
+        .and(warp::path::end())
+        .and(network_globals.clone())
+        .and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
+            blocking_json_task(move || {
+                Ok(network_globals
+                    .peers
+                    .read()
+                    .peers()
+                    .map(|(peer_id, peer_info)| eth2::lighthouse::Peer {
+                        peer_id: peer_id.to_string(),
+                        peer_info: peer_info.clone(),
+                    })
+                    .collect::<Vec<_>>())
+            })
+        });
+
+    // GET lighthouse/peers/connected
+    let get_lighthouse_peers_connected = warp::path("lighthouse")
+        .and(warp::path("peers"))
+        .and(warp::path("connected"))
+        .and(warp::path::end())
+        .and(network_globals)
+        .and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
+            blocking_json_task(move || {
+                Ok(network_globals
+                    .peers
+                    .read()
+                    .connected_peers()
+                    .map(|(peer_id, peer_info)| eth2::lighthouse::Peer {
+                        peer_id: peer_id.to_string(),
+                        peer_info: peer_info.clone(),
+                    })
+                    .collect::<Vec<_>>())
+            })
+        });
+
+    // GET lighthouse/proto_array
+    let get_lighthouse_proto_array = warp::path("lighthouse")
+        .and(warp::path("proto_array"))
+        .and(warp::path::end())
+        .and(chain_filter.clone())
+        .and_then(|chain: Arc<BeaconChain<T>>| {
+            blocking_task(move || {
+                Ok::<_, warp::Rejection>(warp::reply::json(&api_types::GenericResponseRef::from(
+                    chain.fork_choice.read().proto_array().core_proto_array(),
+                )))
+            })
+        });
+
+    // GET lighthouse/validator_inclusion/{epoch}/{validator_id}
+    let get_lighthouse_validator_inclusion = warp::path("lighthouse")
+        .and(warp::path("validator_inclusion"))
+        .and(warp::path::param::<Epoch>())
+        .and(warp::path::param::<ValidatorId>())
+        .and(warp::path::end())
+        .and(chain_filter.clone())
+        .and_then(
+            |epoch: Epoch, validator_id: ValidatorId, chain: Arc<BeaconChain<T>>| {
+                blocking_json_task(move || {
+                    validator_inclusion::validator_inclusion_data(epoch, &validator_id, &chain)
+                        .map(api_types::GenericResponse::from)
+                })
+            },
+        );
+
+    // GET lighthouse/validator_inclusion/{epoch}/global
+    let get_lighthouse_validator_inclusion_global = warp::path("lighthouse")
+        .and(warp::path("validator_inclusion"))
+        .and(warp::path::param::<Epoch>())
+        .and(warp::path("global"))
+        .and(warp::path::end())
+        .and(chain_filter)
+        .and_then(|epoch: Epoch, chain: Arc<BeaconChain<T>>| {
+            blocking_json_task(move || {
+                validator_inclusion::global_validator_inclusion_data(epoch, &chain)
+                    .map(api_types::GenericResponse::from)
+            })
+        });
+
+    // Define the ultimate set of routes that will be provided to the server.
+    let routes = warp::get()
+        .and(
+            get_beacon_genesis
+                .or(get_beacon_state_root.boxed())
+                .or(get_beacon_state_fork.boxed())
+                .or(get_beacon_state_finality_checkpoints.boxed())
+                .or(get_beacon_state_validators.boxed())
+                .or(get_beacon_state_validators_id.boxed())
+                .or(get_beacon_state_committees.boxed())
+                .or(get_beacon_headers.boxed())
+                .or(get_beacon_headers_block_id.boxed())
+                .or(get_beacon_block.boxed())
+                .or(get_beacon_block_attestations.boxed())
+                .or(get_beacon_block_root.boxed())
+                .or(get_beacon_pool_attestations.boxed())
+                .or(get_beacon_pool_attester_slashings.boxed())
+                .or(get_beacon_pool_proposer_slashings.boxed())
+                .or(get_beacon_pool_voluntary_exits.boxed())
+                .or(get_config_fork_schedule.boxed())
+                .or(get_config_spec.boxed())
+                .or(get_config_deposit_contract.boxed())
+                .or(get_debug_beacon_states.boxed())
+                .or(get_debug_beacon_heads.boxed())
+                .or(get_node_identity.boxed())
+                .or(get_node_version.boxed())
+                .or(get_node_syncing.boxed())
+                .or(get_validator_duties_attester.boxed())
+                .or(get_validator_duties_proposer.boxed())
+                .or(get_validator_blocks.boxed())
+                .or(get_validator_attestation_data.boxed())
+                .or(get_validator_aggregate_attestation.boxed())
+                .or(get_lighthouse_health.boxed())
+                .or(get_lighthouse_syncing.boxed())
+                .or(get_lighthouse_peers.boxed())
+                .or(get_lighthouse_peers_connected.boxed())
+                .or(get_lighthouse_proto_array.boxed())
+                .or(get_lighthouse_validator_inclusion.boxed())
+                .or(get_lighthouse_validator_inclusion_global.boxed())
+                .boxed(),
+        )
+        .or(warp::post()
+            .and(
+                post_beacon_blocks
+                    .or(post_beacon_pool_attestations.boxed())
+                    .or(post_beacon_pool_attester_slashings.boxed())
+                    .or(post_beacon_pool_proposer_slashings.boxed())
+                    .or(post_beacon_pool_voluntary_exits.boxed())
+                    .or(post_validator_aggregate_and_proofs.boxed())
+                    .or(post_validator_beacon_committee_subscriptions.boxed())
+                    .boxed(),
+            )
+            .boxed())
+        .boxed()
+        // Maps errors into HTTP responses.
+        .recover(warp_utils::reject::handle_rejection)
+        .with(slog_logging(log.clone()))
+        .with(prometheus_metrics())
+        // Add a `Server` header.
+        .map(|reply| warp::reply::with_header(reply, "Server", &version_with_platform()))
+        // Maybe add some CORS headers.
+        .map(move |reply| warp_utils::reply::maybe_cors(reply, allow_origin.as_ref()));
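+
+    // `try_bind_with_graceful_shutdown` ties the lifetime of the server future
+    // to the `shutdown` future: once `shutdown` resolves, in-flight requests are
+    // allowed to finish and the server future completes.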
+    let (listening_socket, server) = warp::serve(routes).try_bind_with_graceful_shutdown(
+        SocketAddrV4::new(config.listen_addr, config.listen_port),
+        async {
+            shutdown.await;
+        },
+    )?;
+
+    info!(
+        log,
+        "HTTP API started";
+        "listen_address" => listening_socket.to_string(),
+    );
+
+    Ok((listening_socket, server))
+}
+
+/// Publish a message to the libp2p pubsub network.
+fn publish_pubsub_message<T: EthSpec>(
+    network_tx: &UnboundedSender<NetworkMessage<T>>,
+    message: PubsubMessage<T>,
+) -> Result<(), warp::Rejection> {
+    publish_network_message(
+        network_tx,
+        NetworkMessage::Publish {
+            messages: vec![message],
+        },
+    )
+}
+
+/// Publish a message to the libp2p network.
+fn publish_network_message<T: EthSpec>(
+    network_tx: &UnboundedSender<NetworkMessage<T>>,
+    message: NetworkMessage<T>,
+) -> Result<(), warp::Rejection> {
+    network_tx.send(message).map_err(|e| {
+        warp_utils::reject::custom_server_error(format!(
+            "unable to publish to network channel: {}",
+            e
+        ))
+    })
+}
diff --git a/beacon_node/http_api/src/metrics.rs b/beacon_node/http_api/src/metrics.rs
new file mode 100644
index 000000000..c641df6a4
--- /dev/null
+++ b/beacon_node/http_api/src/metrics.rs
@@ -0,0 +1,32 @@
+pub use lighthouse_metrics::*;
+
+lazy_static::lazy_static! {
+    pub static ref HTTP_API_PATHS_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
+        "http_api_paths_total",
+        "Count of HTTP requests received",
+        &["path"]
+    );
+    pub static ref HTTP_API_STATUS_CODES_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
+        "http_api_status_codes_total",
+        "Count of HTTP status codes returned",
+        &["status"]
+    );
+    pub static ref HTTP_API_PATHS_TIMES: Result<HistogramVec> = try_create_histogram_vec(
+        "http_api_paths_times",
+        "Duration to process HTTP requests per path",
+        &["path"]
+    );
+
+    pub static ref HTTP_API_BEACON_PROPOSER_CACHE_TIMES: Result<Histogram> = try_create_histogram(
+        "http_api_beacon_proposer_cache_build_times",
+        "Duration to build the beacon proposer cache",
+    );
+    pub static ref HTTP_API_BEACON_PROPOSER_CACHE_HITS_TOTAL: Result<IntCounter> = try_create_int_counter(
+        "http_api_beacon_proposer_cache_hits_total",
+        "Count of times the proposer cache has been hit",
+    );
+    pub static ref HTTP_API_BEACON_PROPOSER_CACHE_MISSES_TOTAL: Result<IntCounter> = try_create_int_counter(
+        "http_api_beacon_proposer_cache_misses_total",
+        "Count of times the proposer cache has been missed",
+    );
+}
diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs
new file mode 100644
index 000000000..11800648f
--- /dev/null
+++ b/beacon_node/http_api/src/state_id.rs
@@ -0,0 +1,118 @@
+use beacon_chain::{BeaconChain, BeaconChainTypes};
+use eth2::types::StateId as CoreStateId;
+use std::str::FromStr;
+use types::{BeaconState, EthSpec, Fork, Hash256, Slot};
+
+/// Wraps `eth2::types::StateId` and provides common state-access functionality. E.g., reading
+/// states or parts of states from the database.
+pub struct StateId(CoreStateId);
+
+impl StateId {
+    pub fn head() -> Self {
+        Self(CoreStateId::Head)
+    }
+
+    pub fn slot(slot: Slot) -> Self {
+        Self(CoreStateId::Slot(slot))
+    }
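+
+    // A hedged usage sketch (names as defined in this file; `chain` would come
+    // from the surrounding HTTP handler):
+    //
+    //     let root = StateId::head().root(&chain)?;
+    //     let state = StateId::slot(Slot::new(32)).state(&chain)?;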
+
+    /// Return the state root identified by `self`.
+    pub fn root<T: BeaconChainTypes>(
+        &self,
+        chain: &BeaconChain<T>,
+    ) -> Result<Hash256, warp::Rejection> {
+        let slot = match &self.0 {
+            CoreStateId::Head => {
+                return chain
+                    .head_info()
+                    .map(|head| head.state_root)
+                    .map_err(warp_utils::reject::beacon_chain_error)
+            }
+            CoreStateId::Genesis => return Ok(chain.genesis_state_root),
+            CoreStateId::Finalized => chain.head_info().map(|head| {
+                head.finalized_checkpoint
+                    .epoch
+                    .start_slot(T::EthSpec::slots_per_epoch())
+            }),
+            CoreStateId::Justified => chain.head_info().map(|head| {
+                head.current_justified_checkpoint
+                    .epoch
+                    .start_slot(T::EthSpec::slots_per_epoch())
+            }),
+            CoreStateId::Slot(slot) => Ok(*slot),
+            CoreStateId::Root(root) => return Ok(*root),
+        }
+        .map_err(warp_utils::reject::beacon_chain_error)?;
+
+        chain
+            .state_root_at_slot(slot)
+            .map_err(warp_utils::reject::beacon_chain_error)?
+            .ok_or_else(|| {
+                warp_utils::reject::custom_not_found(format!("beacon state at slot {}", slot))
+            })
+    }
+
+    /// Return the `fork` field of the state identified by `self`.
+    pub fn fork<T: BeaconChainTypes>(
+        &self,
+        chain: &BeaconChain<T>,
+    ) -> Result<Fork, warp::Rejection> {
+        self.map_state(chain, |state| Ok(state.fork))
+    }
+
+    /// Return the `BeaconState` identified by `self`.
+    pub fn state<T: BeaconChainTypes>(
+        &self,
+        chain: &BeaconChain<T>,
+    ) -> Result<BeaconState<T::EthSpec>, warp::Rejection> {
+        let (state_root, slot_opt) = match &self.0 {
+            CoreStateId::Head => {
+                return chain
+                    .head_beacon_state()
+                    .map_err(warp_utils::reject::beacon_chain_error)
+            }
+            CoreStateId::Slot(slot) => (self.root(chain)?, Some(*slot)),
+            _ => (self.root(chain)?, None),
+        };
+
+        chain
+            .get_state(&state_root, slot_opt)
+            .map_err(warp_utils::reject::beacon_chain_error)
+            .and_then(|opt| {
+                opt.ok_or_else(|| {
+                    warp_utils::reject::custom_not_found(format!(
+                        "beacon state at root {}",
+                        state_root
+                    ))
+                })
+            })
+    }
+
+    /// Map a function across the `BeaconState` identified by `self`.
+    ///
+    /// This function will avoid instantiating/copying a new state when `self` points to the head
+    /// of the chain.
+    pub fn map_state<T: BeaconChainTypes, F, U>(
+        &self,
+        chain: &BeaconChain<T>,
+        func: F,
+    ) -> Result<U, warp::Rejection>
+    where
+        F: Fn(&BeaconState<T::EthSpec>) -> Result<U, warp::Rejection>,
+    {
+        match &self.0 {
+            CoreStateId::Head => chain
+                .with_head(|snapshot| Ok(func(&snapshot.beacon_state)))
+                .map_err(warp_utils::reject::beacon_chain_error)?,
+            _ => func(&self.state(chain)?),
+        }
+    }
+}
+
+impl FromStr for StateId {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        CoreStateId::from_str(s).map(Self)
+    }
+}
diff --git a/beacon_node/http_api/src/validator_inclusion.rs b/beacon_node/http_api/src/validator_inclusion.rs
new file mode 100644
index 000000000..90847dd6b
--- /dev/null
+++ b/beacon_node/http_api/src/validator_inclusion.rs
@@ -0,0 +1,88 @@
+use crate::state_id::StateId;
+use beacon_chain::{BeaconChain, BeaconChainTypes};
+use eth2::{
+    lighthouse::{GlobalValidatorInclusionData, ValidatorInclusionData},
+    types::ValidatorId,
+};
+use state_processing::per_epoch_processing::ValidatorStatuses;
+use types::{Epoch, EthSpec};
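+
+// Both helpers below replay the target epoch's attestations through
+// `ValidatorStatuses`, so they are comparatively expensive; they back the
+// non-standard `lighthouse/validator_inclusion` endpoints.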
+/// Returns information about *all validators* (i.e., global) and how they performed during a given
+/// epoch.
+pub fn global_validator_inclusion_data<T: BeaconChainTypes>(
+    epoch: Epoch,
+    chain: &BeaconChain<T>,
+) -> Result<GlobalValidatorInclusionData, warp::Rejection> {
+    let target_slot = epoch.end_slot(T::EthSpec::slots_per_epoch());
+
+    let state = StateId::slot(target_slot).state(chain)?;
+
+    let mut validator_statuses = ValidatorStatuses::new(&state, &chain.spec)
+        .map_err(warp_utils::reject::beacon_state_error)?;
+    validator_statuses
+        .process_attestations(&state, &chain.spec)
+        .map_err(warp_utils::reject::beacon_state_error)?;
+
+    let totals = validator_statuses.total_balances;
+
+    Ok(GlobalValidatorInclusionData {
+        current_epoch_active_gwei: totals.current_epoch(),
+        previous_epoch_active_gwei: totals.previous_epoch(),
+        current_epoch_attesting_gwei: totals.current_epoch_attesters(),
+        current_epoch_target_attesting_gwei: totals.current_epoch_target_attesters(),
+        previous_epoch_attesting_gwei: totals.previous_epoch_attesters(),
+        previous_epoch_target_attesting_gwei: totals.previous_epoch_target_attesters(),
+        previous_epoch_head_attesting_gwei: totals.previous_epoch_head_attesters(),
+    })
+}
+
+/// Returns information about a single validator and how it performed during a given epoch.
+pub fn validator_inclusion_data<T: BeaconChainTypes>(
+    epoch: Epoch,
+    validator_id: &ValidatorId,
+    chain: &BeaconChain<T>,
+) -> Result<Option<ValidatorInclusionData>, warp::Rejection> {
+    let target_slot = epoch.end_slot(T::EthSpec::slots_per_epoch());
+
+    let mut state = StateId::slot(target_slot).state(chain)?;
+
+    let mut validator_statuses = ValidatorStatuses::new(&state, &chain.spec)
+        .map_err(warp_utils::reject::beacon_state_error)?;
+    validator_statuses
+        .process_attestations(&state, &chain.spec)
+        .map_err(warp_utils::reject::beacon_state_error)?;
+
+    state
+        .update_pubkey_cache()
+        .map_err(warp_utils::reject::beacon_state_error)?;
+
+    let validator_index = match validator_id {
+        ValidatorId::Index(index) => *index as usize,
+        ValidatorId::PublicKey(pubkey) => {
+            if let Some(index) = state
+                .get_validator_index(pubkey)
+                .map_err(warp_utils::reject::beacon_state_error)?
+ { + index + } else { + return Ok(None); + } + } + }; + + Ok(validator_statuses + .statuses + .get(validator_index) + .map(|vote| ValidatorInclusionData { + is_slashed: vote.is_slashed, + is_withdrawable_in_current_epoch: vote.is_withdrawable_in_current_epoch, + is_active_in_current_epoch: vote.is_active_in_current_epoch, + is_active_in_previous_epoch: vote.is_active_in_previous_epoch, + current_epoch_effective_balance_gwei: vote.current_epoch_effective_balance, + is_current_epoch_attester: vote.is_current_epoch_attester, + is_current_epoch_target_attester: vote.is_current_epoch_target_attester, + is_previous_epoch_attester: vote.is_previous_epoch_attester, + is_previous_epoch_target_attester: vote.is_previous_epoch_target_attester, + is_previous_epoch_head_attester: vote.is_previous_epoch_head_attester, + })) +} diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs new file mode 100644 index 000000000..2a7e8f6d4 --- /dev/null +++ b/beacon_node/http_api/tests/tests.rs @@ -0,0 +1,1786 @@ +use beacon_chain::{ + test_utils::{ + AttestationStrategy, BeaconChainHarness, BlockStrategy, + BlockingMigratorEphemeralHarnessType, + }, + BeaconChain, StateSkipConfig, +}; +use discv5::enr::{CombinedKey, EnrBuilder}; +use environment::null_logger; +use eth2::{types::*, BeaconNodeHttpClient, Url}; +use eth2_libp2p::{ + rpc::methods::MetaData, + types::{EnrBitfield, SyncState}, + NetworkGlobals, +}; +use http_api::{Config, Context}; +use network::NetworkMessage; +use state_processing::per_slot_processing; +use std::convert::TryInto; +use std::net::Ipv4Addr; +use std::sync::Arc; +use tokio::sync::mpsc; +use tokio::sync::oneshot; +use tree_hash::TreeHash; +use types::{ + test_utils::generate_deterministic_keypairs, AggregateSignature, BeaconState, BitList, Domain, + EthSpec, Hash256, Keypair, MainnetEthSpec, RelativeEpoch, SelectionProof, SignedRoot, Slot, +}; + +type E = MainnetEthSpec; + +const SLOTS_PER_EPOCH: u64 = 32; +const VALIDATOR_COUNT: usize = SLOTS_PER_EPOCH as usize; +const CHAIN_LENGTH: u64 = SLOTS_PER_EPOCH * 5; +const JUSTIFIED_EPOCH: u64 = 4; +const FINALIZED_EPOCH: u64 = 3; + +/// Skipping the slots around the epoch boundary allows us to check that we're obtaining states +/// from skipped slots for the finalized and justified checkpoints (instead of the state from the +/// block that those roots point to). 
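+/// With `SLOTS_PER_EPOCH = 32`, `JUSTIFIED_EPOCH = 4` and `FINALIZED_EPOCH = 3`,
+/// this works out to slots 127, 128, 95 and 96 (a sanity check of the
+/// expressions below, not extra test input).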
+const SKIPPED_SLOTS: &[u64] = &[
+    JUSTIFIED_EPOCH * SLOTS_PER_EPOCH - 1,
+    JUSTIFIED_EPOCH * SLOTS_PER_EPOCH,
+    FINALIZED_EPOCH * SLOTS_PER_EPOCH - 1,
+    FINALIZED_EPOCH * SLOTS_PER_EPOCH,
+];
+
+struct ApiTester {
+    chain: Arc<BeaconChain<BlockingMigratorEphemeralHarnessType<E>>>,
+    client: BeaconNodeHttpClient,
+    next_block: SignedBeaconBlock<E>,
+    attestations: Vec<Attestation<E>>,
+    attester_slashing: AttesterSlashing<E>,
+    proposer_slashing: ProposerSlashing,
+    voluntary_exit: SignedVoluntaryExit,
+    _server_shutdown: oneshot::Sender<()>,
+    validator_keypairs: Vec<Keypair>,
+    network_rx: mpsc::UnboundedReceiver<NetworkMessage<E>>,
+}
+
+impl ApiTester {
+    pub fn new() -> Self {
+        let mut harness = BeaconChainHarness::new(
+            MainnetEthSpec,
+            generate_deterministic_keypairs(VALIDATOR_COUNT),
+        );
+
+        harness.advance_slot();
+
+        for _ in 0..CHAIN_LENGTH {
+            let slot = harness.chain.slot().unwrap().as_u64();
+
+            if !SKIPPED_SLOTS.contains(&slot) {
+                harness.extend_chain(
+                    1,
+                    BlockStrategy::OnCanonicalHead,
+                    AttestationStrategy::AllValidators,
+                );
+            }
+
+            harness.advance_slot();
+        }
+
+        let head = harness.chain.head().unwrap();
+
+        assert_eq!(
+            harness.chain.slot().unwrap(),
+            head.beacon_block.slot() + 1,
+            "precondition: current slot is one after head"
+        );
+
+        let (next_block, _next_state) =
+            harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap());
+
+        let attestations = harness
+            .get_unaggregated_attestations(
+                &AttestationStrategy::AllValidators,
+                &head.beacon_state,
+                head.beacon_block_root,
+                harness.chain.slot().unwrap(),
+            )
+            .into_iter()
+            .map(|vec| vec.into_iter().map(|(attestation, _subnet_id)| attestation))
+            .flatten()
+            .collect::<Vec<_>>();
+
+        assert!(
+            !attestations.is_empty(),
+            "precondition: attestations for testing"
+        );
+
+        let attester_slashing = harness.make_attester_slashing(vec![0, 1]);
+        let proposer_slashing = harness.make_proposer_slashing(2);
+        let voluntary_exit = harness.make_voluntary_exit(3, harness.chain.epoch().unwrap());
+
+        // Changing this *after* the chain has been initialized is a bit cheeky, but it shouldn't
+        // cause issue.
+        //
+        // This allows for testing voluntary exits without building out a massive chain.
+        harness.chain.spec.shard_committee_period = 2;
+
+        let chain = Arc::new(harness.chain);
+
+        assert_eq!(
+            chain.head_info().unwrap().finalized_checkpoint.epoch,
+            3,
+            "precondition: finality"
+        );
+        assert_eq!(
+            chain
+                .head_info()
+                .unwrap()
+                .current_justified_checkpoint
+                .epoch,
+            4,
+            "precondition: justification"
+        );
+
+        let (network_tx, network_rx) = mpsc::unbounded_channel();
+
+        let log = null_logger().unwrap();
+
+        // Default metadata
+        let meta_data = MetaData {
+            seq_number: 0,
+            attnets: EnrBitfield::<E>::default(),
+        };
+        let enr_key = CombinedKey::generate_secp256k1();
+        let enr = EnrBuilder::new("v4").build(&enr_key).unwrap();
+        let network_globals = NetworkGlobals::new(enr, 42, 42, meta_data, vec![], &log);
+
+        *network_globals.sync_state.write() = SyncState::Synced;
+
+        let context = Arc::new(Context {
+            config: Config {
+                enabled: true,
+                listen_addr: Ipv4Addr::new(127, 0, 0, 1),
+                listen_port: 0,
+                allow_origin: None,
+            },
+            chain: Some(chain.clone()),
+            network_tx: Some(network_tx),
+            network_globals: Some(Arc::new(network_globals)),
+            log,
+        });
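+        // `listen_port: 0` in the config above asks the OS for an ephemeral
+        // port; the actual port is read back from `listening_socket` once the
+        // server is bound.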
+        let ctx = context.clone();
+        let (shutdown_tx, shutdown_rx) = oneshot::channel();
+        let server_shutdown = async {
+            // It's not really interesting why this triggered, just that it happened.
+            let _ = shutdown_rx.await;
+        };
+        let (listening_socket, server) = http_api::serve(ctx, server_shutdown).unwrap();
+
+        tokio::spawn(async { server.await });
+
+        let client = BeaconNodeHttpClient::new(
+            Url::parse(&format!(
+                "http://{}:{}",
+                listening_socket.ip(),
+                listening_socket.port()
+            ))
+            .unwrap(),
+        );
+
+        Self {
+            chain,
+            client,
+            next_block,
+            attestations,
+            attester_slashing,
+            proposer_slashing,
+            voluntary_exit,
+            _server_shutdown: shutdown_tx,
+            validator_keypairs: harness.validators_keypairs,
+            network_rx,
+        }
+    }
+
+    fn skip_slots(self, count: u64) -> Self {
+        for _ in 0..count {
+            self.chain
+                .slot_clock
+                .set_slot(self.chain.slot().unwrap().as_u64() + 1);
+        }
+
+        self
+    }
+
+    fn interesting_state_ids(&self) -> Vec<StateId> {
+        let mut ids = vec![
+            StateId::Head,
+            StateId::Genesis,
+            StateId::Finalized,
+            StateId::Justified,
+            StateId::Slot(Slot::new(0)),
+            StateId::Slot(Slot::new(32)),
+            StateId::Slot(Slot::from(SKIPPED_SLOTS[0])),
+            StateId::Slot(Slot::from(SKIPPED_SLOTS[1])),
+            StateId::Slot(Slot::from(SKIPPED_SLOTS[2])),
+            StateId::Slot(Slot::from(SKIPPED_SLOTS[3])),
+            StateId::Root(Hash256::zero()),
+        ];
+        ids.push(StateId::Root(self.chain.head_info().unwrap().state_root));
+        ids
+    }
+
+    fn interesting_block_ids(&self) -> Vec<BlockId> {
+        let mut ids = vec![
+            BlockId::Head,
+            BlockId::Genesis,
+            BlockId::Finalized,
+            BlockId::Justified,
+            BlockId::Slot(Slot::new(0)),
+            BlockId::Slot(Slot::new(32)),
+            BlockId::Slot(Slot::from(SKIPPED_SLOTS[0])),
+            BlockId::Slot(Slot::from(SKIPPED_SLOTS[1])),
+            BlockId::Slot(Slot::from(SKIPPED_SLOTS[2])),
+            BlockId::Slot(Slot::from(SKIPPED_SLOTS[3])),
+            BlockId::Root(Hash256::zero()),
+        ];
+        ids.push(BlockId::Root(self.chain.head_info().unwrap().block_root));
+        ids
+    }
+
+    fn get_state(&self, state_id: StateId) -> Option<BeaconState<E>> {
+        match state_id {
+            StateId::Head => Some(self.chain.head().unwrap().beacon_state),
+            StateId::Genesis => self
+                .chain
+                .get_state(&self.chain.genesis_state_root, None)
+                .unwrap(),
+            StateId::Finalized => {
+                let finalized_slot = self
+                    .chain
+                    .head_info()
+                    .unwrap()
+                    .finalized_checkpoint
+                    .epoch
+                    .start_slot(E::slots_per_epoch());
+
+                let root = self
+                    .chain
+                    .state_root_at_slot(finalized_slot)
+                    .unwrap()
+                    .unwrap();
+
+                self.chain.get_state(&root, Some(finalized_slot)).unwrap()
+            }
+            StateId::Justified => {
+                let justified_slot = self
+                    .chain
+                    .head_info()
+                    .unwrap()
+                    .current_justified_checkpoint
+                    .epoch
+                    .start_slot(E::slots_per_epoch());
+
+                let root = self
+                    .chain
+                    .state_root_at_slot(justified_slot)
+                    .unwrap()
+                    .unwrap();
+
+                self.chain.get_state(&root, Some(justified_slot)).unwrap()
+            }
+            StateId::Slot(slot) => {
+                let root = self.chain.state_root_at_slot(slot).unwrap().unwrap();
+
+                self.chain.get_state(&root, Some(slot)).unwrap()
+            }
+            StateId::Root(root) => self.chain.get_state(&root, None).unwrap(),
+        }
+    }
+
+    pub async fn test_beacon_genesis(self) -> Self {
+        let result = self.client.get_beacon_genesis().await.unwrap().data;
+
+        let state = self.chain.head().unwrap().beacon_state;
+        let expected = GenesisData {
+            genesis_time: state.genesis_time,
+            genesis_validators_root: state.genesis_validators_root,
+            genesis_fork_version: self.chain.spec.genesis_fork_version,
+        };
+
+        assert_eq!(result, expected);
+
+        self
+    }
+
+    pub async fn test_beacon_states_root(self) -> Self {
+        for state_id in self.interesting_state_ids() {
+            let result = self
+                .client
+                .get_beacon_states_root(state_id)
+                .await
+                .unwrap()
+                .map(|res| res.data.root);
+
+            let expected = match state_id {
StateId::Head => Some(self.chain.head_info().unwrap().state_root), + StateId::Genesis => Some(self.chain.genesis_state_root), + StateId::Finalized => { + let finalized_slot = self + .chain + .head_info() + .unwrap() + .finalized_checkpoint + .epoch + .start_slot(E::slots_per_epoch()); + + self.chain.state_root_at_slot(finalized_slot).unwrap() + } + StateId::Justified => { + let justified_slot = self + .chain + .head_info() + .unwrap() + .current_justified_checkpoint + .epoch + .start_slot(E::slots_per_epoch()); + + self.chain.state_root_at_slot(justified_slot).unwrap() + } + StateId::Slot(slot) => self.chain.state_root_at_slot(slot).unwrap(), + StateId::Root(root) => Some(root), + }; + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_beacon_states_fork(self) -> Self { + for state_id in self.interesting_state_ids() { + let result = self + .client + .get_beacon_states_fork(state_id) + .await + .unwrap() + .map(|res| res.data); + + let expected = self.get_state(state_id).map(|state| state.fork); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_beacon_states_finality_checkpoints(self) -> Self { + for state_id in self.interesting_state_ids() { + let result = self + .client + .get_beacon_states_finality_checkpoints(state_id) + .await + .unwrap() + .map(|res| res.data); + + let expected = self + .get_state(state_id) + .map(|state| FinalityCheckpointsData { + previous_justified: state.previous_justified_checkpoint, + current_justified: state.current_justified_checkpoint, + finalized: state.finalized_checkpoint, + }); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_beacon_states_validators(self) -> Self { + for state_id in self.interesting_state_ids() { + let result = self + .client + .get_beacon_states_validators(state_id) + .await + .unwrap() + .map(|res| res.data); + + let expected = self.get_state(state_id).map(|state| { + let epoch = state.current_epoch(); + let finalized_epoch = state.finalized_checkpoint.epoch; + let far_future_epoch = self.chain.spec.far_future_epoch; + + let mut validators = Vec::with_capacity(state.validators.len()); + + for i in 0..state.validators.len() { + let validator = state.validators[i].clone(); + + validators.push(ValidatorData { + index: i as u64, + balance: state.balances[i], + status: ValidatorStatus::from_validator( + Some(&validator), + epoch, + finalized_epoch, + far_future_epoch, + ), + validator, + }) + } + + validators + }); + + assert_eq!(result, expected, "{:?}", state_id); + } + + self + } + + pub async fn test_beacon_states_validator_id(self) -> Self { + for state_id in self.interesting_state_ids() { + let state_opt = self.get_state(state_id); + let validators = match state_opt.as_ref() { + Some(state) => state.validators.clone().into(), + None => vec![], + }; + + for (i, validator) in validators.into_iter().enumerate() { + let validator_ids = &[ + ValidatorId::PublicKey(validator.pubkey.clone()), + ValidatorId::Index(i as u64), + ]; + + for validator_id in validator_ids { + let result = self + .client + .get_beacon_states_validator_id(state_id, validator_id) + .await + .unwrap() + .map(|res| res.data); + + if result.is_none() && state_opt.is_none() { + continue; + } + + let state = state_opt.as_ref().expect("result should be none"); + + let expected = { + let epoch = state.current_epoch(); + let finalized_epoch = state.finalized_checkpoint.epoch; + let far_future_epoch = self.chain.spec.far_future_epoch; + + ValidatorData { + 
index: i as u64,
+                            balance: state.balances[i],
+                            status: ValidatorStatus::from_validator(
+                                Some(&validator),
+                                epoch,
+                                finalized_epoch,
+                                far_future_epoch,
+                            ),
+                            validator: validator.clone(),
+                        }
+                    };
+
+                    assert_eq!(result, Some(expected), "{:?}, {:?}", state_id, validator_id);
+                }
+            }
+        }
+
+        self
+    }
+
+    pub async fn test_beacon_states_committees(self) -> Self {
+        for state_id in self.interesting_state_ids() {
+            let mut state_opt = self.get_state(state_id);
+
+            let epoch = state_opt
+                .as_ref()
+                .map(|state| state.current_epoch())
+                .unwrap_or_else(|| Epoch::new(0));
+
+            let results = self
+                .client
+                .get_beacon_states_committees(state_id, epoch, None, None)
+                .await
+                .unwrap()
+                .map(|res| res.data);
+
+            if results.is_none() && state_opt.is_none() {
+                continue;
+            }
+
+            let state = state_opt.as_mut().expect("result should be none");
+            state.build_all_committee_caches(&self.chain.spec).unwrap();
+            let committees = state
+                .get_beacon_committees_at_epoch(
+                    RelativeEpoch::from_epoch(state.current_epoch(), epoch).unwrap(),
+                )
+                .unwrap();
+
+            for (i, result) in results.unwrap().into_iter().enumerate() {
+                let expected = &committees[i];
+
+                assert_eq!(result.index, expected.index, "{}", state_id);
+                assert_eq!(result.slot, expected.slot, "{}", state_id);
+                assert_eq!(
+                    result
+                        .validators
+                        .into_iter()
+                        .map(|i| i as usize)
+                        .collect::<Vec<_>>(),
+                    expected.committee.to_vec(),
+                    "{}",
+                    state_id
+                );
+            }
+        }
+
+        self
+    }
+
+    fn get_block_root(&self, block_id: BlockId) -> Option<Hash256> {
+        match block_id {
+            BlockId::Head => Some(self.chain.head_info().unwrap().block_root),
+            BlockId::Genesis => Some(self.chain.genesis_block_root),
+            BlockId::Finalized => Some(self.chain.head_info().unwrap().finalized_checkpoint.root),
+            BlockId::Justified => Some(
+                self.chain
+                    .head_info()
+                    .unwrap()
+                    .current_justified_checkpoint
+                    .root,
+            ),
+            BlockId::Slot(slot) => self.chain.block_root_at_slot(slot).unwrap(),
+            BlockId::Root(root) => Some(root),
+        }
+    }
+
+    fn get_block(&self, block_id: BlockId) -> Option<SignedBeaconBlock<E>> {
+        let root = self.get_block_root(block_id);
+        root.and_then(|root| self.chain.get_block(&root).unwrap())
+    }
+
+    pub async fn test_beacon_headers_all_slots(self) -> Self {
+        for slot in 0..CHAIN_LENGTH {
+            let slot = Slot::from(slot);
+
+            let result = self
+                .client
+                .get_beacon_headers(Some(slot), None)
+                .await
+                .unwrap()
+                .map(|res| res.data);
+
+            let root = self.chain.block_root_at_slot(slot).unwrap();
+
+            if root.is_none() && result.is_none() {
+                continue;
+            }
+
+            let root = root.unwrap();
+            let block = self.chain.block_at_slot(slot).unwrap().unwrap();
+            let header = BlockHeaderData {
+                root,
+                canonical: true,
+                header: BlockHeaderAndSignature {
+                    message: block.message.block_header(),
+                    signature: block.signature.into(),
+                },
+            };
+            let expected = vec![header];
+
+            assert_eq!(result.unwrap(), expected, "slot {:?}", slot);
+        }
+
+        self
+    }
+
+    pub async fn test_beacon_headers_all_parents(self) -> Self {
+        let mut roots = self
+            .chain
+            .rev_iter_block_roots()
+            .unwrap()
+            .map(Result::unwrap)
+            .map(|(root, _slot)| root)
+            .collect::<Vec<_>>()
+            .into_iter()
+            .rev()
+            .collect::<Vec<_>>();
+
+        // The iterator natively returns duplicate roots for skipped slots.
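+        // `dedup` removes consecutive duplicates, leaving exactly one root per
+        // block, ordered by slot.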
+ roots.dedup(); + + for i in 1..roots.len() { + let parent_root = roots[i - 1]; + let child_root = roots[i]; + + let result = self + .client + .get_beacon_headers(None, Some(parent_root)) + .await + .unwrap() + .unwrap() + .data; + + assert_eq!(result.len(), 1, "i {}", i); + assert_eq!(result[0].root, child_root, "i {}", i); + } + + self + } + + pub async fn test_beacon_headers_block_id(self) -> Self { + for block_id in self.interesting_block_ids() { + let result = self + .client + .get_beacon_headers_block_id(block_id) + .await + .unwrap() + .map(|res| res.data); + + let block_root_opt = self.get_block_root(block_id); + + let block_opt = block_root_opt.and_then(|root| self.chain.get_block(&root).unwrap()); + + if block_opt.is_none() && result.is_none() { + continue; + } + + let result = result.unwrap(); + let block = block_opt.unwrap(); + let block_root = block_root_opt.unwrap(); + let canonical = self + .chain + .block_root_at_slot(block.slot()) + .unwrap() + .map_or(false, |canonical| block_root == canonical); + + assert_eq!(result.canonical, canonical, "{:?}", block_id); + assert_eq!(result.root, block_root, "{:?}", block_id); + assert_eq!( + result.header.message, + block.message.block_header(), + "{:?}", + block_id + ); + assert_eq!( + result.header.signature, + block.signature.into(), + "{:?}", + block_id + ); + } + + self + } + + pub async fn test_beacon_blocks_root(self) -> Self { + for block_id in self.interesting_block_ids() { + let result = self + .client + .get_beacon_blocks_root(block_id) + .await + .unwrap() + .map(|res| res.data.root); + + let expected = self.get_block_root(block_id); + + assert_eq!(result, expected, "{:?}", block_id); + } + + self + } + + pub async fn test_post_beacon_blocks_valid(mut self) -> Self { + let next_block = &self.next_block; + + self.client.post_beacon_blocks(next_block).await.unwrap(); + + assert!( + self.network_rx.try_recv().is_ok(), + "valid blocks should be sent to network" + ); + + self + } + + pub async fn test_post_beacon_blocks_invalid(mut self) -> Self { + let mut next_block = self.next_block.clone(); + next_block.message.proposer_index += 1; + + assert!(self.client.post_beacon_blocks(&next_block).await.is_err()); + + assert!( + self.network_rx.try_recv().is_ok(), + "invalid blocks should be sent to network" + ); + + self + } + + pub async fn test_beacon_blocks(self) -> Self { + for block_id in self.interesting_block_ids() { + let result = self + .client + .get_beacon_blocks(block_id) + .await + .unwrap() + .map(|res| res.data); + + let expected = self.get_block(block_id); + + assert_eq!(result, expected, "{:?}", block_id); + } + + self + } + + pub async fn test_beacon_blocks_attestations(self) -> Self { + for block_id in self.interesting_block_ids() { + let result = self + .client + .get_beacon_blocks_attestations(block_id) + .await + .unwrap() + .map(|res| res.data); + + let expected = self + .get_block(block_id) + .map(|block| block.message.body.attestations.into()); + + assert_eq!(result, expected, "{:?}", block_id); + } + + self + } + + pub async fn test_post_beacon_pool_attestations_valid(mut self) -> Self { + for attestation in &self.attestations { + self.client + .post_beacon_pool_attestations(attestation) + .await + .unwrap(); + + assert!( + self.network_rx.try_recv().is_ok(), + "valid attestation should be sent to network" + ); + } + + self + } + + pub async fn test_post_beacon_pool_attestations_invalid(mut self) -> Self { + for attestation in &self.attestations { + let mut attestation = attestation.clone(); + 
attestation.data.slot += 1; + + assert!(self + .client + .post_beacon_pool_attestations(&attestation) + .await + .is_err()); + + assert!( + self.network_rx.try_recv().is_err(), + "invalid attestation should not be sent to network" + ); + } + + self + } + + pub async fn test_get_beacon_pool_attestations(self) -> Self { + let result = self + .client + .get_beacon_pool_attestations() + .await + .unwrap() + .data; + + let mut expected = self.chain.op_pool.get_all_attestations(); + expected.extend(self.chain.naive_aggregation_pool.read().iter().cloned()); + + assert_eq!(result, expected); + + self + } + + pub async fn test_post_beacon_pool_attester_slashings_valid(mut self) -> Self { + self.client + .post_beacon_pool_attester_slashings(&self.attester_slashing) + .await + .unwrap(); + + assert!( + self.network_rx.try_recv().is_ok(), + "valid attester slashing should be sent to network" + ); + + self + } + + pub async fn test_post_beacon_pool_attester_slashings_invalid(mut self) -> Self { + let mut slashing = self.attester_slashing.clone(); + slashing.attestation_1.data.slot += 1; + + self.client + .post_beacon_pool_attester_slashings(&slashing) + .await + .unwrap_err(); + + assert!( + self.network_rx.try_recv().is_err(), + "invalid attester slashing should not be sent to network" + ); + + self + } + + pub async fn test_get_beacon_pool_attester_slashings(self) -> Self { + let result = self + .client + .get_beacon_pool_attester_slashings() + .await + .unwrap() + .data; + + let expected = self.chain.op_pool.get_all_attester_slashings(); + + assert_eq!(result, expected); + + self + } + + pub async fn test_post_beacon_pool_proposer_slashings_valid(mut self) -> Self { + self.client + .post_beacon_pool_proposer_slashings(&self.proposer_slashing) + .await + .unwrap(); + + assert!( + self.network_rx.try_recv().is_ok(), + "valid proposer slashing should be sent to network" + ); + + self + } + + pub async fn test_post_beacon_pool_proposer_slashings_invalid(mut self) -> Self { + let mut slashing = self.proposer_slashing.clone(); + slashing.signed_header_1.message.slot += 1; + + self.client + .post_beacon_pool_proposer_slashings(&slashing) + .await + .unwrap_err(); + + assert!( + self.network_rx.try_recv().is_err(), + "invalid proposer slashing should not be sent to network" + ); + + self + } + + pub async fn test_get_beacon_pool_proposer_slashings(self) -> Self { + let result = self + .client + .get_beacon_pool_proposer_slashings() + .await + .unwrap() + .data; + + let expected = self.chain.op_pool.get_all_proposer_slashings(); + + assert_eq!(result, expected); + + self + } + + pub async fn test_post_beacon_pool_voluntary_exits_valid(mut self) -> Self { + self.client + .post_beacon_pool_voluntary_exits(&self.voluntary_exit) + .await + .unwrap(); + + assert!( + self.network_rx.try_recv().is_ok(), + "valid exit should be sent to network" + ); + + self + } + + pub async fn test_post_beacon_pool_voluntary_exits_invalid(mut self) -> Self { + let mut exit = self.voluntary_exit.clone(); + exit.message.epoch += 1; + + self.client + .post_beacon_pool_voluntary_exits(&exit) + .await + .unwrap_err(); + + assert!( + self.network_rx.try_recv().is_err(), + "invalid exit should not be sent to network" + ); + + self + } + + pub async fn test_get_beacon_pool_voluntary_exits(self) -> Self { + let result = self + .client + .get_beacon_pool_voluntary_exits() + .await + .unwrap() + .data; + + let expected = self.chain.op_pool.get_all_voluntary_exits(); + + assert_eq!(result, expected); + + self + } + + pub async fn 
test_get_config_fork_schedule(self) -> Self {
+        let result = self.client.get_config_fork_schedule().await.unwrap().data;
+
+        let expected = vec![self.chain.head_info().unwrap().fork];
+
+        assert_eq!(result, expected);
+
+        self
+    }
+
+    pub async fn test_get_config_spec(self) -> Self {
+        let result = self.client.get_config_spec().await.unwrap().data;
+
+        let expected = YamlConfig::from_spec::<E>(&self.chain.spec);
+
+        assert_eq!(result, expected);
+
+        self
+    }
+
+    pub async fn test_get_config_deposit_contract(self) -> Self {
+        let result = self
+            .client
+            .get_config_deposit_contract()
+            .await
+            .unwrap()
+            .data;
+
+        let expected = DepositContractData {
+            address: self.chain.spec.deposit_contract_address,
+            chain_id: eth1::DEFAULT_NETWORK_ID.into(),
+        };
+
+        assert_eq!(result, expected);
+
+        self
+    }
+
+    pub async fn test_get_node_version(self) -> Self {
+        let result = self.client.get_node_version().await.unwrap().data;
+
+        let expected = VersionData {
+            version: lighthouse_version::version_with_platform(),
+        };
+
+        assert_eq!(result, expected);
+
+        self
+    }
+
+    pub async fn test_get_node_syncing(self) -> Self {
+        let result = self.client.get_node_syncing().await.unwrap().data;
+        let head_slot = self.chain.head_info().unwrap().slot;
+        let sync_distance = self.chain.slot().unwrap() - head_slot;
+
+        let expected = SyncingData {
+            is_syncing: false,
+            head_slot,
+            sync_distance,
+        };
+
+        assert_eq!(result, expected);
+
+        self
+    }
+
+    pub async fn test_get_debug_beacon_states(self) -> Self {
+        for state_id in self.interesting_state_ids() {
+            let result = self
+                .client
+                .get_debug_beacon_states(state_id)
+                .await
+                .unwrap()
+                .map(|res| res.data);
+
+            let mut expected = self.get_state(state_id);
+            expected.as_mut().map(|state| state.drop_all_caches());
+
+            assert_eq!(result, expected, "{:?}", state_id);
+        }
+
+        self
+    }
+
+    pub async fn test_get_debug_beacon_heads(self) -> Self {
+        let result = self
+            .client
+            .get_debug_beacon_heads()
+            .await
+            .unwrap()
+            .data
+            .into_iter()
+            .map(|head| (head.root, head.slot))
+            .collect::<Vec<_>>();
+
+        let expected = self.chain.heads();
+
+        assert_eq!(result, expected);
+
+        self
+    }
+
+    fn validator_count(&self) -> usize {
+        self.chain.head().unwrap().beacon_state.validators.len()
+    }
+
+    fn interesting_validator_indices(&self) -> Vec<Vec<u64>> {
+        let validator_count = self.validator_count() as u64;
+
+        let mut interesting = vec![
+            vec![],
+            vec![0],
+            vec![0, 1],
+            vec![0, 1, 3],
+            vec![validator_count],
+            vec![validator_count, 1],
+            vec![validator_count, 1, 3],
+            vec![u64::max_value()],
+            vec![u64::max_value(), 1],
+            vec![u64::max_value(), 1, 3],
+        ];
+
+        interesting.push((0..validator_count).collect());
+
+        interesting
+    }
+
+    pub async fn test_get_validator_duties_attester(self) -> Self {
+        let current_epoch = self.chain.epoch().unwrap().as_u64();
+
+        let half = current_epoch / 2;
+        let first = current_epoch - half;
+        let last = current_epoch + half;
+
+        for epoch in first..=last {
+            for indices in self.interesting_validator_indices() {
+                let epoch = Epoch::from(epoch);
+
+                // The endpoint does not allow getting duties past the next epoch.
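+                // (The next epoch is allowed because committee shuffling has a
+                // one-epoch look-ahead; anything further would require skipping
+                // the state forward.)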
+                if epoch > current_epoch + 1 {
+                    assert_eq!(
+                        self.client
+                            .get_validator_duties_attester(epoch, Some(&indices))
+                            .await
+                            .unwrap_err()
+                            .status()
+                            .map(Into::into),
+                        Some(400)
+                    );
+                    continue;
+                }
+
+                let results = self
+                    .client
+                    .get_validator_duties_attester(epoch, Some(&indices))
+                    .await
+                    .unwrap()
+                    .data;
+
+                let mut state = self
+                    .chain
+                    .state_at_slot(
+                        epoch.start_slot(E::slots_per_epoch()),
+                        StateSkipConfig::WithStateRoots,
+                    )
+                    .unwrap();
+                state
+                    .build_committee_cache(RelativeEpoch::Current, &self.chain.spec)
+                    .unwrap();
+
+                let expected_len = indices
+                    .iter()
+                    .filter(|i| **i < state.validators.len() as u64)
+                    .count();
+
+                assert_eq!(results.len(), expected_len);
+
+                for (indices_set, &i) in indices.iter().enumerate() {
+                    if let Some(duty) = state
+                        .get_attestation_duties(i as usize, RelativeEpoch::Current)
+                        .unwrap()
+                    {
+                        let expected = AttesterData {
+                            pubkey: state.validators[i as usize].pubkey.clone().into(),
+                            validator_index: i,
+                            committees_at_slot: duty.committees_at_slot,
+                            committee_index: duty.index,
+                            committee_length: duty.committee_len as u64,
+                            validator_committee_index: duty.committee_position as u64,
+                            slot: duty.slot,
+                        };
+
+                        let result = results
+                            .iter()
+                            .find(|duty| duty.validator_index == i)
+                            .unwrap();
+
+                        assert_eq!(
+                            *result, expected,
+                            "epoch: {}, indices_set: {}",
+                            epoch, indices_set
+                        );
+                    } else {
+                        assert!(
+                            !results.iter().any(|duty| duty.validator_index == i),
+                            "validator index should not exist in response"
+                        );
+                    }
+                }
+            }
+        }
+
+        self
+    }
+
+    pub async fn test_get_validator_duties_proposer(self) -> Self {
+        let current_epoch = self.chain.epoch().unwrap();
+
+        let result = self
+            .client
+            .get_validator_duties_proposer(current_epoch)
+            .await
+            .unwrap()
+            .data;
+
+        let mut state = self.chain.head_beacon_state().unwrap();
+
+        while state.current_epoch() < current_epoch {
+            per_slot_processing(&mut state, None, &self.chain.spec).unwrap();
+        }
+
+        state
+            .build_committee_cache(RelativeEpoch::Current, &self.chain.spec)
+            .unwrap();
+
+        let expected = current_epoch
+            .slot_iter(E::slots_per_epoch())
+            .map(|slot| {
+                let index = state
+                    .get_beacon_proposer_index(slot, &self.chain.spec)
+                    .unwrap();
+                let pubkey = state.validators[index].pubkey.clone().into();
+
+                ProposerData { pubkey, slot }
+            })
+            .collect::<Vec<_>>();
+
+        assert_eq!(result, expected);
+
+        self
+    }
+
+    pub async fn test_block_production(self) -> Self {
+        let fork = self.chain.head_info().unwrap().fork;
+        let genesis_validators_root = self.chain.genesis_validators_root;
+
+        for _ in 0..E::slots_per_epoch() * 3 {
+            let slot = self.chain.slot().unwrap();
+            let epoch = self.chain.epoch().unwrap();
+
+            let proposer_pubkey_bytes = self
+                .client
+                .get_validator_duties_proposer(epoch)
+                .await
+                .unwrap()
+                .data
+                .into_iter()
+                .find(|duty| duty.slot == slot)
+                .map(|duty| duty.pubkey)
+                .unwrap();
+            let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap();
+
+            let sk = self
+                .validator_keypairs
+                .iter()
+                .find(|kp| kp.pk == proposer_pubkey)
+                .map(|kp| kp.sk.clone())
+                .unwrap();
+
+            let randao_reveal = {
+                let domain = self.chain.spec.get_domain(
+                    epoch,
+                    Domain::Randao,
+                    &fork,
+                    genesis_validators_root,
+                );
+                let message = epoch.signing_root(domain);
+                sk.sign(message).into()
+            };
+
+            let block = self
+                .client
+                .get_validator_blocks::<E>(slot, randao_reveal, None)
+                .await
+                .unwrap()
+                .data;
+
+            let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec);
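+
+            // Publishing the signed block should import it and make it the new
+            // head, which the assertion below checks.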
+            self.client.post_beacon_blocks(&signed_block).await.unwrap();
+
+            assert_eq!(self.chain.head_beacon_block().unwrap(), signed_block);
+
+            self.chain.slot_clock.set_slot(slot.as_u64() + 1);
+        }
+
+        self
+    }
+
+    pub async fn test_get_validator_attestation_data(self) -> Self {
+        let mut state = self.chain.head_beacon_state().unwrap();
+        let slot = state.slot;
+        state
+            .build_committee_cache(RelativeEpoch::Current, &self.chain.spec)
+            .unwrap();
+
+        for index in 0..state.get_committee_count_at_slot(slot).unwrap() {
+            let result = self
+                .client
+                .get_validator_attestation_data(slot, index)
+                .await
+                .unwrap()
+                .data;
+
+            let expected = self
+                .chain
+                .produce_unaggregated_attestation(slot, index)
+                .unwrap()
+                .data;
+
+            assert_eq!(result, expected);
+        }
+
+        self
+    }
+
+    pub async fn test_get_validator_aggregate_attestation(self) -> Self {
+        let attestation = self
+            .chain
+            .head_beacon_block()
+            .unwrap()
+            .message
+            .body
+            .attestations[0]
+            .clone();
+
+        let result = self
+            .client
+            .get_validator_aggregate_attestation(
+                attestation.data.slot,
+                attestation.data.tree_hash_root(),
+            )
+            .await
+            .unwrap()
+            .unwrap()
+            .data;
+
+        let expected = attestation;
+
+        assert_eq!(result, expected);
+
+        self
+    }
+
+    pub async fn get_aggregate(&mut self) -> SignedAggregateAndProof<E> {
+        let slot = self.chain.slot().unwrap();
+        let epoch = self.chain.epoch().unwrap();
+
+        let mut head = self.chain.head().unwrap();
+        while head.beacon_state.current_epoch() < epoch {
+            per_slot_processing(&mut head.beacon_state, None, &self.chain.spec).unwrap();
+        }
+        head.beacon_state
+            .build_committee_cache(RelativeEpoch::Current, &self.chain.spec)
+            .unwrap();
+
+        let committee_len = head.beacon_state.get_committee_count_at_slot(slot).unwrap();
+        let fork = head.beacon_state.fork;
+        let genesis_validators_root = self.chain.genesis_validators_root;
+
+        let mut duties = vec![];
+        for i in 0..self.validator_keypairs.len() {
+            duties.push(
+                self.client
+                    .get_validator_duties_attester(epoch, Some(&[i as u64]))
+                    .await
+                    .unwrap()
+                    .data[0]
+                    .clone(),
+            )
+        }
+
+        let (i, kp, duty, proof) = self
+            .validator_keypairs
+            .iter()
+            .enumerate()
+            .find_map(|(i, kp)| {
+                let duty = duties[i].clone();
+
+                let proof = SelectionProof::new::<E>(
+                    duty.slot,
+                    &kp.sk,
+                    &fork,
+                    genesis_validators_root,
+                    &self.chain.spec,
+                );
+
+                if proof
+                    .is_aggregator(committee_len as usize, &self.chain.spec)
+                    .unwrap()
+                {
+                    Some((i, kp, duty, proof))
+                } else {
+                    None
+                }
+            })
+            .expect("there is at least one aggregator for this epoch")
+            .clone();
+
+        if duty.slot > slot {
+            self.chain.slot_clock.set_slot(duty.slot.into());
+        }
+
+        let attestation_data = self
+            .client
+            .get_validator_attestation_data(duty.slot, duty.committee_index)
+            .await
+            .unwrap()
+            .data;
+
+        let mut attestation = Attestation {
+            aggregation_bits: BitList::with_capacity(duty.committee_length as usize).unwrap(),
+            data: attestation_data,
+            signature: AggregateSignature::infinity(),
+        };
+
+        attestation
+            .sign(
+                &kp.sk,
+                duty.validator_committee_index as usize,
+                &fork,
+                genesis_validators_root,
+                &self.chain.spec,
+            )
+            .unwrap();
+
+        SignedAggregateAndProof::from_aggregate(
+            i as u64,
+            attestation,
+            Some(proof),
+            &kp.sk,
+            &fork,
+            genesis_validators_root,
+            &self.chain.spec,
+        )
+    }
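+
+    // The two tests below differ only in whether the aggregate is mutated before
+    // submission; bumping the slot after signing makes the aggregate fail gossip
+    // verification.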
+    pub async fn test_get_validator_aggregate_and_proofs_valid(mut self) -> Self {
+        let aggregate = self.get_aggregate().await;
+
+        self.client
+            .post_validator_aggregate_and_proof::<E>(&aggregate)
+            .await
+            .unwrap();
+
+        assert!(self.network_rx.try_recv().is_ok());
+
+        self
+    }
+
+    pub async fn test_get_validator_aggregate_and_proofs_invalid(mut self) -> Self {
+        let mut aggregate = self.get_aggregate().await;
+
+        aggregate.message.aggregate.data.slot += 1;
+
+        self.client
+            .post_validator_aggregate_and_proof::<E>(&aggregate)
+            .await
+            .unwrap_err();
+
+        assert!(self.network_rx.try_recv().is_err());
+
+        self
+    }
+
+    pub async fn test_get_validator_beacon_committee_subscriptions(mut self) -> Self {
+        let subscription = BeaconCommitteeSubscription {
+            validator_index: 0,
+            committee_index: 0,
+            committees_at_slot: 1,
+            slot: Slot::new(1),
+            is_aggregator: true,
+        };
+
+        self.client
+            .post_validator_beacon_committee_subscriptions(&[subscription])
+            .await
+            .unwrap();
+
+        self.network_rx.try_recv().unwrap();
+
+        self
+    }
+
+    #[cfg(target_os = "linux")]
+    pub async fn test_get_lighthouse_health(self) -> Self {
+        self.client.get_lighthouse_health().await.unwrap();
+
+        self
+    }
+
+    #[cfg(not(target_os = "linux"))]
+    pub async fn test_get_lighthouse_health(self) -> Self {
+        self.client.get_lighthouse_health().await.unwrap_err();
+
+        self
+    }
+
+    pub async fn test_get_lighthouse_syncing(self) -> Self {
+        self.client.get_lighthouse_syncing().await.unwrap();
+
+        self
+    }
+
+    pub async fn test_get_lighthouse_proto_array(self) -> Self {
+        self.client.get_lighthouse_proto_array().await.unwrap();
+
+        self
+    }
+
+    pub async fn test_get_lighthouse_validator_inclusion_global(self) -> Self {
+        let epoch = self.chain.epoch().unwrap() - 1;
+        self.client
+            .get_lighthouse_validator_inclusion_global(epoch)
+            .await
+            .unwrap();
+
+        self
+    }
+
+    pub async fn test_get_lighthouse_validator_inclusion(self) -> Self {
+        let epoch = self.chain.epoch().unwrap() - 1;
+        self.client
+            .get_lighthouse_validator_inclusion(epoch, ValidatorId::Index(0))
+            .await
+            .unwrap();
+
+        self
+    }
+}
+
+#[tokio::test(core_threads = 2)]
+async fn beacon_genesis() {
+    ApiTester::new().test_beacon_genesis().await;
+}
+
+#[tokio::test(core_threads = 2)]
+async fn beacon_states_root() {
+    ApiTester::new().test_beacon_states_root().await;
+}
+
+#[tokio::test(core_threads = 2)]
+async fn beacon_states_fork() {
+    ApiTester::new().test_beacon_states_fork().await;
+}
+
+#[tokio::test(core_threads = 2)]
+async fn beacon_states_finality_checkpoints() {
+    ApiTester::new()
+        .test_beacon_states_finality_checkpoints()
+        .await;
+}
+
+#[tokio::test(core_threads = 2)]
+async fn beacon_states_validators() {
+    ApiTester::new().test_beacon_states_validators().await;
+}
+
+#[tokio::test(core_threads = 2)]
+async fn beacon_states_committees() {
+    ApiTester::new().test_beacon_states_committees().await;
+}
+
+#[tokio::test(core_threads = 2)]
+async fn beacon_states_validator_id() {
+    ApiTester::new().test_beacon_states_validator_id().await;
+}
+
+#[tokio::test(core_threads = 2)]
+async fn beacon_headers() {
+    ApiTester::new()
+        .test_beacon_headers_all_slots()
+        .await
+        .test_beacon_headers_all_parents()
+        .await;
+}
+
+#[tokio::test(core_threads = 2)]
+async fn beacon_headers_block_id() {
+    ApiTester::new().test_beacon_headers_block_id().await;
+}
+
+#[tokio::test(core_threads = 2)]
+async fn beacon_blocks() {
+    ApiTester::new().test_beacon_blocks().await;
+}
+
+#[tokio::test(core_threads = 2)]
+async fn post_beacon_blocks_valid() {
+    ApiTester::new().test_post_beacon_blocks_valid().await;
+}
+
+#[tokio::test(core_threads = 2)]
+async fn post_beacon_blocks_invalid() {
+    ApiTester::new().test_post_beacon_blocks_invalid().await;
+}
+
+#[tokio::test(core_threads = 2)]
+async fn beacon_blocks_root() {
ApiTester::new().test_beacon_blocks_root().await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_blocks_attestations() { + ApiTester::new().test_beacon_blocks_attestations().await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_get() { + ApiTester::new() + .test_get_beacon_pool_attestations() + .await + .test_get_beacon_pool_attester_slashings() + .await + .test_get_beacon_pool_proposer_slashings() + .await + .test_get_beacon_pool_voluntary_exits() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_post_attestations_valid() { + ApiTester::new() + .test_post_beacon_pool_attestations_valid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_post_attestations_invalid() { + ApiTester::new() + .test_post_beacon_pool_attestations_invalid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_post_attester_slashings_valid() { + ApiTester::new() + .test_post_beacon_pool_attester_slashings_valid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_post_attester_slashings_invalid() { + ApiTester::new() + .test_post_beacon_pool_attester_slashings_invalid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_post_proposer_slashings_valid() { + ApiTester::new() + .test_post_beacon_pool_proposer_slashings_valid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_post_proposer_slashings_invalid() { + ApiTester::new() + .test_post_beacon_pool_proposer_slashings_invalid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_post_voluntary_exits_valid() { + ApiTester::new() + .test_post_beacon_pool_voluntary_exits_valid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn beacon_pools_post_voluntary_exits_invalid() { + ApiTester::new() + .test_post_beacon_pool_voluntary_exits_invalid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn config_get() { + ApiTester::new() + .test_get_config_fork_schedule() + .await + .test_get_config_spec() + .await + .test_get_config_deposit_contract() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn debug_get() { + ApiTester::new() + .test_get_debug_beacon_states() + .await + .test_get_debug_beacon_heads() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn node_get() { + ApiTester::new() + .test_get_node_version() + .await + .test_get_node_syncing() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_duties_attester() { + ApiTester::new().test_get_validator_duties_attester().await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_duties_attester_with_skip_slots() { + ApiTester::new() + .skip_slots(E::slots_per_epoch() * 2) + .test_get_validator_duties_attester() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_duties_proposer() { + ApiTester::new().test_get_validator_duties_proposer().await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_duties_proposer_with_skip_slots() { + ApiTester::new() + .skip_slots(E::slots_per_epoch() * 2) + .test_get_validator_duties_proposer() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn block_production() { + ApiTester::new().test_block_production().await; +} + +#[tokio::test(core_threads = 2)] +async fn block_production_with_skip_slots() { + ApiTester::new() + .skip_slots(E::slots_per_epoch() * 2) + .test_block_production() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn get_validator_attestation_data() { + 
ApiTester::new().test_get_validator_attestation_data().await;
+}
+
+#[tokio::test(core_threads = 2)]
+async fn get_validator_attestation_data_with_skip_slots() {
+    ApiTester::new()
+        .skip_slots(E::slots_per_epoch() * 2)
+        .test_get_validator_attestation_data()
+        .await;
+}
+
+#[tokio::test(core_threads = 2)]
+async fn get_validator_aggregate_attestation() {
+    ApiTester::new()
+        .test_get_validator_aggregate_attestation()
+        .await;
+}
+
+#[tokio::test(core_threads = 2)]
+async fn get_validator_aggregate_attestation_with_skip_slots() {
+    ApiTester::new()
+        .skip_slots(E::slots_per_epoch() * 2)
+        .test_get_validator_aggregate_attestation()
+        .await;
+}
+
+#[tokio::test(core_threads = 2)]
+async fn get_validator_aggregate_and_proofs_valid() {
+    ApiTester::new()
+        .test_get_validator_aggregate_and_proofs_valid()
+        .await;
+}
+
+#[tokio::test(core_threads = 2)]
+async fn get_validator_aggregate_and_proofs_valid_with_skip_slots() {
+    ApiTester::new()
+        .skip_slots(E::slots_per_epoch() * 2)
+        .test_get_validator_aggregate_and_proofs_valid()
+        .await;
+}
+
+#[tokio::test(core_threads = 2)]
+async fn get_validator_aggregate_and_proofs_invalid() {
+    ApiTester::new()
+        .test_get_validator_aggregate_and_proofs_invalid()
+        .await;
+}
+
+#[tokio::test(core_threads = 2)]
+async fn get_validator_aggregate_and_proofs_invalid_with_skip_slots() {
+    ApiTester::new()
+        .skip_slots(E::slots_per_epoch() * 2)
+        .test_get_validator_aggregate_and_proofs_invalid()
+        .await;
+}
+
+#[tokio::test(core_threads = 2)]
+async fn get_validator_beacon_committee_subscriptions() {
+    ApiTester::new()
+        .test_get_validator_beacon_committee_subscriptions()
+        .await;
+}
+
+#[tokio::test(core_threads = 2)]
+async fn lighthouse_endpoints() {
+    ApiTester::new()
+        .test_get_lighthouse_health()
+        .await
+        .test_get_lighthouse_syncing()
+        .await
+        .test_get_lighthouse_proto_array()
+        .await
+        .test_get_lighthouse_validator_inclusion()
+        .await
+        .test_get_lighthouse_validator_inclusion_global()
+        .await;
+}
diff --git a/beacon_node/http_metrics/Cargo.toml b/beacon_node/http_metrics/Cargo.toml
new file mode 100644
index 000000000..1b9197917
--- /dev/null
+++ b/beacon_node/http_metrics/Cargo.toml
@@ -0,0 +1,28 @@
+[package]
+name = "http_metrics"
+version = "0.1.0"
+authors = ["Paul Hauner <paul@paulhauner.com>"]
+edition = "2018"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+prometheus = "0.10.0"
+warp = "0.2.5"
+serde = { version = "1.0.116", features = ["derive"] }
+slog = "2.5.2"
+beacon_chain = { path = "../beacon_chain" }
+store = { path = "../store" }
+eth2_libp2p = { path = "../eth2_libp2p" }
+slot_clock = { path = "../../common/slot_clock" }
+lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
+lazy_static = "1.4.0"
+eth2 = { path = "../../common/eth2" }
+lighthouse_version = { path = "../../common/lighthouse_version" }
+warp_utils = { path = "../../common/warp_utils" }
+
+[dev-dependencies]
+tokio = { version = "0.2.22", features = ["sync"] }
+reqwest = { version = "0.10.8", features = ["json"] }
+environment = { path = "../../lighthouse/environment" }
+types = { path = "../../consensus/types" }
diff --git a/beacon_node/http_metrics/src/lib.rs b/beacon_node/http_metrics/src/lib.rs
new file mode 100644
index 000000000..37eac82bd
--- /dev/null
+++ b/beacon_node/http_metrics/src/lib.rs
@@ -0,0 +1,135 @@
+//! This crate provides an HTTP server that is solely dedicated to serving the `/metrics` endpoint.
+//!
+//! For other endpoints, see the `http_api` crate.
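+//!
+//! A hedged usage sketch (`ctx` and `shutdown_future` are assumed to be built by
+//! the caller; `serve` is defined below):
+//!
+//! ```ignore
+//! let (addr, server) = http_metrics::serve(ctx, shutdown_future)?;
+//! tokio::spawn(server);
+//! // `curl http://{addr}/metrics` now returns Prometheus text format.
+//! ```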
+
+#[macro_use]
+extern crate lazy_static;
+
+mod metrics;
+
+use beacon_chain::{BeaconChain, BeaconChainTypes};
+use lighthouse_version::version_with_platform;
+use serde::{Deserialize, Serialize};
+use slog::{crit, info, Logger};
+use std::future::Future;
+use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
+use std::path::PathBuf;
+use std::sync::Arc;
+use warp::{http::Response, Filter};
+
+#[derive(Debug)]
+pub enum Error {
+    Warp(warp::Error),
+    Other(String),
+}
+
+impl From<warp::Error> for Error {
+    fn from(e: warp::Error) -> Self {
+        Error::Warp(e)
+    }
+}
+
+impl From<String> for Error {
+    fn from(e: String) -> Self {
+        Error::Other(e)
+    }
+}
+
+/// A wrapper around all the items required to spawn the HTTP server.
+///
+/// The server will gracefully handle the case where any fields are `None`.
+pub struct Context<T: BeaconChainTypes> {
+    pub config: Config,
+    pub chain: Option<Arc<BeaconChain<T>>>,
+    pub db_path: Option<PathBuf>,
+    pub freezer_db_path: Option<PathBuf>,
+    pub log: Logger,
+}
+
+/// Configuration for the HTTP server.
+#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)]
+pub struct Config {
+    pub enabled: bool,
+    pub listen_addr: Ipv4Addr,
+    pub listen_port: u16,
+    pub allow_origin: Option<String>,
+}
+
+impl Default for Config {
+    fn default() -> Self {
+        Self {
+            enabled: false,
+            listen_addr: Ipv4Addr::new(127, 0, 0, 1),
+            listen_port: 5054,
+            allow_origin: None,
+        }
+    }
+}
+
+/// Creates a server that will serve requests using information from `ctx`.
+///
+/// The server will shut down gracefully when the `shutdown` future resolves.
+///
+/// ## Returns
+///
+/// This function will bind the server to the provided address and then return a tuple of:
+///
+/// - `SocketAddr`: the address that the HTTP server will listen on.
+/// - `Future`: the actual server future that will need to be awaited.
+///
+/// ## Errors
+///
+/// Returns an error if the server is unable to bind or there is another error during
+/// configuration.
+pub fn serve<T: BeaconChainTypes>(
+    ctx: Arc<Context<T>>,
+    shutdown: impl Future<Output = ()> + Send + Sync + 'static,
+) -> Result<(SocketAddr, impl Future<Output = ()>), Error> {
+    let config = &ctx.config;
+    let log = ctx.log.clone();
+    let allow_origin = config.allow_origin.clone();
+
+    // Sanity check.
+    if !config.enabled {
+        crit!(log, "Cannot start disabled metrics HTTP server");
+        return Err(Error::Other(
+            "A disabled metrics server should not be started".to_string(),
+        ));
+    }
+
+    let inner_ctx = ctx.clone();
+    let routes = warp::get()
+        .and(warp::path("metrics"))
+        .map(move || inner_ctx.clone())
+        .and_then(|ctx: Arc<Context<T>>| async move {
+            Ok::<_, warp::Rejection>(
+                metrics::gather_prometheus_metrics(&ctx)
+                    .map(|body| Response::builder().status(200).body(body).unwrap())
+                    .unwrap_or_else(|e| {
+                        Response::builder()
+                            .status(500)
+                            .body(format!("Unable to gather metrics: {:?}", e))
+                            .unwrap()
+                    }),
+            )
+        })
+        // Add a `Server` header.
+        .map(|reply| warp::reply::with_header(reply, "Server", &version_with_platform()))
+        // Maybe add some CORS headers.
+        .map(move |reply| warp_utils::reply::maybe_cors(reply, allow_origin.as_ref()));
+
+    let (listening_socket, server) = warp::serve(routes).try_bind_with_graceful_shutdown(
+        SocketAddrV4::new(config.listen_addr, config.listen_port),
+        async {
+            shutdown.await;
+        },
+    )?;
+
+    info!(
+        log,
+        "Metrics HTTP server started";
+        "listen_address" => listening_socket.to_string(),
+    );
+
+    Ok((listening_socket, server))
+}
diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/http_metrics/src/metrics.rs
similarity index 69%
rename from beacon_node/rest_api/src/metrics.rs
rename to beacon_node/http_metrics/src/metrics.rs
index 4b1ba737d..bcd803c40 100644
--- a/beacon_node/rest_api/src/metrics.rs
+++ b/beacon_node/http_metrics/src/metrics.rs
@@ -1,38 +1,11 @@
-use crate::{ApiError, Context};
+use crate::Context;
 use beacon_chain::BeaconChainTypes;
+use eth2::lighthouse::Health;
 use lighthouse_metrics::{Encoder, TextEncoder};
-use rest_types::Health;
-use std::sync::Arc;
 
 pub use lighthouse_metrics::*;
 
 lazy_static! {
-    pub static ref BEACON_HTTP_API_REQUESTS_TOTAL: Result<IntCounterVec> =
-        try_create_int_counter_vec(
-            "beacon_http_api_requests_total",
-            "Count of HTTP requests received",
-            &["endpoint"]
-        );
-    pub static ref BEACON_HTTP_API_SUCCESS_TOTAL: Result<IntCounterVec> =
-        try_create_int_counter_vec(
-            "beacon_http_api_success_total",
-            "Count of HTTP requests that returned 200 OK",
-            &["endpoint"]
-        );
-    pub static ref BEACON_HTTP_API_ERROR_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
-        "beacon_http_api_error_total",
-        "Count of HTTP that did not return 200 OK",
-        &["endpoint"]
-    );
-    pub static ref BEACON_HTTP_API_TIMES_TOTAL: Result<HistogramVec> = try_create_histogram_vec(
-        "beacon_http_api_times_total",
-        "Duration to process HTTP requests",
-        &["endpoint"]
-    );
-    pub static ref REQUEST_RESPONSE_TIME: Result<Histogram> = try_create_histogram(
-        "http_server_request_duration_seconds",
-        "Time taken to build a response to a HTTP request"
-    );
     pub static ref PROCESS_NUM_THREADS: Result<IntGauge> = try_create_int_gauge(
         "process_num_threads",
         "Number of threads used by the current process"
@@ -67,14 +40,9 @@ lazy_static! {
         try_create_float_gauge("system_loadavg_15", "Loadavg over 15 minutes");
 }
 
-/// Returns the full set of Prometheus metrics for the Beacon Node application.
-///
-/// # Note
-///
-/// This is a HTTP handler method.
-pub fn get_prometheus<T: BeaconChainTypes>(
-    ctx: Arc<Context<T>>,
-) -> std::result::Result<String, ApiError> {
+pub fn gather_prometheus_metrics<T: BeaconChainTypes>(
+    ctx: &Context<T>,
+) -> std::result::Result<String, String> {
     let mut buffer = vec![];
     let encoder = TextEncoder::new();
 
@@ -94,9 +62,17 @@ pub fn get_prometheus<T: BeaconChainTypes>(
     // using `lighthouse_metrics::gather(..)` to collect the global `DEFAULT_REGISTRY` metrics into
     // a string that can be returned via HTTP.
 
-    slot_clock::scrape_for_metrics::<T::EthSpec, T::SlotClock>(&ctx.beacon_chain.slot_clock);
-    store::scrape_for_metrics(&ctx.db_path, &ctx.freezer_db_path);
-    beacon_chain::scrape_for_metrics(&ctx.beacon_chain);
+    if let Some(beacon_chain) = ctx.chain.as_ref() {
+        slot_clock::scrape_for_metrics::<T::EthSpec, T::SlotClock>(&beacon_chain.slot_clock);
+        beacon_chain::scrape_for_metrics(beacon_chain);
+    }
+
+    if let (Some(db_path), Some(freezer_db_path)) =
+        (ctx.db_path.as_ref(), ctx.freezer_db_path.as_ref())
+    {
+        store::scrape_for_metrics(db_path, freezer_db_path);
+    }
+
+    eth2_libp2p::scrape_discovery_metrics();
 
     // This will silently fail if we are unable to observe the health. This is desired behaviour.
@@ -125,6 +101,5 @@ pub fn get_prometheus<T: BeaconChainTypes>(
         .encode(&lighthouse_metrics::gather(), &mut buffer)
         .unwrap();
 
-    String::from_utf8(buffer)
-        .map_err(|e| ApiError::ServerError(format!("Failed to encode prometheus info: {:?}", e)))
+    String::from_utf8(buffer).map_err(|e| format!("Failed to encode prometheus info: {:?}", e))
 }
diff --git a/beacon_node/http_metrics/tests/tests.rs b/beacon_node/http_metrics/tests/tests.rs
new file mode 100644
index 000000000..18a40d4f8
--- /dev/null
+++ b/beacon_node/http_metrics/tests/tests.rs
@@ -0,0 +1,46 @@
+use beacon_chain::test_utils::BlockingMigratorEphemeralHarnessType;
+use environment::null_logger;
+use http_metrics::Config;
+use reqwest::StatusCode;
+use std::net::Ipv4Addr;
+use std::sync::Arc;
+use tokio::sync::oneshot;
+use types::MainnetEthSpec;
+
+type Context = http_metrics::Context<BlockingMigratorEphemeralHarnessType<MainnetEthSpec>>;
+
+#[tokio::test(core_threads = 2)]
+async fn returns_200_ok() {
+    let log = null_logger().unwrap();
+
+    let context = Arc::new(Context {
+        config: Config {
+            enabled: true,
+            listen_addr: Ipv4Addr::new(127, 0, 0, 1),
+            listen_port: 0,
+            allow_origin: None,
+        },
+        chain: None,
+        db_path: None,
+        freezer_db_path: None,
+        log,
+    });
+
+    let ctx = context.clone();
+    let (_shutdown_tx, shutdown_rx) = oneshot::channel::<()>();
+    let server_shutdown = async {
+        // It's not really interesting why this triggered, just that it happened.
+        let _ = shutdown_rx.await;
+    };
+    let (listening_socket, server) = http_metrics::serve(ctx, server_shutdown).unwrap();
+
+    tokio::spawn(async { server.await });
+
+    let url = format!(
+        "http://{}:{}/metrics",
+        listening_socket.ip(),
+        listening_socket.port()
+    );
+
+    assert_eq!(reqwest::get(&url).await.unwrap().status(), StatusCode::OK);
+}
diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml
index c5d786ff0..c2d81bf9d 100644
--- a/beacon_node/network/Cargo.toml
+++ b/beacon_node/network/Cargo.toml
@@ -5,7 +5,7 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"]
 edition = "2018"
 
 [dev-dependencies]
-sloggers = "1.0.0"
+sloggers = "1.0.1"
 genesis = { path = "../genesis" }
 lazy_static = "1.4.0"
 matches = "0.1.8"
@@ -17,7 +17,6 @@ beacon_chain = { path = "../beacon_chain" }
 store = { path = "../store" }
 eth2_libp2p = { path = "../eth2_libp2p" }
 hashset_delay = { path = "../../common/hashset_delay" }
-rest_types = { path = "../../common/rest_types" }
 types = { path = "../../consensus/types" }
 state_processing = { path = "../../consensus/state_processing" }
 slot_clock = { path = "../../common/slot_clock" }
@@ -25,18 +24,18 @@ slog = { version = "2.5.2", features = ["max_level_trace"] }
 hex = "0.4.2"
 eth2_ssz = "0.1.2"
 eth2_ssz_types = { path = "../../consensus/ssz_types" }
-tree_hash = "0.1.0"
+tree_hash = "0.1.1"
 futures = "0.3.5"
-error-chain = "0.12.2"
+error-chain = "0.12.4"
 tokio = { version = "0.2.22", features = ["full"] }
 parking_lot = "0.11.0"
-smallvec = "1.4.1"
+smallvec = "1.4.2"
 rand = "0.7.3"
-fnv = "1.0.6"
-rlp = "0.4.5"
+fnv = "1.0.7"
+rlp = "0.4.6"
 lazy_static = "1.4.0"
 lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
-environment = { path = "../../lighthouse/environment" }
+task_executor = { path = "../../common/task_executor" }
 igd = "0.11.1"
 itertools = "0.9.0"
 num_cpus = "1.13.0"
diff --git a/beacon_node/network/src/attestation_service/mod.rs b/beacon_node/network/src/attestation_service/mod.rs
index 59f63890a..cd8a9b5a1 100644
--- a/beacon_node/network/src/attestation_service/mod.rs
+++ b/beacon_node/network/src/attestation_service/mod.rs
@@ -15,9 +15,8 @@ use slog::{debug, error, o, trace, warn};
 use beacon_chain::{BeaconChain, BeaconChainTypes};
 use eth2_libp2p::SubnetDiscovery;
 use hashset_delay::HashSetDelay;
-use rest_types::ValidatorSubscription;
 use slot_clock::SlotClock;
-use types::{Attestation, EthSpec, Slot, SubnetId};
+use types::{Attestation, EthSpec, Slot, SubnetId, ValidatorSubscription};
 
 use crate::metrics;
 
@@ -196,14 +195,9 @@ impl<T: BeaconChainTypes> AttestationService<T> {
             slot: subscription.slot,
         };
 
-        // determine if the validator is an aggregator. If so, we subscribe to the subnet and
+        // Determine if the validator is an aggregator. If so, we subscribe to the subnet and
         // if successful add the validator to a mapping of known aggregators for that exact
         // subnet.
-        // NOTE: There is a chance that a fork occurs between now and when the validator needs
-        // to aggregate attestations. If this happens, the signature will no longer be valid
-        // and it could be likely the validator no longer needs to aggregate. More
-        // sophisticated logic should be added using known future forks.
-        // TODO: Implement
 
         if subscription.is_aggregator {
             metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_AGGREGATOR_REQUESTS);
@@ -287,8 +281,6 @@ impl<T: BeaconChainTypes> AttestationService<T> {
                 min_ttl,
             })
         } else {
-            // TODO: Send the time frame needed to have a peer connected, so that we can
-            // maintain peers for a least this duration.
             // We may want to check the global PeerInfo to see estimated timeouts for each
            // peer before they can be removed.
            warn!(self.log,
diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs
index 18983b620..b23b40e54 100644
--- a/beacon_node/network/src/beacon_processor/mod.rs
+++ b/beacon_node/network/src/beacon_processor/mod.rs
@@ -37,12 +37,12 @@
 use crate::{metrics, service::NetworkMessage, sync::SyncMessage};
 use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError};
-use environment::TaskExecutor;
 use eth2_libp2p::{MessageId, NetworkGlobals, PeerId};
 use slog::{crit, debug, error, trace, warn, Logger};
 use std::collections::VecDeque;
 use std::sync::{Arc, Weak};
 use std::time::{Duration, Instant};
+use task_executor::TaskExecutor;
 use tokio::sync::{mpsc, oneshot};
 use types::{
     Attestation, AttesterSlashing, EthSpec, Hash256, ProposerSlashing, SignedAggregateAndProof,
diff --git a/beacon_node/network/src/beacon_processor/worker.rs b/beacon_node/network/src/beacon_processor/worker.rs
index cef373f17..44f96372f 100644
--- a/beacon_node/network/src/beacon_processor/worker.rs
+++ b/beacon_node/network/src/beacon_processor/worker.rs
@@ -45,7 +45,7 @@ impl<T: BeaconChainTypes> Worker<T> {
 
         let attestation = match self
             .chain
-            .verify_unaggregated_attestation_for_gossip(attestation, subnet_id)
+            .verify_unaggregated_attestation_for_gossip(attestation, Some(subnet_id))
         {
             Ok(attestation) => attestation,
             Err(e) => {
diff --git a/beacon_node/network/src/persisted_dht.rs b/beacon_node/network/src/persisted_dht.rs
index 214932442..c11fcd448 100644
--- a/beacon_node/network/src/persisted_dht.rs
+++ b/beacon_node/network/src/persisted_dht.rs
@@ -3,15 +3,14 @@ use std::sync::Arc;
 use store::{DBColumn, Error as StoreError, HotColdDB, ItemStore, StoreItem};
 use types::{EthSpec, Hash256};
 
-/// 32-byte key for accessing the `DhtEnrs`.
-pub const DHT_DB_KEY: &str = "PERSISTEDDHTPERSISTEDDHTPERSISTE";
+/// 32-byte key for accessing the `DhtEnrs`. All zero because `DhtEnrs` has its own column.
+pub const DHT_DB_KEY: Hash256 = Hash256::zero();
 
 pub fn load_dht<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
     store: Arc<HotColdDB<E, Hot, Cold>>,
 ) -> Vec<Enr> {
     // Load DHT from store
-    let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes());
-    match store.get_item(&key) {
+    match store.get_item(&DHT_DB_KEY) {
         Ok(Some(p)) => {
             let p: PersistedDht = p;
             p.enrs
@@ -25,9 +24,7 @@ pub fn persist_dht<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
     store: Arc<HotColdDB<E, Hot, Cold>>,
     enrs: Vec<Enr>,
 ) -> Result<(), store::Error> {
-    let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes());
-    store.put_item(&key, &PersistedDht { enrs })?;
-    Ok(())
+    store.put_item(&DHT_DB_KEY, &PersistedDht { enrs })
 }
 
 /// Wrapper around DHT for persistence to disk.
@@ -61,7 +58,7 @@ mod tests {
     use std::str::FromStr;
     use store::config::StoreConfig;
     use store::{HotColdDB, MemoryStore};
-    use types::{ChainSpec, Hash256, MinimalEthSpec};
+    use types::{ChainSpec, MinimalEthSpec};
     #[test]
     fn test_persisted_dht() {
         let log = NullLoggerBuilder.build().unwrap();
@@ -71,11 +68,10 @@
             MemoryStore<MinimalEthSpec>,
         > = HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log).unwrap();
         let enrs = vec![Enr::from_str("enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8").unwrap()];
-        let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes());
         store
-            .put_item(&key, &PersistedDht { enrs: enrs.clone() })
+            .put_item(&DHT_DB_KEY, &PersistedDht { enrs: enrs.clone() })
             .unwrap();
-        let dht: PersistedDht = store.get_item(&key).unwrap().unwrap();
+        let dht: PersistedDht = store.get_item(&DHT_DB_KEY).unwrap().unwrap();
         assert_eq!(dht.enrs, enrs);
     }
 }
diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs
index 0fa2494ff..4701bdb73 100644
--- a/beacon_node/network/src/router/mod.rs
+++ b/beacon_node/network/src/router/mod.rs
@@ -74,7 +74,7 @@ impl<T: BeaconChainTypes> Router<T> {
         beacon_chain: Arc<BeaconChain<T>>,
         network_globals: Arc<NetworkGlobals<T::EthSpec>>,
         network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
-        executor: environment::TaskExecutor,
+        executor: task_executor::TaskExecutor,
         log: slog::Logger,
     ) -> error::Result<mpsc::UnboundedSender<RouterMessage<T::EthSpec>>> {
         let message_handler_log = log.new(o!("service"=> "router"));
diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs
index ae6959437..266b1d45d 100644
--- a/beacon_node/network/src/router/processor.rs
+++ b/beacon_node/network/src/router/processor.rs
@@ -41,7 +41,7 @@ pub struct Processor<T: BeaconChainTypes> {
 impl<T: BeaconChainTypes> Processor<T> {
     /// Instantiate a `Processor` instance
     pub fn new(
-        executor: environment::TaskExecutor,
+        executor: task_executor::TaskExecutor,
         beacon_chain: Arc<BeaconChain<T>>,
         network_globals: Arc<NetworkGlobals<T::EthSpec>>,
         network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs
index a2af46e68..1188211ef 100644
--- a/beacon_node/network/src/service.rs
+++ b/beacon_node/network/src/service.rs
@@ -15,13 +15,12 @@ use eth2_libp2p::{
 };
 use eth2_libp2p::{MessageAcceptance, Service as LibP2PService};
 use futures::prelude::*;
-use rest_types::ValidatorSubscription;
 use slog::{debug, error, info, o, trace, warn};
 use std::{collections::HashMap, net::SocketAddr, sync::Arc, time::Duration};
 use store::HotColdDB;
 use tokio::sync::mpsc;
 use tokio::time::Delay;
-use types::EthSpec;
+use types::{EthSpec, ValidatorSubscription};
 
 mod tests;
 
@@ -52,7 +51,7 @@ pub enum NetworkMessage<T: EthSpec> {
     },
     /// Respond to a peer's request with an error.
     SendError {
-        // TODO: note that this is never used, we just say goodbye without nicely closing the
+        // NOTE: Currently this is never used; we just say goodbye without nicely closing the
         // stream assigned to the request
         peer_id: PeerId,
         error: RPCResponseErrorCode,
@@ -122,7 +121,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
     pub async fn start(
         beacon_chain: Arc<BeaconChain<T>>,
         config: &NetworkConfig,
-        executor: environment::TaskExecutor,
+        executor: task_executor::TaskExecutor,
     ) -> error::Result<(
         Arc<NetworkGlobals<T::EthSpec>>,
         mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
@@ -164,7 +163,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
             "Loading peers into the routing table"; "peers" => enrs_to_load.len()
         );
         for enr in enrs_to_load {
-            libp2p.swarm.add_enr(enr.clone()); //TODO change?
+            libp2p.swarm.add_enr(enr.clone());
         }
 
         // launch derived network services
@@ -208,7 +207,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
 }
 
 fn spawn_service<T: BeaconChainTypes>(
-    executor: environment::TaskExecutor,
+    executor: task_executor::TaskExecutor,
     mut service: NetworkService<T>,
 ) -> error::Result<()> {
     let mut exit_rx = executor.exit();
@@ -350,7 +349,6 @@ fn spawn_service<T: BeaconChainTypes>(
             // process any attestation service events
             Some(attestation_service_message) = service.attestation_service.next() => {
                 match attestation_service_message {
-                    // TODO: Implement
                     AttServiceMessage::Subscribe(subnet_id) => {
                         service.libp2p.swarm.subscribe_to_subnet(subnet_id);
                     }
diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs
index 2efcf889e..9888dd784 100644
--- a/beacon_node/network/src/service/tests.rs
+++ b/beacon_node/network/src/service/tests.rs
@@ -41,7 +41,7 @@ mod tests {
         let (signal, exit) = exit_future::signal();
         let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
 
-        let executor = environment::TaskExecutor::new(
+        let executor = task_executor::TaskExecutor::new(
             runtime.handle().clone(),
             exit,
             log.clone(),
diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs
index deedc1448..4f59c6cff 100644
--- a/beacon_node/network/src/sync/manager.rs
+++ b/beacon_node/network/src/sync/manager.rs
@@ -119,7 +119,6 @@ pub enum SyncMessage<T: EthSpec> {
 }
 
 /// The result of processing multiple blocks (a chain segment).
-// TODO: When correct batch error handling occurs, we will include an error type.
 #[derive(Debug)]
 pub enum BatchProcessResult {
     /// The batch was completed successfully. It carries whether the sent batch contained blocks.
@@ -205,7 +204,7 @@ impl SingleBlockRequest {
     /// chain. This allows the chain to be
     /// dropped during the syncing process which will gracefully end the `SyncManager`.
     pub fn spawn<T: BeaconChainTypes>(
-        executor: environment::TaskExecutor,
+        executor: task_executor::TaskExecutor,
         beacon_chain: Arc<BeaconChain<T>>,
         network_globals: Arc<NetworkGlobals<T::EthSpec>>,
         network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
@@ -629,7 +628,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
         self.update_sync_state();
     }
 
-    // TODO: Group these functions into one.
+    // TODO: Group these functions into one for cleaner code.
     /// Updates the syncing state of a peer to be synced.
     fn synced_peer(&mut self, peer_id: &PeerId, sync_info: PeerSyncInfo) {
         if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) {
@@ -792,7 +791,6 @@ impl<T: BeaconChainTypes> SyncManager<T> {
 
                 // This currently can be a host of errors. We permit this due to the partial
                 // ambiguity.
-                // TODO: Refine the error types and score the peer appropriately.
                 self.network.report_peer(
                     parent_request.last_submitted_peer,
                     PeerAction::MidToleranceError,
diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs
index 8ed21616d..864ac6124 100644
--- a/beacon_node/network/src/sync/range_sync/chain.rs
+++ b/beacon_node/network/src/sync/range_sync/chain.rs
@@ -613,9 +613,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
             BatchState::Failed | BatchState::Poisoned | BatchState::AwaitingDownload => {
                 unreachable!("batch indicates inconsistent chain state while advancing chain")
             }
-            BatchState::AwaitingProcessing(..) => {
-                // TODO: can we be sure the old attempts are wrong?
-            }
+            BatchState::AwaitingProcessing(..) => {}
             BatchState::Processing(_) => {
                 assert_eq!(
                     id,
@@ -651,9 +649,6 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
     /// These events occur when a peer has successfully responded with blocks, but the blocks we
     /// have received are incorrect or invalid. This indicates the peer has not performed as
     /// intended and can result in downvoting a peer.
-    // TODO: Batches could have been partially downloaded due to RPC size-limit restrictions. We
-    // need to add logic for partial batch downloads. Potentially, if another peer returns the same
-    // batch, we try a partial download.
     fn handle_invalid_batch(
         &mut self,
         network: &mut SyncNetworkContext<T::EthSpec>,
diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs
index 48a9bd5d4..c1ae653a7 100644
--- a/beacon_node/network/src/sync/range_sync/range.rs
+++ b/beacon_node/network/src/sync/range_sync/range.rs
@@ -220,7 +220,10 @@ impl<T: BeaconChainTypes> RangeSync<T> {
                 if let Some(removed_chain) = removed_chain {
                     debug!(self.log, "Chain removed after block response"; "sync_type" => ?sync_type, "chain_id" => chain_id);
                     removed_chain.status_peers(network);
-                    // TODO: update & update_sync_state?
+                    // update the state of the collection
+                    self.chains.update(network);
+                    // update the global state and inform the user
+                    self.chains.update_sync_state(network);
                 }
             }
             Err(_) => {
@@ -319,7 +322,10 @@ impl<T: BeaconChainTypes> RangeSync<T> {
             .call_all(|chain| chain.remove_peer(peer_id, network))
         {
             debug!(self.log, "Chain removed after removing peer"; "sync_type" => ?sync_type, "chain" => removed_chain.get_id());
-            // TODO: anything else to do?
+            // update the state of the collection
+            self.chains.update(network);
+            // update the global state and inform the user
+            self.chains.update_sync_state(network);
         }
     }
 
@@ -343,7 +349,10 @@ impl<T: BeaconChainTypes> RangeSync<T> {
                 if let Some(removed_chain) = removed_chain {
                     debug!(self.log, "Chain removed on rpc error"; "sync_type" => ?sync_type, "chain" => removed_chain.get_id());
                     removed_chain.status_peers(network);
-                    // TODO: update & update_sync_state?
+                    // update the state of the collection
+                    self.chains.update(network);
+                    // update the global state and inform the user
+                    self.chains.update_sync_state(network);
                 }
             }
             Err(_) => {
diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml
index d7309d412..c16ab8fb5 100644
--- a/beacon_node/operation_pool/Cargo.toml
+++ b/beacon_node/operation_pool/Cargo.toml
@@ -11,8 +11,8 @@ types = { path = "../../consensus/types" }
 state_processing = { path = "../../consensus/state_processing" }
 eth2_ssz = "0.1.2"
 eth2_ssz_derive = "0.1.0"
-serde = "1.0.110"
-serde_derive = "1.0.110"
+serde = "1.0.116"
+serde_derive = "1.0.116"
 store = { path = "../store" }
 
 [dev-dependencies]
diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs
index 5b664c877..6d6a8d1cd 100644
--- a/beacon_node/operation_pool/src/lib.rs
+++ b/beacon_node/operation_pool/src/lib.rs
@@ -332,6 +332,51 @@ impl<T: EthSpec> OperationPool<T> {
     pub fn num_voluntary_exits(&self) -> usize {
         self.voluntary_exits.read().len()
     }
+
+    /// Returns all known `Attestation` objects.
+    ///
+    /// This method may return objects that are invalid for block inclusion.
+    pub fn get_all_attestations(&self) -> Vec<Attestation<T>> {
+        self.attestations
+            .read()
+            .iter()
+            .map(|(_, attns)| attns.iter().cloned())
+            .flatten()
+            .collect()
+    }
+
+    /// Returns all known `AttesterSlashing` objects.
+    ///
+    /// This method may return objects that are invalid for block inclusion.
+    pub fn get_all_attester_slashings(&self) -> Vec<AttesterSlashing<T>> {
+        self.attester_slashings
+            .read()
+            .iter()
+            .map(|(slashing, _)| slashing.clone())
+            .collect()
+    }
+
+    /// Returns all known `ProposerSlashing` objects.
+    ///
+    /// This method may return objects that are invalid for block inclusion.
+    pub fn get_all_proposer_slashings(&self) -> Vec<ProposerSlashing> {
+        self.proposer_slashings
+            .read()
+            .iter()
+            .map(|(_, slashing)| slashing.clone())
+            .collect()
+    }
+
+    /// Returns all known `SignedVoluntaryExit` objects.
+    ///
+    /// This method may return objects that are invalid for block inclusion.
+    pub fn get_all_voluntary_exits(&self) -> Vec<SignedVoluntaryExit> {
+        self.voluntary_exits
+            .read()
+            .iter()
+            .map(|(_, exit)| exit.clone())
+            .collect()
+    }
 }
 
 /// Filter up to a maximum number of operations out of an iterator.
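These `get_all_*` accessors are what back the `/beacon/pool/*` GET endpoints exercised by the `beacon_pools_get` test earlier in this diff. A minimal usage sketch follows; it is illustrative only and not part of the patch. The free function and its name are assumptions, while `op_pool` is the operation pool field on `BeaconChain`:

use beacon_chain::{BeaconChain, BeaconChainTypes};
use types::Attestation;

// Illustrative sketch (not in this diff): an HTTP handler could expose the
// raw pool contents via the new accessor. The result may include
// attestations that are no longer valid for block inclusion, which is
// acceptable for a pool-inspection endpoint.
fn pool_attestations<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
) -> Vec<Attestation<T::EthSpec>> {
    chain.op_pool.get_all_attestations()
}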
diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml deleted file mode 100644 index 359d4e736..000000000 --- a/beacon_node/rest_api/Cargo.toml +++ /dev/null @@ -1,50 +0,0 @@ -[package] -name = "rest_api" -version = "0.2.0" -authors = ["Paul Hauner ", "Age Manning ", "Luke Anderson "] -edition = "2018" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html -[dependencies] -bls = { path = "../../crypto/bls" } -rest_types = { path = "../../common/rest_types" } -beacon_chain = { path = "../beacon_chain" } -network = { path = "../network" } -eth2_libp2p = { path = "../eth2_libp2p" } -store = { path = "../store" } -serde = { version = "1.0.110", features = ["derive"] } -serde_json = "1.0.52" -serde_yaml = "0.8.11" -slog = "2.5.2" -slog-term = "2.5.0" -slog-async = "2.5.0" -eth2_ssz = "0.1.2" -eth2_ssz_derive = "0.1.0" -state_processing = { path = "../../consensus/state_processing" } -types = { path = "../../consensus/types" } -http = "0.2.1" -hyper = "0.13.5" -tokio = { version = "0.2.22", features = ["sync"] } -url = "2.1.1" -lazy_static = "1.4.0" -eth2_config = { path = "../../common/eth2_config" } -lighthouse_metrics = { path = "../../common/lighthouse_metrics" } -slot_clock = { path = "../../common/slot_clock" } -hex = "0.4.2" -parking_lot = "0.11.0" -futures = "0.3.5" -operation_pool = { path = "../operation_pool" } -environment = { path = "../../lighthouse/environment" } -uhttp_sse = "0.5.1" -bus = "2.2.3" -itertools = "0.9.0" -lighthouse_version = { path = "../../common/lighthouse_version" } - -[dev-dependencies] -assert_matches = "1.3.0" -remote_beacon_node = { path = "../../common/remote_beacon_node" } -node_test_rig = { path = "../../testing/node_test_rig" } -tree_hash = "0.1.0" - -[features] -fake_crypto = [] diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs deleted file mode 100644 index ad2688bb0..000000000 --- a/beacon_node/rest_api/src/beacon.rs +++ /dev/null @@ -1,499 +0,0 @@ -use crate::helpers::*; -use crate::validator::get_state_for_epoch; -use crate::Context; -use crate::{ApiError, UrlQuery}; -use beacon_chain::{ - observed_operations::ObservationOutcome, BeaconChain, BeaconChainTypes, StateSkipConfig, -}; -use futures::executor::block_on; -use hyper::body::Bytes; -use hyper::{Body, Request}; -use rest_types::{ - BlockResponse, CanonicalHeadResponse, Committee, HeadBeaconBlock, StateResponse, - ValidatorRequest, ValidatorResponse, -}; -use std::io::Write; -use std::sync::Arc; - -use slog::error; -use types::{ - AttesterSlashing, BeaconState, EthSpec, Hash256, ProposerSlashing, PublicKeyBytes, - RelativeEpoch, SignedBeaconBlockHash, Slot, -}; - -/// Returns a summary of the head of the beacon chain. 
-pub fn get_head( - ctx: Arc>, -) -> Result { - let beacon_chain = &ctx.beacon_chain; - let chain_head = beacon_chain.head()?; - - Ok(CanonicalHeadResponse { - slot: chain_head.beacon_state.slot, - block_root: chain_head.beacon_block_root, - state_root: chain_head.beacon_state_root, - finalized_slot: chain_head - .beacon_state - .finalized_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()), - finalized_block_root: chain_head.beacon_state.finalized_checkpoint.root, - justified_slot: chain_head - .beacon_state - .current_justified_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()), - justified_block_root: chain_head.beacon_state.current_justified_checkpoint.root, - previous_justified_slot: chain_head - .beacon_state - .previous_justified_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()), - previous_justified_block_root: chain_head.beacon_state.previous_justified_checkpoint.root, - }) -} - -/// Return the list of heads of the beacon chain. -pub fn get_heads(ctx: Arc>) -> Vec { - ctx.beacon_chain - .heads() - .into_iter() - .map(|(beacon_block_root, beacon_block_slot)| HeadBeaconBlock { - beacon_block_root, - beacon_block_slot, - }) - .collect() -} - -/// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. -pub fn get_block( - req: Request>, - ctx: Arc>, -) -> Result, ApiError> { - let beacon_chain = &ctx.beacon_chain; - let query_params = ["root", "slot"]; - let (key, value) = UrlQuery::from_request(&req)?.first_of(&query_params)?; - - let block_root = match (key.as_ref(), value) { - ("slot", value) => { - let target = parse_slot(&value)?; - - block_root_at_slot(beacon_chain, target)?.ok_or_else(|| { - ApiError::NotFound(format!( - "Unable to find SignedBeaconBlock for slot {:?}", - target - )) - })? - } - ("root", value) => parse_root(&value)?, - _ => return Err(ApiError::ServerError("Unexpected query parameter".into())), - }; - - let block = beacon_chain.store.get_block(&block_root)?.ok_or_else(|| { - ApiError::NotFound(format!( - "Unable to find SignedBeaconBlock for root {:?}", - block_root - )) - })?; - - Ok(BlockResponse { - root: block_root, - beacon_block: block, - }) -} - -/// HTTP handler to return a `SignedBeaconBlock` root at a given `slot`. 
-pub fn get_block_root( - req: Request>, - ctx: Arc>, -) -> Result { - let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; - let target = parse_slot(&slot_string)?; - - block_root_at_slot(&ctx.beacon_chain, target)?.ok_or_else(|| { - ApiError::NotFound(format!( - "Unable to find SignedBeaconBlock for slot {:?}", - target - )) - }) -} - -fn make_sse_response_chunk(new_head_hash: SignedBeaconBlockHash) -> std::io::Result { - let mut buffer = Vec::new(); - { - let mut sse_message = uhttp_sse::SseMessage::new(&mut buffer); - let untyped_hash: Hash256 = new_head_hash.into(); - write!(sse_message.data()?, "{:?}", untyped_hash)?; - } - let bytes: Bytes = buffer.into(); - Ok(bytes) -} - -pub fn stream_forks(ctx: Arc>) -> Result { - let mut events = ctx.events.lock().add_rx(); - let (mut sender, body) = Body::channel(); - std::thread::spawn(move || { - while let Ok(new_head_hash) = events.recv() { - let chunk = match make_sse_response_chunk(new_head_hash) { - Ok(chunk) => chunk, - Err(e) => { - error!(ctx.log, "Failed to make SSE chunk"; "error" => e.to_string()); - sender.abort(); - break; - } - }; - match block_on(sender.send_data(chunk)) { - Err(e) if e.is_closed() => break, - Err(e) => error!(ctx.log, "Couldn't stream piece {:?}", e), - Ok(_) => (), - } - } - }); - Ok(body) -} - -/// HTTP handler to which accepts a query string of a list of validator pubkeys and maps it to a -/// `ValidatorResponse`. -/// -/// This method is limited to as many `pubkeys` that can fit in a URL. See `post_validators` for -/// doing bulk requests. -pub fn get_validators( - req: Request>, - ctx: Arc>, -) -> Result, ApiError> { - let query = UrlQuery::from_request(&req)?; - - let validator_pubkeys = query - .all_of("validator_pubkeys")? - .iter() - .map(|validator_pubkey_str| parse_pubkey_bytes(validator_pubkey_str)) - .collect::, _>>()?; - - let state_root_opt = if let Some((_key, value)) = query.first_of_opt(&["state_root"]) { - Some(parse_root(&value)?) - } else { - None - }; - - validator_responses_by_pubkey(&ctx.beacon_chain, state_root_opt, validator_pubkeys) -} - -/// HTTP handler to return all validators, each as a `ValidatorResponse`. -pub fn get_all_validators( - req: Request>, - ctx: Arc>, -) -> Result, ApiError> { - let query = UrlQuery::from_request(&req)?; - - let state_root_opt = if let Some((_key, value)) = query.first_of_opt(&["state_root"]) { - Some(parse_root(&value)?) - } else { - None - }; - - let mut state = get_state_from_root_opt(&ctx.beacon_chain, state_root_opt)?; - - let validators = state.validators.clone(); - validators - .iter() - .map(|validator| validator_response_by_pubkey(&mut state, validator.pubkey.clone())) - .collect::, _>>() -} - -/// HTTP handler to return all active validators, each as a `ValidatorResponse`. -pub fn get_active_validators( - req: Request>, - ctx: Arc>, -) -> Result, ApiError> { - let query = UrlQuery::from_request(&req)?; - - let state_root_opt = if let Some((_key, value)) = query.first_of_opt(&["state_root"]) { - Some(parse_root(&value)?) 
- } else { - None - }; - - let mut state = get_state_from_root_opt(&ctx.beacon_chain, state_root_opt)?; - - let validators = state.validators.clone(); - let current_epoch = state.current_epoch(); - - validators - .iter() - .filter(|validator| validator.is_active_at(current_epoch)) - .map(|validator| validator_response_by_pubkey(&mut state, validator.pubkey.clone())) - .collect::, _>>() -} - -/// HTTP handler to which accepts a `ValidatorRequest` and returns a `ValidatorResponse` for -/// each of the given `pubkeys`. When `state_root` is `None`, the canonical head is used. -/// -/// This method allows for a basically unbounded list of `pubkeys`, where as the `get_validators` -/// request is limited by the max number of pubkeys you can fit in a URL. -pub fn post_validators( - req: Request>, - ctx: Arc>, -) -> Result, ApiError> { - serde_json::from_slice::(&req.into_body()) - .map_err(|e| { - ApiError::BadRequest(format!( - "Unable to parse JSON into ValidatorRequest: {:?}", - e - )) - }) - .and_then(|bulk_request| { - validator_responses_by_pubkey( - &ctx.beacon_chain, - bulk_request.state_root, - bulk_request.pubkeys, - ) - }) -} - -/// Returns either the state given by `state_root_opt`, or the canonical head state if it is -/// `None`. -fn get_state_from_root_opt( - beacon_chain: &BeaconChain, - state_root_opt: Option, -) -> Result, ApiError> { - if let Some(state_root) = state_root_opt { - beacon_chain - .get_state(&state_root, None) - .map_err(|e| { - ApiError::ServerError(format!( - "Database error when reading state root {}: {:?}", - state_root, e - )) - })? - .ok_or_else(|| ApiError::NotFound(format!("No state exists with root: {}", state_root))) - } else { - Ok(beacon_chain.head()?.beacon_state) - } -} - -/// Maps a vec of `validator_pubkey` to a vec of `ValidatorResponse`, using the state at the given -/// `state_root`. If `state_root.is_none()`, uses the canonial head state. -fn validator_responses_by_pubkey( - beacon_chain: &BeaconChain, - state_root_opt: Option, - validator_pubkeys: Vec, -) -> Result, ApiError> { - let mut state = get_state_from_root_opt(beacon_chain, state_root_opt)?; - - validator_pubkeys - .into_iter() - .map(|validator_pubkey| validator_response_by_pubkey(&mut state, validator_pubkey)) - .collect::, ApiError>>() -} - -/// Maps a `validator_pubkey` to a `ValidatorResponse`, using the given state. -/// -/// The provided `state` must have a fully up-to-date pubkey cache. -fn validator_response_by_pubkey( - state: &mut BeaconState, - validator_pubkey: PublicKeyBytes, -) -> Result { - let validator_index_opt = state - .get_validator_index(&validator_pubkey) - .map_err(|e| ApiError::ServerError(format!("Unable to read pubkey cache: {:?}", e)))?; - - if let Some(validator_index) = validator_index_opt { - let balance = state.balances.get(validator_index).ok_or_else(|| { - ApiError::ServerError(format!("Invalid balances index: {:?}", validator_index)) - })?; - - let validator = state - .validators - .get(validator_index) - .ok_or_else(|| { - ApiError::ServerError(format!("Invalid validator index: {:?}", validator_index)) - })? 
- .clone(); - - Ok(ValidatorResponse { - pubkey: validator_pubkey, - validator_index: Some(validator_index), - balance: Some(*balance), - validator: Some(validator), - }) - } else { - Ok(ValidatorResponse { - pubkey: validator_pubkey, - validator_index: None, - balance: None, - validator: None, - }) - } -} - -/// HTTP handler -pub fn get_committees( - req: Request>, - ctx: Arc>, -) -> Result, ApiError> { - let query = UrlQuery::from_request(&req)?; - - let epoch = query.epoch()?; - - let mut state = - get_state_for_epoch(&ctx.beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?; - - let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), epoch).map_err(|e| { - ApiError::ServerError(format!("Failed to get state suitable for epoch: {:?}", e)) - })?; - - state - .build_committee_cache(relative_epoch, &ctx.beacon_chain.spec) - .map_err(|e| ApiError::ServerError(format!("Unable to build committee cache: {:?}", e)))?; - - Ok(state - .get_beacon_committees_at_epoch(relative_epoch) - .map_err(|e| ApiError::ServerError(format!("Unable to get all committees: {:?}", e)))? - .into_iter() - .map(|c| Committee { - slot: c.slot, - index: c.index, - committee: c.committee.to_vec(), - }) - .collect::>()) -} - -/// HTTP handler to return a `BeaconState` at a given `root` or `slot`. -/// -/// Will not return a state if the request slot is in the future. Will return states higher than -/// the current head by skipping slots. -pub fn get_state( - req: Request>, - ctx: Arc>, -) -> Result, ApiError> { - let head_state = ctx.beacon_chain.head()?.beacon_state; - - let (key, value) = match UrlQuery::from_request(&req) { - Ok(query) => { - // We have *some* parameters, just check them. - let query_params = ["root", "slot"]; - query.first_of(&query_params)? - } - Err(ApiError::BadRequest(_)) => { - // No parameters provided at all, use current slot. - (String::from("slot"), head_state.slot.to_string()) - } - Err(e) => { - return Err(e); - } - }; - - let (root, state): (Hash256, BeaconState) = match (key.as_ref(), value) { - ("slot", value) => state_at_slot(&ctx.beacon_chain, parse_slot(&value)?)?, - ("root", value) => { - let root = &parse_root(&value)?; - - let state = ctx - .beacon_chain - .store - .get_state(root, None)? - .ok_or_else(|| ApiError::NotFound(format!("No state for root: {:?}", root)))?; - - (*root, state) - } - _ => return Err(ApiError::ServerError("Unexpected query parameter".into())), - }; - - Ok(StateResponse { - root, - beacon_state: state, - }) -} - -/// HTTP handler to return a `BeaconState` root at a given `slot`. -/// -/// Will not return a state if the request slot is in the future. Will return states higher than -/// the current head by skipping slots. -pub fn get_state_root( - req: Request>, - ctx: Arc>, -) -> Result { - let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; - let slot = parse_slot(&slot_string)?; - - state_root_at_slot(&ctx.beacon_chain, slot, StateSkipConfig::WithStateRoots) -} - -/// HTTP handler to return a `BeaconState` at the genesis block. -/// -/// This is an undocumented convenience method used during testing. For production, simply do a -/// state request at slot 0. 
-pub fn get_genesis_state( - ctx: Arc>, -) -> Result, ApiError> { - state_at_slot(&ctx.beacon_chain, Slot::new(0)).map(|(_root, state)| state) -} - -pub fn proposer_slashing( - req: Request>, - ctx: Arc>, -) -> Result { - let body = req.into_body(); - - serde_json::from_slice::(&body) - .map_err(|e| format!("Unable to parse JSON into ProposerSlashing: {:?}", e)) - .and_then(move |proposer_slashing| { - if ctx.beacon_chain.eth1_chain.is_some() { - let obs_outcome = ctx - .beacon_chain - .verify_proposer_slashing_for_gossip(proposer_slashing) - .map_err(|e| format!("Error while verifying proposer slashing: {:?}", e))?; - if let ObservationOutcome::New(verified_proposer_slashing) = obs_outcome { - ctx.beacon_chain - .import_proposer_slashing(verified_proposer_slashing); - Ok(()) - } else { - Err("Proposer slashing for that validator index already known".into()) - } - } else { - Err("Cannot insert proposer slashing on node without Eth1 connection.".to_string()) - } - }) - .map_err(ApiError::BadRequest)?; - - Ok(true) -} - -pub fn attester_slashing( - req: Request>, - ctx: Arc>, -) -> Result { - let body = req.into_body(); - serde_json::from_slice::>(&body) - .map_err(|e| { - ApiError::BadRequest(format!( - "Unable to parse JSON into AttesterSlashing: {:?}", - e - )) - }) - .and_then(move |attester_slashing| { - if ctx.beacon_chain.eth1_chain.is_some() { - ctx.beacon_chain - .verify_attester_slashing_for_gossip(attester_slashing) - .map_err(|e| format!("Error while verifying attester slashing: {:?}", e)) - .and_then(|outcome| { - if let ObservationOutcome::New(verified_attester_slashing) = outcome { - ctx.beacon_chain - .import_attester_slashing(verified_attester_slashing) - .map_err(|e| { - format!("Error while importing attester slashing: {:?}", e) - }) - } else { - Err("Attester slashing only covers already slashed indices".to_string()) - } - }) - .map_err(ApiError::BadRequest) - } else { - Err(ApiError::BadRequest( - "Cannot insert attester slashing on node without Eth1 connection.".to_string(), - )) - } - })?; - - Ok(true) -} diff --git a/beacon_node/rest_api/src/config.rs b/beacon_node/rest_api/src/config.rs deleted file mode 100644 index 815fccfd0..000000000 --- a/beacon_node/rest_api/src/config.rs +++ /dev/null @@ -1,55 +0,0 @@ -use serde::{Deserialize, Serialize}; -use std::net::Ipv4Addr; - -/// Defines the encoding for the API. -#[derive(Clone, Serialize, Deserialize, Copy)] -pub enum ApiEncodingFormat { - JSON, - YAML, - SSZ, -} - -impl ApiEncodingFormat { - pub fn get_content_type(&self) -> &str { - match self { - ApiEncodingFormat::JSON => "application/json", - ApiEncodingFormat::YAML => "application/yaml", - ApiEncodingFormat::SSZ => "application/ssz", - } - } -} - -impl From<&str> for ApiEncodingFormat { - fn from(f: &str) -> ApiEncodingFormat { - match f { - "application/yaml" => ApiEncodingFormat::YAML, - "application/ssz" => ApiEncodingFormat::SSZ, - _ => ApiEncodingFormat::JSON, - } - } -} - -/// HTTP REST API Configuration -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Config { - /// Enable the REST API server. - pub enabled: bool, - /// The IPv4 address the REST API HTTP server will listen on. - pub listen_address: Ipv4Addr, - /// The port the REST API HTTP server will listen on. - pub port: u16, - /// If something else than "", a 'Access-Control-Allow-Origin' header will be present in - /// responses. Put *, to allow any origin. 
- pub allow_origin: String, -} - -impl Default for Config { - fn default() -> Self { - Config { - enabled: false, - listen_address: Ipv4Addr::new(127, 0, 0, 1), - port: 5052, - allow_origin: "".to_string(), - } - } -} diff --git a/beacon_node/rest_api/src/consensus.rs b/beacon_node/rest_api/src/consensus.rs deleted file mode 100644 index 9df57f055..000000000 --- a/beacon_node/rest_api/src/consensus.rs +++ /dev/null @@ -1,126 +0,0 @@ -use crate::helpers::*; -use crate::{ApiError, Context, UrlQuery}; -use beacon_chain::BeaconChainTypes; -use hyper::Request; -use rest_types::{IndividualVotesRequest, IndividualVotesResponse}; -use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; -use state_processing::per_epoch_processing::{TotalBalances, ValidatorStatuses}; -use std::sync::Arc; -use types::EthSpec; - -/// The results of validators voting during an epoch. -/// -/// Provides information about the current and previous epochs. -#[derive(Serialize, Deserialize, Encode, Decode)] -pub struct VoteCount { - /// The total effective balance of all active validators during the _current_ epoch. - pub current_epoch_active_gwei: u64, - /// The total effective balance of all active validators during the _previous_ epoch. - pub previous_epoch_active_gwei: u64, - /// The total effective balance of all validators who attested during the _current_ epoch. - pub current_epoch_attesting_gwei: u64, - /// The total effective balance of all validators who attested during the _current_ epoch and - /// agreed with the state about the beacon block at the first slot of the _current_ epoch. - pub current_epoch_target_attesting_gwei: u64, - /// The total effective balance of all validators who attested during the _previous_ epoch. - pub previous_epoch_attesting_gwei: u64, - /// The total effective balance of all validators who attested during the _previous_ epoch and - /// agreed with the state about the beacon block at the first slot of the _previous_ epoch. - pub previous_epoch_target_attesting_gwei: u64, - /// The total effective balance of all validators who attested during the _previous_ epoch and - /// agreed with the state about the beacon block at the time of attestation. - pub previous_epoch_head_attesting_gwei: u64, -} - -impl Into for TotalBalances { - fn into(self) -> VoteCount { - VoteCount { - current_epoch_active_gwei: self.current_epoch(), - previous_epoch_active_gwei: self.previous_epoch(), - current_epoch_attesting_gwei: self.current_epoch_attesters(), - current_epoch_target_attesting_gwei: self.current_epoch_target_attesters(), - previous_epoch_attesting_gwei: self.previous_epoch_attesters(), - previous_epoch_target_attesting_gwei: self.previous_epoch_target_attesters(), - previous_epoch_head_attesting_gwei: self.previous_epoch_head_attesters(), - } - } -} - -/// HTTP handler return a `VoteCount` for some given `Epoch`. -pub fn get_vote_count( - req: Request>, - ctx: Arc>, -) -> Result { - let query = UrlQuery::from_request(&req)?; - - let epoch = query.epoch()?; - // This is the last slot of the given epoch (one prior to the first slot of the next epoch). 
- let target_slot = (epoch + 1).start_slot(T::EthSpec::slots_per_epoch()) - 1; - - let (_root, state) = state_at_slot(&ctx.beacon_chain, target_slot)?; - let spec = &ctx.beacon_chain.spec; - - let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; - validator_statuses.process_attestations(&state, spec)?; - - Ok(validator_statuses.total_balances.into()) -} - -pub fn post_individual_votes( - req: Request>, - ctx: Arc>, -) -> Result, ApiError> { - let body = req.into_body(); - - serde_json::from_slice::(&body) - .map_err(|e| { - ApiError::BadRequest(format!( - "Unable to parse JSON into ValidatorDutiesRequest: {:?}", - e - )) - }) - .and_then(move |body| { - let epoch = body.epoch; - - // This is the last slot of the given epoch (one prior to the first slot of the next epoch). - let target_slot = (epoch + 1).start_slot(T::EthSpec::slots_per_epoch()) - 1; - - let (_root, mut state) = state_at_slot(&ctx.beacon_chain, target_slot)?; - let spec = &ctx.beacon_chain.spec; - - let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; - validator_statuses.process_attestations(&state, spec)?; - - body.pubkeys - .into_iter() - .map(|pubkey| { - let validator_index_opt = state.get_validator_index(&pubkey).map_err(|e| { - ApiError::ServerError(format!("Unable to read pubkey cache: {:?}", e)) - })?; - - if let Some(validator_index) = validator_index_opt { - let vote = validator_statuses - .statuses - .get(validator_index) - .cloned() - .map(Into::into); - - Ok(IndividualVotesResponse { - epoch, - pubkey, - validator_index: Some(validator_index), - vote, - }) - } else { - Ok(IndividualVotesResponse { - epoch, - pubkey, - validator_index: None, - vote: None, - }) - } - }) - .collect::, _>>() - }) -} diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs deleted file mode 100644 index 66b5bd1a0..000000000 --- a/beacon_node/rest_api/src/helpers.rs +++ /dev/null @@ -1,260 +0,0 @@ -use crate::{ApiError, NetworkChannel}; -use beacon_chain::{BeaconChain, BeaconChainTypes, StateSkipConfig}; -use bls::PublicKeyBytes; -use eth2_libp2p::PubsubMessage; -use itertools::process_results; -use network::NetworkMessage; -use ssz::Decode; -use store::iter::AncestorIter; -use types::{ - BeaconState, CommitteeIndex, Epoch, EthSpec, Hash256, RelativeEpoch, SignedBeaconBlock, Slot, -}; - -/// Parse a slot. -/// -/// E.g., `"1234"` -pub fn parse_slot(string: &str) -> Result { - string - .parse::() - .map(Slot::from) - .map_err(|e| ApiError::BadRequest(format!("Unable to parse slot: {:?}", e))) -} - -/// Parse an epoch. -/// -/// E.g., `"13"` -pub fn parse_epoch(string: &str) -> Result { - string - .parse::() - .map(Epoch::from) - .map_err(|e| ApiError::BadRequest(format!("Unable to parse epoch: {:?}", e))) -} - -/// Parse a CommitteeIndex. -/// -/// E.g., `"18"` -pub fn parse_committee_index(string: &str) -> Result { - string - .parse::() - .map_err(|e| ApiError::BadRequest(format!("Unable to parse committee index: {:?}", e))) -} - -/// Parse an SSZ object from some hex-encoded bytes. 
-/// -/// E.g., A signature is `"0x0000000000000000000000000000000000000000000000000000000000000000"` -pub fn parse_hex_ssz_bytes(string: &str) -> Result { - const PREFIX: &str = "0x"; - - if string.starts_with(PREFIX) { - let trimmed = string.trim_start_matches(PREFIX); - let bytes = hex::decode(trimmed) - .map_err(|e| ApiError::BadRequest(format!("Unable to parse SSZ hex: {:?}", e)))?; - T::from_ssz_bytes(&bytes) - .map_err(|e| ApiError::BadRequest(format!("Unable to parse SSZ bytes: {:?}", e))) - } else { - Err(ApiError::BadRequest( - "Hex bytes must have a 0x prefix".to_string(), - )) - } -} - -/// Parse a root from a `0x` prefixed string. -/// -/// E.g., `"0x0000000000000000000000000000000000000000000000000000000000000000"` -pub fn parse_root(string: &str) -> Result { - const PREFIX: &str = "0x"; - - if string.starts_with(PREFIX) { - let trimmed = string.trim_start_matches(PREFIX); - trimmed - .parse() - .map_err(|e| ApiError::BadRequest(format!("Unable to parse root: {:?}", e))) - } else { - Err(ApiError::BadRequest( - "Root must have a 0x prefix".to_string(), - )) - } -} - -/// Parse a PublicKey from a `0x` prefixed hex string -pub fn parse_pubkey_bytes(string: &str) -> Result { - const PREFIX: &str = "0x"; - if string.starts_with(PREFIX) { - let pubkey_bytes = hex::decode(string.trim_start_matches(PREFIX)) - .map_err(|e| ApiError::BadRequest(format!("Invalid hex string: {:?}", e)))?; - let pubkey = PublicKeyBytes::deserialize(pubkey_bytes.as_slice()).map_err(|e| { - ApiError::BadRequest(format!("Unable to deserialize public key: {:?}.", e)) - })?; - Ok(pubkey) - } else { - Err(ApiError::BadRequest( - "Public key must have a 0x prefix".to_string(), - )) - } -} - -/// Returns the root of the `SignedBeaconBlock` in the canonical chain of `beacon_chain` at the given -/// `slot`, if possible. -/// -/// May return a root for a previous slot, in the case of skip slots. -pub fn block_root_at_slot( - beacon_chain: &BeaconChain, - target: Slot, -) -> Result, ApiError> { - Ok(process_results( - beacon_chain.rev_iter_block_roots()?, - |iter| { - iter.take_while(|(_, slot)| *slot >= target) - .find(|(_, slot)| *slot == target) - .map(|(root, _)| root) - }, - )?) -} - -/// Returns a `BeaconState` and it's root in the canonical chain of `beacon_chain` at the given -/// `slot`, if possible. -/// -/// Will not return a state if the request slot is in the future. Will return states higher than -/// the current head by skipping slots. -pub fn state_at_slot( - beacon_chain: &BeaconChain, - slot: Slot, -) -> Result<(Hash256, BeaconState), ApiError> { - let head = beacon_chain.head()?; - - if head.beacon_state.slot == slot { - Ok((head.beacon_state_root, head.beacon_state)) - } else { - let root = state_root_at_slot(beacon_chain, slot, StateSkipConfig::WithStateRoots)?; - - let state: BeaconState = beacon_chain - .store - .get_state(&root, Some(slot))? - .ok_or_else(|| ApiError::NotFound(format!("Unable to find state at root {}", root)))?; - - Ok((root, state)) - } -} - -/// Returns the root of the `BeaconState` in the canonical chain of `beacon_chain` at the given -/// `slot`, if possible. -/// -/// Will not return a state root if the request slot is in the future. Will return state roots -/// higher than the current head by skipping slots. 
-pub fn state_root_at_slot( - beacon_chain: &BeaconChain, - slot: Slot, - config: StateSkipConfig, -) -> Result { - let head_state = &beacon_chain.head()?.beacon_state; - let current_slot = beacon_chain - .slot() - .map_err(|_| ApiError::ServerError("Unable to read slot clock".to_string()))?; - - // There are four scenarios when obtaining a state for a given slot: - // - // 1. The request slot is in the future. - // 2. The request slot is the same as the best block (head) slot. - // 3. The request slot is prior to the head slot. - // 4. The request slot is later than the head slot. - if current_slot < slot { - // 1. The request slot is in the future. Reject the request. - // - // We could actually speculate about future state roots by skipping slots, however that's - // likely to cause confusion for API users. - Err(ApiError::BadRequest(format!( - "Requested slot {} is past the current slot {}", - slot, current_slot - ))) - } else if head_state.slot == slot { - // 2. The request slot is the same as the best block (head) slot. - // - // The head state root is stored in memory, return a reference. - Ok(beacon_chain.head()?.beacon_state_root) - } else if head_state.slot > slot { - // 3. The request slot is prior to the head slot. - // - // Iterate through the state roots on the head state to find the root for that - // slot. Once the root is found, load it from the database. - process_results( - head_state - .try_iter_ancestor_roots(beacon_chain.store.clone()) - .ok_or_else(|| { - ApiError::ServerError("Failed to create roots iterator".to_string()) - })?, - |mut iter| iter.find(|(_, s)| *s == slot).map(|(root, _)| root), - )? - .ok_or_else(|| ApiError::NotFound(format!("Unable to find state at slot {}", slot))) - } else { - // 4. The request slot is later than the head slot. - // - // Use `per_slot_processing` to advance the head state to the present slot, - // assuming that all slots do not contain a block (i.e., they are skipped slots). - let mut state = beacon_chain.head()?.beacon_state; - let spec = &T::EthSpec::default_spec(); - - let skip_state_root = match config { - StateSkipConfig::WithStateRoots => None, - StateSkipConfig::WithoutStateRoots => Some(Hash256::zero()), - }; - - for _ in state.slot.as_u64()..slot.as_u64() { - // Ensure the next epoch state caches are built in case of an epoch transition. - state.build_committee_cache(RelativeEpoch::Next, spec)?; - - state_processing::per_slot_processing(&mut state, skip_state_root, spec)?; - } - - // Note: this is an expensive operation. Once the tree hash cache is implement it may be - // used here. - Ok(state.canonical_root()) - } -} - -pub fn publish_beacon_block_to_network( - chan: &NetworkChannel, - block: SignedBeaconBlock, -) -> Result<(), ApiError> { - // send the block via SSZ encoding - let messages = vec![PubsubMessage::BeaconBlock(Box::new(block))]; - - // Publish the block to the p2p network via gossipsub. 
- if let Err(e) = chan.send(NetworkMessage::Publish { messages }) { - return Err(ApiError::ServerError(format!( - "Unable to send new block to network: {:?}", - e - ))); - } - - Ok(()) -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn parse_root_works() { - assert_eq!( - parse_root("0x0000000000000000000000000000000000000000000000000000000000000000"), - Ok(Hash256::zero()) - ); - assert_eq!( - parse_root("0x000000000000000000000000000000000000000000000000000000000000002a"), - Ok(Hash256::from_low_u64_be(42)) - ); - assert!( - parse_root("0000000000000000000000000000000000000000000000000000000000000042").is_err() - ); - assert!(parse_root("0x").is_err()); - assert!(parse_root("0x00").is_err()); - } - - #[test] - fn parse_slot_works() { - assert_eq!(parse_slot("0"), Ok(Slot::new(0))); - assert_eq!(parse_slot("42"), Ok(Slot::new(42))); - assert_eq!(parse_slot("10000000"), Ok(Slot::new(10_000_000))); - assert!(parse_slot("cats").is_err()); - } -} diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs deleted file mode 100644 index 405e08e21..000000000 --- a/beacon_node/rest_api/src/lib.rs +++ /dev/null @@ -1,127 +0,0 @@ -#[macro_use] -extern crate lazy_static; -mod router; -extern crate network as client_network; - -mod beacon; -pub mod config; -mod consensus; -mod helpers; -mod lighthouse; -mod metrics; -mod node; -mod url_query; -mod validator; - -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use bus::Bus; -use client_network::NetworkMessage; -pub use config::ApiEncodingFormat; -use eth2_config::Eth2Config; -use eth2_libp2p::NetworkGlobals; -use futures::future::TryFutureExt; -use hyper::server::conn::AddrStream; -use hyper::service::{make_service_fn, service_fn}; -use hyper::{Body, Request, Server}; -use parking_lot::Mutex; -use rest_types::ApiError; -use slog::{info, warn}; -use std::net::SocketAddr; -use std::path::PathBuf; -use std::sync::Arc; -use tokio::sync::mpsc; -use types::SignedBeaconBlockHash; -use url_query::UrlQuery; - -pub use crate::helpers::parse_pubkey_bytes; -pub use config::Config; -pub use router::Context; - -pub type NetworkChannel = mpsc::UnboundedSender>; - -pub struct NetworkInfo { - pub network_globals: Arc>, - pub network_chan: NetworkChannel, -} - -// Allowing more than 7 arguments. -#[allow(clippy::too_many_arguments)] -pub fn start_server( - executor: environment::TaskExecutor, - config: &Config, - beacon_chain: Arc>, - network_info: NetworkInfo, - db_path: PathBuf, - freezer_db_path: PathBuf, - eth2_config: Eth2Config, - events: Arc>>, -) -> Result { - let log = executor.log(); - let eth2_config = Arc::new(eth2_config); - - let context = Arc::new(Context { - executor: executor.clone(), - config: config.clone(), - beacon_chain, - network_globals: network_info.network_globals.clone(), - network_chan: network_info.network_chan, - eth2_config, - log: log.clone(), - db_path, - freezer_db_path, - events, - }); - - // Define the function that will build the request handler. - let make_service = make_service_fn(move |_socket: &AddrStream| { - let ctx = context.clone(); - - async move { - Ok::<_, hyper::Error>(service_fn(move |req: Request| { - router::on_http_request(req, ctx.clone()) - })) - } - }); - - let bind_addr = (config.listen_address, config.port).into(); - let server = Server::bind(&bind_addr).serve(make_service); - - // Determine the address the server is actually listening on. - // - // This may be different to `bind_addr` if bind port was 0 (this allows the OS to choose a free - // port). 
- let actual_listen_addr = server.local_addr(); - - // Build a channel to kill the HTTP server. - let exit = executor.exit(); - let inner_log = log.clone(); - let server_exit = async move { - let _ = exit.await; - info!(inner_log, "HTTP service shutdown"); - }; - - // Configure the `hyper` server to gracefully shutdown when the shutdown channel is triggered. - let inner_log = log.clone(); - let server_future = server - .with_graceful_shutdown(async { - server_exit.await; - }) - .map_err(move |e| { - warn!( - inner_log, - "HTTP server failed to start, Unable to bind"; "address" => format!("{:?}", e) - ) - }) - .unwrap_or_else(|_| ()); - - info!( - log, - "HTTP API started"; - "address" => format!("{}", actual_listen_addr.ip()), - "port" => actual_listen_addr.port(), - ); - - executor.spawn_without_exit(server_future, "http"); - - Ok(actual_listen_addr) -} diff --git a/beacon_node/rest_api/src/lighthouse.rs b/beacon_node/rest_api/src/lighthouse.rs deleted file mode 100644 index 4d0fae926..000000000 --- a/beacon_node/rest_api/src/lighthouse.rs +++ /dev/null @@ -1,48 +0,0 @@ -//! This contains a collection of lighthouse specific HTTP endpoints. - -use crate::{ApiError, Context}; -use beacon_chain::BeaconChainTypes; -use eth2_libp2p::PeerInfo; -use serde::Serialize; -use std::sync::Arc; -use types::EthSpec; - -/// Returns all known peers and corresponding information -pub fn peers(ctx: Arc>) -> Result>, ApiError> { - Ok(ctx - .network_globals - .peers - .read() - .peers() - .map(|(peer_id, peer_info)| Peer { - peer_id: peer_id.to_string(), - peer_info: peer_info.clone(), - }) - .collect()) -} - -/// Returns all known connected peers and their corresponding information -pub fn connected_peers( - ctx: Arc>, -) -> Result>, ApiError> { - Ok(ctx - .network_globals - .peers - .read() - .connected_peers() - .map(|(peer_id, peer_info)| Peer { - peer_id: peer_id.to_string(), - peer_info: peer_info.clone(), - }) - .collect()) -} - -/// Information returned by `peers` and `connected_peers`. -#[derive(Clone, Debug, Serialize)] -#[serde(bound = "T: EthSpec")] -pub struct Peer { - /// The Peer's ID - peer_id: String, - /// The PeerInfo associated with the peer. - peer_info: PeerInfo, -} diff --git a/beacon_node/rest_api/src/node.rs b/beacon_node/rest_api/src/node.rs deleted file mode 100644 index bd5615de3..000000000 --- a/beacon_node/rest_api/src/node.rs +++ /dev/null @@ -1,39 +0,0 @@ -use crate::{ApiError, Context}; -use beacon_chain::BeaconChainTypes; -use eth2_libp2p::types::SyncState; -use rest_types::{SyncingResponse, SyncingStatus}; -use std::sync::Arc; -use types::Slot; - -/// Returns a syncing status. -pub fn syncing(ctx: Arc>) -> Result { - let current_slot = ctx - .beacon_chain - .head_info() - .map_err(|e| ApiError::ServerError(format!("Unable to read head slot: {:?}", e)))? - .slot; - - let (starting_slot, highest_slot) = match ctx.network_globals.sync_state() { - SyncState::SyncingFinalized { - start_slot, - head_slot, - .. 
- } - | SyncState::SyncingHead { - start_slot, - head_slot, - } => (start_slot, head_slot), - SyncState::Synced | SyncState::Stalled => (Slot::from(0u64), current_slot), - }; - - let sync_status = SyncingStatus { - starting_slot, - current_slot, - highest_slot, - }; - - Ok(SyncingResponse { - is_syncing: ctx.network_globals.is_syncing(), - sync_status, - }) -} diff --git a/beacon_node/rest_api/src/router.rs b/beacon_node/rest_api/src/router.rs deleted file mode 100644 index bed7ba77a..000000000 --- a/beacon_node/rest_api/src/router.rs +++ /dev/null @@ -1,322 +0,0 @@ -use crate::{ - beacon, config::Config, consensus, lighthouse, metrics, node, validator, NetworkChannel, -}; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use bus::Bus; -use environment::TaskExecutor; -use eth2_config::Eth2Config; -use eth2_libp2p::{NetworkGlobals, PeerId}; -use hyper::header::HeaderValue; -use hyper::{Body, Method, Request, Response}; -use lighthouse_version::version_with_platform; -use operation_pool::PersistedOperationPool; -use parking_lot::Mutex; -use rest_types::{ApiError, Handler, Health}; -use slog::debug; -use std::path::PathBuf; -use std::sync::Arc; -use std::time::Instant; -use types::{EthSpec, SignedBeaconBlockHash}; - -pub struct Context { - pub executor: TaskExecutor, - pub config: Config, - pub beacon_chain: Arc>, - pub network_globals: Arc>, - pub network_chan: NetworkChannel, - pub eth2_config: Arc, - pub log: slog::Logger, - pub db_path: PathBuf, - pub freezer_db_path: PathBuf, - pub events: Arc>>, -} - -pub async fn on_http_request( - req: Request, - ctx: Arc>, -) -> Result, ApiError> { - let path = req.uri().path().to_string(); - - let _timer = metrics::start_timer_vec(&metrics::BEACON_HTTP_API_TIMES_TOTAL, &[&path]); - metrics::inc_counter_vec(&metrics::BEACON_HTTP_API_REQUESTS_TOTAL, &[&path]); - - let received_instant = Instant::now(); - let log = ctx.log.clone(); - let allow_origin = ctx.config.allow_origin.clone(); - - match route(req, ctx).await { - Ok(mut response) => { - metrics::inc_counter_vec(&metrics::BEACON_HTTP_API_SUCCESS_TOTAL, &[&path]); - - if allow_origin != "" { - let headers = response.headers_mut(); - headers.insert( - hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN, - HeaderValue::from_str(&allow_origin)?, - ); - headers.insert(hyper::header::VARY, HeaderValue::from_static("Origin")); - } - - debug!( - log, - "HTTP API request successful"; - "path" => path, - "duration_ms" => Instant::now().duration_since(received_instant).as_millis() - ); - Ok(response) - } - - Err(error) => { - metrics::inc_counter_vec(&metrics::BEACON_HTTP_API_ERROR_TOTAL, &[&path]); - - debug!( - log, - "HTTP API request failure"; - "path" => path, - "duration_ms" => Instant::now().duration_since(received_instant).as_millis() - ); - Ok(error.into()) - } - } -} - -async fn route( - req: Request, - ctx: Arc>, -) -> Result, ApiError> { - let path = req.uri().path().to_string(); - let ctx = ctx.clone(); - let method = req.method().clone(); - let executor = ctx.executor.clone(); - let handler = Handler::new(req, ctx, executor)?; - - match (method, path.as_ref()) { - (Method::GET, "/node/version") => handler - .static_value(version_with_platform()) - .await? - .serde_encodings(), - (Method::GET, "/node/health") => handler - .static_value(Health::observe().map_err(ApiError::ServerError)?) - .await? - .serde_encodings(), - (Method::GET, "/node/syncing") => handler - .allow_body() - .in_blocking_task(|_, ctx| node::syncing(ctx)) - .await? 
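The deleted router dispatches purely on a `(Method, path)` tuple, as the arms above and below show. A minimal, self-contained illustration of the same pattern, with a hand-rolled `Method` enum and string bodies standing in for hyper's types:

```rust
// Minimal (method, path) dispatch in the style of the deleted router.
// `Method` and the handler bodies are stand-ins for this sketch only.
#[derive(Debug, Clone, Copy)]
enum Method {
    Get,
    Post,
}

fn route(method: Method, path: &str) -> Result<String, String> {
    match (method, path) {
        (Method::Get, "/node/version") => Ok("lighthouse/vX.Y.Z".to_string()),
        (Method::Get, "/node/syncing") => Ok("{\"is_syncing\":false}".to_string()),
        (Method::Post, "/validator/duties") => Ok("[]".to_string()),
        _ => Err("Request path and/or method not found.".to_string()),
    }
}

fn main() {
    assert!(route(Method::Get, "/node/version").is_ok());
    assert!(route(Method::Post, "/node/version").is_err());
    assert!(route(Method::Get, "/nope").is_err());
}
```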
- .serde_encodings(), - (Method::GET, "/network/enr") => handler - .in_core_task(|_, ctx| Ok(ctx.network_globals.local_enr().to_base64())) - .await? - .serde_encodings(), - (Method::GET, "/network/peer_count") => handler - .in_core_task(|_, ctx| Ok(ctx.network_globals.connected_peers())) - .await? - .serde_encodings(), - (Method::GET, "/network/peer_id") => handler - .in_core_task(|_, ctx| Ok(ctx.network_globals.local_peer_id().to_base58())) - .await? - .serde_encodings(), - (Method::GET, "/network/peers") => handler - .in_blocking_task(|_, ctx| { - Ok(ctx - .network_globals - .peers - .read() - .connected_peer_ids() - .map(PeerId::to_string) - .collect::>()) - }) - .await? - .serde_encodings(), - (Method::GET, "/network/listen_port") => handler - .in_core_task(|_, ctx| Ok(ctx.network_globals.listen_port_tcp())) - .await? - .serde_encodings(), - (Method::GET, "/network/listen_addresses") => handler - .in_blocking_task(|_, ctx| Ok(ctx.network_globals.listen_multiaddrs())) - .await? - .serde_encodings(), - (Method::GET, "/beacon/head") => handler - .in_blocking_task(|_, ctx| beacon::get_head(ctx)) - .await? - .all_encodings(), - (Method::GET, "/beacon/heads") => handler - .in_blocking_task(|_, ctx| Ok(beacon::get_heads(ctx))) - .await? - .all_encodings(), - (Method::GET, "/beacon/block") => handler - .in_blocking_task(beacon::get_block) - .await? - .all_encodings(), - (Method::GET, "/beacon/block_root") => handler - .in_blocking_task(beacon::get_block_root) - .await? - .all_encodings(), - (Method::GET, "/beacon/fork") => handler - .in_blocking_task(|_, ctx| Ok(ctx.beacon_chain.head_info()?.fork)) - .await? - .all_encodings(), - (Method::GET, "/beacon/fork/stream") => { - handler.sse_stream(|_, ctx| beacon::stream_forks(ctx)).await - } - (Method::GET, "/beacon/genesis_time") => handler - .in_blocking_task(|_, ctx| Ok(ctx.beacon_chain.head_info()?.genesis_time)) - .await? - .all_encodings(), - (Method::GET, "/beacon/genesis_validators_root") => handler - .in_blocking_task(|_, ctx| Ok(ctx.beacon_chain.head_info()?.genesis_validators_root)) - .await? - .all_encodings(), - (Method::GET, "/beacon/validators") => handler - .in_blocking_task(beacon::get_validators) - .await? - .all_encodings(), - (Method::POST, "/beacon/validators") => handler - .allow_body() - .in_blocking_task(beacon::post_validators) - .await? - .all_encodings(), - (Method::GET, "/beacon/validators/all") => handler - .in_blocking_task(beacon::get_all_validators) - .await? - .all_encodings(), - (Method::GET, "/beacon/validators/active") => handler - .in_blocking_task(beacon::get_active_validators) - .await? - .all_encodings(), - (Method::GET, "/beacon/state") => handler - .in_blocking_task(beacon::get_state) - .await? - .all_encodings(), - (Method::GET, "/beacon/state_root") => handler - .in_blocking_task(beacon::get_state_root) - .await? - .all_encodings(), - (Method::GET, "/beacon/state/genesis") => handler - .in_blocking_task(|_, ctx| beacon::get_genesis_state(ctx)) - .await? - .all_encodings(), - (Method::GET, "/beacon/committees") => handler - .in_blocking_task(beacon::get_committees) - .await? - .all_encodings(), - (Method::POST, "/beacon/proposer_slashing") => handler - .allow_body() - .in_blocking_task(beacon::proposer_slashing) - .await? - .serde_encodings(), - (Method::POST, "/beacon/attester_slashing") => handler - .allow_body() - .in_blocking_task(beacon::attester_slashing) - .await? 
- .serde_encodings(), - (Method::POST, "/validator/duties") => handler - .allow_body() - .in_blocking_task(validator::post_validator_duties) - .await? - .serde_encodings(), - (Method::POST, "/validator/subscribe") => handler - .allow_body() - .in_blocking_task(validator::post_validator_subscriptions) - .await? - .serde_encodings(), - (Method::GET, "/validator/duties/all") => handler - .in_blocking_task(validator::get_all_validator_duties) - .await? - .serde_encodings(), - (Method::GET, "/validator/duties/active") => handler - .in_blocking_task(validator::get_active_validator_duties) - .await? - .serde_encodings(), - (Method::GET, "/validator/block") => handler - .in_blocking_task(validator::get_new_beacon_block) - .await? - .serde_encodings(), - (Method::POST, "/validator/block") => handler - .allow_body() - .in_blocking_task(validator::publish_beacon_block) - .await? - .serde_encodings(), - (Method::GET, "/validator/attestation") => handler - .in_blocking_task(validator::get_new_attestation) - .await? - .serde_encodings(), - (Method::GET, "/validator/aggregate_attestation") => handler - .in_blocking_task(validator::get_aggregate_attestation) - .await? - .serde_encodings(), - (Method::POST, "/validator/attestations") => handler - .allow_body() - .in_blocking_task(validator::publish_attestations) - .await? - .serde_encodings(), - (Method::POST, "/validator/aggregate_and_proofs") => handler - .allow_body() - .in_blocking_task(validator::publish_aggregate_and_proofs) - .await? - .serde_encodings(), - (Method::GET, "/consensus/global_votes") => handler - .allow_body() - .in_blocking_task(consensus::get_vote_count) - .await? - .serde_encodings(), - (Method::POST, "/consensus/individual_votes") => handler - .allow_body() - .in_blocking_task(consensus::post_individual_votes) - .await? - .serde_encodings(), - (Method::GET, "/spec") => handler - // TODO: this clone is not ideal. - .in_blocking_task(|_, ctx| Ok(ctx.beacon_chain.spec.clone())) - .await? - .serde_encodings(), - (Method::GET, "/spec/slots_per_epoch") => handler - .static_value(T::EthSpec::slots_per_epoch()) - .await? - .serde_encodings(), - (Method::GET, "/spec/eth2_config") => handler - // TODO: this clone is not ideal. - .in_blocking_task(|_, ctx| Ok(ctx.eth2_config.as_ref().clone())) - .await? - .serde_encodings(), - (Method::GET, "/advanced/fork_choice") => handler - .in_blocking_task(|_, ctx| { - Ok(ctx - .beacon_chain - .fork_choice - .read() - .proto_array() - .core_proto_array() - .clone()) - }) - .await? - .serde_encodings(), - (Method::GET, "/advanced/operation_pool") => handler - .in_blocking_task(|_, ctx| { - Ok(PersistedOperationPool::from_operation_pool( - &ctx.beacon_chain.op_pool, - )) - }) - .await? - .serde_encodings(), - (Method::GET, "/metrics") => handler - .in_blocking_task(|_, ctx| metrics::get_prometheus(ctx)) - .await? - .text_encoding(), - (Method::GET, "/lighthouse/syncing") => handler - .in_blocking_task(|_, ctx| Ok(ctx.network_globals.sync_state())) - .await? - .serde_encodings(), - (Method::GET, "/lighthouse/peers") => handler - .in_blocking_task(|_, ctx| lighthouse::peers(ctx)) - .await? - .serde_encodings(), - (Method::GET, "/lighthouse/connected_peers") => handler - .in_blocking_task(|_, ctx| lighthouse::connected_peers(ctx)) - .await? 
- .serde_encodings(), - _ => Err(ApiError::NotFound( - "Request path and/or method not found.".to_owned(), - )), - } -} diff --git a/beacon_node/rest_api/src/url_query.rs b/beacon_node/rest_api/src/url_query.rs deleted file mode 100644 index fee0cf437..000000000 --- a/beacon_node/rest_api/src/url_query.rs +++ /dev/null @@ -1,166 +0,0 @@ -use crate::helpers::{parse_committee_index, parse_epoch, parse_hex_ssz_bytes, parse_slot}; -use crate::ApiError; -use hyper::Request; -use types::{AttestationData, CommitteeIndex, Epoch, Signature, Slot}; - -/// Provides handy functions for parsing the query parameters of a URL. - -#[derive(Clone, Copy)] -pub struct UrlQuery<'a>(url::form_urlencoded::Parse<'a>); - -impl<'a> UrlQuery<'a> { - /// Instantiate from an existing `Request`. - /// - /// Returns `Err` if `req` does not contain any query parameters. - pub fn from_request(req: &'a Request) -> Result { - let query_str = req.uri().query().unwrap_or_else(|| ""); - - Ok(UrlQuery(url::form_urlencoded::parse(query_str.as_bytes()))) - } - - /// Returns the first `(key, value)` pair found where the `key` is in `keys`. - /// - /// If no match is found, an `InvalidQueryParams` error is returned. - pub fn first_of(mut self, keys: &[&str]) -> Result<(String, String), ApiError> { - self.0 - .find(|(key, _value)| keys.contains(&&**key)) - .map(|(key, value)| (key.into_owned(), value.into_owned())) - .ok_or_else(|| { - ApiError::BadRequest(format!( - "URL query must be valid and contain at least one of the following keys: {:?}", - keys - )) - }) - } - - /// Returns the first `(key, value)` pair found where the `key` is in `keys`, if any. - /// - /// Returns `None` if no match is found. - pub fn first_of_opt(mut self, keys: &[&str]) -> Option<(String, String)> { - self.0 - .find(|(key, _value)| keys.contains(&&**key)) - .map(|(key, value)| (key.into_owned(), value.into_owned())) - } - - /// Returns the value for `key`, if and only if `key` is the only key present in the query - /// parameters. - pub fn only_one(self, key: &str) -> Result { - let queries: Vec<_> = self - .0 - .map(|(k, v)| (k.into_owned(), v.into_owned())) - .collect(); - - if queries.len() == 1 { - let (first_key, first_value) = &queries[0]; // Must have 0 index if len is 1. - if first_key == key { - Ok(first_value.to_string()) - } else { - Err(ApiError::BadRequest(format!( - "Only the {} query parameter is supported", - key - ))) - } - } else { - Err(ApiError::BadRequest(format!( - "Only one query parameter is allowed, {} supplied", - queries.len() - ))) - } - } - - /// Returns a vector of all values present where `key` is in `keys - /// - /// If no match is found, an `InvalidQueryParams` error is returned. - pub fn all_of(self, key: &str) -> Result, ApiError> { - let queries: Vec<_> = self - .0 - .filter_map(|(k, v)| { - if k.eq(key) { - Some(v.into_owned()) - } else { - None - } - }) - .collect(); - Ok(queries) - } - - /// Returns the value of the first occurrence of the `epoch` key. - pub fn epoch(self) -> Result { - self.first_of(&["epoch"]) - .and_then(|(_key, value)| parse_epoch(&value)) - } - - /// Returns the value of the first occurrence of the `slot` key. - pub fn slot(self) -> Result { - self.first_of(&["slot"]) - .and_then(|(_key, value)| parse_slot(&value)) - } - - /// Returns the value of the first occurrence of the `committee_index` key. 
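The `first_of`/`only_one` semantics above can be sketched without the `url` crate. This stand-in splits on `&` and `=` directly, ignores percent-encoding, and is illustrative only:

```rust
// Std-only sketch of the `first_of` / `only_one` query semantics.
fn pairs(query: &str) -> impl Iterator<Item = (&str, &str)> {
    query
        .split('&')
        .filter(|kv| !kv.is_empty())
        .map(|kv| kv.split_once('=').unwrap_or((kv, "")))
}

/// First (key, value) pair whose key is in `keys`, if any.
fn first_of<'a>(query: &'a str, keys: &[&str]) -> Option<(&'a str, &'a str)> {
    pairs(query).find(|(k, _)| keys.contains(k))
}

/// The value for `key`, if and only if it is the sole query parameter.
fn only_one<'a>(query: &'a str, key: &str) -> Option<&'a str> {
    let all: Vec<_> = pairs(query).collect();
    match all.as_slice() {
        [(k, v)] if *k == key => Some(*v),
        _ => None,
    }
}

fn main() {
    assert_eq!(first_of("a=42&b=12", &["b", "a"]), Some(("a", "42")));
    assert_eq!(only_one("a=42", "a"), Some("42"));
    assert_eq!(only_one("a=42&b=12", "a"), None); // more than one parameter
}
```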
- pub fn committee_index(self) -> Result { - self.first_of(&["committee_index"]) - .and_then(|(_key, value)| parse_committee_index(&value)) - } - - /// Returns the value of the first occurrence of the `randao_reveal` key. - pub fn randao_reveal(self) -> Result { - self.first_of(&["randao_reveal"]) - .and_then(|(_key, value)| parse_hex_ssz_bytes(&value)) - } - - /// Returns the value of the first occurrence of the `attestation_data` key. - pub fn attestation_data(self) -> Result { - self.first_of(&["attestation_data"]) - .and_then(|(_key, value)| parse_hex_ssz_bytes(&value)) - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn only_one() { - let get_result = |addr: &str, key: &str| -> Result { - UrlQuery(url::Url::parse(addr).unwrap().query_pairs()).only_one(key) - }; - - assert_eq!(get_result("http://cat.io/?a=42", "a"), Ok("42".to_string())); - assert!(get_result("http://cat.io/?a=42", "b").is_err()); - assert!(get_result("http://cat.io/?a=42&b=12", "a").is_err()); - assert!(get_result("http://cat.io/", "").is_err()); - } - - #[test] - fn first_of() { - let url = url::Url::parse("http://lighthouse.io/cats?a=42&b=12&c=100").unwrap(); - let get_query = || UrlQuery(url.query_pairs()); - - assert_eq!( - get_query().first_of(&["a"]), - Ok(("a".to_string(), "42".to_string())) - ); - assert_eq!( - get_query().first_of(&["a", "b", "c"]), - Ok(("a".to_string(), "42".to_string())) - ); - assert_eq!( - get_query().first_of(&["a", "a", "a"]), - Ok(("a".to_string(), "42".to_string())) - ); - assert_eq!( - get_query().first_of(&["a", "b", "c"]), - Ok(("a".to_string(), "42".to_string())) - ); - assert_eq!( - get_query().first_of(&["b", "c"]), - Ok(("b".to_string(), "12".to_string())) - ); - assert_eq!( - get_query().first_of(&["c"]), - Ok(("c".to_string(), "100".to_string())) - ); - assert!(get_query().first_of(&["nothing"]).is_err()); - } -} diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs deleted file mode 100644 index 49342ddaa..000000000 --- a/beacon_node/rest_api/src/validator.rs +++ /dev/null @@ -1,747 +0,0 @@ -use crate::helpers::{parse_hex_ssz_bytes, publish_beacon_block_to_network}; -use crate::{ApiError, Context, NetworkChannel, UrlQuery}; -use beacon_chain::{ - attestation_verification::Error as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes, - BlockError, ForkChoiceError, StateSkipConfig, -}; -use bls::PublicKeyBytes; -use eth2_libp2p::PubsubMessage; -use hyper::Request; -use network::NetworkMessage; -use rest_types::{ValidatorDutiesRequest, ValidatorDutyBytes, ValidatorSubscription}; -use slog::{error, info, trace, warn, Logger}; -use std::sync::Arc; -use types::beacon_state::EthSpec; -use types::{ - Attestation, AttestationData, BeaconBlock, BeaconState, Epoch, RelativeEpoch, SelectionProof, - SignedAggregateAndProof, SignedBeaconBlock, SubnetId, -}; - -/// HTTP Handler to retrieve the duties for a set of validators during a particular epoch. This -/// method allows for collecting bulk sets of validator duties without risking exceeding the max -/// URL length with query pairs. 
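The doc comment above explains why duties are fetched via `POST`: a large set of pubkeys would overflow URL length limits if passed as query pairs. A hedged sketch of deserialising such a body, assuming `serde` and `serde_json`; the `DutiesRequest` struct here only mirrors the `epoch`/`pubkeys` fields used by the handler below and is not the real `ValidatorDutiesRequest`:

```rust
// Sketch: deserialising a bulk duties request from a JSON body.
use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct DutiesRequest {
    epoch: u64,
    pubkeys: Vec<String>, // hex strings in this toy; real code uses BLS key bytes
}

fn main() -> Result<(), serde_json::Error> {
    let body = br#"{"epoch": 3, "pubkeys": ["0xaa", "0xbb"]}"#;
    let req: DutiesRequest = serde_json::from_slice(body)?;
    assert_eq!(req.epoch, 3);
    assert_eq!(req.pubkeys.len(), 2);
    Ok(())
}
```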
-pub fn post_validator_duties<T: BeaconChainTypes>(
-    req: Request<Vec<u8>>,
-    ctx: Arc<Context<T>>,
-) -> Result<Vec<ValidatorDutyBytes>, ApiError> {
-    let body = req.into_body();
-
-    serde_json::from_slice::<ValidatorDutiesRequest>(&body)
-        .map_err(|e| {
-            ApiError::BadRequest(format!(
-                "Unable to parse JSON into ValidatorDutiesRequest: {:?}",
-                e
-            ))
-        })
-        .and_then(|bulk_request| {
-            return_validator_duties(
-                &ctx.beacon_chain.clone(),
-                bulk_request.epoch,
-                bulk_request.pubkeys.into_iter().map(Into::into).collect(),
-            )
-        })
-}
-
-/// HTTP Handler to retrieve subscriptions for a set of validators. This allows the node to
-/// organise peer discovery and topic subscription for known validators.
-pub fn post_validator_subscriptions<T: BeaconChainTypes>(
-    req: Request<Vec<u8>>,
-    ctx: Arc<Context<T>>,
-) -> Result<(), ApiError> {
-    let body = req.into_body();
-
-    serde_json::from_slice(&body)
-        .map_err(|e| {
-            ApiError::BadRequest(format!(
-                "Unable to parse JSON into ValidatorSubscriptions: {:?}",
-                e
-            ))
-        })
-        .and_then(move |subscriptions: Vec<ValidatorSubscription>| {
-            ctx.network_chan
-                .send(NetworkMessage::Subscribe { subscriptions })
-                .map_err(|e| {
-                    ApiError::ServerError(format!(
-                        "Unable to send subscriptions to the network: {:?}",
-                        e
-                    ))
-                })?;
-            Ok(())
-        })
-}
-
-/// HTTP Handler to retrieve all validator duties for the given epoch.
-pub fn get_all_validator_duties<T: BeaconChainTypes>(
-    req: Request<Vec<u8>>,
-    ctx: Arc<Context<T>>,
-) -> Result<Vec<ValidatorDutyBytes>, ApiError> {
-    let query = UrlQuery::from_request(&req)?;
-
-    let epoch = query.epoch()?;
-
-    let state = get_state_for_epoch(&ctx.beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?;
-
-    let validator_pubkeys = state
-        .validators
-        .iter()
-        .map(|validator| validator.pubkey.clone())
-        .collect();
-
-    return_validator_duties(&ctx.beacon_chain, epoch, validator_pubkeys)
-}
-
-/// HTTP Handler to retrieve all active validator duties for the given epoch.
-pub fn get_active_validator_duties<T: BeaconChainTypes>(
-    req: Request<Vec<u8>>,
-    ctx: Arc<Context<T>>,
-) -> Result<Vec<ValidatorDutyBytes>, ApiError> {
-    let query = UrlQuery::from_request(&req)?;
-
-    let epoch = query.epoch()?;
-
-    let state = get_state_for_epoch(&ctx.beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?;
-
-    let validator_pubkeys = state
-        .validators
-        .iter()
-        .filter(|validator| validator.is_active_at(state.current_epoch()))
-        .map(|validator| validator.pubkey.clone())
-        .collect();
-
-    return_validator_duties(&ctx.beacon_chain, epoch, validator_pubkeys)
-}
-
-/// Helper function to return the state that can be used to determine the duties for some `epoch`.
-pub fn get_state_for_epoch<T: BeaconChainTypes>(
-    beacon_chain: &BeaconChain<T>,
-    epoch: Epoch,
-    config: StateSkipConfig,
-) -> Result<BeaconState<T::EthSpec>, ApiError> {
-    let slots_per_epoch = T::EthSpec::slots_per_epoch();
-    let head = beacon_chain.head()?;
-    let current_epoch = beacon_chain.epoch()?;
-    let head_epoch = head.beacon_state.current_epoch();
-
-    if head_epoch == current_epoch && RelativeEpoch::from_epoch(current_epoch, epoch).is_ok() {
-        Ok(head.beacon_state)
-    } else {
-        // If epoch is ahead of current epoch, then it should be a "next epoch" request for
-        // attestation duties. So, go to the start slot of the epoch prior to that,
-        // which should be just the next wall-clock epoch.
-        let slot = if epoch > current_epoch {
-            (epoch - 1).start_slot(slots_per_epoch)
-        }
-        // Otherwise, go to the start of the request epoch.
-        else {
-            epoch.start_slot(slots_per_epoch)
-        };
-
-        beacon_chain.state_at_slot(slot, config).map_err(|e| {
-            ApiError::ServerError(format!("Unable to load state for epoch {}: {:?}", epoch, e))
-        })
-    }
-}
-
-/// Helper function to get the duties for some `validator_pubkeys` in some `epoch`.
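The slot-selection rule in `get_state_for_epoch` above reduces to a small pure function. A sketch with plain integers, assuming the usual `start_slot = epoch * slots_per_epoch` convention:

```rust
// The lookup-slot rule from `get_state_for_epoch`, extracted as a pure function.
fn lookup_slot(request_epoch: u64, current_epoch: u64, slots_per_epoch: u64) -> u64 {
    if request_epoch > current_epoch {
        // "Next epoch" duties request: use the start of the epoch prior to it.
        (request_epoch - 1) * slots_per_epoch
    } else {
        // Otherwise, the start of the requested epoch itself.
        request_epoch * slots_per_epoch
    }
}

fn main() {
    let spe = 32;
    assert_eq!(lookup_slot(5, 5, spe), 160); // current epoch: its own start slot
    assert_eq!(lookup_slot(6, 5, spe), 160); // next epoch: previous epoch's start
    assert_eq!(lookup_slot(3, 5, spe), 96); // past epoch: its start slot
}
```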
-fn return_validator_duties( - beacon_chain: &BeaconChain, - epoch: Epoch, - validator_pubkeys: Vec, -) -> Result, ApiError> { - let mut state = get_state_for_epoch(&beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?; - - let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), epoch) - .map_err(|_| ApiError::ServerError(String::from("Loaded state is in the wrong epoch")))?; - - state - .build_committee_cache(relative_epoch, &beacon_chain.spec) - .map_err(|e| ApiError::ServerError(format!("Unable to build committee cache: {:?}", e)))?; - - // Get a list of all validators for this epoch. - // - // Used for quickly determining the slot for a proposer. - let validator_proposers = if epoch == state.current_epoch() { - Some( - epoch - .slot_iter(T::EthSpec::slots_per_epoch()) - .map(|slot| { - state - .get_beacon_proposer_index(slot, &beacon_chain.spec) - .map(|i| (i, slot)) - .map_err(|e| { - ApiError::ServerError(format!( - "Unable to get proposer index for validator: {:?}", - e - )) - }) - }) - .collect::, _>>()?, - ) - } else { - None - }; - - validator_pubkeys - .into_iter() - .map(|validator_pubkey| { - // The `beacon_chain` can return a validator index that does not exist in all states. - // Therefore, we must check to ensure that the validator index is valid for our - // `state`. - let validator_index = beacon_chain - .validator_index(&validator_pubkey) - .map_err(|e| { - ApiError::ServerError(format!("Unable to get validator index: {:?}", e)) - })? - .filter(|i| *i < state.validators.len()); - - if let Some(validator_index) = validator_index { - let duties = state - .get_attestation_duties(validator_index, relative_epoch) - .map_err(|e| { - ApiError::ServerError(format!( - "Unable to obtain attestation duties: {:?}", - e - )) - })?; - - let committee_count_at_slot = duties - .map(|d| state.get_committee_count_at_slot(d.slot)) - .transpose() - .map_err(|e| { - ApiError::ServerError(format!( - "Unable to find committee count at slot: {:?}", - e - )) - })?; - - let aggregator_modulo = duties - .map(|duties| SelectionProof::modulo(duties.committee_len, &beacon_chain.spec)) - .transpose() - .map_err(|e| { - ApiError::ServerError(format!("Unable to find modulo: {:?}", e)) - })?; - - let block_proposal_slots = validator_proposers.as_ref().map(|proposers| { - proposers - .iter() - .filter(|(i, _slot)| validator_index == *i) - .map(|(_i, slot)| *slot) - .collect() - }); - - Ok(ValidatorDutyBytes { - validator_pubkey, - validator_index: Some(validator_index as u64), - attestation_slot: duties.map(|d| d.slot), - attestation_committee_index: duties.map(|d| d.index), - committee_count_at_slot, - attestation_committee_position: duties.map(|d| d.committee_position), - block_proposal_slots, - aggregator_modulo, - }) - } else { - Ok(ValidatorDutyBytes { - validator_pubkey, - validator_index: None, - attestation_slot: None, - attestation_committee_index: None, - attestation_committee_position: None, - block_proposal_slots: None, - committee_count_at_slot: None, - aggregator_modulo: None, - }) - } - }) - .collect::, ApiError>>() -} - -/// HTTP Handler to produce a new BeaconBlock from the current state, ready to be signed by a validator. -pub fn get_new_beacon_block( - req: Request>, - ctx: Arc>, -) -> Result, ApiError> { - let query = UrlQuery::from_request(&req)?; - - let slot = query.slot()?; - let randao_reveal = query.randao_reveal()?; - - let validator_graffiti = if let Some((_key, value)) = query.first_of_opt(&["graffiti"]) { - Some(parse_hex_ssz_bytes(&value)?) 
- } else { - None - }; - - let (new_block, _state) = ctx - .beacon_chain - .produce_block(randao_reveal, slot, validator_graffiti) - .map_err(|e| { - error!( - ctx.log, - "Error whilst producing block"; - "error" => format!("{:?}", e) - ); - - ApiError::ServerError(format!( - "Beacon node is not able to produce a block: {:?}", - e - )) - })?; - - Ok(new_block) -} - -/// HTTP Handler to publish a SignedBeaconBlock, which has been signed by a validator. -pub fn publish_beacon_block( - req: Request>, - ctx: Arc>, -) -> Result<(), ApiError> { - let body = req.into_body(); - - serde_json::from_slice(&body).map_err(|e| { - ApiError::BadRequest(format!("Unable to parse JSON into SignedBeaconBlock: {:?}", e)) - }) - .and_then(move |block: SignedBeaconBlock| { - let slot = block.slot(); - match ctx.beacon_chain.process_block(block.clone()) { - Ok(block_root) => { - // Block was processed, publish via gossipsub - info!( - ctx.log, - "Block from local validator"; - "block_root" => format!("{}", block_root), - "block_slot" => slot, - ); - - publish_beacon_block_to_network::(&ctx.network_chan, block)?; - - // Run the fork choice algorithm and enshrine a new canonical head, if - // found. - // - // The new head may or may not be the block we just received. - if let Err(e) = ctx.beacon_chain.fork_choice() { - error!( - ctx.log, - "Failed to find beacon chain head"; - "error" => format!("{:?}", e) - ); - } else { - // In the best case, validators should produce blocks that become the - // head. - // - // Potential reasons this may not be the case: - // - // - A quick re-org between block produce and publish. - // - Excessive time between block produce and publish. - // - A validator is using another beacon node to produce blocks and - // submitting them here. - if ctx.beacon_chain.head()?.beacon_block_root != block_root { - warn!( - ctx.log, - "Block from validator is not head"; - "desc" => "potential re-org", - ); - - } - } - - Ok(()) - } - Err(BlockError::BeaconChainError(e)) => { - error!( - ctx.log, - "Error whilst processing block"; - "error" => format!("{:?}", e) - ); - - Err(ApiError::ServerError(format!( - "Error while processing block: {:?}", - e - ))) - } - Err(other) => { - warn!( - ctx.log, - "Invalid block from local validator"; - "outcome" => format!("{:?}", other) - ); - - Err(ApiError::ProcessingError(format!( - "The SignedBeaconBlock could not be processed and has not been published: {:?}", - other - ))) - } - } - }) -} - -/// HTTP Handler to produce a new Attestation from the current state, ready to be signed by a validator. 
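The ordering inside `publish_beacon_block` above matters: the block is imported first, gossiped only if import succeeds, and fork choice runs afterwards, with a non-fatal warning if the new block did not become the head. A toy sketch of that control flow; all types and names here are invented for illustration:

```rust
// Sketch of the publish-flow ordering: import, then gossip, then fork choice,
// then a head sanity check. `Root` stands in for a block root hash.
#[derive(Debug, PartialEq, Clone, Copy)]
struct Root(u64);

fn publish_flow(
    process: impl Fn() -> Result<Root, String>, // import the block locally
    gossip: impl Fn() -> Result<(), String>,    // broadcast via gossipsub
    run_fork_choice: impl Fn() -> Result<Root, String>, // recompute the head
) -> Result<(), String> {
    let block_root = process()?; // invalid blocks are never gossiped
    gossip()?;
    let head = run_fork_choice()?;
    if head != block_root {
        // The new head may legitimately differ, e.g. after a quick re-org.
        eprintln!("warning: block from validator is not head (potential re-org)");
    }
    Ok(())
}

fn main() {
    let ok = publish_flow(|| Ok(Root(1)), || Ok(()), || Ok(Root(1)));
    assert!(ok.is_ok());
}
```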
-pub fn get_new_attestation<T: BeaconChainTypes>(
-    req: Request<Vec<u8>>,
-    ctx: Arc<Context<T>>,
-) -> Result<Attestation<T::EthSpec>, ApiError> {
-    let query = UrlQuery::from_request(&req)?;
-
-    let slot = query.slot()?;
-    let index = query.committee_index()?;
-
-    ctx.beacon_chain
-        .produce_unaggregated_attestation(slot, index)
-        .map_err(|e| ApiError::BadRequest(format!("Unable to produce attestation: {:?}", e)))
-}
-
-/// HTTP Handler to retrieve the aggregate attestation for a slot
-pub fn get_aggregate_attestation<T: BeaconChainTypes>(
-    req: Request<Vec<u8>>,
-    ctx: Arc<Context<T>>,
-) -> Result<Attestation<T::EthSpec>, ApiError> {
-    let query = UrlQuery::from_request(&req)?;
-
-    let attestation_data = query.attestation_data()?;
-
-    match ctx
-        .beacon_chain
-        .get_aggregated_attestation(&attestation_data)
-    {
-        Ok(Some(attestation)) => Ok(attestation),
-        Ok(None) => Err(ApiError::NotFound(format!(
-            "No matching aggregate attestation for slot {:?} is known in slot {:?}",
-            attestation_data.slot,
-            ctx.beacon_chain.slot()
-        ))),
-        Err(e) => Err(ApiError::ServerError(format!(
-            "Unable to obtain attestation: {:?}",
-            e
-        ))),
-    }
-}
-
-/// HTTP Handler to publish a list of Attestations, which have been signed by a number of validators.
-pub fn publish_attestations<T: BeaconChainTypes>(
-    req: Request<Vec<u8>>,
-    ctx: Arc<Context<T>>,
-) -> Result<(), ApiError> {
-    let bytes = req.into_body();
-
-    serde_json::from_slice(&bytes)
-        .map_err(|e| {
-            ApiError::BadRequest(format!(
-                "Unable to deserialize JSON into a list of attestations: {:?}",
-                e
-            ))
-        })
-        // Process all of the aggregates _without_ exiting early if one fails.
-        .map(
-            move |attestations: Vec<(Attestation<T::EthSpec>, SubnetId)>| {
-                attestations
-                    .into_iter()
-                    .enumerate()
-                    .map(|(i, (attestation, subnet_id))| {
-                        process_unaggregated_attestation(
-                            &ctx.beacon_chain,
-                            ctx.network_chan.clone(),
-                            attestation,
-                            subnet_id,
-                            i,
-                            &ctx.log,
-                        )
-                    })
-                    .collect::<Vec<Result<(), ApiError>>>()
-            },
-        )
-        // Iterate through all the results and return on the first `Err`.
-        //
-        // Note: this will only provide info about the _first_ failure, not all failures.
-        .and_then(|processing_results| processing_results.into_iter().try_for_each(|result| result))
-        .map(|_| ())
-}
-
-/// Processes an unaggregated attestation that was included in a list of attestations with the
-/// index `i`.
-#[allow(clippy::redundant_clone)] // false positives in this function.
-fn process_unaggregated_attestation<T: BeaconChainTypes>(
-    beacon_chain: &BeaconChain<T>,
-    network_chan: NetworkChannel<T::EthSpec>,
-    attestation: Attestation<T::EthSpec>,
-    subnet_id: SubnetId,
-    i: usize,
-    log: &Logger,
-) -> Result<(), ApiError> {
-    let data = &attestation.data.clone();
-
-    // Verify that the attestation is valid to be included on the gossip network.
-    let verified_attestation = beacon_chain
-        .verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id)
-        .map_err(|e| {
-            handle_attestation_error(
-                e,
-                &format!("unaggregated attestation {} failed gossip verification", i),
-                data,
-                log,
-            )
-        })?;
-
-    // Publish the attestation to the network
-    if let Err(e) = network_chan.send(NetworkMessage::Publish {
-        messages: vec![PubsubMessage::Attestation(Box::new((
-            subnet_id,
-            attestation,
-        )))],
-    }) {
-        return Err(ApiError::ServerError(format!(
-            "Unable to send unaggregated attestation {} to network: {:?}",
-            i, e
-        )));
-    }
-
-    beacon_chain
-        .apply_attestation_to_fork_choice(&verified_attestation)
-        .map_err(|e| {
-            handle_fork_choice_error(
-                e,
-                &format!(
-                    "unaggregated attestation {} was unable to be added to fork choice",
-                    i
-                ),
-                data,
-                log,
-            )
-        })?;
-
-    beacon_chain
-        .add_to_naive_aggregation_pool(verified_attestation)
-        .map_err(|e| {
-            handle_attestation_error(
-                e,
-                &format!(
-                    "unaggregated attestation {} was unable to be added to aggregation pool",
-                    i
-                ),
-                data,
-                log,
-            )
-        })?;
-
-    Ok(())
-}
-
-/// HTTP Handler to publish an Attestation, which has been signed by a validator.
-pub fn publish_aggregate_and_proofs<T: BeaconChainTypes>(
-    req: Request<Vec<u8>>,
-    ctx: Arc<Context<T>>,
-) -> Result<(), ApiError> {
-    let body = req.into_body();
-
-    serde_json::from_slice(&body)
-        .map_err(|e| {
-            ApiError::BadRequest(format!(
-                "Unable to deserialize JSON into a list of SignedAggregateAndProof: {:?}",
-                e
-            ))
-        })
-        // Process all of the aggregates _without_ exiting early if one fails.
-        .map(
-            move |signed_aggregates: Vec<SignedAggregateAndProof<T::EthSpec>>| {
-                signed_aggregates
-                    .into_iter()
-                    .enumerate()
-                    .map(|(i, signed_aggregate)| {
-                        process_aggregated_attestation(
-                            &ctx.beacon_chain,
-                            ctx.network_chan.clone(),
-                            signed_aggregate,
-                            i,
-                            &ctx.log,
-                        )
-                    })
-                    .collect::<Vec<Result<(), ApiError>>>()
-            },
-        )
-        // Iterate through all the results and return on the first `Err`.
-        //
-        // Note: this will only provide info about the _first_ failure, not all failures.
-        .and_then(|processing_results| processing_results.into_iter().try_for_each(|result| result))
-}
-
-/// Processes an aggregated attestation that was included in a list of attestations with the index
-/// `i`.
-#[allow(clippy::redundant_clone)] // false positives in this function.
-fn process_aggregated_attestation<T: BeaconChainTypes>(
-    beacon_chain: &BeaconChain<T>,
-    network_chan: NetworkChannel<T::EthSpec>,
-    signed_aggregate: SignedAggregateAndProof<T::EthSpec>,
-    i: usize,
-    log: &Logger,
-) -> Result<(), ApiError> {
-    let data = &signed_aggregate.message.aggregate.data.clone();
-
-    // Verify that the attestation is valid to be included on the gossip network.
-    //
-    // Using this gossip check for local validators is not necessarily ideal, there will be some
-    // attestations that we reject that could possibly be included in a block (e.g., attestations
-    // that are late by more than 1 epoch but less than 2). We can pick this back up if we notice
-    // that it's materially affecting validator profits. Until then, I'm hesitant to introduce yet
-    // _another_ attestation verification path.
-    let verified_attestation =
-        match beacon_chain.verify_aggregated_attestation_for_gossip(signed_aggregate.clone()) {
-            Ok(verified_attestation) => verified_attestation,
-            Err(AttnError::AttestationAlreadyKnown(attestation_root)) => {
-                trace!(
-                    log,
-                    "Ignored known attn from local validator";
-                    "attn_root" => format!("{}", attestation_root)
-                );
-
-                // Exit early with success for a known attestation, there's no need to re-process
-                // an aggregate we already know.
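The early exit above, which treats an already-known aggregate as success, is an idempotency pattern worth isolating. In this sketch a `HashSet` stands in for the beacon chain's observed-aggregate tracking; all names are invented for illustration:

```rust
// Sketch: treating re-submission of an already-known aggregate as success.
use std::collections::HashSet;

fn process(seen: &mut HashSet<[u8; 32]>, root: [u8; 32]) -> Result<(), String> {
    if !seen.insert(root) {
        // Already known: no need to re-verify, re-publish, or re-apply it.
        return Ok(());
    }
    // ... verify, publish, apply to fork choice, add to the op pool ...
    Ok(())
}

fn main() {
    let mut seen = HashSet::new();
    let root = [42u8; 32];
    assert!(process(&mut seen, root).is_ok()); // first submission does the work
    assert!(process(&mut seen, root).is_ok()); // duplicate exits early, still Ok
}
```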
- return Ok(()); - } - /* - * It's worth noting that we don't check for `Error::AggregatorAlreadyKnown` since (at - * the time of writing) we check for `AttestationAlreadyKnown` first. - * - * Given this, it's impossible to hit `Error::AggregatorAlreadyKnown` without that - * aggregator having already produced a conflicting aggregation. This is not slashable - * but I think it's still the sort of condition we should error on, at least for now. - */ - Err(e) => { - return Err(handle_attestation_error( - e, - &format!("aggregated attestation {} failed gossip verification", i), - data, - log, - )) - } - }; - - // Publish the attestation to the network - if let Err(e) = network_chan.send(NetworkMessage::Publish { - messages: vec![PubsubMessage::AggregateAndProofAttestation(Box::new( - signed_aggregate, - ))], - }) { - return Err(ApiError::ServerError(format!( - "Unable to send aggregated attestation {} to network: {:?}", - i, e - ))); - } - - beacon_chain - .apply_attestation_to_fork_choice(&verified_attestation) - .map_err(|e| { - handle_fork_choice_error( - e, - &format!( - "aggregated attestation {} was unable to be added to fork choice", - i - ), - data, - log, - ) - })?; - - beacon_chain - .add_to_block_inclusion_pool(verified_attestation) - .map_err(|e| { - handle_attestation_error( - e, - &format!( - "aggregated attestation {} was unable to be added to op pool", - i - ), - data, - log, - ) - })?; - - Ok(()) -} - -/// Common handler for `AttnError` during attestation verification. -fn handle_attestation_error( - e: AttnError, - detail: &str, - data: &AttestationData, - log: &Logger, -) -> ApiError { - match e { - AttnError::BeaconChainError(e) => { - error!( - log, - "Internal error verifying local attestation"; - "detail" => detail, - "error" => format!("{:?}", e), - "target" => data.target.epoch, - "source" => data.source.epoch, - "index" => data.index, - "slot" => data.slot, - ); - - ApiError::ServerError(format!( - "Internal error verifying local attestation. Error: {:?}. Detail: {}", - e, detail - )) - } - e => { - error!( - log, - "Invalid local attestation"; - "detail" => detail, - "reason" => format!("{:?}", e), - "target" => data.target.epoch, - "source" => data.source.epoch, - "index" => data.index, - "slot" => data.slot, - ); - - ApiError::ProcessingError(format!( - "Invalid local attestation. Error: {:?} Detail: {}", - e, detail - )) - } - } -} - -/// Common handler for `ForkChoiceError` during attestation verification. -fn handle_fork_choice_error( - e: BeaconChainError, - detail: &str, - data: &AttestationData, - log: &Logger, -) -> ApiError { - match e { - BeaconChainError::ForkChoiceError(ForkChoiceError::InvalidAttestation(e)) => { - error!( - log, - "Local attestation invalid for fork choice"; - "detail" => detail, - "reason" => format!("{:?}", e), - "target" => data.target.epoch, - "source" => data.source.epoch, - "index" => data.index, - "slot" => data.slot, - ); - - ApiError::ProcessingError(format!( - "Invalid local attestation. Error: {:?} Detail: {}", - e, detail - )) - } - e => { - error!( - log, - "Internal error applying attn to fork choice"; - "detail" => detail, - "error" => format!("{:?}", e), - "target" => data.target.epoch, - "source" => data.source.epoch, - "index" => data.index, - "slot" => data.slot, - ); - - ApiError::ServerError(format!( - "Internal error verifying local attestation. Error: {:?}. 
Detail: {}", - e, detail - )) - } - } -} diff --git a/beacon_node/rest_api/tests/test.rs b/beacon_node/rest_api/tests/test.rs deleted file mode 100644 index 160ee667c..000000000 --- a/beacon_node/rest_api/tests/test.rs +++ /dev/null @@ -1,1345 +0,0 @@ -#![cfg(test)] - -#[macro_use] -extern crate assert_matches; - -use beacon_chain::{BeaconChain, BeaconChainTypes, StateSkipConfig}; -use node_test_rig::{ - environment::{Environment, EnvironmentBuilder}, - testing_client_config, ClientConfig, ClientGenesis, LocalBeaconNode, -}; -use remote_beacon_node::{ - Committee, HeadBeaconBlock, PersistedOperationPool, PublishStatus, ValidatorResponse, -}; -use rest_types::ValidatorDutyBytes; -use std::convert::TryInto; -use std::sync::Arc; -use std::time::{SystemTime, UNIX_EPOCH}; -use types::{ - test_utils::{ - build_double_vote_attester_slashing, build_proposer_slashing, - generate_deterministic_keypair, AttesterSlashingTestTask, ProposerSlashingTestTask, - }, - BeaconBlock, BeaconState, ChainSpec, Domain, Epoch, EthSpec, MinimalEthSpec, PublicKey, - RelativeEpoch, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedRoot, Slot, - SubnetId, Validator, -}; - -type E = MinimalEthSpec; - -fn build_env() -> Environment { - EnvironmentBuilder::minimal() - .null_logger() - .expect("should build env logger") - .single_thread_tokio_runtime() - .expect("should start tokio runtime") - .build() - .expect("environment should build") -} - -fn build_node(env: &mut Environment, config: ClientConfig) -> LocalBeaconNode { - let context = env.core_context(); - env.runtime() - .block_on(LocalBeaconNode::production(context, config)) - .expect("should block until node created") -} - -/// Returns the randao reveal for the given slot (assuming the given `beacon_chain` uses -/// deterministic keypairs). -fn get_randao_reveal( - beacon_chain: Arc>, - slot: Slot, - spec: &ChainSpec, -) -> Signature { - let head = beacon_chain.head().expect("should get head"); - let fork = head.beacon_state.fork; - let genesis_validators_root = head.beacon_state.genesis_validators_root; - let proposer_index = beacon_chain - .block_proposer(slot) - .expect("should get proposer index"); - let keypair = generate_deterministic_keypair(proposer_index); - let epoch = slot.epoch(E::slots_per_epoch()); - let domain = spec.get_domain(epoch, Domain::Randao, &fork, genesis_validators_root); - let message = epoch.signing_root(domain); - keypair.sk.sign(message) -} - -/// Signs the given block (assuming the given `beacon_chain` uses deterministic keypairs). 
-fn sign_block<T: BeaconChainTypes>(
-    beacon_chain: Arc<BeaconChain<T>>,
-    block: BeaconBlock<T::EthSpec>,
-    spec: &ChainSpec,
-) -> SignedBeaconBlock<T::EthSpec> {
-    let head = beacon_chain.head().expect("should get head");
-    let fork = head.beacon_state.fork;
-    let genesis_validators_root = head.beacon_state.genesis_validators_root;
-    let proposer_index = beacon_chain
-        .block_proposer(block.slot)
-        .expect("should get proposer index");
-    let keypair = generate_deterministic_keypair(proposer_index);
-    block.sign(&keypair.sk, &fork, genesis_validators_root, spec)
-}
-
-#[test]
-fn validator_produce_attestation() {
-    let mut env = build_env();
-
-    let spec = &E::default_spec();
-
-    let node = build_node(&mut env, testing_client_config());
-    let remote_node = node.remote_node().expect("should produce remote node");
-
-    let beacon_chain = node
-        .client
-        .beacon_chain()
-        .expect("client should have beacon chain");
-    let genesis_validators_root = beacon_chain.genesis_validators_root;
-    let state = beacon_chain.head().expect("should get head").beacon_state;
-
-    // Find a validator that has duties in the current slot of the chain.
-    let mut validator_index = 0;
-    let duties = loop {
-        let duties = state
-            .get_attestation_duties(validator_index, RelativeEpoch::Current)
-            .expect("should have attestation duties cache")
-            .expect("should have attestation duties");
-
-        if duties.slot == node.client.beacon_chain().unwrap().slot().unwrap() {
-            break duties;
-        } else {
-            validator_index += 1
-        }
-    };
-
-    let mut attestation = env
-        .runtime()
-        .block_on(
-            remote_node
-                .http
-                .validator()
-                .produce_attestation(duties.slot, duties.index),
-        )
-        .expect("should fetch attestation from http api");
-
-    assert_eq!(
-        attestation.data.index, duties.index,
-        "should have same index"
-    );
-    assert_eq!(attestation.data.slot, duties.slot, "should have same slot");
-    assert_eq!(
-        attestation.aggregation_bits.num_set_bits(),
-        0,
-        "should have empty aggregation bits"
-    );
-
-    let keypair = generate_deterministic_keypair(validator_index);
-
-    // Fetch the duties again, but via HTTP for authenticity.
-    let duties = env
-        .runtime()
-        .block_on(remote_node.http.validator().get_duties(
-            attestation.data.slot.epoch(E::slots_per_epoch()),
-            &[keypair.pk.clone()],
-        ))
-        .expect("should fetch duties from http api");
-    let duties = &duties[0];
-    let committee_count = duties
-        .committee_count_at_slot
-        .expect("should have committee count");
-    let subnet_id = SubnetId::compute_subnet::<E>(
-        attestation.data.slot,
-        attestation.data.index,
-        committee_count,
-        spec,
-    )
-    .unwrap();
-    // Try publishing the attestation without a signature or a committee bit set, ensure it
-    // raises an error.
-    let publish_status = env
-        .runtime()
-        .block_on(
-            remote_node
-                .http
-                .validator()
-                .publish_attestations(vec![(attestation.clone(), subnet_id)]),
-        )
-        .expect("should publish unsigned attestation");
-    assert!(
-        !publish_status.is_valid(),
-        "the unsigned published attestation should be invalid"
-    );
-
-    // Set the aggregation bit.
-    attestation
-        .aggregation_bits
-        .set(
-            duties
-                .attestation_committee_position
-                .expect("should have committee position"),
-            true,
-        )
-        .expect("should set attestation bit");
-
-    // Try publishing with an aggregation bit set, but an invalid signature.
- let publish_status = env - .runtime() - .block_on( - remote_node - .http - .validator() - .publish_attestations(vec![(attestation.clone(), subnet_id)]), - ) - .expect("should publish attestation with invalid signature"); - assert!( - !publish_status.is_valid(), - "the unsigned published attestation should not be valid" - ); - - // Un-set the aggregation bit, so signing doesn't error. - attestation - .aggregation_bits - .set( - duties - .attestation_committee_position - .expect("should have committee position"), - false, - ) - .expect("should un-set attestation bit"); - - attestation - .sign( - &keypair.sk, - duties - .attestation_committee_position - .expect("should have committee position"), - &state.fork, - state.genesis_validators_root, - spec, - ) - .expect("should sign attestation"); - - // Try publishing the valid attestation. - let publish_status = env - .runtime() - .block_on( - remote_node - .http - .validator() - .publish_attestations(vec![(attestation.clone(), subnet_id)]), - ) - .expect("should publish attestation"); - assert!( - publish_status.is_valid(), - "the signed published attestation should be valid" - ); - - // Try obtaining an aggregated attestation with a matching attestation data to the previous - // one. - let aggregated_attestation = env - .runtime() - .block_on( - remote_node - .http - .validator() - .produce_aggregate_attestation(&attestation.data), - ) - .expect("should fetch aggregated attestation from http api"); - - let signed_aggregate_and_proof = SignedAggregateAndProof::from_aggregate( - validator_index as u64, - aggregated_attestation, - None, - &keypair.sk, - &state.fork, - genesis_validators_root, - spec, - ); - - // Publish the signed aggregate. - let publish_status = env - .runtime() - .block_on( - remote_node - .http - .validator() - .publish_aggregate_and_proof(vec![signed_aggregate_and_proof]), - ) - .expect("should publish aggregate and proof"); - assert!( - publish_status.is_valid(), - "the signed aggregate and proof should be valid" - ); -} - -#[test] -fn validator_duties() { - let mut env = build_env(); - - let spec = &E::default_spec(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let beacon_chain = node - .client - .beacon_chain() - .expect("client should have beacon chain"); - - let mut epoch = Epoch::new(0); - - let validators = beacon_chain - .head() - .expect("should get head") - .beacon_state - .validators - .iter() - .map(|v| (&v.pubkey).try_into().expect("pubkey should be valid")) - .collect::>(); - - let duties = env - .runtime() - .block_on(remote_node.http.validator().get_duties(epoch, &validators)) - .expect("should fetch duties from http api"); - - // 1. Check at the current epoch. - check_duties( - duties, - epoch, - validators.clone(), - beacon_chain.clone(), - spec, - ); - - epoch += 4; - let duties = env - .runtime() - .block_on(remote_node.http.validator().get_duties(epoch, &validators)) - .expect("should fetch duties from http api"); - - // 2. Check with a long skip forward. - check_duties(duties, epoch, validators, beacon_chain, spec); - - // TODO: test an epoch in the past. Blocked because the `LocalBeaconNode` cannot produce a - // chain, yet. 
-} - -fn check_duties( - duties: Vec, - epoch: Epoch, - validators: Vec, - beacon_chain: Arc>, - spec: &ChainSpec, -) { - assert_eq!( - validators.len(), - duties.len(), - "there should be a duty for each validator" - ); - - // Are the duties from the current epoch of the beacon chain, and thus are proposer indices - // known? - let proposers_known = epoch == beacon_chain.epoch().unwrap(); - - let mut state = beacon_chain - .state_at_slot( - epoch.start_slot(T::EthSpec::slots_per_epoch()), - StateSkipConfig::WithStateRoots, - ) - .expect("should get state at slot"); - - state.build_all_caches(spec).expect("should build caches"); - - validators - .iter() - .zip(duties.iter()) - .for_each(|(validator, duty)| { - assert_eq!( - *validator, - (&duty.validator_pubkey) - .try_into() - .expect("should be valid pubkey"), - "pubkey should match" - ); - - let validator_index = state - .get_validator_index(&validator.clone().into()) - .expect("should have pubkey cache") - .expect("pubkey should exist"); - - let attestation_duty = state - .get_attestation_duties(validator_index, RelativeEpoch::Current) - .expect("should have attestation duties cache") - .expect("should have attestation duties"); - - assert_eq!( - Some(attestation_duty.slot), - duty.attestation_slot, - "attestation slot should match" - ); - - assert_eq!( - Some(attestation_duty.index), - duty.attestation_committee_index, - "attestation index should match" - ); - - if proposers_known { - let block_proposal_slots = duty.block_proposal_slots.as_ref().unwrap(); - - if !block_proposal_slots.is_empty() { - for slot in block_proposal_slots { - let expected_proposer = state - .get_beacon_proposer_index(*slot, spec) - .expect("should know proposer"); - assert_eq!( - expected_proposer, validator_index, - "should get correct proposal slot" - ); - } - } else { - epoch.slot_iter(E::slots_per_epoch()).for_each(|slot| { - let slot_proposer = state - .get_beacon_proposer_index(slot, spec) - .expect("should know proposer"); - assert_ne!( - slot_proposer, validator_index, - "validator should not have proposal slot in this epoch" - ) - }) - } - } else { - assert_eq!(duty.block_proposal_slots, None); - } - }); - - if proposers_known { - // Validator duties should include a proposer for every slot of the epoch. 
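The coverage assertion that follows sorts every duty's proposal slots and compares them against the epoch's full slot range, so each slot must appear exactly once. As a standalone sketch with integer slots:

```rust
// Standalone version of the coverage check performed just below: every slot
// in the epoch must appear exactly once across all duties' proposal slots.
fn covers_epoch(mut proposal_slots: Vec<u64>, epoch_start: u64, slots_per_epoch: u64) -> bool {
    proposal_slots.sort_unstable();
    let all_slots: Vec<u64> = (epoch_start..epoch_start + slots_per_epoch).collect();
    proposal_slots == all_slots
}

fn main() {
    assert!(covers_epoch(vec![2, 0, 1, 3], 0, 4));
    assert!(!covers_epoch(vec![0, 1, 1, 3], 0, 4)); // duplicate proposer slot
    assert!(!covers_epoch(vec![0, 1, 2], 0, 4)); // missing slot
}
```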
-        let mut all_proposer_slots: Vec<Slot> = duties
-            .iter()
-            .flat_map(|duty| duty.block_proposal_slots.clone().unwrap())
-            .collect();
-        all_proposer_slots.sort();
-
-        let all_slots: Vec<Slot> = epoch.slot_iter(E::slots_per_epoch()).collect();
-        assert_eq!(all_proposer_slots, all_slots);
-    }
-}
-
-#[test]
-fn validator_block_post() {
-    let mut env = build_env();
-
-    let spec = &E::default_spec();
-
-    let two_slots_secs = (spec.milliseconds_per_slot / 1_000) * 2;
-
-    let mut config = testing_client_config();
-    config.genesis = ClientGenesis::Interop {
-        validator_count: 8,
-        genesis_time: SystemTime::now()
-            .duration_since(UNIX_EPOCH)
-            .unwrap()
-            .as_secs()
-            - two_slots_secs,
-    };
-
-    let node = build_node(&mut env, config);
-    let remote_node = node.remote_node().expect("should produce remote node");
-
-    let beacon_chain = node
-        .client
-        .beacon_chain()
-        .expect("client should have beacon chain");
-
-    let slot = Slot::new(1);
-    let randao_reveal = get_randao_reveal(beacon_chain.clone(), slot, spec);
-
-    let block = env
-        .runtime()
-        .block_on(
-            remote_node
-                .http
-                .validator()
-                .produce_block(slot, randao_reveal, None),
-        )
-        .expect("should fetch block from http api");
-
-    // Try publishing the block without a signature, ensure it is flagged as invalid.
-    let empty_sig_block = SignedBeaconBlock {
-        message: block.clone(),
-        signature: Signature::empty(),
-    };
-    let publish_status = env
-        .runtime()
-        .block_on(remote_node.http.validator().publish_block(empty_sig_block))
-        .expect("should publish block");
-    if cfg!(not(feature = "fake_crypto")) {
-        assert!(
-            !publish_status.is_valid(),
-            "the unsigned published block should not be valid"
-        );
-    }
-
-    let signed_block = sign_block(beacon_chain.clone(), block, spec);
-    let block_root = signed_block.canonical_root();
-
-    let publish_status = env
-        .runtime()
-        .block_on(remote_node.http.validator().publish_block(signed_block))
-        .expect("should publish block");
-
-    if cfg!(not(feature = "fake_crypto")) {
-        assert_eq!(
-            publish_status,
-            PublishStatus::Valid,
-            "the signed published block should be valid"
-        );
-    }
-
-    let head = env
-        .runtime()
-        .block_on(remote_node.http.beacon().get_head())
-        .expect("should get head");
-
-    assert_eq!(
-        head.block_root, block_root,
-        "the published block should become the head block"
-    );
-
-    // Note: this heads check is not super useful for this test, however it is included so it
-    // gets _some_ testing. If you remove this call, make sure it's tested somewhere else.
- let heads = env - .runtime() - .block_on(remote_node.http.beacon().get_heads()) - .expect("should get heads"); - - assert_eq!(heads.len(), 1, "there should be only one head"); - assert_eq!( - heads, - vec![HeadBeaconBlock { - beacon_block_root: head.block_root, - beacon_block_slot: head.slot, - }], - "there should be only one head" - ); -} - -#[test] -fn validator_block_get() { - let mut env = build_env(); - - let spec = &E::default_spec(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let beacon_chain = node - .client - .beacon_chain() - .expect("client should have beacon chain"); - - let slot = Slot::new(1); - let randao_reveal = get_randao_reveal(beacon_chain, slot, spec); - - let block = env - .runtime() - .block_on( - remote_node - .http - .validator() - .produce_block(slot, randao_reveal.clone(), None), - ) - .expect("should fetch block from http api"); - - let (expected_block, _state) = node - .client - .beacon_chain() - .expect("client should have beacon chain") - .produce_block(randao_reveal, slot, None) - .expect("should produce block"); - - assert_eq!( - block, expected_block, - "the block returned from the API should be as expected" - ); -} - -#[test] -fn validator_block_get_with_graffiti() { - let mut env = build_env(); - - let spec = &E::default_spec(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let beacon_chain = node - .client - .beacon_chain() - .expect("client should have beacon chain"); - - let slot = Slot::new(1); - let randao_reveal = get_randao_reveal(beacon_chain, slot, spec); - - let block = env - .runtime() - .block_on(remote_node.http.validator().produce_block( - slot, - randao_reveal.clone(), - Some(*b"test-graffiti-test-graffiti-test"), - )) - .expect("should fetch block from http api"); - - let (expected_block, _state) = node - .client - .beacon_chain() - .expect("client should have beacon chain") - .produce_block( - randao_reveal, - slot, - Some(*b"test-graffiti-test-graffiti-test"), - ) - .expect("should produce block"); - - assert_eq!( - block, expected_block, - "the block returned from the API should be as expected" - ); -} - -#[test] -fn beacon_state() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let (state_by_slot, root) = env - .runtime() - .block_on(remote_node.http.beacon().get_state_by_slot(Slot::new(0))) - .expect("should fetch state from http api"); - - let (state_by_root, root_2) = env - .runtime() - .block_on(remote_node.http.beacon().get_state_by_root(root)) - .expect("should fetch state from http api"); - - let mut db_state = node - .client - .beacon_chain() - .expect("client should have beacon chain") - .state_at_slot(Slot::new(0), StateSkipConfig::WithStateRoots) - .expect("should find state"); - db_state.drop_all_caches(); - - assert_eq!( - root, root_2, - "the two roots returned from the api should be identical" - ); - assert_eq!( - root, - db_state.canonical_root(), - "root from database should match that from the API" - ); - assert_eq!( - state_by_slot, db_state, - "genesis state by slot from api should match that from the DB" - ); - assert_eq!( - state_by_root, db_state, - "genesis state by root from api should match that from the DB" - ); -} - -#[test] -fn beacon_block() { - let mut env = build_env(); - - let node = 
build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let (block_by_slot, root) = env - .runtime() - .block_on(remote_node.http.beacon().get_block_by_slot(Slot::new(0))) - .expect("should fetch block from http api"); - - let (block_by_root, root_2) = env - .runtime() - .block_on(remote_node.http.beacon().get_block_by_root(root)) - .expect("should fetch block from http api"); - - let db_block = node - .client - .beacon_chain() - .expect("client should have beacon chain") - .block_at_slot(Slot::new(0)) - .expect("should find block") - .expect("block should not be none"); - - assert_eq!( - root, root_2, - "the two roots returned from the api should be identical" - ); - assert_eq!( - root, - db_block.canonical_root(), - "root from database should match that from the API" - ); - assert_eq!( - block_by_slot, db_block, - "genesis block by slot from api should match that from the DB" - ); - assert_eq!( - block_by_root, db_block, - "genesis block by root from api should match that from the DB" - ); -} - -#[test] -fn genesis_time() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let genesis_time = env - .runtime() - .block_on(remote_node.http.beacon().get_genesis_time()) - .expect("should fetch genesis time from http api"); - - assert_eq!( - node.client - .beacon_chain() - .expect("should have beacon chain") - .head() - .expect("should get head") - .beacon_state - .genesis_time, - genesis_time, - "should match genesis time from head state" - ); -} - -#[test] -fn genesis_validators_root() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let genesis_validators_root = env - .runtime() - .block_on(remote_node.http.beacon().get_genesis_validators_root()) - .expect("should fetch genesis time from http api"); - - assert_eq!( - node.client - .beacon_chain() - .expect("should have beacon chain") - .head() - .expect("should get head") - .beacon_state - .genesis_validators_root, - genesis_validators_root, - "should match genesis time from head state" - ); -} - -#[test] -fn fork() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let fork = env - .runtime() - .block_on(remote_node.http.beacon().get_fork()) - .expect("should fetch from http api"); - - assert_eq!( - node.client - .beacon_chain() - .expect("should have beacon chain") - .head() - .expect("should get head") - .beacon_state - .fork, - fork, - "should match head state" - ); -} - -#[test] -fn eth2_config() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let eth2_config = env - .runtime() - .block_on(remote_node.http.spec().get_eth2_config()) - .expect("should fetch eth2 config from http api"); - - // TODO: check the entire eth2_config, not just the spec. 
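The read-only endpoint tests above all share one shape: block on a remote HTTP getter, compute the same value from the local chain, and assert equality. A generic sketch of that shape, with names invented for illustration:

```rust
// Generic shape of the deleted read-only API tests: fetch a value remotely,
// compute the expected value locally, and assert that the two agree.
fn check_endpoint<V: PartialEq + std::fmt::Debug>(
    fetch_remote: impl Fn() -> V,
    expected_local: impl Fn() -> V,
    what: &str,
) {
    assert_eq!(fetch_remote(), expected_local(), "{} should match", what);
}

fn main() {
    // A trivial stand-in; real tests would fetch via the HTTP client and
    // read the expected value from the local beacon chain.
    check_endpoint(|| 1_600_000_000u64, || 1_600_000_000u64, "genesis time");
}
```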
- - assert_eq!( - node.client - .beacon_chain() - .expect("should have beacon chain") - .spec, - eth2_config.spec, - "should match genesis time from head state" - ); -} - -#[test] -fn get_version() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let version = env - .runtime() - .block_on(remote_node.http.node().get_version()) - .expect("should fetch version from http api"); - - assert_eq!( - lighthouse_version::version_with_platform(), - version, - "result should be as expected" - ); -} - -#[test] -fn get_genesis_state_root() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let slot = Slot::new(0); - - let result = env - .runtime() - .block_on(remote_node.http.beacon().get_state_root(slot)) - .expect("should fetch from http api"); - - let expected = node - .client - .beacon_chain() - .expect("should have beacon chain") - .rev_iter_state_roots() - .expect("should get iter") - .map(Result::unwrap) - .find(|(_cur_root, cur_slot)| slot == *cur_slot) - .map(|(cur_root, _)| cur_root) - .expect("chain should have state root at slot"); - - assert_eq!(result, expected, "result should be as expected"); -} - -#[test] -fn get_genesis_block_root() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let slot = Slot::new(0); - - let result = env - .runtime() - .block_on(remote_node.http.beacon().get_block_root(slot)) - .expect("should fetch from http api"); - - let expected = node - .client - .beacon_chain() - .expect("should have beacon chain") - .rev_iter_block_roots() - .expect("should get iter") - .map(Result::unwrap) - .find(|(_cur_root, cur_slot)| slot == *cur_slot) - .map(|(cur_root, _)| cur_root) - .expect("chain should have state root at slot"); - - assert_eq!(result, expected, "result should be as expected"); -} - -#[test] -fn get_validators() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - let chain = node - .client - .beacon_chain() - .expect("node should have beacon chain"); - let state = &chain.head().expect("should get head").beacon_state; - - let validators = state.validators.iter().take(2).collect::>(); - let pubkeys = validators - .iter() - .map(|v| (&v.pubkey).try_into().expect("should decode pubkey bytes")) - .collect(); - - let result = env - .runtime() - .block_on(remote_node.http.beacon().get_validators(pubkeys, None)) - .expect("should fetch from http api"); - - result - .iter() - .zip(validators.iter()) - .for_each(|(response, validator)| compare_validator_response(state, response, validator)); -} - -#[test] -fn get_all_validators() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - let chain = node - .client - .beacon_chain() - .expect("node should have beacon chain"); - let state = &chain.head().expect("should get head").beacon_state; - - let result = env - .runtime() - .block_on(remote_node.http.beacon().get_all_validators(None)) - .expect("should fetch from http api"); - - result - .iter() - .zip(state.validators.iter()) - .for_each(|(response, validator)| 
compare_validator_response(state, response, validator)); -} - -#[test] -fn get_active_validators() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - let chain = node - .client - .beacon_chain() - .expect("node should have beacon chain"); - let state = &chain.head().expect("should get head").beacon_state; - - let result = env - .runtime() - .block_on(remote_node.http.beacon().get_active_validators(None)) - .expect("should fetch from http api"); - - /* - * This test isn't comprehensive because all of the validators in the state are active (i.e., - * there is no one to exclude. - * - * This should be fixed once we can generate more interesting scenarios with the - * `NodeTestRig`. - */ - - let validators = state - .validators - .iter() - .filter(|validator| validator.is_active_at(state.current_epoch())); - - result - .iter() - .zip(validators) - .for_each(|(response, validator)| compare_validator_response(state, response, validator)); -} - -#[test] -fn get_committees() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - let chain = node - .client - .beacon_chain() - .expect("node should have beacon chain"); - - let epoch = Epoch::new(0); - - let result = env - .runtime() - .block_on(remote_node.http.beacon().get_committees(epoch)) - .expect("should fetch from http api"); - - let expected = chain - .head() - .expect("should get head") - .beacon_state - .get_beacon_committees_at_epoch(RelativeEpoch::Current) - .expect("should get committees") - .iter() - .map(|c| Committee { - slot: c.slot, - index: c.index, - committee: c.committee.to_vec(), - }) - .collect::>(); - - assert_eq!(result, expected, "result should be as expected"); -} - -#[test] -fn get_fork_choice() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let fork_choice = env - .runtime() - .block_on(remote_node.http.advanced().get_fork_choice()) - .expect("should not error when getting fork choice"); - - assert_eq!( - fork_choice, - *node - .client - .beacon_chain() - .expect("node should have beacon chain") - .fork_choice - .read() - .proto_array() - .core_proto_array(), - "result should be as expected" - ); -} - -#[test] -fn get_operation_pool() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - let result = env - .runtime() - .block_on(remote_node.http.advanced().get_operation_pool()) - .expect("should not error when getting fork choice"); - - let expected = PersistedOperationPool::from_operation_pool( - &node - .client - .beacon_chain() - .expect("node should have chain") - .op_pool, - ); - - assert_eq!(result, expected, "result should be as expected"); -} - -fn compare_validator_response( - state: &BeaconState, - response: &ValidatorResponse, - validator: &Validator, -) { - let response_validator = response.validator.clone().expect("should have validator"); - let i = response - .validator_index - .expect("should have validator index"); - let balance = response.balance.expect("should have balance"); - - assert_eq!(response.pubkey, validator.pubkey, "pubkey"); - assert_eq!(response_validator, *validator, "validator"); - assert_eq!(state.balances[i], 
balance, "balances"); - assert_eq!(state.validators[i], *validator, "validator index"); -} - -#[test] -fn proposer_slashing() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - let chain = node - .client - .beacon_chain() - .expect("node should have beacon chain"); - - let state = chain - .head() - .expect("should have retrieved state") - .beacon_state; - - let spec = &chain.spec; - - // Check that there are no proposer slashings before insertion - let (proposer_slashings, _attester_slashings) = chain.op_pool.get_slashings(&state); - assert_eq!(proposer_slashings.len(), 0); - - let slot = state.slot; - let proposer_index = chain - .block_proposer(slot) - .expect("should get proposer index"); - let keypair = generate_deterministic_keypair(proposer_index); - let key = &keypair.sk; - let fork = &state.fork; - let proposer_slashing = build_proposer_slashing::( - ProposerSlashingTestTask::Valid, - proposer_index as u64, - &key, - fork, - state.genesis_validators_root, - spec, - ); - - let result = env - .runtime() - .block_on( - remote_node - .http - .beacon() - .proposer_slashing(proposer_slashing.clone()), - ) - .expect("should fetch from http api"); - assert!(result, true); - - // Length should be just one as we've inserted only one proposer slashing - let (proposer_slashings, _attester_slashings) = chain.op_pool.get_slashings(&state); - assert_eq!(proposer_slashings.len(), 1); - assert_eq!(proposer_slashing.clone(), proposer_slashings[0]); - - let mut invalid_proposer_slashing = build_proposer_slashing::( - ProposerSlashingTestTask::Valid, - proposer_index as u64, - &key, - fork, - state.genesis_validators_root, - spec, - ); - invalid_proposer_slashing.signed_header_2 = invalid_proposer_slashing.signed_header_1.clone(); - - let result = env.runtime().block_on( - remote_node - .http - .beacon() - .proposer_slashing(invalid_proposer_slashing), - ); - assert!(result.is_err()); - - // Length should still be one as we've inserted nothing since last time. 
- let (proposer_slashings, _attester_slashings) = chain.op_pool.get_slashings(&state); - assert_eq!(proposer_slashings.len(), 1); - assert_eq!(proposer_slashing, proposer_slashings[0]); -} - -#[test] -fn attester_slashing() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - let chain = node - .client - .beacon_chain() - .expect("node should have beacon chain"); - - let state = chain - .head() - .expect("should have retrieved state") - .beacon_state; - let slot = state.slot; - let spec = &chain.spec; - - let proposer_index = chain - .block_proposer(slot) - .expect("should get proposer index"); - let keypair = generate_deterministic_keypair(proposer_index); - - let secret_keys = vec![&keypair.sk]; - let validator_indices = vec![proposer_index as u64]; - let fork = &state.fork; - - // Checking there are no attester slashings before insertion - let (_proposer_slashings, attester_slashings) = chain.op_pool.get_slashings(&state); - assert_eq!(attester_slashings.len(), 0); - - let attester_slashing = build_double_vote_attester_slashing( - AttesterSlashingTestTask::Valid, - &validator_indices[..], - &secret_keys[..], - fork, - state.genesis_validators_root, - spec, - ); - - let result = env - .runtime() - .block_on( - remote_node - .http - .beacon() - .attester_slashing(attester_slashing.clone()), - ) - .expect("should fetch from http api"); - assert!(result, true); - - // Length should be just one as we've inserted only one attester slashing - let (_proposer_slashings, attester_slashings) = chain.op_pool.get_slashings(&state); - assert_eq!(attester_slashings.len(), 1); - assert_eq!(attester_slashing, attester_slashings[0]); - - // Building an invalid attester slashing - let mut invalid_attester_slashing = build_double_vote_attester_slashing( - AttesterSlashingTestTask::Valid, - &validator_indices[..], - &secret_keys[..], - fork, - state.genesis_validators_root, - spec, - ); - invalid_attester_slashing.attestation_2 = invalid_attester_slashing.attestation_1.clone(); - - let result = env.runtime().block_on( - remote_node - .http - .beacon() - .attester_slashing(invalid_attester_slashing), - ); - result.unwrap_err(); - - // Length should still be one as we've failed to insert the attester slashing. - let (_proposer_slashings, attester_slashings) = chain.op_pool.get_slashings(&state); - assert_eq!(attester_slashings.len(), 1); - assert_eq!(attester_slashing, attester_slashings[0]); -} - -mod validator_attestation { - use super::*; - use http::StatusCode; - use node_test_rig::environment::Environment; - use remote_beacon_node::{Error::DidNotSucceed, HttpClient}; - use types::{Attestation, AttestationDuty, MinimalEthSpec}; - use url::Url; - - fn setup() -> ( - Environment, - LocalBeaconNode, - HttpClient, - Url, - AttestationDuty, - ) { - let mut env = build_env(); - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - let client = remote_node.http.clone(); - let socket_addr = node - .client - .http_listen_addr() - .expect("A remote beacon node must have a http server"); - let url = Url::parse(&format!( - "http://{}:{}/validator/attestation", - socket_addr.ip(), - socket_addr.port() - )) - .expect("should be valid endpoint"); - - // Find a validator that has duties in the current slot of the chain. 
- let mut validator_index = 0; - let beacon_chain = node - .client - .beacon_chain() - .expect("client should have beacon chain"); - let state = beacon_chain.head().expect("should get head").beacon_state; - let duties = loop { - let duties = state - .get_attestation_duties(validator_index, RelativeEpoch::Current) - .expect("should have attestation duties cache") - .expect("should have attestation duties"); - - if duties.slot == node.client.beacon_chain().unwrap().slot().unwrap() { - break duties; - } else { - validator_index += 1 - } - }; - - (env, node, client, url, duties) - } - - #[test] - fn requires_query_parameters() { - let (mut env, _node, client, url, _duties) = setup(); - - let attestation = env.runtime().block_on( - // query parameters are missing - client.json_get::>(url.clone(), vec![]), - ); - - assert_matches!( - attestation.expect_err("should not succeed"), - DidNotSucceed { status, body } => { - assert_eq!(status, StatusCode::BAD_REQUEST); - assert_eq!(body, "URL query must be valid and contain at least one of the following keys: [\"slot\"]".to_owned()); - } - ); - } - - #[test] - fn requires_slot() { - let (mut env, _node, client, url, duties) = setup(); - - let attestation = env.runtime().block_on( - // `slot` is missing - client.json_get::>( - url.clone(), - vec![("committee_index".into(), format!("{}", duties.index))], - ), - ); - - assert_matches!( - attestation.expect_err("should not succeed"), - DidNotSucceed { status, body } => { - assert_eq!(status, StatusCode::BAD_REQUEST); - assert_eq!(body, "URL query must be valid and contain at least one of the following keys: [\"slot\"]".to_owned()); - } - ); - } - - #[test] - fn requires_committee_index() { - let (mut env, _node, client, url, duties) = setup(); - - let attestation = env.runtime().block_on( - // `committee_index` is missing. - client.json_get::>( - url.clone(), - vec![("slot".into(), format!("{}", duties.slot))], - ), - ); - - assert_matches!( - attestation.expect_err("should not succeed"), - DidNotSucceed { status, body } => { - assert_eq!(status, StatusCode::BAD_REQUEST); - assert_eq!(body, "URL query must be valid and contain at least one of the following keys: [\"committee_index\"]".to_owned()); - } - ); - } -} - -#[cfg(target_os = "linux")] -#[test] -fn get_health() { - let mut env = build_env(); - - let node = build_node(&mut env, testing_client_config()); - let remote_node = node.remote_node().expect("should produce remote node"); - - env.runtime() - .block_on(remote_node.http.node().get_health()) - .unwrap(); -} diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index aabafe40e..cb7ea121d 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -148,7 +148,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("http") .long("http") - .help("Enable RESTful HTTP API server. Disabled by default.") + .help("Enable the RESTful HTTP API server. Disabled by default.") .takes_value(false), ) .arg( @@ -175,6 +175,38 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("") .takes_value(true), ) + /* Prometheus metrics HTTP server related arguments */ + .arg( + Arg::with_name("metrics") + .long("metrics") + .help("Enable the Prometheus metrics HTTP server. 
Disabled by default.") + .takes_value(false), + ) + .arg( + Arg::with_name("metrics-address") + .long("metrics-address") + .value_name("ADDRESS") + .help("Set the listen address for the Prometheus metrics HTTP server.") + .default_value("127.0.0.1") + .takes_value(true), + ) + .arg( + Arg::with_name("metrics-port") + .long("metrics-port") + .value_name("PORT") + .help("Set the listen TCP port for the Prometheus metrics HTTP server.") + .default_value("5054") + .takes_value(true), + ) + .arg( + Arg::with_name("metrics-allow-origin") + .long("metrics-allow-origin") + .value_name("ORIGIN") + .help("Set the value of the Access-Control-Allow-Origin response HTTP header for the Prometheus metrics HTTP server. \ + Use * to allow any origin (not recommended in production)") + .default_value("") + .takes_value(true), + ) /* Websocket related arguments */ .arg( Arg::with_name("ws") @@ -241,7 +273,8 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .long("slots-per-restore-point") .value_name("SLOT_COUNT") .help("Specifies how often a freezer DB restore point should be stored. \ - DO NOT DECREASE AFTER INITIALIZATION. [default: 2048 (mainnet) or 64 (minimal)]") + Cannot be changed after initialization. \ + [default: 2048 (mainnet) or 64 (minimal)]") .takes_value(true) ) .arg( diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 46a96f1af..070c99734 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1,7 +1,8 @@ use beacon_chain::builder::PUBKEY_CACHE_FILENAME; use clap::ArgMatches; use clap_utils::BAD_TESTNET_DIR_MESSAGE; -use client::{config::DEFAULT_DATADIR, ClientConfig, ClientGenesis}; +use client::{ClientConfig, ClientGenesis}; +use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR}; use eth2_libp2p::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized}; use eth2_testnet_config::Eth2TestnetConfig; use slog::{crit, info, warn, Logger}; @@ -13,9 +14,6 @@ use std::net::{TcpListener, UdpSocket}; use std::path::PathBuf; use types::{ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, GRAFFITI_BYTES_LEN}; -pub const BEACON_NODE_DIR: &str = "beacon"; -pub const NETWORK_DIR: &str = "network"; - /// Gets the fully-initialized global client. /// /// The top-level `clap` arguments should be provided as `cli_args`. 
@@ -89,26 +87,26 @@ pub fn get_config<E: EthSpec>( */ if cli_args.is_present("staking") { - client_config.rest_api.enabled = true; + client_config.http_api.enabled = true; client_config.sync_eth1_chain = true; } /* - * Http server + * Http API server */ if cli_args.is_present("http") { - client_config.rest_api.enabled = true; + client_config.http_api.enabled = true; } if let Some(address) = cli_args.value_of("http-address") { - client_config.rest_api.listen_address = address + client_config.http_api.listen_addr = address .parse::<Ipv4Addr>() .map_err(|_| "http-address is not a valid IPv4 address.")?; } if let Some(port) = cli_args.value_of("http-port") { - client_config.rest_api.port = port + client_config.http_api.listen_port = port .parse::<u16>() .map_err(|_| "http-port is not a valid u16.")?; } @@ -119,7 +117,36 @@ pub fn get_config<E: EthSpec>( hyper::header::HeaderValue::from_str(allow_origin) .map_err(|_| "Invalid allow-origin value")?; - client_config.rest_api.allow_origin = allow_origin.to_string(); + client_config.http_api.allow_origin = Some(allow_origin.to_string()); + } + + /* + * Prometheus metrics HTTP server + */ + + if cli_args.is_present("metrics") { + client_config.http_metrics.enabled = true; + } + + if let Some(address) = cli_args.value_of("metrics-address") { + client_config.http_metrics.listen_addr = address + .parse::<Ipv4Addr>() + .map_err(|_| "metrics-address is not a valid IPv4 address.")?; + } + + if let Some(port) = cli_args.value_of("metrics-port") { + client_config.http_metrics.listen_port = port + .parse::<u16>() + .map_err(|_| "metrics-port is not a valid u16.")?; + } + + if let Some(allow_origin) = cli_args.value_of("metrics-allow-origin") { + // Pre-validate the config value to give feedback to the user on node startup, instead of + // as late as when the first API response is produced.
+ hyper::header::HeaderValue::from_str(allow_origin) + .map_err(|_| "Invalid allow-origin value")?; + + client_config.http_metrics.allow_origin = Some(allow_origin.to_string()); } // Log a warning indicating an open HTTP server if it wasn't specified explicitly @@ -127,7 +154,7 @@ pub fn get_config( if cli_args.is_present("staking") { warn!( log, - "Running HTTP server on port {}", client_config.rest_api.port + "Running HTTP server on port {}", client_config.http_api.listen_port ); } @@ -221,7 +248,8 @@ pub fn get_config( unused_port("tcp").map_err(|e| format!("Failed to get port for libp2p: {}", e))?; client_config.network.discovery_port = unused_port("udp").map_err(|e| format!("Failed to get port for discovery: {}", e))?; - client_config.rest_api.port = 0; + client_config.http_api.listen_port = 0; + client_config.http_metrics.listen_port = 0; client_config.websocket_server.port = 0; } @@ -232,6 +260,11 @@ pub fn get_config( client_config.eth1.deposit_contract_address = format!("{:?}", eth2_testnet_config.deposit_contract_address()?); + let spec_contract_address = format!("{:?}", spec.deposit_contract_address); + if client_config.eth1.deposit_contract_address != spec_contract_address { + return Err("Testnet contract address does not match spec".into()); + } + client_config.eth1.deposit_contract_deploy_block = eth2_testnet_config.deposit_contract_deploy_block; client_config.eth1.lowest_cached_block_number = @@ -267,7 +300,7 @@ pub fn get_config( }; let trimmed_graffiti_len = cmp::min(raw_graffiti.len(), GRAFFITI_BYTES_LEN); - client_config.graffiti[..trimmed_graffiti_len] + client_config.graffiti.0[..trimmed_graffiti_len] .copy_from_slice(&raw_graffiti[..trimmed_graffiti_len]); if let Some(wss_checkpoint) = cli_args.value_of("wss-checkpoint") { @@ -330,7 +363,7 @@ pub fn set_network_config( if let Some(dir) = cli_args.value_of("network-dir") { config.network_dir = PathBuf::from(dir); } else { - config.network_dir = data_dir.join(NETWORK_DIR); + config.network_dir = data_dir.join(DEFAULT_NETWORK_DIR); }; if let Some(listen_address_str) = cli_args.value_of("listen-address") { @@ -495,11 +528,18 @@ pub fn get_data_dir(cli_args: &ArgMatches) -> PathBuf { // Read the `--datadir` flag. // // If it's not present, try and find the home directory (`~`) and push the default data - // directory onto it. + // directory and the testnet name onto it. 
+ cli_args .value_of("datadir") - .map(|path| PathBuf::from(path).join(BEACON_NODE_DIR)) - .or_else(|| dirs::home_dir().map(|home| home.join(DEFAULT_DATADIR).join(BEACON_NODE_DIR))) + .map(|path| PathBuf::from(path).join(DEFAULT_BEACON_NODE_DIR)) + .or_else(|| { + dirs::home_dir().map(|home| { + home.join(DEFAULT_ROOT_DIR) + .join(directory::get_testnet_name(cli_args)) + .join(DEFAULT_BEACON_NODE_DIR) + }) + }) .unwrap_or_else(|| PathBuf::from(".")) } diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 199319160..86592cfc7 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -7,7 +7,7 @@ mod config; pub use beacon_chain; pub use cli::cli_app; pub use client::{Client, ClientBuilder, ClientConfig, ClientGenesis}; -pub use config::{get_data_dir, get_eth2_testnet_config, set_network_config}; +pub use config::{get_config, get_data_dir, get_eth2_testnet_config, set_network_config}; pub use eth2_config::Eth2Config; use beacon_chain::events::TeeEventHandler; @@ -17,7 +17,6 @@ use beacon_chain::{ builder::Witness, eth1_chain::CachingEth1Backend, slot_clock::SystemTimeSlotClock, }; use clap::ArgMatches; -use config::get_config; use environment::RuntimeContext; use slog::{info, warn}; use std::ops::{Deref, DerefMut}; @@ -54,7 +53,7 @@ impl<E: EthSpec> ProductionBeaconNode<E> { /// configurations hosted remotely. pub async fn new_from_cli( context: RuntimeContext<E>, - matches: &ArgMatches<'_>, + matches: ArgMatches<'static>, ) -> Result<Self, String> { let client_config = get_config::<E>( &matches, @@ -72,7 +71,6 @@ impl<E: EthSpec> ProductionBeaconNode<E> { context: RuntimeContext<E>, mut client_config: ClientConfig, ) -> Result<Self, String> { - let http_eth2_config = context.eth2_config().clone(); let spec = context.eth2_config().spec.clone(); let client_config_1 = client_config.clone(); let client_genesis = client_config.genesis.clone(); @@ -119,26 +117,23 @@ impl<E: EthSpec> ProductionBeaconNode<E> { builder.no_eth1_backend()? }; - let (builder, events) = builder + let (builder, _events) = builder .system_time_slot_clock()? .tee_event_handler(client_config.websocket_server.clone())?; // Inject the executor into the discv5 network config. - client_config.network.discv5_config.executor = Some(Box::new(executor)); + let discv5_executor = Discv5Executor(executor); + client_config.network.discv5_config.executor = Some(Box::new(discv5_executor)); - let builder = builder + builder .build_beacon_chain()? .network(&client_config.network) .await? - .notifier()?; - - let builder = if client_config.rest_api.enabled { - builder.http_server(&client_config, &http_eth2_config, events)? - } else { - builder - }; - - Ok(Self(builder.build())) + .notifier()?
+ .http_api_config(client_config.http_api.clone()) + .http_metrics_config(client_config.http_metrics.clone()) + .build() + .map(Self) } pub fn into_inner(self) -> ProductionClient<E> { @@ -159,3 +154,13 @@ impl<E: EthSpec> DerefMut for ProductionBeaconNode<E> { &mut self.0 } } + +// Implements the Discv5 Executor trait over our global executor +#[derive(Clone)] +struct Discv5Executor(task_executor::TaskExecutor); + +impl eth2_libp2p::discv5::Executor for Discv5Executor { + fn spawn(&self, future: std::pin::Pin<Box<dyn std::future::Future<Output = ()> + Send>>) { + self.0.spawn(future, "discv5") + } +} diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 9f06bca54..a0fa4c24e 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -10,8 +10,8 @@ harness = false [dev-dependencies] tempfile = "3.1.0" -criterion = "0.3.2" -rayon = "1.3.0" +criterion = "0.3.3" +rayon = "1.4.1" [dependencies] db-key = "0.0.5" @@ -20,13 +20,13 @@ parking_lot = "0.11.0" itertools = "0.9.0" eth2_ssz = "0.1.2" eth2_ssz_derive = "0.1.0" -tree_hash = "0.1.0" +tree_hash = "0.1.1" types = { path = "../../consensus/types" } state_processing = { path = "../../consensus/state_processing" } slog = "2.5.2" -serde = "1.0.110" -serde_derive = "1.0.110" +serde = "1.0.116" +serde_derive = "1.0.116" lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } -lru = "0.5.1" -sloggers = "1.0.0" +lru = "0.6.0" +sloggers = "1.0.1" diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index bebddf8fa..91cf5ec1c 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -1,11 +1,14 @@ +use crate::{DBColumn, Error, StoreItem}; use serde_derive::{Deserialize, Serialize}; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; use types::{EthSpec, MinimalEthSpec}; pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048; pub const DEFAULT_BLOCK_CACHE_SIZE: usize = 5; /// Database configuration parameters. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Encode, Decode)] pub struct StoreConfig { /// Number of slots to wait between storing restore points in the freezer database. pub slots_per_restore_point: u64, @@ -13,6 +16,11 @@ pub struct StoreConfig { pub block_cache_size: usize, } +#[derive(Debug, Clone)] +pub enum StoreConfigError { + MismatchedSlotsPerRestorePoint { config: u64, on_disk: u64 }, +} + impl Default for StoreConfig { fn default() -> Self { Self { @@ -22,3 +30,29 @@ } } } + +impl StoreConfig { + pub fn check_compatibility(&self, on_disk_config: &Self) -> Result<(), StoreConfigError> { + if self.slots_per_restore_point != on_disk_config.slots_per_restore_point { + return Err(StoreConfigError::MismatchedSlotsPerRestorePoint { + config: self.slots_per_restore_point, + on_disk: on_disk_config.slots_per_restore_point, + }); + } + Ok(()) + } +} + +impl StoreItem for StoreConfig { + fn db_column() -> DBColumn { + DBColumn::BeaconMeta + } + + fn as_store_bytes(&self) -> Vec<u8> { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> { + Ok(Self::from_ssz_bytes(bytes)?)
+ } +} diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index 8e9237361..622cd2ac7 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -1,4 +1,5 @@ use crate::chunked_vector::ChunkError; +use crate::config::StoreConfigError; use crate::hot_cold_store::HotColdDBError; use ssz::DecodeError; use types::{BeaconStateError, Hash256, Slot}; @@ -17,6 +18,7 @@ pub enum Error { BlockNotFound(Hash256), NoContinuationData, SplitPointModified(Slot, Slot), + ConfigError(StoreConfigError), } impl From<DecodeError> for Error { @@ -49,6 +51,12 @@ } } +impl From<StoreConfigError> for Error { + fn from(e: StoreConfigError) -> Error { + Error::ConfigError(e) + } +} + #[derive(Debug)] pub struct DBError { pub message: String, diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 08e810866..55c403aa8 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -7,6 +7,9 @@ use crate::impls::beacon_state::{get_full_state, store_full_state}; use crate::iter::{ParentRootBlockIterator, StateRootsIterator}; use crate::leveldb_store::LevelDB; use crate::memory_store::MemoryStore; +use crate::metadata::{ + SchemaVersion, CONFIG_KEY, CURRENT_SCHEMA_VERSION, SCHEMA_VERSION_KEY, SPLIT_KEY, +}; use crate::metrics; use crate::{ get_key_for_col, DBColumn, Error, ItemStore, KeyValueStoreOp, PartialBeaconState, StoreItem, @@ -27,9 +30,6 @@ use std::path::Path; use std::sync::Arc; use types::*; -/// 32-byte key for accessing the `split` of the freezer DB. -pub const SPLIT_DB_KEY: &str = "FREEZERDBSPLITFREEZERDBSPLITFREE"; /// Defines how blocks should be replayed on states. #[derive(PartialEq)] pub enum BlockReplay { @@ -46,6 +46,8 @@ pub enum BlockReplay { /// intermittent "restore point" states pre-finalization. #[derive(Debug)] pub struct HotColdDB<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> { + /// The schema version. Loaded from disk on initialization. + schema_version: SchemaVersion, /// The slot and state root at the point where the database is split between hot and cold. /// /// States with slots less than `split.slot` are in the cold DB, while states with slots @@ -70,6 +72,10 @@ pub struct HotColdDB<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> { #[derive(Debug, PartialEq)] pub enum HotColdDBError { + UnsupportedSchemaVersion { + software_version: SchemaVersion, + disk_version: SchemaVersion, + }, /// Recoverable error indicating that the database freeze point couldn't be updated /// due to the finalized block not lying on an epoch boundary (should be infrequent). FreezeSlotUnaligned(Slot), @@ -106,6 +112,7 @@ impl<E: EthSpec> HotColdDB<E, MemoryStore<E>, MemoryStore<E>> { Self::verify_slots_per_restore_point(config.slots_per_restore_point)?; let db = HotColdDB { + schema_version: CURRENT_SCHEMA_VERSION, split: RwLock::new(Split::default()), cold_db: MemoryStore::open(), hot_db: MemoryStore::open(), @@ -134,6 +141,7 @@ impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> { Self::verify_slots_per_restore_point(config.slots_per_restore_point)?; let db = HotColdDB { + schema_version: CURRENT_SCHEMA_VERSION, split: RwLock::new(Split::default()), cold_db: LevelDB::open(cold_path)?, hot_db: LevelDB::open(hot_path)?, @@ -144,12 +152,33 @@ impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> { _phantom: PhantomData, }; + // Ensure that the schema version of the on-disk database matches the software. + // In the future, this would be the spot to hook in auto-migration, etc. + if let Some(schema_version) = db.load_schema_version()?
{ + if schema_version != CURRENT_SCHEMA_VERSION { + return Err(HotColdDBError::UnsupportedSchemaVersion { + software_version: CURRENT_SCHEMA_VERSION, + disk_version: schema_version, + } + .into()); + } + } else { + db.store_schema_version(CURRENT_SCHEMA_VERSION)?; + } + + // Ensure that any on-disk config is compatible with the supplied config. + if let Some(disk_config) = db.load_config()? { + db.config.check_compatibility(&disk_config)?; + } + db.store_config()?; + // Load the previous split slot from the database (if any). This ensures we can // stop and restart correctly. if let Some(split) = db.load_split()? { info!( db.log, "Hot-Cold DB initialized"; + "version" => db.schema_version.0, "split_slot" => split.slot, "split_state" => format!("{:?}", split.state_root) ); @@ -744,11 +773,29 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold> * self.config.slots_per_restore_point } + /// Load the database schema version from disk. + fn load_schema_version(&self) -> Result<Option<SchemaVersion>, Error> { + self.hot_db.get(&SCHEMA_VERSION_KEY) + } + + /// Store the database schema version. + fn store_schema_version(&self, schema_version: SchemaVersion) -> Result<(), Error> { + self.hot_db.put(&SCHEMA_VERSION_KEY, &schema_version) + } + + /// Load previously-stored config from disk. + fn load_config(&self) -> Result<Option<StoreConfig>, Error> { + self.hot_db.get(&CONFIG_KEY) + } + + /// Write the config to disk. + fn store_config(&self) -> Result<(), Error> { + self.hot_db.put(&CONFIG_KEY, &self.config) + } + /// Load the split point from disk. fn load_split(&self) -> Result<Option<Split>, Error> { - let key = Hash256::from_slice(SPLIT_DB_KEY.as_bytes()); - let split: Option<Split> = self.hot_db.get(&key)?; - Ok(split) + self.hot_db.get(&SPLIT_KEY) } /// Load the state root of a restore point. @@ -927,9 +974,7 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>( slot: frozen_head.slot, state_root: frozen_head_root, }; - store - .hot_db - .put_sync(&Hash256::from_slice(SPLIT_DB_KEY.as_bytes()), &split)?; + store.hot_db.put_sync(&SPLIT_KEY, &split)?; // Split point is now persisted in the hot database on disk. The in-memory split point // hasn't been modified elsewhere since we keep a write lock on it. It's safe to update diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 271870226..f249be1f8 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -19,6 +19,7 @@ pub mod hot_cold_store; mod impls; mod leveldb_store; mod memory_store; +mod metadata; mod metrics; mod partial_beacon_state; @@ -153,7 +154,7 @@ pub enum DBColumn { } impl Into<&'static str> for DBColumn { - /// Returns a `&str` that can be used for keying a key-value data base. + /// Returns a `&str` prefix to be added to keys before they hit the key-value database. fn into(self) -> &'static str { match self { DBColumn::BeaconMeta => "bma", diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs new file mode 100644 index 000000000..2d4733d63 --- /dev/null +++ b/beacon_node/store/src/metadata.rs @@ -0,0 +1,29 @@ +use crate::{DBColumn, Error, StoreItem}; +use ssz::{Decode, Encode}; +use types::Hash256; + +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(1); + +// All the keys that get stored under the `BeaconMeta` column. +// +// We use `repeat_byte` because it's a const fn.
+pub const SCHEMA_VERSION_KEY: Hash256 = Hash256::repeat_byte(0); +pub const CONFIG_KEY: Hash256 = Hash256::repeat_byte(1); +pub const SPLIT_KEY: Hash256 = Hash256::repeat_byte(2); + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +pub struct SchemaVersion(pub u64); + +impl StoreItem for SchemaVersion { + fn db_column() -> DBColumn { + DBColumn::BeaconMeta + } + + fn as_store_bytes(&self) -> Vec<u8> { + self.0.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> { + Ok(SchemaVersion(u64::from_ssz_bytes(bytes)?)) + } +} diff --git a/beacon_node/tests/test.rs b/beacon_node/tests/test.rs index a845acf04..7d860538f 100644 --- a/beacon_node/tests/test.rs +++ b/beacon_node/tests/test.rs @@ -3,6 +3,7 @@ use beacon_chain::StateSkipConfig; use node_test_rig::{ environment::{Environment, EnvironmentBuilder}, + eth2::types::StateId, testing_client_config, LocalBeaconNode, }; use types::{EthSpec, MinimalEthSpec, Slot}; @@ -34,10 +35,12 @@ fn http_server_genesis_state() { let node = build_node(&mut env); let remote_node = node.remote_node().expect("should produce remote node"); - let (api_state, _root) = env + let api_state = env .runtime() - .block_on(remote_node.http.beacon().get_state_by_slot(Slot::new(0))) - .expect("should fetch state from http api"); + .block_on(remote_node.get_debug_beacon_states(StateId::Slot(Slot::new(0)))) + .expect("should fetch state from http api") + .unwrap() + .data; let mut db_state = node .client diff --git a/beacon_node/timer/Cargo.toml b/beacon_node/timer/Cargo.toml index 2cee2c5de..6388a7ad8 100644 --- a/beacon_node/timer/Cargo.toml +++ b/beacon_node/timer/Cargo.toml @@ -12,4 +12,4 @@ tokio = { version = "0.2.22", features = ["full"] } slog = "2.5.2" parking_lot = "0.11.0" futures = "0.3.5" -environment = { path = "../../lighthouse/environment" } +task_executor = { path = "../../common/task_executor" } diff --git a/beacon_node/timer/src/lib.rs b/beacon_node/timer/src/lib.rs index 67aca9c27..74c9e5eb0 100644 --- a/beacon_node/timer/src/lib.rs +++ b/beacon_node/timer/src/lib.rs @@ -12,7 +12,7 @@ use tokio::time::{interval_at, Instant}; /// Spawns a timer service which periodically executes tasks for the beacon chain pub fn spawn_timer<T: BeaconChainTypes>( - executor: environment::TaskExecutor, + executor: task_executor::TaskExecutor, beacon_chain: Arc<BeaconChain<T>>, milliseconds_per_slot: u64, ) -> Result<(), &'static str> { diff --git a/beacon_node/websocket_server/Cargo.toml b/beacon_node/websocket_server/Cargo.toml index 00aa24973..8a10bdc5b 100644 --- a/beacon_node/websocket_server/Cargo.toml +++ b/beacon_node/websocket_server/Cargo.toml @@ -8,11 +8,10 @@ edition = "2018" [dependencies] futures = "0.3.5" -serde = "1.0.110" -serde_derive = "1.0.110" -serde_json = "1.0.52" +serde = "1.0.116" +serde_derive = "1.0.116" slog = "2.5.2" tokio = { version = "0.2.22", features = ["full"] } types = { path = "../../consensus/types" } ws = "0.9.1" -environment = { path = "../../lighthouse/environment" } +task_executor = { path = "../../common/task_executor" } diff --git a/beacon_node/websocket_server/src/lib.rs b/beacon_node/websocket_server/src/lib.rs index f9ed3e97e..1eea57ae2 100644 --- a/beacon_node/websocket_server/src/lib.rs +++ b/beacon_node/websocket_server/src/lib.rs @@ -34,7 +34,7 @@ impl<T: EthSpec> WebSocketSender<T> { } pub fn start_server<T: EthSpec>( - executor: environment::TaskExecutor, + executor: task_executor::TaskExecutor, config: &Config, ) -> Result<(WebSocketSender<T>, SocketAddr), String> { let log = executor.log(); diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index
907cf6d3d..2b79ec4e1 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -1,11 +1,11 @@ # Summary * [Introduction](./intro.md) -* [Become a Validator](./become-a-validator.md) - * [Using Docker](./become-a-validator-docker.md) - * [Building from Source](./become-a-validator-source.md) +* [Become a Testnet Validator](./testnet-validator.md) * [Installation](./installation.md) + * [Pre-Built Binaries](./installation-binaries.md) * [Docker](./docker.md) + * [Build from Source](./installation-source.md) * [Raspberry Pi 4](./pi.md) * [Cross-Compiling](./cross-compiling.md) * [Key Management](./key-management.md) @@ -14,20 +14,18 @@ * [Key recovery](./key-recovery.md) * [Validator Management](./validator-management.md) * [Importing from the Eth2 Launchpad](./validator-import-launchpad.md) -* [Local Testnets](./local-testnets.md) -* [API](./api.md) - * [HTTP (RESTful JSON)](./http.md) - * [/node](./http/node.md) - * [/beacon](./http/beacon.md) - * [/validator](./http/validator.md) - * [/consensus](./http/consensus.md) - * [/network](./http/network.md) - * [/spec](./http/spec.md) - * [/advanced](./http/advanced.md) - * [/lighthouse](./http/lighthouse.md) - * [WebSocket](./websockets.md) +* [APIs](./api.md) + * [Beacon Node API](./api-bn.md) + * [/lighthouse](./api-lighthouse.md) + * [Validator Inclusion APIs](./validator-inclusion.md) + * [Validator Client API](./api-vc.md) + * [Endpoints](./api-vc-endpoints.md) + * [Authorization Header](./api-vc-auth-header.md) + * [Signature Header](./api-vc-sig-header.md) + * [Prometheus Metrics](./advanced_metrics.md) * [Advanced Usage](./advanced.md) * [Database Configuration](./advanced_database.md) + * [Local Testnets](./local-testnets.md) * [Advanced Networking](./advanced_networking.md) * [Contributing](./contributing.md) * [Development Environment](./setup.md) diff --git a/book/src/advanced_metrics.md b/book/src/advanced_metrics.md new file mode 100644 index 000000000..6c901862e --- /dev/null +++ b/book/src/advanced_metrics.md @@ -0,0 +1,34 @@ +# Prometheus Metrics + +Lighthouse provides an extensive suite of metrics and monitoring in the +[Prometheus](https://prometheus.io/docs/introduction/overview/) export format +via an HTTP server built into Lighthouse. + +These metrics are generally consumed by a Prometheus server and displayed via a +Grafana dashboard. These components are available in a docker-compose format at +[sigp/lighthouse-metrics](https://github.com/sigp/lighthouse-metrics). + +## Beacon Node Metrics + +By default, these metrics are disabled but can be enabled with the `--metrics` +flag. Use the `--metrics-address`, `--metrics-port` and +`--metrics-allow-origin` flags to customize the metrics server. + +### Example + +Start a beacon node with the metrics server enabled: + +```bash
lighthouse bn --metrics
``` + +Check to ensure that the metrics are available on the default port: + +```bash
curl localhost:5054/metrics
``` + +## Validator Client Metrics + +The validator client does not *yet* expose metrics; however, this functionality +is expected to be implemented in late-September 2020. diff --git a/book/src/api-bn.md b/book/src/api-bn.md new file mode 100644 index 000000000..d957e4376 --- /dev/null +++ b/book/src/api-bn.md @@ -0,0 +1,130 @@ +# Beacon Node API + +Lighthouse implements the standard [Eth2 Beacon Node API +specification][OpenAPI]. Please follow that link for a full description of each API endpoint.
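(Editorial sketch: before the endpoint-by-endpoint examples below, a quick first-contact request against the standard specification linked above; `/eth/v1/node/syncing` is part of that standard, not a Lighthouse-specific path.)

```bash
# Query the node's sync status on the default listen address.
curl -X GET "http://localhost:5052/eth/v1/node/syncing" -H "accept: application/json"
```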
+ +> **Warning:** the standard API specification is still in flux and the Lighthouse implementation is incomplete. You can track the status of each endpoint at [#1434](https://github.com/sigp/lighthouse/issues/1434). + +## Starting the server + +A Lighthouse beacon node can be configured to expose an HTTP server by supplying the `--http` flag. The default listen address is `127.0.0.1:5052`. + +The following CLI flags control the HTTP server: + +- `--http`: enable the HTTP server (required even if the following flags are + provided). +- `--http-port`: specify the listen port of the server. +- `--http-address`: specify the listen address of the server. +- `--http-allow-origin`: specify the value of the `Access-Control-Allow-Origin` + header. The default is to not supply a header. + +The schema of the API aligns with the standard Eth2 Beacon Node API as defined +at [github.com/ethereum/eth2.0-APIs](https://github.com/ethereum/eth2.0-APIs). +An interactive specification is available [here][OpenAPI]. + +### CLI Example + +Start the beacon node with the HTTP server listening on [http://localhost:5052](http://localhost:5052): + +```bash
lighthouse bn --http
``` + +## HTTP Request/Response Examples + +This section contains some simple examples of using the HTTP API via `curl`. +All endpoints are documented in the [Eth2 Beacon Node API +specification][OpenAPI]. + +### View the head of the beacon chain + +Returns the block header at the head of the canonical chain. + +```bash
curl -X GET "http://localhost:5052/eth/v1/beacon/headers/head" -H "accept: application/json"
``` + +```json
{ + "data": { + "root": "0x4381454174fc28c7095077e959dcab407ae5717b5dca447e74c340c1b743d7b2", + "canonical": true, + "header": { + "message": { + "slot": "3199", + "proposer_index": "19077", + "parent_root": "0xf1934973041c5896d0d608e52847c3cd9a5f809c59c64e76f6020e3d7cd0c7cd", + "state_root": "0xe8e468f9f5961655dde91968f66480868dab8d4147de9498111df2b7e4e6fe60", + "body_root": "0x6f183abc6c4e97f832900b00d4e08d4373bfdc819055d76b0f4ff850f559b883" + }, + "signature": "0x988064a2f9cf13fe3aae051a3d85f6a4bca5a8ff6196f2f504e32f1203b549d5f86a39c6509f7113678880701b1881b50925a0417c1c88a750c8da7cd302dda5aabae4b941e3104d0cf19f5043c4f22a7d75d0d50dad5dbdaf6991381dc159ab" + } + } +}
``` + +### View the status of a validator + +Shows the status of validator at index `1` at the `head` state. + +```bash
curl -X GET "http://localhost:5052/eth/v1/beacon/states/head/validators/1" -H "accept: application/json"
``` + +```json
{ + "data": { + "index": "1", + "balance": "63985937939", + "status": "Active", + "validator": { + "pubkey": "0x873e73ee8b3e4fcf1d2fb0f1036ba996ac9910b5b348f6438b5f8ef50857d4da9075d0218a9d1b99a9eae235a39703e1", + "withdrawal_credentials": "0x00b8cdcf79ba7e74300a07e9d8f8121dd0d8dd11dcfd6d3f2807c45b426ac968", + "effective_balance": "32000000000", + "slashed": false, + "activation_eligibility_epoch": "0", + "activation_epoch": "0", + "exit_epoch": "18446744073709551615", + "withdrawable_epoch": "18446744073709551615" + } + } +}
``` + +## Troubleshooting + +### HTTP API is unavailable or refusing connections + +Ensure the `--http` flag has been supplied at the CLI.
+ +You can quickly check that the HTTP endpoint is up using `curl`: + +```bash +curl -X GET "http://localhost:5052/eth/v1/node/version" -H "accept: application/json" +``` + +The beacon node should respond with its version: + +```json +{"data":{"version":"Lighthouse/v0.2.9-6f7b4768a/x86_64-linux"}} +``` + +If this doesn't work, the server might not be started or there might be a +network connection error. + +### I cannot query my node from a web browser (e.g., Swagger) + +By default, the API does not provide an `Access-Control-Allow-Origin` header, +which causes browsers to reject responses with a CORS error. + +The `--http-allow-origin` flag can be used to add a wild-card CORS header: + +```bash +lighthouse bn --http --http-allow-origin "*" +``` + +> **Warning:** Adding the wild-card allow-origin flag can pose a security risk. +> Only use it in production if you understand the risks of a loose CORS policy. + +[OpenAPI]: https://ethereum.github.io/eth2.0-APIs/#/ diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md new file mode 100644 index 000000000..3f37673fa --- /dev/null +++ b/book/src/api-lighthouse.md @@ -0,0 +1,179 @@ +# Lighthouse Non-Standard APIs + +Lighthouse fully supports the standardization efforts at +[github.com/ethereum/eth2.0-APIs](https://github.com/ethereum/eth2.0-APIs), +however sometimes development requires additional endpoints that shouldn't +necessarily be defined as a broad-reaching standard. Such endpoints are placed +behind the `/lighthouse` path. + +The endpoints behind the `/lighthouse` path are: + +- Not intended to be stable. +- Not guaranteed to be safe. +- For testing and debugging purposes only. + +Although we don't recommend that users rely on these endpoints, we +document them briefly so they can be utilized by developers and +researchers. 
+ +### `/lighthouse/health` + +*Presently only available on Linux.* + +```bash +curl -X GET "http://localhost:5052/lighthouse/health" -H "accept: application/json" | jq +``` + +```json +{ + "data": { + "pid": 1728254, + "pid_num_threads": 47, + "pid_mem_resident_set_size": 510054400, + "pid_mem_virtual_memory_size": 3963158528, + "sys_virt_mem_total": 16715530240, + "sys_virt_mem_available": 4065374208, + "sys_virt_mem_used": 11383402496, + "sys_virt_mem_free": 1368662016, + "sys_virt_mem_percent": 75.67906, + "sys_loadavg_1": 4.92, + "sys_loadavg_5": 5.53, + "sys_loadavg_15": 5.58 + } +} +``` + +### `/lighthouse/syncing` + +```bash +curl -X GET "http://localhost:5052/lighthouse/syncing" -H "accept: application/json" | jq +``` + +```json +{ + "data": { + "SyncingFinalized": { + "start_slot": 3104, + "head_slot": 343744, + "head_root": "0x1b434b5ed702338df53eb5e3e24336a90373bb51f74b83af42840be7421dd2bf" + } + } +} +``` + +### `/lighthouse/peers` + +```bash +curl -X GET "http://localhost:5052/lighthouse/peers" -H "accept: application/json" | jq +``` + +```json +[ + { + "peer_id": "16Uiu2HAmA9xa11dtNv2z5fFbgF9hER3yq35qYNTPvN7TdAmvjqqv", + "peer_info": { + "_status": "Healthy", + "score": { + "score": 0 + }, + "client": { + "kind": "Lighthouse", + "version": "v0.2.9-1c9a055c", + "os_version": "aarch64-linux", + "protocol_version": "lighthouse/libp2p", + "agent_string": "Lighthouse/v0.2.9-1c9a055c/aarch64-linux" + }, + "connection_status": { + "status": "disconnected", + "connections_in": 0, + "connections_out": 0, + "last_seen": 1082, + "banned_ips": [] + }, + "listening_addresses": [ + "/ip4/80.109.35.174/tcp/9000", + "/ip4/127.0.0.1/tcp/9000", + "/ip4/192.168.0.73/tcp/9000", + "/ip4/172.17.0.1/tcp/9000", + "/ip6/::1/tcp/9000" + ], + "sync_status": { + "Advanced": { + "info": { + "status_head_slot": 343829, + "status_head_root": "0xe34e43efc2bb462d9f364bc90e1f7f0094e74310fd172af698b5a94193498871", + "status_finalized_epoch": 10742, + "status_finalized_root": "0x1b434b5ed702338df53eb5e3e24336a90373bb51f74b83af42840be7421dd2bf" + } + } + }, + "meta_data": { + "seq_number": 160, + "attnets": "0x0000000800000080" + } + } + } +] +``` + +### `/lighthouse/peers/connected` + +```bash +curl -X GET "http://localhost:5052/lighthouse/peers/connected" -H "accept: application/json" | jq +``` + +```json +[ + { + "peer_id": "16Uiu2HAkzJC5TqDSKuLgVUsV4dWat9Hr8EjNZUb6nzFb61mrfqBv", + "peer_info": { + "_status": "Healthy", + "score": { + "score": 0 + }, + "client": { + "kind": "Lighthouse", + "version": "v0.2.8-87181204+", + "os_version": "x86_64-linux", + "protocol_version": "lighthouse/libp2p", + "agent_string": "Lighthouse/v0.2.8-87181204+/x86_64-linux" + }, + "connection_status": { + "status": "connected", + "connections_in": 1, + "connections_out": 0, + "last_seen": 0, + "banned_ips": [] + }, + "listening_addresses": [ + "/ip4/34.204.178.218/tcp/9000", + "/ip4/127.0.0.1/tcp/9000", + "/ip4/172.31.67.58/tcp/9000", + "/ip4/172.17.0.1/tcp/9000", + "/ip6/::1/tcp/9000" + ], + "sync_status": "Unknown", + "meta_data": { + "seq_number": 1819, + "attnets": "0xffffffffffffffff" + } + } + } +] +``` + +### `/lighthouse/proto_array` + +```bash +curl -X GET "http://localhost:5052/lighthouse/proto_array" -H "accept: application/json" | jq +``` + +*Example omitted for brevity.* + +### `/lighthouse/validator_inclusion/{epoch}/{validator_id}` + +See [Validator Inclusion APIs](./validator-inclusion.md). + +### `/lighthouse/validator_inclusion/{epoch}/global` + +See [Validator Inclusion APIs](./validator-inclusion.md). 
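(Editorial usage sketch for the `/lighthouse/syncing` endpoint documented above: a polling loop that waits for sync to complete. It assumes `jq` is installed and that a fully-synced node serializes its state as the bare JSON string `"Synced"`; that variant name is an assumption, not something this page confirms.)

```bash
# Poll the non-standard syncing endpoint until the node reports it is synced.
while true; do
  status=$(curl -s "http://localhost:5052/lighthouse/syncing" | jq -c '.data')
  echo "sync status: ${status}"
  [ "${status}" = '"Synced"' ] && break
  sleep 12 # roughly one slot
done
```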
diff --git a/book/src/api-vc-auth-header.md b/book/src/api-vc-auth-header.md new file mode 100644 index 000000000..dbd334c9c --- /dev/null +++ b/book/src/api-vc-auth-header.md @@ -0,0 +1,55 @@ +# Validator Client API: Authorization Header + +## Overview + +The validator client HTTP server requires that all requests have the following +HTTP header: + +- Name: `Authorization` +- Value: `Basic <api-token>` + +Where `<api-token>` is a string that can be obtained from the validator client +host. Here is an example `Authorization` header: + +```
Authorization: Basic api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123
``` + +## Obtaining the API token + +The API token can be obtained via two methods: + +### Method 1: Reading from a file + +The API token is stored as a file in the `validators` directory. For most users +this is `~/.lighthouse/{testnet}/validators/api-token.txt`. Here's an +example using the `cat` command to print the token to the terminal, but any +text editor will suffice: + +```
$ cat api-token.txt
api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123
``` + +### Method 2: Reading from logs + +When starting the validator client it will output a log message containing an +`api-token` field: + +```
Sep 28 19:17:52.615 INFO HTTP API started api_token: api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123, listen_address: 127.0.0.1:5062
``` + +## Example + +Here is an example `curl` command using the API token in the `Authorization` header: + +```bash
curl localhost:5062/lighthouse/version -H "Authorization: Basic api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123"
``` + +The server should respond with its version: + +```json
{"data":{"version":"Lighthouse/v0.2.11-fc0654fbe+/x86_64-linux"}}
``` diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md new file mode 100644 index 000000000..78ea49356 --- /dev/null +++ b/book/src/api-vc-endpoints.md @@ -0,0 +1,363 @@ +# Validator Client API: Endpoints + +## Endpoints + +HTTP Path | Description | +| --- | -- | +[`GET /lighthouse/version`](#get-lighthouseversion) | Get the Lighthouse software version +[`GET /lighthouse/health`](#get-lighthousehealth) | Get information about the host machine +[`GET /lighthouse/spec`](#get-lighthousespec) | Get the Eth2 specification used by the validator +[`GET /lighthouse/validators`](#get-lighthousevalidators) | List all validators +[`GET /lighthouse/validators/:voting_pubkey`](#get-lighthousevalidatorsvoting_pubkey) | Get a specific validator +[`PATCH /lighthouse/validators/:voting_pubkey`](#patch-lighthousevalidatorsvoting_pubkey) | Update a specific validator +[`POST /lighthouse/validators`](#post-lighthousevalidators) | Create a new validator and mnemonic. +[`POST /lighthouse/validators/mnemonic`](#post-lighthousevalidatorsmnemonic) | Create a new validator from an existing mnemonic. + +## `GET /lighthouse/version` + +Returns the software version and `git` commit hash for the Lighthouse binary. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/lighthouse/version` +Method | GET +Required Headers | [`Authorization`](./api-vc-auth-header.md) +Typical Responses | 200 + +### Example Response Body + +```json
{
    "data": {
        "version": "Lighthouse/v0.2.11-fc0654fbe+/x86_64-linux"
    }
}
``` + +## `GET /lighthouse/health` + +Returns information regarding the health of the host machine.
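(Editorial sketch: like every endpoint in the table above, this one requires the `Authorization` header. A convenient pattern, assuming the default `validators` directory layout from the previous page, is to read the token straight from disk; substitute your testnet name for `{testnet}`.)

```bash
# Read the API token from disk and query the health endpoint.
curl "http://localhost:5062/lighthouse/health" \
  -H "Authorization: Basic $(cat ~/.lighthouse/{testnet}/validators/api-token.txt)"
```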
+ +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/lighthouse/health` +Method | GET +Required Headers | [`Authorization`](./api-vc-auth-header.md) +Typical Responses | 200 + +*Note: this endpoint is presently only available on Linux.* + +### Example Response Body + +```json +{ + "data": { + "pid": 1476293, + "pid_num_threads": 19, + "pid_mem_resident_set_size": 4009984, + "pid_mem_virtual_memory_size": 1306775552, + "sys_virt_mem_total": 33596100608, + "sys_virt_mem_available": 23073017856, + "sys_virt_mem_used": 9346957312, + "sys_virt_mem_free": 22410510336, + "sys_virt_mem_percent": 31.322334, + "sys_loadavg_1": 0.98, + "sys_loadavg_5": 0.98, + "sys_loadavg_15": 1.01 + } +} +``` + +## `GET /lighthouse/spec` + +Returns the Eth2 specification loaded for this validator. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/lighthouse/spec` +Method | GET +Required Headers | [`Authorization`](./api-vc-auth-header.md) +Typical Responses | 200 + +### Example Response Body + +```json +{ + "data": { + "CONFIG_NAME": "mainnet", + "MAX_COMMITTEES_PER_SLOT": "64", + "TARGET_COMMITTEE_SIZE": "128", + "MIN_PER_EPOCH_CHURN_LIMIT": "4", + "CHURN_LIMIT_QUOTIENT": "65536", + "SHUFFLE_ROUND_COUNT": "90", + "MIN_GENESIS_ACTIVE_VALIDATOR_COUNT": "1024", + "MIN_GENESIS_TIME": "1601380800", + "GENESIS_DELAY": "172800", + "MIN_DEPOSIT_AMOUNT": "1000000000", + "MAX_EFFECTIVE_BALANCE": "32000000000", + "EJECTION_BALANCE": "16000000000", + "EFFECTIVE_BALANCE_INCREMENT": "1000000000", + "HYSTERESIS_QUOTIENT": "4", + "HYSTERESIS_DOWNWARD_MULTIPLIER": "1", + "HYSTERESIS_UPWARD_MULTIPLIER": "5", + "PROPORTIONAL_SLASHING_MULTIPLIER": "3", + "GENESIS_FORK_VERSION": "0x00000002", + "BLS_WITHDRAWAL_PREFIX": "0x00", + "SECONDS_PER_SLOT": "12", + "MIN_ATTESTATION_INCLUSION_DELAY": "1", + "MIN_SEED_LOOKAHEAD": "1", + "MAX_SEED_LOOKAHEAD": "4", + "MIN_EPOCHS_TO_INACTIVITY_PENALTY": "4", + "MIN_VALIDATOR_WITHDRAWABILITY_DELAY": "256", + "SHARD_COMMITTEE_PERIOD": "256", + "BASE_REWARD_FACTOR": "64", + "WHISTLEBLOWER_REWARD_QUOTIENT": "512", + "PROPOSER_REWARD_QUOTIENT": "8", + "INACTIVITY_PENALTY_QUOTIENT": "16777216", + "MIN_SLASHING_PENALTY_QUOTIENT": "32", + "SAFE_SLOTS_TO_UPDATE_JUSTIFIED": "8", + "DOMAIN_BEACON_PROPOSER": "0x00000000", + "DOMAIN_BEACON_ATTESTER": "0x01000000", + "DOMAIN_RANDAO": "0x02000000", + "DOMAIN_DEPOSIT": "0x03000000", + "DOMAIN_VOLUNTARY_EXIT": "0x04000000", + "DOMAIN_SELECTION_PROOF": "0x05000000", + "DOMAIN_AGGREGATE_AND_PROOF": "0x06000000", + "MAX_VALIDATORS_PER_COMMITTEE": "2048", + "SLOTS_PER_EPOCH": "32", + "EPOCHS_PER_ETH1_VOTING_PERIOD": "32", + "SLOTS_PER_HISTORICAL_ROOT": "8192", + "EPOCHS_PER_HISTORICAL_VECTOR": "65536", + "EPOCHS_PER_SLASHINGS_VECTOR": "8192", + "HISTORICAL_ROOTS_LIMIT": "16777216", + "VALIDATOR_REGISTRY_LIMIT": "1099511627776", + "MAX_PROPOSER_SLASHINGS": "16", + "MAX_ATTESTER_SLASHINGS": "2", + "MAX_ATTESTATIONS": "128", + "MAX_DEPOSITS": "16", + "MAX_VOLUNTARY_EXITS": "16", + "ETH1_FOLLOW_DISTANCE": "1024", + "TARGET_AGGREGATORS_PER_COMMITTEE": "16", + "RANDOM_SUBNETS_PER_VALIDATOR": "1", + "EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION": "256", + "SECONDS_PER_ETH1_BLOCK": "14", + "DEPOSIT_CONTRACT_ADDRESS": "0x48b597f4b53c21b48ad95c7256b49d1779bd5890" + } +} +``` + +## `GET /lighthouse/validators` + +Lists all validators managed by this validator client. 
+
+### HTTP Specification
+
+| Property | Specification |
+| --- |--- |
+Path | `/lighthouse/validators`
+Method | GET
+Required Headers | [`Authorization`](./api-vc-auth-header.md)
+Typical Responses | 200
+
+### Example Response Body
+
+```json
+{
+    "data": [
+        {
+            "enabled": true,
+            "voting_pubkey": "0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde"
+        },
+        {
+            "enabled": true,
+            "voting_pubkey": "0xb0441246ed813af54c0a11efd53019f63dd454a1fa2a9939ce3c228419fbe113fb02b443ceeb38736ef97877eb88d43a"
+        },
+        {
+            "enabled": true,
+            "voting_pubkey": "0xad77e388d745f24e13890353031dd8137432ee4225752642aad0a2ab003c86620357d91973b6675932ff51f817088f38"
+        }
+    ]
+}
+```
+
+## `GET /lighthouse/validators/:voting_pubkey`
+
+Get a validator by its `voting_pubkey`.
+
+### HTTP Specification
+
+| Property | Specification |
+| --- |--- |
+Path | `/lighthouse/validators/:voting_pubkey`
+Method | GET
+Required Headers | [`Authorization`](./api-vc-auth-header.md)
+Typical Responses | 200, 400
+
+### Example Path
+
+```
+localhost:5062/lighthouse/validators/0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde
+```
+
+### Example Response Body
+
+```json
+{
+    "data": {
+        "enabled": true,
+        "voting_pubkey": "0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde"
+    }
+}
+```
+
+## `PATCH /lighthouse/validators/:voting_pubkey`
+
+Update one or more values for the validator identified by `voting_pubkey`.
+
+### HTTP Specification
+
+| Property | Specification |
+| --- |--- |
+Path | `/lighthouse/validators/:voting_pubkey`
+Method | PATCH
+Required Headers | [`Authorization`](./api-vc-auth-header.md)
+Typical Responses | 200, 400
+
+### Example Path
+
+```
+localhost:5062/lighthouse/validators/0xb0148e6348264131bf47bcd1829590e870c836dc893050fd0dadc7a28949f9d0a72f2805d027521b45441101f0cc1cde
+```
+
+### Example Request Body
+
+```json
+{
+    "enabled": false
+}
+```
+
+### Example Response Body
+
+```json
+null
+```
+
+## `POST /lighthouse/validators`
+
+Create any number of new validators, all of which will share a common mnemonic
+generated by the server.
+
+A BIP-39 mnemonic will be randomly generated and returned with the response.
+This mnemonic can be used to recover all keys returned in the response.
+Validators are generated from the mnemonic according to
+[EIP-2334](https://eips.ethereum.org/EIPS/eip-2334), starting at index `0`.
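+
+As a sketch, the request can be made with `curl` by writing a JSON body (like
+the example request body shown below) to a file. The `validators.json`
+filename here is purely for illustration:
+
+```bash
+# POST a JSON file describing the validators to be created.
+# "validators.json" is a hypothetical file holding the request body.
+API_TOKEN=$(cat ~/.lighthouse/medalla/validators/api-token.txt)
+curl -X POST localhost:5062/lighthouse/validators \
+  -H "Authorization: Basic $API_TOKEN" \
+  -H "Content-Type: application/json" \
+  -d @validators.json
+```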
+
+### HTTP Specification
+
+| Property | Specification |
+| --- |--- |
+Path | `/lighthouse/validators`
+Method | POST
+Required Headers | [`Authorization`](./api-vc-auth-header.md)
+Typical Responses | 200
+
+### Example Request Body
+
+```json
+[
+    {
+        "enable": true,
+        "description": "validator_one",
+        "deposit_gwei": "32000000000"
+    },
+    {
+        "enable": false,
+        "description": "validator two",
+        "deposit_gwei": "34000000000"
+    }
+]
+```
+
+### Example Response Body
+
+```json
+{
+    "data": {
+        "mnemonic": "marine orchard scout label trim only narrow taste art belt betray soda deal diagram glare hero scare shadow ramp blur junior behave resource tourist",
+        "validators": [
+            {
+                "enabled": true,
+                "description": "validator_one",
+                "voting_pubkey": "0x8ffbc881fb60841a4546b4b385ec5e9b5090fd1c4395e568d98b74b94b41a912c6101113da39d43c101369eeb9b48e50",
+                "eth1_deposit_tx_data": "0x22895118000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000001206c68675776d418bfd63468789e7c68a6788c4dd45a3a911fe3d642668220bbf200000000000000000000000000000000000000000000000000000000000000308ffbc881fb60841a4546b4b385ec5e9b5090fd1c4395e568d98b74b94b41a912c6101113da39d43c101369eeb9b48e5000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000cf8b3abbf0ecd91f3b0affcc3a11e9c5f8066efb8982d354ee9a812219b17000000000000000000000000000000000000000000000000000000000000000608fbe2cc0e17a98d4a58bd7a65f0475a58850d3c048da7b718f8809d8943fee1dbd5677c04b5fa08a9c44d271d009edcd15caa56387dc217159b300aad66c2cf8040696d383d0bff37b2892a7fe9ba78b2220158f3dc1b9cd6357bdcaee3eb9f2",
+                "deposit_gwei": "32000000000"
+            },
+            {
+                "enabled": false,
+                "description": "validator two",
+                "voting_pubkey": "0xa9fadd620dc68e9fe0d6e1a69f6c54a0271ad65ab5a509e645e45c6e60ff8f4fc538f301781193a08b55821444801502",
+                "eth1_deposit_tx_data": "0x22895118000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000120b1911954c1b8d23233e0e2bf8c4878c8f56d25a4f790ec09a94520ec88af30490000000000000000000000000000000000000000000000000000000000000030a9fadd620dc68e9fe0d6e1a69f6c54a0271ad65ab5a509e645e45c6e60ff8f4fc538f301781193a08b5582144480150200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000a96df8b95c3ba749265e48a101f2ed974fffd7487487ed55f8dded99b617ad000000000000000000000000000000000000000000000000000000000000006090421299179824950e2f5a592ab1fdefe5349faea1e8126146a006b64777b74cce3cfc5b39d35b370e8f844e99c2dc1b19a1ebd38c7605f28e9c4540aea48f0bc48e853ae5f477fa81a9fc599d1732968c772730e1e47aaf5c5117bd045b788e",
+                "deposit_gwei": "34000000000"
+            }
+        ]
+    }
+}
+```
+
+## `POST /lighthouse/validators/mnemonic`
+
+Create any number of new validators, all of which will share a common mnemonic.
+
+The supplied BIP-39 mnemonic will be used to generate the validator keys
+according to [EIP-2334](https://eips.ethereum.org/EIPS/eip-2334), starting at
+the supplied `key_derivation_path_offset`. For example, if
+`key_derivation_path_offset = 42`, then the first validator voting key will be
+generated with the path `m/12381/3600/42/0/0`.
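+
+As with `POST /lighthouse/validators`, a sketch of the request with `curl`
+follows, where `mnemonic.json` is a hypothetical file containing a JSON body
+like the example request body shown below:
+
+```bash
+# POST the mnemonic and validator definitions from a local JSON file.
+# "mnemonic.json" is a hypothetical file holding the request body.
+API_TOKEN=$(cat ~/.lighthouse/medalla/validators/api-token.txt)
+curl -X POST localhost:5062/lighthouse/validators/mnemonic \
+  -H "Authorization: Basic $API_TOKEN" \
+  -H "Content-Type: application/json" \
+  -d @mnemonic.json
+```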
+
+### HTTP Specification
+
+| Property | Specification |
+| --- |--- |
+Path | `/lighthouse/validators/mnemonic`
+Method | POST
+Required Headers | [`Authorization`](./api-vc-auth-header.md)
+Typical Responses | 200
+
+### Example Request Body
+
+```json
+{
+    "mnemonic": "theme onion deal plastic claim silver fancy youth lock ordinary hotel elegant balance ridge web skill burger survey demand distance legal fish salad cloth",
+    "key_derivation_path_offset": 0,
+    "validators": [
+        {
+            "enable": true,
+            "description": "validator_one",
+            "deposit_gwei": "32000000000"
+        }
+    ]
+}
+```
+
+### Example Response Body
+
+```json
+{
+    "data": [
+        {
+            "enabled": true,
+            "description": "validator_one",
+            "voting_pubkey": "0xa062f95fee747144d5e511940624bc6546509eeaeae9383257a9c43e7ddc58c17c2bab4ae62053122184c381b90db380",
+            "eth1_deposit_tx_data": "0x22895118000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000120a57324d95ae9c7abfb5cc9bd4db253ed0605dc8a19f84810bcf3f3874d0e703a0000000000000000000000000000000000000000000000000000000000000030a062f95fee747144d5e511940624bc6546509eeaeae9383257a9c43e7ddc58c17c2bab4ae62053122184c381b90db3800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200046e4199f18102b5d4e8842d0eeafaa1268ee2c21340c63f9c2cd5b03ff19320000000000000000000000000000000000000000000000000000000000000060b2a897b4ba4f3910e9090abc4c22f81f13e8923ea61c0043506950b6ae174aa643540554037b465670d28fa7b7d716a301e9b172297122acc56be1131621c072f7c0a73ea7b8c5a90ecd5da06d79d90afaea17cdeeef8ed323912c70ad62c04b",
+            "deposit_gwei": "32000000000"
+        }
+    ]
+}
+```
diff --git a/book/src/api-vc-sig-header.md b/book/src/api-vc-sig-header.md
new file mode 100644
index 000000000..a1b9b104f
--- /dev/null
+++ b/book/src/api-vc-sig-header.md
@@ -0,0 +1,108 @@
+# Validator Client API: Signature Header
+
+## Overview
+
+The validator client HTTP server adds the following header to all responses:
+
+- Name: `Signature`
+- Value: a secp256k1 signature across the SHA256 of the response body.
+
+Example `Signature` header:
+
+```
+Signature: 0x304402205b114366444112580bf455d919401e9c869f5af067cd496016ab70d428b5a99d0220067aede1eb5819eecfd5dd7a2b57c5ac2b98f25a7be214b05684b04523aef873
+```
+
+## Verifying the Signature
+
+Below is a browser-ready example of signature verification.
+
+### HTML
+
+```html
+<!-- Load the "js-sha256" and "elliptic" libraries used by the example below.
+     These CDN URLs are illustrative; any hosted copies of the two libraries
+     will do. -->
+<script src="https://cdn.jsdelivr.net/gh/emn178/js-sha256/src/sha256.js"></script>
+<script src="https://cdn.jsdelivr.net/gh/indutny/elliptic/dist/elliptic.min.js"></script>
+```
+
+### Javascript
+
+```javascript
+// Helper function to turn a hex-string into bytes.
+function hexStringToByte(str) {
+  if (!str) {
+    return new Uint8Array();
+  }
+
+  var a = [];
+  for (var i = 0, len = str.length; i < len; i+=2) {
+    a.push(parseInt(str.substr(i,2),16));
+  }
+
+  return new Uint8Array(a);
+}
+
+// This example uses the secp256k1 curve from the "elliptic" library:
+//
+// https://github.com/indutny/elliptic
+var ec = new elliptic.ec('secp256k1');
+
+// The public key is contained in the API token:
+//
+// Authorization: Basic api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123
+var pk_bytes = hexStringToByte('03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123');
+
+// The signature is in the `Signature` header of the response:
+//
+// Signature: 0x304402205b114366444112580bf455d919401e9c869f5af067cd496016ab70d428b5a99d0220067aede1eb5819eecfd5dd7a2b57c5ac2b98f25a7be214b05684b04523aef873
+var sig_bytes = hexStringToByte('304402205b114366444112580bf455d919401e9c869f5af067cd496016ab70d428b5a99d0220067aede1eb5819eecfd5dd7a2b57c5ac2b98f25a7be214b05684b04523aef873');
+
+// The HTTP response body.
+var response_body = "{\"data\":{\"version\":\"Lighthouse/v0.2.11-fc0654fbe+/x86_64-linux\"}}";
+
+// The HTTP response body is hashed (SHA256) to determine the 32-byte message.
+let hash = sha256.create();
+hash.update(response_body);
+let message = hash.array();
+
+// The 32-byte message hash, the signature and the public key are verified.
+if (ec.verify(message, sig_bytes, pk_bytes)) {
+  console.log("The signature is valid")
+} else {
+  console.log("The signature is invalid")
+}
+```
+
+*This example is also available as a [JSFiddle](https://jsfiddle.net/wnqd74Lz/).*
+
+## Example
+
+The previous Javascript example was written using the output from the following
+`curl` command:
+
+```bash
+curl -v localhost:5062/lighthouse/version -H "Authorization: Basic api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123"
+```
+
+```
+* Trying ::1:5062...
+* connect to ::1 port 5062 failed: Connection refused
+* Trying 127.0.0.1:5062...
+* Connected to localhost (127.0.0.1) port 5062 (#0)
+> GET /lighthouse/version HTTP/1.1
+> Host: localhost:5062
+> User-Agent: curl/7.72.0
+> Accept: */*
+> Authorization: Basic api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123
+>
+* Mark bundle as not supporting multiuse
+< HTTP/1.1 200 OK
+< content-type: application/json
+< signature: 0x304402205b114366444112580bf455d919401e9c869f5af067cd496016ab70d428b5a99d0220067aede1eb5819eecfd5dd7a2b57c5ac2b98f25a7be214b05684b04523aef873
+< server: Lighthouse/v0.2.11-fc0654fbe+/x86_64-linux
+< access-control-allow-origin:
+< content-length: 65
+< date: Tue, 29 Sep 2020 04:23:46 GMT
+<
+* Connection #0 to host localhost left intact
+{"data":{"version":"Lighthouse/v0.2.11-fc0654fbe+/x86_64-linux"}}
+```
diff --git a/book/src/api-vc.md b/book/src/api-vc.md
new file mode 100644
index 000000000..0a8941eda
--- /dev/null
+++ b/book/src/api-vc.md
@@ -0,0 +1,38 @@
+# Validator Client API
+
+Lighthouse implements an HTTP/JSON API for the validator client. Since there is
+no Eth2 standard validator client API, Lighthouse has defined its own.
+
+A full list of endpoints can be found in [Endpoints](./api-vc-endpoints.md).
+
+> Note: All requests to the HTTP server must supply an
+> [`Authorization`](./api-vc-auth-header.md) header. All responses contain a
+> [`Signature`](./api-vc-sig-header.md) header for optional verification.
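+
+For example, the `Signature` header can be captured for offline verification
+with a sketch like the following, assuming the server is running locally and
+the API token file has been copied into the working directory:
+
+```bash
+# Print only the response headers, then filter for the Signature header.
+curl -sD - -o /dev/null localhost:5062/lighthouse/version \
+  -H "Authorization: Basic $(cat api-token.txt)" | grep -i '^signature:'
+```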
+
+## Starting the server
+
+A Lighthouse validator client can be configured to expose an HTTP server by supplying the `--http` flag. The default listen address is `127.0.0.1:5062`.
+
+The following CLI flags control the HTTP server:
+
+- `--http`: enable the HTTP server (required even if the following flags are
+  provided).
+- `--http-port`: specify the listen port of the server.
+- `--http-allow-origin`: specify the value of the `Access-Control-Allow-Origin`
+  header. The default is to not supply a header.
+
+## Security
+
+The validator client HTTP server is **not encrypted** (i.e., it is **not HTTPS**). For
+this reason, it will only listen on `127.0.0.1`.
+
+It is unsafe to expose the validator client to the public Internet without
+additional transport layer security (e.g., HTTPS via nginx, SSH tunnels, etc.).
+
+### CLI Example
+
+Start the validator client with the HTTP server listening on [http://localhost:5062](http://localhost:5062):
+
+```bash
+lighthouse vc --http
+```
diff --git a/book/src/api.md b/book/src/api.md
index 0fa6c3001..56c1ff5ce 100644
--- a/book/src/api.md
+++ b/book/src/api.md
@@ -1,13 +1,9 @@
 # APIs
 
-The Lighthouse `beacon_node` provides two APIs for local consumption:
+Lighthouse allows users to query the state of Eth2.0 using web-standard,
+RESTful HTTP/JSON APIs.
 
-- A [RESTful JSON HTTP API](http.html) which provides beacon chain, node and network
-  information.
-- A read-only [WebSocket API](websockets.html) providing beacon chain events, as they occur.
+There are two APIs served by Lighthouse:
 
-
-## Security
-
-These endpoints are not designed to be exposed to the public Internet or
-untrusted users. They may pose a considerable DoS attack vector when used improperly.
+- [Beacon Node API](./api-bn.md)
+- [Validator Client API](./api-vc.md) (not yet released).
diff --git a/book/src/become-a-validator-docker.md b/book/src/become-a-validator-docker.md
deleted file mode 100644
index ce45996fd..000000000
--- a/book/src/become-a-validator-docker.md
+++ /dev/null
@@ -1,121 +0,0 @@
-# Become a Validator: Using Docker
-
-Sigma Prime maintains the
-[sigp/lighthouse-docker](https://github.com/sigp/lighthouse-docker) repository
-which provides an easy way to run Lighthouse without building the Lighthouse
-binary yourself.
-
-> Note: when you're running the Docker Hub image you're relying upon a
-> pre-built binary instead of building from source. If you want the highest
-> assurance you're running the _real_ Lighthouse,
-> [build the docker image yourself](./docker.md) instead. You'll need some
-> experience with docker-compose to integrate your locally built docker image
-> with the docker-compose environment.
-
-## 0. Install Docker Compose
-
- Docker Compose relies on Docker Engine for any meaningful work, so make sure you have Docker Engine installed either locally or remote, depending on your setup.
-
-- On desktop systems like [Docker Desktop for Mac](https://docs.docker.com/docker-for-mac/install/) and [Docker Desktop for Windows](https://docs.docker.com/docker-for-windows/install/), Docker Compose is included as part of those desktop installs, so the desktop install is all you need.
-
-- On Linux systems, you'll need to first [install the Docker for your OS](https://docs.docker.com/install/#server) and then [follow the instuctions here](https://docs.docker.com/compose/install/#install-compose-on-linux-systems).
-
-> For more on installing Compose, see [here](https://docs.docker.com/compose/install/).
-
-
-## 1.
Clone the repository - -Once you have Docker Compose installed, clone the -[sigp/lighthouse-docker](https://github.com/sigp/lighthouse-docker) repository: - -```bash - git clone https://github.com/sigp/lighthouse-docker - cd lighthouse-docker -``` - -## 2. Configure the Docker environment - -Then, create a file named `.env` with the following contents (these values are -documented -[here](https://github.com/sigp/lighthouse-docker/blob/master/default.env)): - -```bash -DEBUG_LEVEL=info -START_GETH=true -START_VALIDATOR=true -VALIDATOR_COUNT=1 -VOTING_ETH1_NODE=http://geth:8545 -``` - -> To specify a non-default testnet add `TESTNET=` to the above file. can be `altona` or `medalla`. - -_This `.env` file should live in the `lighthouse-docker` directory alongside the -`docker-compose.yml` file_. - -## 3. Start Lighthouse - -Start the docker-compose environment (you may need to prefix the below command with `sudo`): - -```bash - docker-compose up -``` - -Watch the output of this command for the `Decrypted validator keystore pubkey` -log, as it contains your `voting_pubkey` -- the primary identifier for your new -validator. This key is useful for finding your validator in block explorers. -Here's an example of the log: - -```bash -validator_client_1 | Jun 01 00:29:24.418 INFO Decrypted validator keystore voting_pubkey: 0x9986ade7a974d2fe2d0fc84a8c04153873337d533d43a83439cab8ec276410686dd69aa808605a7324f34e52497a3f41 -``` -This is one of the earlier logs outputted, so you may have to scroll up or perform a search in your terminal to find it. - -> Note: `docker-compose up` generates a new sub-directory -- to store your validator's deposit data, along with its voting and withdrawal keys -- in the `lighthouse-data/validators` directory. This sub-directory is identified by your validator's `voting_pubkey` (the same `voting_pubkey` you see in the logs). So this is another way you can find it. - -> Note: the docker-compose setup includes a fast-synced geth node. So you can -> expect the `beacon_node` to log some eth1-related errors whilst the geth node -> boots and becomes synced. This will only happen on the first start of the -> compose environment or if geth loses sync. - -> Note: If you are participating in the genesis of a network (the network has -> not launched yet) you will notice errors in the validator client. This is -> because the beacon node not expose its HTTP API until -> the genesis of the network is known (approx 2 days before the network -> launches). - -> Note: Docker exposes ports TCP 9000 and UDP 9000 by default. Although not -> strictly required, we recommend setting up port forwards to expose these -> ports publicly. For more information see the FAQ or the [Advanced Networking](advanced_networking.html) -> section - -To find an estimate for how long your beacon node will take to finish syncing, look for logs that look like this: - -```bash -beacon_node_1 | Mar 16 11:33:53.979 INFO Syncing -est_time: 47 mins, speed: 16.67 slots/sec, distance: 47296 slots (7 days 14 hrs), peers: 3, service: slot_notifier -``` - -You'll find the estimated time under `est_time`. In the example above, that's `47 mins`. 
- -If your beacon node hasn't finished syncing yet, you'll see some ERRO messages indicating that your node hasn't synced yet: - -```bash -validator_client_1 | Mar 16 11:34:36.086 ERRO Beacon node is not synced current_epoch: 6999, node_head_epoch: 5531, service: duties -``` - -It's safest to wait for your node to sync before moving on to the next step, otherwise your validator may activate before you're able to produce blocks and attestations (and you may be penalized as a result). - -However, since it generally takes somewhere between [4 and 8 hours](./faq.md) after depositing for a validator to become active, if your `est_time` is less than 4 hours, you _should_ be fine to just move on to the next step. After all, this is a testnet and you're only risking Goerli ETH! - -## Installation complete! - -In the [next step](become-a-validator.html#2-submit-your-deposit-to-goerli) you'll need to upload your validator's deposit data. This data is stored in a file called `eth1_deposit_data.rlp`. - -You'll find it in `lighthouse-docker/.lighthouse/validators/` -- in the sub-directory that corresponds to your validator's public key (`voting_pubkey`). - - -> For example, if you ran [step 1](become-a-validator-docker.html#1-clone-the-repository) in `/home/karlm/`, and your validator's `voting_pubkey` is `0x8592c7..`, then you'll find your `eth1_deposit_data.rlp` file in the following directory: -> ->`/home/karlm/lighthouse-docker/.lighthouse/validators/0x8592c7../` - -Once you've located `eth1_deposit_data.rlp`, you're ready to move on to [Become a Validator: Step 2](become-a-validator.html#2-submit-your-deposit-to-goerli). diff --git a/book/src/become-a-validator-source.md b/book/src/become-a-validator-source.md deleted file mode 100644 index b1a8db045..000000000 --- a/book/src/become-a-validator-source.md +++ /dev/null @@ -1,219 +0,0 @@ -# Become a Validator: Building from Source - -## 0. Install Rust -If you don't have Rust installed already, visit [rustup.rs](https://rustup.rs/) to install it. - -> Notes: -> - If you're not familiar with Rust or you'd like more detailed instructions, see our [installation guide](./installation.md). -> - Windows is presently only supported via [WSL](https://docs.microsoft.com/en-us/windows/wsl/about). - - -## 1. Download and install Lighthouse - -Once you have Rust installed, you can install Lighthouse with the following commands: - -1. `git clone https://github.com/sigp/lighthouse.git` -2. `cd lighthouse` -4. `make` - -You may need to open a new terminal window before running `make`. - -You've completed this step when you can run `$ lighthouse --help` and see the -help menu. - - -## 2. Start an Eth1 client - -Since Eth2 relies upon the Eth1 chain for validator on-boarding, all Eth2 validators must have a connection to an Eth1 node. - -We provide instructions for using Geth (the Eth1 client that, by chance, we ended up testing with), but you could use any client that implements the JSON RPC via HTTP. A fast-synced node should be sufficient. - -### Installing Geth -If you're using a Mac, follow the instructions [listed here](https://github.com/ethereum/go-ethereum/wiki/Installation-Instructions-for-Mac) to install geth. Otherwise [see here](https://github.com/ethereum/go-ethereum/wiki/Installing-Geth). - -### Starting Geth - -Once you have geth installed, use this command to start your Eth1 node: - -```bash - geth --goerli --http -``` - -## 3. 
Start your beacon node - -The beacon node is the core component of Eth2, it connects to other peers over -the internet and maintains a view of the chain. - -Start your beacon node with: - -```bash - lighthouse --testnet medalla beacon --staking -``` - -> The `--testnet` parameter is optional. Omitting it will default to the -> current public testnet. Set the value to the testnet you wish to run on. -> Current values are either `altona` or `medalla`. This is true for all the -> following commands in this document. - -> Note: Lighthouse, by default, opens port 9000 over TCP and UDP. Although not -> strictly required, we recommend setting up port forwards to expose these -> ports publicly. For more information see the FAQ or the [Advanced Networking](advanced_networking.html) -> section - - -You can also pass an external http endpoint (e.g. Infura) for the Eth1 node using the `--eth1-endpoint` flag: - -```bash - lighthouse --testnet medalla beacon --staking --eth1-endpoint -``` - -Your beacon node has started syncing when you see the following (truncated) -log: - -``` -Dec 09 12:57:18.026 INFO Syncing -est_time: 2 hrs ... -``` - -The `distance` value reports the time since eth2 genesis, whilst the `est_time` -reports an estimate of how long it will take your node to become synced. - -You'll know it's finished syncing once you see the following (truncated) log: - -``` -Dec 09 12:27:06.010 INFO Synced -slot: 16835, ... -``` - - -## 4. Generate your validator key - -First, [create a wallet](./wallet-create.md) that can be used to generate -validator keys. Then, from that wallet [create a -validator](./validator-create.md). A two-step example follows: - -### 4.1 Create a Wallet - -Create a wallet with: - -```bash -lighthouse --testnet medalla account wallet create -``` - -You will be prompted for a wallet name and a password. The output will look like this: - -``` -Your wallet's 24-word BIP-39 mnemonic is: - - glad marble art pelican nurse large guilt response brave affair kite essence welcome gauge peace once picnic debris devote ticket blood bike solar junk - -This mnemonic can be used to fully restore your wallet, should -you lose the JSON file or your password. - -It is very important that you DO NOT SHARE this mnemonic as it will -reveal the private keys of all validators and keys generated with -this wallet. That would be catastrophic. - -It is also important to store a backup of this mnemonic so you can -recover your private keys in the case of data loss. Writing it on -a piece of paper and storing it in a safe place would be prudent. - -Your wallet's UUID is: - - 1c8c13d5-d065-4ef7-bad3-14e9d8146140 - -You do not need to backup your UUID or keep it secret. -``` - -**Don't forget to make a backup** of the 24-word BIP-39 mnemonic. It can be -used to restore your validator if there is a data loss. - -### 4.2 Create a Validator from the Wallet - -Create a validator from the wallet with: - -```bash -lighthouse --testnet medalla account validator create --count 1 -``` - -Enter your wallet's name and password when prompted. The output will look like this: - -```bash -1/1 0x80f3dce8d6745a725d8442c9bc3ca0852e772394b898c95c134b94979ebb0af6f898d5c5f65b71be6889185c486918a7 -``` - -Take note of the _validator public key_ (the `0x` and 64 characters following -it). It's the validator's primary identifier, and will be used to find your -validator in block explorers. (The `1/1` at the start is saying it's one-of-one -keys generated). 
- -Once you've observed the validator public key, you've successfully generated a -new sub-directory for your validator in the `.lighthouse/validators` directory. -The sub-directory is identified by your validator's public key . And is used to -store your validator's deposit data, along with its voting keys and other -information. - - -## 5. Start your validator client - -> Note: If you are participating in the genesis of a network (the network has -> not launched yet) you should skip this step and re-run this step two days before -> the launch of the network. The beacon node does not expose its HTTP API until -> the genesis of the network is known (approx 2 days before the network -> launches). - -Since the validator client stores private keys and signs messages generated by the beacon node, for security reasons it runs separately from it. - -You'll need both your beacon node _and_ validator client running if you want to -stake. - -Start the validator client with: - -```bash - lighthouse --testnet medalla validator --auto-register -``` - -The `--auto-register` flag registers your signing key with the slashing protection database, which -keeps track of all the messages your validator signs. This flag should be used sparingly, -as reusing the same key on multiple nodes can lead to your validator getting slashed. On subsequent -runs you should leave off the `--auto-register` flag. - -You know that your validator client is running and has found your validator keys from [step 3](become-a-validator-source.html#3-start-your-beacon-node) when you see the following logs: - -``` -Dec 09 13:08:59.171 INFO Loaded validator keypair store voting_validators: 1 -Dec 09 13:09:09.000 INFO Awaiting activation slot: 17787, ... -``` - - -To find an estimate for how long your beacon node will take to finish syncing, lookout for the following logs: - -```bash -beacon_node_1 | Mar 16 11:33:53.979 INFO Syncing -est_time: 47 mins, speed: 16.67 slots/sec, distance: 47296 slots (7 days 14 hrs), peers: 3, service: slot_notifier -``` - -You'll find the estimated time under `est_time`. In the example log above, that's `47 mins`. - -If your beacon node hasn't finished syncing yet, you'll see some `ERRO` -messages indicating that your node hasn't synced yet: - -```bash -validator_client_1 | Mar 16 11:34:36.086 ERRO Beacon node is not synced current_epoch: 6999, node_head_epoch: 5531, service: duties -``` - -It's safest to wait for your node to sync before moving on to the next step, otherwise your validator may activate before you're able to produce blocks and attestations (and you may be penalized as a result). - -However, since it generally takes somewhere between [4 and 8 hours](./faq.md) after depositing for a validator to become active, if your `est_time` is less than 4 hours, you _should_ be fine to just move on to the next step. After all, this is a testnet and you're only risking Goerli ETH! - -## Installation complete! - -In the [next step](become-a-validator.html#2-submit-your-deposit-to-goerli) you'll need to upload your validator's deposit data. This data is stored in a file called `eth1_deposit_data.rlp`. - -You'll find it in `/home/.lighthouse/validators` -- in the sub-directory that corresponds to your validator's public key. 
- -> For example, if your username is `karlm`, and your validator's public key (aka `voting_pubkey`) is `0x8592c7..`, then you'll find your `eth1_deposit_data.rlp` file in the following directory: -> ->`/home/karlm/.lighthouse/validators/0x8592c7../` - -Once you've located your `eth1_deposit_data.rlp` file, you're ready to move on to [Become a Validator: Step 2](become-a-validator.html#2-submit-your-deposit-to-goerli). diff --git a/book/src/become-a-validator.md b/book/src/become-a-validator.md deleted file mode 100644 index a8f01c38b..000000000 --- a/book/src/become-a-validator.md +++ /dev/null @@ -1,96 +0,0 @@ -# Become an Ethereum 2.0 Validator - -There are two public testnets currently available. [Medalla](https://github.com/goerli/medalla/tree/master/medalla) and [Altona](https://github.com/goerli/medalla/tree/master/altona). Lighthouse supports both out of the box and joining these multi-client testnets is easy if you're familiar with the terminal. - -Lighthouse runs on Linux, MacOS and Windows and has a Docker work-flow to make -things as simple as possible. - -## 0. Acquire Goerli ETH -Before you install Lighthouse, you'll need [Metamask](https://metamask.io/) and 32 gETH -(Goerli ETH). We recommend the [mudit.blog -faucet](https://faucet.goerli.mudit.blog/) for those familiar with Goerli, or -[goerli.net](https://goerli.net/) for an overview of the testnet. - -> If this is your first time using Metamask and/or interacting with an Ethereum test network, we recommend going through the beginning of [this guide](https://hack.aragon.org/docs/guides-use-metamask) first (up to the *Signing your first transaction with MetaMask* section). - -## 1. Install and start Lighthouse - -There are two, different ways to install and start a Lighthouse validator: - -1. [Using `docker-compose`](./become-a-validator-docker.md): this is the easiest method. - -2. [Building from source](./become-a-validator-source.md): this is a little more involved, however it - gives a more hands-on experience. - -Once you've completed **either one** of these steps, you can move onto the next step. - -> Take note when running Lighthouse. Use the --testnet parameter to specify the testnet you whish to participate in. Medalla is currently the default, so make sure to use --testnet altona to join the Altona testnet. - -## 2. Submit your deposit to Goerli - - - - - - - -> This deposit is made using gETH (Goerli ETH) which has no real value. Please don't ever -> send _real_ ETH to our deposit contract! - -## 3. Leave Lighthouse running - -Leave your beacon node and validator client running and you'll see logs as the -beacon node stays synced with the network while the validator client produces -blocks and attestations. - -It will take 4-8+ hours for the beacon chain to process and activate your -validator, however you'll know you're active when the validator client starts -successfully publishing attestations each slot: - -``` -Dec 03 08:49:40.053 INFO Successfully published attestation slot: 98, committee_index: 0, head_block: 0xa208…7fd5, -``` - -Although you'll produce an attestation each slot, it's less common to produce a -block. Watch for the block production logs too: - -``` -Dec 03 08:49:36.225 INFO Successfully published block slot: 98, attestations: 2, deposits: 0, service: block -``` - -If you see any `ERRO` (error) logs, please reach out on -[Discord](https://discord.gg/cyAszAh) or [create an -issue](https://github.com/sigp/lighthouse/issues/new). 
- -Don't forget to checkout the open-source block explorer for the Lighthouse -testnet at -[lighthouse-testnet3.beaconcha.in](https://lighthouse-testnet3.beaconcha.in/). - -Happy staking! - - - - - - diff --git a/book/src/docker.md b/book/src/docker.md index 8a09e2bb4..ad6e2f1a1 100644 --- a/book/src/docker.md +++ b/book/src/docker.md @@ -1,11 +1,7 @@ # Docker Guide This repository has a `Dockerfile` in the root which builds an image with the -`lighthouse` binary installed. - -A pre-built image is available on Docker Hub and the -[sigp/lighthouse](https://github.com/sigp/lighthouse-docker) repository -contains a full-featured `docker-compose` environment. +`lighthouse` binary installed. A pre-built image is available on Docker Hub. ## Obtaining the Docker image @@ -20,10 +16,28 @@ Lighthouse maintains the Docker Hub repository which provides an easy way to run Lighthouse without building the image yourself. +Obtain the latest image with: + +```bash +$ docker pull sigp/lighthouse +``` + Download and test the image with: ```bash -$ docker run sigp/lighthouse lighthouse --help +$ docker run sigp/lighthouse lighthouse --version +``` + +If you can see the latest [Lighthouse +release](https://github.com/sigp/lighthouse/releases) version (see example +below), then you've +successfully installed Lighthouse via Docker. + +#### Example Version Output + +``` +Lighthouse vx.x.xx-xxxxxxxxx +BLS Library: xxxx-xxxxxxx ``` > Note: when you're running the Docker Hub image you're relying upon a diff --git a/book/src/http.md b/book/src/http.md index e07440e8d..700535c2a 100644 --- a/book/src/http.md +++ b/book/src/http.md @@ -1,5 +1,9 @@ # HTTP API +[OpenAPI Specification](https://ethereum.github.io/eth2.0-APIs/#/) + +## Beacon Node + A Lighthouse beacon node can be configured to expose a HTTP server by supplying the `--http` flag. The default listen address is `localhost:5052`. The following CLI flags control the HTTP server: @@ -9,24 +13,10 @@ The following CLI flags control the HTTP server: - `--http-port`: specify the listen port of the server. - `--http-address`: specify the listen address of the server. -The API is logically divided into several core endpoints, each documented in -detail: - -Endpoint | Description | -| --- | -- | -[`/node`](./http/node.md) | General information about the beacon node. -[`/beacon`](./http/beacon.md) | General information about the beacon chain. -[`/validator`](./http/validator.md) | Provides functionality to validator clients. -[`/consensus`](./http/consensus.md) | Proof-of-stake voting statistics. -[`/network`](./http/network.md) | Information about the p2p network. -[`/spec`](./http/spec.md) | Information about the specs that the client is running. -[`/advanced`](./http/advanced.md) | Provides endpoints for advanced inspection of Lighthouse specific objects. -[`/lighthouse`](./http/lighthouse.md) | Provides lighthouse specific endpoints. - -_Please note: The OpenAPI format at -[SwaggerHub: Lighthouse REST -API](https://app.swaggerhub.com/apis-docs/spble/lighthouse_rest_api/0.2.0) has -been **deprecated**. This documentation is now the source of truth for the REST API._ +The schema of the API aligns with the standard Eth2 Beacon Node API as defined +at [github.com/ethereum/eth2.0-APIs](https://github.com/ethereum/eth2.0-APIs). +It is an easy-to-use RESTful HTTP/JSON API. An interactive specification is +available [here](https://ethereum.github.io/eth2.0-APIs/#/). 
## Troubleshooting diff --git a/book/src/http/advanced.md b/book/src/http/advanced.md deleted file mode 100644 index 822b6ffff..000000000 --- a/book/src/http/advanced.md +++ /dev/null @@ -1,115 +0,0 @@ -# Lighthouse REST API: `/advanced` - -The `/advanced` endpoints provide information Lighthouse specific data structures for advanced debugging. - -## Endpoints - -HTTP Path | Description | -| --- | -- | -[`/advanced/fork_choice`](#advancedfork_choice) | Get the `proto_array` fork choice object. -[`/advanced/operation_pool`](#advancedoperation_pool) | Get the Lighthouse `PersistedOperationPool` object. - - -## `/advanced/fork_choice` - -Requests the `proto_array` fork choice object as represented in Lighthouse. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/advanced/fork_choice` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -{ - "prune_threshold": 256, - "justified_epoch": 25, - "finalized_epoch": 24, - "nodes": [ - { - "slot": 544, - "root": "0x27103c56d4427cb4309dd202920ead6381d54d43277c29cf0572ddf0d528e6ea", - "parent": null, - "justified_epoch": 16, - "finalized_epoch": 15, - "weight": 256000000000, - "best_child": 1, - "best_descendant": 296 - }, - { - "slot": 545, - "root": "0x09af0e8d4e781ea4280c9c969d168839c564fab3a03942e7db0bfbede7d4c745", - "parent": 0, - "justified_epoch": 16, - "finalized_epoch": 15, - "weight": 256000000000, - "best_child": 2, - "best_descendant": 296 - }, - ], - "indices": { - "0xb935bb3651eeddcb2d2961bf307156850de982021087062033f02576d5df00a3": 59, - "0x8f4ec47a34c6c1d69ede64d27165d195f7e2a97c711808ce51f1071a6e12d5b9": 189, - "0xf675eba701ef77ee2803a130dda89c3c5673a604d2782c9e25ea2be300d7d2da": 173, - "0x488a483c8d5083faaf5f9535c051b9f373ba60d5a16e77ddb1775f248245b281": 37 - } -} -``` -_Truncated for brevity._ - -## `/advanced/operation_pool` - -Requests the `PersistedOperationPool` object as represented in Lighthouse. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/advanced/operation_pool` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -{ - "attestations": [ - [ - { - "v": [39, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 112, 118, 215, 252, 51, 186, 76, 156, 157, 99, 91, 4, 137, 195, 209, 224, 26, 233, 233, 184, 38, 89, 215, 177, 247, 97, 243, 119, 229, 69, 50, 90, 24, 0, 0, 0, 0, 0, 0, 0, 79, 37, 38, 210, 96, 235, 121, 142, 129, 136, 206, 214, 179, 132, 22, 19, 222, 213, 203, 46, 112, 192, 26, 5, 254, 26, 103, 170, 158, 205, 72, 3, 25, 0, 0, 0, 0, 0, 0, 0, 164, 50, 214, 67, 98, 13, 50, 180, 108, 232, 248, 109, 128, 45, 177, 23, 221, 24, 218, 211, 8, 152, 172, 120, 24, 86, 198, 103, 68, 164, 67, 202, 1, 0, 0, 0, 0, 0, 0, 0] - }, - [ - { - "aggregation_bits": "0x03", - "data": { - "slot": 807, - "index": 0, - "beacon_block_root": "0x7076d7fc33ba4c9c9d635b0489c3d1e01ae9e9b82659d7b1f761f377e545325a", - "source": { - "epoch": 24, - "root": "0x4f2526d260eb798e8188ced6b3841613ded5cb2e70c01a05fe1a67aa9ecd4803" - }, - "target": { - "epoch": 25, - "root": "0xa432d643620d32b46ce8f86d802db117dd18dad30898ac781856c66744a443ca" - } - }, - "signature": "0x8b1d624b0cd5a7a0e13944e90826878a230e3901db34ea87dbef5b145ade2fedbc830b6752a38a0937a1594211ab85b615d65f9eef0baccd270acca945786036695f4db969d9ff1693c505c0fe568b2fe9831ea78a74cbf7c945122231f04026" - } - ] - ] - ], - "attester_slashings": [], - "proposer_slashings": [], - "voluntary_exits": [] -} -``` -_Truncated for brevity._ diff --git a/book/src/http/beacon.md b/book/src/http/beacon.md deleted file mode 100644 index 2149f4444..000000000 --- a/book/src/http/beacon.md +++ /dev/null @@ -1,784 +0,0 @@ -# Lighthouse REST API: `/beacon` - -The `/beacon` endpoints provide information about the canonical head of the -beacon chain and also historical information about beacon blocks and states. - -## Endpoints - -HTTP Path | Description | -| --- | -- | -[`/beacon/head`](#beaconhead) | Info about the block at the head of the chain. -[`/beacon/heads`](#beaconheads) | Returns a list of all known chain heads. -[`/beacon/block`](#beaconblock) | Get a `BeaconBlock` by slot or root. -[`/beacon/block_root`](#beaconblock_root) | Resolve a slot to a block root. -[`/beacon/fork`](#beaconfork) | Get the fork of the head of the chain. -[`/beacon/genesis_time`](#beacongenesis_time) | Get the genesis time from the beacon state. -[`/beacon/genesis_validators_root`](#beacongenesis_validators_root) | Get the genesis validators root. -[`/beacon/validators`](#beaconvalidators) | Query for one or more validators. -[`/beacon/validators/all`](#beaconvalidatorsall) | Get all validators. -[`/beacon/validators/active`](#beaconvalidatorsactive) | Get all active validators. -[`/beacon/state`](#beaconstate) | Get a `BeaconState` by slot or root. -[`/beacon/state_root`](#beaconstate_root) | Resolve a slot to a state root. -[`/beacon/state/genesis`](#beaconstategenesis) | Get a `BeaconState` at genesis. -[`/beacon/committees`](#beaconcommittees) | Get the shuffling for an epoch. -[`/beacon/proposer_slashing`](#beaconproposer_slashing) | Insert a proposer slashing -[`/beacon/attester_slashing`](#beaconattester_slashing) | Insert an attester slashing - -## `/beacon/head` - -Requests information about the head of the beacon chain, from the node's -perspective. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/head` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -{ - "slot": 37923, - "block_root": "0xe865d4805395a0776b8abe46d714a9e64914ab8dc5ff66624e5a1776bcc1684b", - "state_root": "0xe500e3567ab273c9a6f8a057440deff476ab236f0983da27f201ee9494a879f0", - "finalized_slot": 37856, - "finalized_block_root": "0xbdae152b62acef1e5c332697567d2b89e358628790b8273729096da670b23e86", - "justified_slot": 37888, - "justified_block_root": "0x01c2f516a407d8fdda23cad4ed4381e4ab8913d638f935a2fe9bd00d6ced5ec4", - "previous_justified_slot": 37856, - "previous_justified_block_root": "0xbdae152b62acef1e5c332697567d2b89e358628790b8273729096da670b23e86" -} -``` - -## `/beacon/heads` - -Returns the roots of all known head blocks. Only one of these roots is the -canonical head and that is decided by the fork choice algorithm. See [`/beacon/head`](#beaconhead) for the canonical head. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/heads` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -[ - { - "beacon_block_root": "0x226b2fd7c5f3d31dbb21444b96dfafe715f0017cd16545ecc4ffa87229496a69", - "beacon_block_slot": 38373 - }, - { - "beacon_block_root": "0x41ed5b253c4fc841cba8a6d44acbe101866bc674c3cfa3c4e9f7388f465aa15b", - "beacon_block_slot": 38375 - } -] -``` - -## `/beacon/block` - -Request that the node return a beacon chain block that matches the provided -criteria (a block `root` or beacon chain `slot`). Only one of the parameters -should be provided as a criteria. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/block` -Method | GET -JSON Encoding | Object -Query Parameters | `slot`, `root` -Typical Responses | 200, 404 - -### Parameters - -Accepts **only one** of the following parameters: - -- `slot` (`Slot`): Query by slot number. Any block returned must be in the canonical chain (i.e., -either the head or an ancestor of the head). -- `root` (`Bytes32`): Query by tree hash root. A returned block is not required to be in the -canonical chain. - -### Returns - -Returns an object containing a single [`SignedBeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#signedbeaconblock) and the block root of the inner [`BeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#beaconblock). 
- -### Example Response - -```json -{ - "root": "0xc35ddf4e71c31774e0594bd7eb32dfe50b54dbc40abd594944254b4ec8895196", - "beacon_block": { - "message": { - "slot": 0, - "proposer_index": 14, - "parent_root": "0x0000000000000000000000000000000000000000000000000000000000000000", - "state_root": "0xf15690b6be4ed42ea1ee0741eb4bfd4619d37be8229b84b4ddd480fb028dcc8f", - "body": { - "randao_reveal": "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "eth1_data": { - "deposit_root": "0x0000000000000000000000000000000000000000000000000000000000000000", - "deposit_count": 0, - "block_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" - }, - "graffiti": "0x0000000000000000000000000000000000000000000000000000000000000000", - "proposer_slashings": [], - "attester_slashings": [], - "attestations": [], - "deposits": [], - "voluntary_exits": [] - } - }, - "signature": "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" - } -} -``` - -## `/beacon/block_root` - -Returns the block root for the given slot in the canonical chain. If there -is a re-org, the same slot may return a different root. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/block_root` -Method | GET -JSON Encoding | Object -Query Parameters | `slot` -Typical Responses | 200, 404 - -## Parameters - -- `slot` (`Slot`): the slot to be resolved to a root. - -### Example Response - -```json -"0xc35ddf4e71c31774e0594bd7eb32dfe50b54dbc40abd594944254b4ec8895196" -``` - -## `/beacon/committees` - -Request the committees (a.k.a. "shuffling") for all slots and committee indices -in a given `epoch`. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/committees` -Method | GET -JSON Encoding | Object -Query Parameters | `epoch` -Typical Responses | 200/500 - -### Parameters - -The `epoch` (`Epoch`) query parameter is required and defines the epoch for -which the committees will be returned. All slots contained within the response will -be inside this epoch. - -### Returns - -A list of beacon committees. - -### Example Response - -```json -[ - { - "slot": 4768, - "index": 0, - "committee": [ - 1154, - 492, - 9667, - 3089, - 8987, - 1421, - 224, - 11243, - 2127, - 2329, - 188, - 482, - 486 - ] - }, - { - "slot": 4768, - "index": 1, - "committee": [ - 5929, - 8482, - 5528, - 6130, - 14343, - 9777, - 10808, - 12739, - 15234, - 12819, - 5423, - 6320, - 9991 - ] - } -] -``` - -_Truncated for brevity._ - -## `/beacon/fork` - -Request that the node return the `fork` of the current head. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/fork` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - - -### Returns - -Returns an object containing the [`Fork`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#fork) of the current head. - -### Example Response - -```json -{ - "previous_version": "0x00000000", - "current_version": "0x00000000", - "epoch": 0 -} -``` - -## `/beacon/genesis_time` - -Request that the node return the genesis time from the beacon state. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/genesis_time` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - - -### Returns - -Returns an object containing the genesis time. - -### Example Response - -```json -1581576353 -``` - -## `/beacon/genesis_validators_root` - -Request that the node return the genesis validators root from the beacon state. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/genesis_validators_root` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - - -### Returns - -Returns an object containing the genesis validators root. - -### Example Response - -```json -0x4fbf23439a7a9b9dd91650e64e8124012dde5e2ea2940c552b86f04eb47f95de -``` - -## `/beacon/validators` - -Request that the node returns information about one or more validator public -keys. This request takes the form of a `POST` request to allow sending a large -number of pubkeys in the request. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/validators` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Request Body - -Expects the following object in the POST request body: - -``` -{ - state_root: Bytes32, - pubkeys: [PublicKey] -} -``` - -The `state_root` field indicates which `BeaconState` should be used to collect -the information. The `state_root` is optional and omitting it will result in -the canonical head state being used. - - -### Returns - -Returns an object describing several aspects of the given validator. - -### Example - -### Request Body - -```json -{ - "pubkeys": [ - "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42" - ] -} -``` - -_Note: for demonstration purposes the second pubkey is some unknown pubkey._ - -### Response Body - -```json -[ - { - "pubkey": "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "validator_index": 14935, - "balance": 3228885987, - "validator": { - "pubkey": "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "withdrawal_credentials": "0x00b7bec22d5bda6b2cca1343d4f640d0e9ccc204a06a73703605c590d4c0d28e", - "effective_balance": 3200000000, - "slashed": false, - "activation_eligibility_epoch": 0, - "activation_epoch": 0, - "exit_epoch": 18446744073709551615, - "withdrawable_epoch": 18446744073709551615 - } - }, - { - "pubkey": "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42", - "validator_index": null, - "balance": null, - "validator": null - } -] -``` - -## `/beacon/validators/all` - -Returns all validators. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/validators/all` -Method | GET -JSON Encoding | Object -Query Parameters | `state_root` (optional) -Typical Responses | 200 - -### Parameters - -The optional `state_root` (`Bytes32`) query parameter indicates which -`BeaconState` should be used to collect the information. When omitted, the -canonical head state will be used. - -### Returns - -The return format is identical to the [`/beacon/validators`](#beaconvalidators) response body. 
- - -## `/beacon/validators/active` - -Returns all validators that are active in the state defined by `state_root`. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/validators/active` -Method | GET -JSON Encoding | Object -Query Parameters | `state_root` (optional) -Typical Responses | 200 - -### Parameters - -The optional `state_root` (`Bytes32`) query parameter indicates which -`BeaconState` should be used to collect the information. When omitted, the -canonical head state will be used. - -### Returns - -The return format is identical to the [`/beacon/validators`](#beaconvalidators) response body. - - -## `/beacon/state` - -Request that the node return a beacon chain state that matches the provided -criteria (a state `root` or beacon chain `slot`). Only one of the parameters -should be provided as a criteria. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/state` -Method | GET -JSON Encoding | Object -Query Parameters | `slot`, `root` -Typical Responses | 200, 404 - -### Parameters - -Accepts **only one** of the following parameters: - -- `slot` (`Slot`): Query by slot number. Any state returned must be in the canonical chain (i.e., -either the head or an ancestor of the head). -- `root` (`Bytes32`): Query by tree hash root. A returned state is not required to be in the -canonical chain. - -### Returns - -Returns an object containing a single -[`BeaconState`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#beaconstate) -and its tree hash root. - -### Example Response - -```json -{ - "root": "0x528e54ca5d4c957729a73f40fc513ae312e054c7295775c4a2b21f423416a72b", - "beacon_state": { - "genesis_time": 1575652800, - "genesis_validators_root": "0xa8a9226edee1b2627fb4117d7dea4996e64dec2998f37f6e824f74f2ce39a538", - "slot": 18478 - } -} -``` - -_Truncated for brevity._ - -## `/beacon/state_root` - -Returns the state root for the given slot in the canonical chain. If there -is a re-org, the same slot may return a different root. - - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/state_root` -Method | GET -JSON Encoding | Object -Query Parameters | `slot` -Typical Responses | 200, 404 - -## Parameters - -- `slot` (`Slot`): the slot to be resolved to a root. - -### Example Response - -```json -"0xf15690b6be4ed42ea1ee0741eb4bfd4619d37be8229b84b4ddd480fb028dcc8f" -``` - -## `/beacon/state/genesis` - -Request that the node return a beacon chain state at genesis (slot 0). - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/state/genesis` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - - -### Returns - -Returns an object containing the genesis -[`BeaconState`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#beaconstate). - -### Example Response - -```json -{ - "genesis_time": 1581576353, - "slot": 0, - "fork": { - "previous_version": "0x00000000", - "current_version": "0x00000000", - "epoch": 0 - }, -} -``` - -_Truncated for brevity._ - - -## `/beacon/state/committees` - -Request that the node return a beacon chain state at genesis (slot 0). - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/state/genesis` -Method | GET -JSON Encoding | Object -Query Parameters | `epoch` -Typical Responses | 200 - - -### Returns - -Returns an object containing the committees for a given epoch. 
- -### Example Response - -```json -[ - {"slot":64,"index":0,"committee":[]}, - {"slot":65,"index":0,"committee":[3]}, - {"slot":66,"index":0,"committee":[]}, - {"slot":67,"index":0,"committee":[14]}, - {"slot":68,"index":0,"committee":[]}, - {"slot":69,"index":0,"committee":[9]}, - {"slot":70,"index":0,"committee":[]}, - {"slot":71,"index":0,"committee":[11]}, - {"slot":72,"index":0,"committee":[]}, - {"slot":73,"index":0,"committee":[5]}, - {"slot":74,"index":0,"committee":[]}, - {"slot":75,"index":0,"committee":[15]}, - {"slot":76,"index":0,"committee":[]}, - {"slot":77,"index":0,"committee":[0]} -] -``` - -_Truncated for brevity._ - - -## `/beacon/attester_slashing` - -Accepts an `attester_slashing` and verifies it. If it is valid, it is added to the operations pool for potential inclusion in a future block. Returns a 400 error if the `attester_slashing` is invalid. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/attester_slashing` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200/400 - -### Parameters - -Expects the following object in the POST request body: - -``` -{ - attestation_1: { - attesting_indices: [u64], - data: { - slot: Slot, - index: u64, - beacon_block_root: Bytes32, - source: { - epoch: Epoch, - root: Bytes32 - }, - target: { - epoch: Epoch, - root: Bytes32 - } - } - signature: Bytes32 - }, - attestation_2: { - attesting_indices: [u64], - data: { - slot: Slot, - index: u64, - beacon_block_root: Bytes32, - source: { - epoch: Epoch, - root: Bytes32 - }, - target: { - epoch: Epoch, - root: Bytes32 - } - } - signature: Bytes32 - } -} -``` - -### Returns - -Returns `true` if the attester slashing was inserted successfully, or the corresponding error if it failed. - -### Example - -### Request Body - -```json -{ - "attestation_1": { - "attesting_indices": [0], - "data": { - "slot": 1, - "index": 0, - "beacon_block_root": "0x0000000000000000000000000000000000000000000000000100000000000000", - "source": { - "epoch": 1, - "root": "0x0000000000000000000000000000000000000000000000000100000000000000" - }, - "target": { - "epoch": 1, - "root": "0x0000000000000000000000000000000000000000000000000100000000000000" - } - }, - "signature": "0xb47f7397cd944b8d5856a13352166bbe74c85625a45b14b7347fc2c9f6f6f82acee674c65bc9ceb576fcf78387a6731c0b0eb3f8371c70db2da4e7f5dfbc451730c159d67263d3db56b6d0e009e4287a8ba3efcacac30b3ae3447e89dc71b5b9" - }, - "attestation_2": { - "attesting_indices": [0], - "data": { - "slot": 1, - "index": 0, - "beacon_block_root": "0x0000000000000000000000000000000000000000000000000100000000000000", - "source": { - "epoch": 1, - "root": "0x0000000000000000000000000000000000000000000000000100000000000000" - }, - "target": { - "epoch": 1, - "root": "0x0000000000000000000000000000000000000000000000000200000000000000" - } - }, - "signature": "0x93fef587a63acf72aaf8df627718fd43cb268035764071f802ffb4370a2969d226595cc650f4c0bf2291ae0c0a41fcac1700f318603d75d34bcb4b9f4a8368f61eeea0e1f5d969d92d5073ba5fbadec102b45ec87d418d25168d2e3c74b9fcbb" - } -} -``` - -_Note: data sent here is for demonstration purposes only_ - - - -## `/beacon/proposer_slashing` - -Accepts a `proposer_slashing` and verifies it. If it is valid, it is added to the operations pool for potential inclusion in a future block. Returns an 400 error if the `proposer_slashing` is invalid. 
-
-### HTTP Specification
-
-| Property | Specification |
-| --- |--- |
-Path | `/beacon/proposer_slashing`
-Method | POST
-JSON Encoding | Object
-Query Parameters | None
-Typical Responses | 200/400
-
-### Request Body
-
-Expects the following object in the POST request body:
-
-```
-{
-  proposer_index: u64,
-  header_1: {
-    slot: Slot,
-    parent_root: Bytes32,
-    state_root: Bytes32,
-    body_root: Bytes32,
-    signature: Bytes96
-  },
-  header_2: {
-    slot: Slot,
-    parent_root: Bytes32,
-    state_root: Bytes32,
-    body_root: Bytes32,
-    signature: Bytes96
-  }
-}
-```
-
-### Returns
-
-Returns `true` if the proposer slashing was inserted successfully, or the corresponding error if it failed.
-
-### Example
-
-### Request Body
-
-```json
-{
-  "proposer_index": 0,
-  "header_1": {
-    "slot": 0,
-    "parent_root": "0x0101010101010101010101010101010101010101010101010101010101010101",
-    "state_root": "0x0101010101010101010101010101010101010101010101010101010101010101",
-    "body_root": "0x0101010101010101010101010101010101010101010101010101010101010101",
-    "signature": "0xb8970d1342c6d5779c700ec366efd0ca819937ca330960db3ca5a55eb370a3edd83f4cbb2f74d06e82f934fcbd4bb80609a19c2254cc8b3532a4efff9e80edf312ac735757c059d77126851e377f875593e64ba50d1dffe69a809a409202dd12"
-  },
-  "header_2": {
-    "slot": 0,
-    "parent_root": "0x0202020202020202020202020202020202020202020202020202020202020202",
-    "state_root": "0x0101010101010101010101010101010101010101010101010101010101010101",
-    "body_root": "0x0101010101010101010101010101010101010101010101010101010101010101",
-    "signature": "0xb60e6b348698a34e59b22e0af96f8809f977f00f95d52375383ade8d22e9102270a66c6d52b0434214897e11ca4896871510c01b3fd74d62108a855658d5705fcfc4ced5136264a1c6496f05918576926aa191b1ad311b7e27f5aa2167aba294"
-  }
-}
-```
-
-_Note: data sent here is for demonstration purposes only_
-
-
-
-
diff --git a/book/src/http/lighthouse.md b/book/src/http/lighthouse.md
deleted file mode 100644
index d80c0f694..000000000
--- a/book/src/http/lighthouse.md
+++ /dev/null
@@ -1,182 +0,0 @@
-# Lighthouse REST API: `/lighthouse`
-
-The `/lighthouse` endpoints provide Lighthouse-specific information about the beacon node.
-
-## Endpoints
-
-HTTP Path | Description |
-| --- | -- |
-[`/lighthouse/syncing`](#lighthousesyncing) | Get the node's syncing status
-[`/lighthouse/peers`](#lighthousepeers) | Get the peers info known by the beacon node
-[`/lighthouse/connected_peers`](#lighthouseconnected_peers) | Get info for the peers currently connected to the beacon node
-
-## `/lighthouse/syncing`
-
-Requests the syncing state of a Lighthouse beacon node. Lighthouse has a
-custom sync protocol; this request gets Lighthouse-specific sync information.
-
-### HTTP Specification
-
-| Property | Specification |
-| --- |--- |
-Path | `/lighthouse/syncing`
-Method | GET
-JSON Encoding | Object
-Query Parameters | None
-Typical Responses | 200
-
-### Example Response
-
-If the node is undergoing a finalization sync:
-
-```json
-{
-  "SyncingFinalized": {
-    "start_slot": 10,
-    "head_slot": 20,
-    "head_root": "0x74020d0e3c3c02d2ea6279d5760f7d0dd376c4924beaaec4d5c0cefd1c0c4465"
-  }
-}
-```
-
-If the node is undergoing a head chain sync:
-
-```json
-{
-  "SyncingHead": {
-    "start_slot": 0,
-    "head_slot": 1195
-  }
-}
-```
-
-If the node is synced:
-
-```json
-"Synced"
-```
-
-## `/lighthouse/peers`
-
-Get info for all peers known to the beacon node.
-
-### HTTP Specification
-
-| Property | Specification |
-| --- |--- |
-Path | `/lighthouse/peers`
-Method | GET
-JSON Encoding | Object
-Query Parameters | None
-Typical Responses | 200
-
-### Example Response
-
-```json
-[
-  {
-    "peer_id" : "16Uiu2HAmTEinipUS3haxqucrn7d7SmCKx5XzAVbAZCiNW54ncynG",
-    "peer_info" : {
-      "_status" : "Healthy",
-      "client" : {
-        "agent_string" : "github.com/libp2p/go-libp2p",
-        "kind" : "Prysm",
-        "os_version" : "unknown",
-        "protocol_version" : "ipfs/0.1.0",
-        "version" : "unknown"
-      },
-      "connection_status" : {
-        "Disconnected" : {
-          "since" : 3
-        }
-      },
-      "listening_addresses" : [
-        "/ip4/10.3.58.241/tcp/9001",
-        "/ip4/35.172.14.146/tcp/9001",
-        "/ip4/35.172.14.146/tcp/9001"
-      ],
-      "meta_data" : {
-        "attnets" : "0x0000000000000000",
-        "seq_number" : 0
-      },
-      "reputation" : 20,
-      "sync_status" : {
-        "Synced" : {
-          "status_head_slot" : 18146
-        }
-      }
-    }
-  },
-  {
-    "peer_id" : "16Uiu2HAm8XZfPv3YjktCjitSRtfS7UfHfEvpiUyHrdiX6uAD55xZ",
-    "peer_info" : {
-      "_status" : "Healthy",
-      "client" : {
-        "agent_string" : null,
-        "kind" : "Unknown",
-        "os_version" : "unknown",
-        "protocol_version" : "unknown",
-        "version" : "unknown"
-      },
-      "connection_status" : {
-        "Disconnected" : {
-          "since" : 5
-        }
-      },
-      "listening_addresses" : [],
-      "meta_data" : {
-        "attnets" : "0x0900000000000000",
-        "seq_number" : 0
-      },
-      "reputation" : 20,
-      "sync_status" : "Unknown"
-    }
-  }
-]
-```
-
-## `/lighthouse/connected_peers`
-
-Get info for the peers currently connected to the beacon node.
-
-### HTTP Specification
-
-| Property | Specification |
-| --- |--- |
-Path | `/lighthouse/connected_peers`
-Method | GET
-JSON Encoding | Object
-Query Parameters | None
-Typical Responses | 200
-
-### Example Response
-
-```json
-[
-  {
-    "peer_id" : "16Uiu2HAm8XZfPv3YjktCjitSRtfS7UfHfEvpiUyHrdiX6uAD55xZ",
-    "peer_info" : {
-      "_status" : "Healthy",
-      "client" : {
-        "agent_string" : null,
-        "kind" : "Unknown",
-        "os_version" : "unknown",
-        "protocol_version" : "unknown",
-        "version" : "unknown"
-      },
-      "connection_status" : {
-        "Connected" : {
-          "in" : 5,
-          "out" : 2
-        }
-      },
-      "listening_addresses" : [],
-      "meta_data" : {
-        "attnets" : "0x0900000000000000",
-        "seq_number" : 0
-      },
-      "reputation" : 20,
-      "sync_status" : "Unknown"
-    }
-  }
-]
-```
diff --git a/book/src/http/network.md b/book/src/http/network.md
deleted file mode 100644
index 2ac0c83ba..000000000
--- a/book/src/http/network.md
+++ /dev/null
@@ -1,148 +0,0 @@
-# Lighthouse REST API: `/network`
-
-The `/network` endpoints provide information about the p2p network that
-Lighthouse uses to communicate with other beacon nodes.
-
-## Endpoints
-
-HTTP Path | Description |
-| --- | -- |
-[`/network/enr`](#networkenr) | Get the local node's `ENR` as base64.
-[`/network/peer_count`](#networkpeer_count) | Get the count of connected peers.
-[`/network/peer_id`](#networkpeer_id) | Get a node's libp2p `PeerId`.
-[`/network/peers`](#networkpeers) | List a node's connected peers (as `PeerIds`).
-[`/network/listen_port`](#networklisten_port) | Get a node's libp2p listening port.
-[`/network/listen_addresses`](#networklisten_addresses) | Get a list of libp2p multiaddrs the node is listening on.
-
-## `/network/enr`
-
-Requests the beacon node's listening `ENR` address.
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/network/enr` -Method | GET -JSON Encoding | String (base64) -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -"-IW4QPYyGkXJSuJ2Eji8b-m4PTNrW4YMdBsNOBrYAdCk8NLMJcddAiQlpcv6G_hdNjiLACOPTkqTBhUjnC0wtIIhyQkEgmlwhKwqAPqDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhA1sBKo0yCfw4Z_jbggwflNfftjwKACu-a-CoFAQHJnrm" -``` - -## `/network/peer_count` - -Requests the count of peers connected to the client. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/network/peer_count` -Method | GET -JSON Encoding | Number -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -5 -``` -## `/network/peer_id` - -Requests the beacon node's local `PeerId`. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/network/peer_id` -Method | GET -JSON Encoding | String (base58) -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -"QmVFcULBYZecPdCKgGmpEYDqJLqvMecfhJadVBtB371Avd" -``` - -## `/network/peers` - -Requests one `MultiAddr` for each peer connected to the beacon node. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/network/peers` -Method | GET -JSON Encoding | [String] (base58) -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -[ - "QmaPGeXcfKFMU13d8VgbnnpeTxcvoFoD9bUpnRGMUJ1L9w", - "QmZt47cP8V96MgiS35WzHKpPbKVBMqr1eoBNTLhQPqpP3m" -] -``` - - -## `/network/listen_port` - -Requests the TCP port that the client's libp2p service is listening on. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/network/listen_port` -Method | GET -JSON Encoding | Number -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -9000 -``` - -## `/network/listen_addresses` - -Requests the list of multiaddr that the client's libp2p service is listening on. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/network/listen_addresses` -Method | GET -JSON Encoding | Array -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -[ - "/ip4/127.0.0.1/tcp/9000", - "/ip4/192.168.31.115/tcp/9000", - "/ip4/172.24.0.1/tcp/9000", - "/ip4/172.21.0.1/tcp/9000", - "/ip4/172.17.0.1/tcp/9000", - "/ip4/172.18.0.1/tcp/9000", - "/ip4/172.19.0.1/tcp/9000", - "/ip4/172.42.0.1/tcp/9000", - "/ip6/::1/tcp/9000" -] -``` diff --git a/book/src/http/node.md b/book/src/http/node.md deleted file mode 100644 index ae370cbe9..000000000 --- a/book/src/http/node.md +++ /dev/null @@ -1,91 +0,0 @@ -# Lighthouse REST API: `/node` - -The `/node` endpoints provide information about the lighthouse beacon node. - -## Endpoints - -HTTP Path | Description | -| --- | -- | -[`/node/version`](#nodeversion) | Get the node's version. -[`/node/syncing`](#nodesyncing) | Get the node's syncing status. -[`/node/health`](#nodehealth) | Get the node's health. - -## `/node/version` - -Requests the beacon node's version. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/node/version` -Method | GET -JSON Encoding | String -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -"Lighthouse-0.2.0-unstable" -``` - -## `/node/syncing` - -Requests the syncing status of the beacon node. 
-
-### HTTP Specification
-
-| Property | Specification |
-| --- |--- |
-Path | `/node/syncing`
-Method | GET
-JSON Encoding | Object
-Query Parameters | None
-Typical Responses | 200
-
-### Example Response
-
-```json
-{
-  "is_syncing": true,
-  "sync_status": {
-    "starting_slot": 0,
-    "current_slot": 100,
-    "highest_slot": 200
-  }
-}
-```
-
-## `/node/health`
-
-Requests information about the health of the beacon node.
-
-### HTTP Specification
-
-| Property | Specification |
-| --- |--- |
-Path | `/node/health`
-Method | GET
-JSON Encoding | Object
-Query Parameters | None
-Typical Responses | 200
-
-### Example Response
-
-```json
-{
-  "pid": 96160,
-  "pid_num_threads": 30,
-  "pid_mem_resident_set_size": 55476224,
-  "pid_mem_virtual_memory_size": 2081382400,
-  "sys_virt_mem_total": 16721076224,
-  "sys_virt_mem_available": 7423197184,
-  "sys_virt_mem_used": 8450183168,
-  "sys_virt_mem_free": 3496345600,
-  "sys_virt_mem_percent": 55.605743,
-  "sys_loadavg_1": 1.56,
-  "sys_loadavg_5": 2.61,
-  "sys_loadavg_15": 2.43
-}
-```
diff --git a/book/src/http/spec.md b/book/src/http/spec.md
deleted file mode 100644
index 619a1d4e3..000000000
--- a/book/src/http/spec.md
+++ /dev/null
@@ -1,154 +0,0 @@
-# Lighthouse REST API: `/spec`
-
-The `/spec` endpoints provide information about the Eth2.0 specification that the node is running.
-
-## Endpoints
-
-HTTP Path | Description |
-| --- | -- |
-[`/spec`](#spec) | Get the full spec object that the node is running.
-[`/spec/slots_per_epoch`](#specslots_per_epoch) | Get the number of slots per epoch.
-[`/spec/eth2_config`](#speceth2_config) | Get the full Eth2 config object.
-
-## `/spec`
-
-Requests the full spec object that the node is running.
-
-### HTTP Specification
-
-| Property | Specification |
-| --- |--- |
-Path | `/spec`
-Method | GET
-JSON Encoding | Object
-Query Parameters | None
-Typical Responses | 200
-
-### Example Response
-
-```json
-{
-  "genesis_slot": 0,
-  "base_rewards_per_epoch": 4,
-  "deposit_contract_tree_depth": 32,
-  "max_committees_per_slot": 64,
-  "target_committee_size": 128,
-  "min_per_epoch_churn_limit": 4,
-  "churn_limit_quotient": 65536,
-  "shuffle_round_count": 90,
-  "min_genesis_active_validator_count": 16384,
-  "min_genesis_time": 1578009600,
-  "min_deposit_amount": 1000000000,
-  "max_effective_balance": 32000000000,
-  "ejection_balance": 16000000000,
-  "effective_balance_increment": 1000000000,
-  "genesis_fork_version": "0x00000000",
-  "bls_withdrawal_prefix_byte": "0x00",
-  "genesis_delay": 172800,
-  "milliseconds_per_slot": 12000,
-  "min_attestation_inclusion_delay": 1,
-  "min_seed_lookahead": 1,
-  "max_seed_lookahead": 4,
-  "min_epochs_to_inactivity_penalty": 4,
-  "min_validator_withdrawability_delay": 256,
-  "shard_committee_period": 2048,
-  "base_reward_factor": 64,
-  "whistleblower_reward_quotient": 512,
-  "proposer_reward_quotient": 8,
-  "inactivity_penalty_quotient": 33554432,
-  "min_slashing_penalty_quotient": 32,
-  "domain_beacon_proposer": 0,
-  "domain_beacon_attester": 1,
-  "domain_randao": 2,
-  "domain_deposit": 3,
-  "domain_voluntary_exit": 4,
-  "safe_slots_to_update_justified": 8,
-  "eth1_follow_distance": 1024,
-  "seconds_per_eth1_block": 14,
-  "boot_nodes": [],
-  "network_id": 1
-}
-```
-
-## `/spec/eth2_config`
-
-Requests the full `Eth2Config` object.
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/spec/eth2_config` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -{ - "spec_constants": "mainnet", - "spec": { - "genesis_slot": 0, - "base_rewards_per_epoch": 4, - "deposit_contract_tree_depth": 32, - "max_committees_per_slot": 64, - "target_committee_size": 128, - "min_per_epoch_churn_limit": 4, - "churn_limit_quotient": 65536, - "shuffle_round_count": 90, - "min_genesis_active_validator_count": 16384, - "min_genesis_time": 1578009600, - "min_deposit_amount": 1000000000, - "max_effective_balance": 32000000000, - "ejection_balance": 16000000000, - "effective_balance_increment": 1000000000, - "genesis_fork_version": "0x00000000", - "bls_withdrawal_prefix_byte": "0x00", - "genesis_delay": 172800, - "milliseconds_per_slot": 12000, - "min_attestation_inclusion_delay": 1, - "min_seed_lookahead": 1, - "max_seed_lookahead": 4, - "min_epochs_to_inactivity_penalty": 4, - "min_validator_withdrawability_delay": 256, - "shard_committee_period": 2048, - "base_reward_factor": 64, - "whistleblower_reward_quotient": 512, - "proposer_reward_quotient": 8, - "inactivity_penalty_quotient": 33554432, - "min_slashing_penalty_quotient": 32, - "domain_beacon_proposer": 0, - "domain_beacon_attester": 1, - "domain_randao": 2, - "domain_deposit": 3, - "domain_voluntary_exit": 4, - "safe_slots_to_update_justified": 8, - "eth1_follow_distance": 1024, - "seconds_per_eth1_block": 14, - "boot_nodes": [], - "network_id": 1 - } -} -``` - -## `/spec/slots_per_epoch` - -Requests the `SLOTS_PER_EPOCH` parameter from the specs that the node is running. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/spec/slots_per_epoch` -Method | GET -JSON Encoding | Number -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -32 -``` \ No newline at end of file diff --git a/book/src/http/validator.md b/book/src/http/validator.md deleted file mode 100644 index eff0c6095..000000000 --- a/book/src/http/validator.md +++ /dev/null @@ -1,545 +0,0 @@ -# Lighthouse REST API: `/validator` - -The `/validator` endpoints provide the minimum functionality required for a validator -client to connect to the beacon node and produce blocks and attestations. - -## Endpoints - -HTTP Path | HTTP Method | Description | -| - | - | ---- | -[`/validator/duties`](#validatorduties) | POST | Provides block and attestation production information for validators. -[`/validator/subscribe`](#validatorsubscribe) | POST | Subscribes a list of validators to the beacon node for a particular duty/slot. -[`/validator/duties/all`](#validatordutiesall) | GET |Provides block and attestation production information for all validators. -[`/validator/duties/active`](#validatordutiesactive) | GET | Provides block and attestation production information for all active validators. -[`/validator/block`](#validatorblock-get) | GET | Retrieves the current beacon block for the validator to publish. -[`/validator/block`](#validatorblock-post) | POST | Publishes a signed block to the network. -[`/validator/attestation`](#validatorattestation) | GET | Retrieves the current best attestation for a validator to publish. -[`/validator/aggregate_attestation`](#validatoraggregate_attestation) | GET | Gets an aggregate attestation for validators to sign and publish. 
-[`/validator/attestations`](#validatorattestations) | POST | Publishes a list of raw unaggregated attestations to their appropriate subnets.
-[`/validator/aggregate_and_proofs`](#validatoraggregate_and_proofs) | POST | Publishes a list of signed aggregate and proofs for validators who are aggregators.
-
-## `/validator/duties`
-
-Request information about when a validator must produce blocks and attestations
-at some given `epoch`. The information returned always refers to the canonical
-chain and the same input parameters may yield different results after a re-org.
-
-### HTTP Specification
-
-| Property | Specification |
-| --- |--- |
-Path | `/validator/duties`
-Method | POST
-JSON Encoding | Object
-Query Parameters | None
-Typical Responses | 200
-
-### Request Body
-
-Expects the following object in the POST request body:
-
-```
-{
-  epoch: Epoch,
-  pubkeys: [PublicKey]
-}
-```
-
-Duties are assigned on a per-epoch basis; all duties returned will contain
-slots that are inside the given `epoch`. A set of duties will be returned for
-each of the `pubkeys`.
-
-Validators who are not known to the beacon chain (e.g., have not yet deposited)
-will have `null` values for most fields.
-
-### Returns
-
-A set of duties for each given pubkey.
-
-### Example
-
-#### Request Body
-
-```json
-{
-  "epoch": 1203,
-  "pubkeys": [
-    "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16",
-    "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42"
-  ]
-}
-```
-
-_Note: for demonstration purposes the second pubkey is some unknown pubkey._
-
-#### Response Body
-
-```json
-[
-  {
-    "validator_pubkey": "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16",
-    "validator_index": 14935,
-    "attestation_slot": 38511,
-    "attestation_committee_index": 3,
-    "attestation_committee_position": 39,
-    "block_proposal_slots": [],
-    "aggregator_modulo": 5
-  },
-  {
-    "validator_pubkey": "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42",
-    "validator_index": null,
-    "attestation_slot": null,
-    "attestation_committee_index": null,
-    "attestation_committee_position": null,
-    "block_proposal_slots": [],
-    "aggregator_modulo": null
-  }
-]
-```
-
-## `/validator/duties/all`
-
-Returns the duties for all validators, equivalent to calling [Validator
-Duties](#validator-duties) while providing all known validator public keys.
-
-Considering that duties for non-active validators will just be `null`, it is
-generally more efficient to query using [Active Validator
-Duties](#active-validator-duties).
-
-This endpoint will only return validators that were in the beacon state
-in the given epoch. For example, if the query epoch is 10 and some validator
-deposit was included in epoch 11, that validator will not be included in the
-result.
-
-### HTTP Specification
-
-| Property | Specification |
-| --- |--- |
-Path | `/validator/duties/all`
-Method | GET
-JSON Encoding | Object
-Query Parameters | `epoch`
-Typical Responses | 200
-
-### Parameters
-
-The duties returned will all be inside the given `epoch` (`Epoch`) query
-parameter. This parameter is required.
-
-### Returns
-
-The return format is identical to the [Validator Duties](#validator-duties) response body.
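-
-As a sketch, assuming a beacon node serving this API on the default
-`localhost:5052`, the duties for all validators at epoch `10` could be
-queried with:
-
-```bash
-curl -X GET "http://localhost:5052/validator/duties/all?epoch=10" -H "accept: application/json" | jq
-```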
- -## `/validator/duties/active` - -Returns the duties for all active validators, equivalent to calling [Validator -Duties](#validator-duties) while providing all known validator public keys that -are active in the given epoch. - -This endpoint will only return validators that were in the beacon state -in the given epoch. For example, if the query epoch is 10 and some validator -deposit was included in epoch 11, that validator will not be included in the -result. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/duties/active` -Method | GET -JSON Encoding | Object -Query Parameters | `epoch` -Typical Responses | 200 - -### Parameters - -The duties returned will all be inside the given `epoch` (`Epoch`) query -parameter. This parameter is required. - -### Returns - -The return format is identical to the [Validator Duties](#validator-duties) response body. - -## `/validator/subscribe` - -Posts a list of `ValidatorSubscription` to subscribe validators to -particular slots to perform attestation duties. - -This informs the beacon node to search for peers and subscribe to -required attestation subnets to perform the attestation duties required. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/subscribe` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Request Body - -Expects the following object in the POST request body: - -``` -[ - { - validator_index: 10, - attestation_committee_index: 12, - slot: 3, - is_aggregator: true - } -] -``` - -The `is_aggregator` informs the beacon node if the validator is an aggregator -for this slot/committee. - -### Returns - -A null object on success and an error indicating any failures. - -## `/validator/block` GET - - -Produces and returns an unsigned `BeaconBlock` object. - -The block will be produced with the given `slot` and the parent block will be the -highest block in the canonical chain that has a slot less than `slot`. The -block will still be produced if some other block is also known to be at `slot` -(i.e., it may produce a block that would be slashable if signed). - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/block` -Method | GET -JSON Encoding | Object -Query Parameters | `slot`, `randao_reveal` -Typical Responses | 200 - -### Parameters - - -- `slot` (`Slot`): The slot number for which the block is to be produced. -- `randao_reveal` (`Signature`): 96 bytes `Signature` for the randomness. - - -### Returns - -Returns a `BeaconBlock` object. 
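-
-As a sketch, a block for slot `33` could be requested as follows, where
-`$RANDAO_REVEAL` is a placeholder for the validator's hex-encoded 96-byte
-randao signature and `localhost:5052` is the assumed API address:
-
-```bash
-curl -X GET "http://localhost:5052/validator/block?slot=33&randao_reveal=$RANDAO_REVEAL" -H "accept: application/json" | jq
-```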
- -#### Response Body - -```json -{ - "slot": 33, - "parent_root": "0xf54de54bd33e33aee4706cffff4bd991bcbf522f2551ab007180479c63f4fe912", - "state_root": "0x615c887bad27bc05754d627d941e1730e1b4c77b2eb4378c195ac8a8203bbf26", - "body": { - "randao_reveal": "0x8d7b2a32b026e9c79aae6ec6b83eabae89d60cacd65ac41ed7d2f4be9dd8c89c1bf7cd3d700374e18d03d12f6a054c23006f64f0e4e8b7cf37d6ac9a4c7d815c858120c54673b7d3cb2bb1550a4d659eaf46e34515677c678b70d6f62dbf89f", - "eth1_data": { - "deposit_root": "0x66687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925", - "deposit_count": 8, - "block_hash": "0x2b32db6c2c0a6235fb1397e8225ea85e0f0e6e8c7b126d0016ccbde0e667151e" - }, - "graffiti": "0x736967702f6c69676874686f7573652d302e312e312d7076572656c65617365", - "proposer_slashings": [], - "attester_slashings": [], - "attestations": [], - "deposits": [], - "voluntary_exits": [] - } -} -``` - -## `/validator/block` POST - -Accepts a `SignedBeaconBlock` for verification. If it is valid, it will be -imported into the local database and published on the network. Invalid blocks -will not be published to the network. - -A block may be considered invalid because it is fundamentally incorrect, or its -parent has not yet been imported. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/block` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200/202 - - -### Request Body - -Expects a JSON encoded `SignedBeaconBlock` in the POST request body: - -### Returns - -Returns a null object if the block passed all block validation and is published to the network. -Else, returns a processing error description. - -### Example - -### Request Body - -```json -{ - "message": { - "slot": 33, - "parent_root": "0xf54de54bd33e33aee4706cffff4bd991bcbf522f2551ab007180479c63f4fe912", - "state_root": "0x615c887bad27bc05754d627d941e1730e1b4c77b2eb4378c195ac8a8203bbf26", - "body": { - "randao_reveal": "0x8d7b2a32b026e9c79aae6ec6b83eabae89d60cacd65ac41ed7d2f4be9dd8c89c1bf7cd3d700374e18d03d12f6a054c23006f64f0e4e8b7cf37d6ac9a4c7d815c858120c54673b7d3cb2bb1550a4d659eaf46e34515677c678b70d6f62dbf89f", - "eth1_data": { - "deposit_root": "0x66687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925", - "deposit_count": 8, - "block_hash": "0x2b32db6c2c0a6235fb1397e8225ea85e0f0e6e8c7b126d0016ccbde0e667151e" - }, - "graffiti": "0x736967702f6c69676874686f7573652d302e312e312d7076572656c65617365", - "proposer_slashings": [ - - ], - "attester_slashings": [ - - ], - "attestations": [ - - ], - "deposits": [ - - ], - "voluntary_exits": [ - - ] - } - }, - "signature": "0x965ced900dbabd0a78b81a0abb5d03407be0d38762104316416347f2ea6f82652b5759396f402e85df8ee18ba2c60145037c73b1c335f4272f1751a1cd89862b7b4937c035e350d0108554bd4a8930437ec3311c801a65fe8e5ba022689b5c24" -} -``` - -## `/validator/attestation` - -Produces and returns an unsigned `Attestation` from the current state. - -The attestation will reference the `beacon_block_root` of the highest block in -the canonical chain with a slot equal to or less than the given `slot`. - -An error will be returned if the given slot is more than -`SLOTS_PER_HISTORICAL_VECTOR` slots behind the current head block. - -This endpoint is not protected against slashing. Signing the returned -attestation may result in a slashable offence. 
-
-### HTTP Specification
-
-| Property | Specification |
-| --- |--- |
-Path | `/validator/attestation`
-Method | GET
-JSON Encoding | Object
-Query Parameters | `slot`, `committee_index`
-Typical Responses | 200
-
-### Parameters
-
-- `slot` (`Slot`): The slot number for which the attestation is to be produced.
-- `committee_index` (`CommitteeIndex`): The index of the committee that makes the attestation.
-
-### Returns
-
-Returns an `Attestation` object with a default signature. The `signature` field should be replaced by a valid signature.
-
-#### Response Body
-
-```json
-{
-  "aggregation_bits": "0x01",
-  "data": {
-    "slot": 100,
-    "index": 0,
-    "beacon_block_root": "0xf22e4ec281136d119eabcd4d9d248aeacd042eb63d8d7642f73ad3e71f1c9283",
-    "source": {
-      "epoch": 2,
-      "root": "0x34c1244535c923f08e7f83170d41a076e4f1ec61013846b3a615a1d109d3c329"
-    },
-    "target": {
-      "epoch": 3,
-      "root": "0xaefd23b384994dc0c1a6b77836bdb2f24f209ebfe6c4819324d9685f4a43b4e1"
-    }
-  },
-  "signature": "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
-}
-```
-
-
-
-## `/validator/aggregate_attestation`
-
-Requests an `AggregateAttestation` from the beacon node that has a
-specific `attestation.data`. If no aggregate attestation is known, this will
-return a null object.
-
-### HTTP Specification
-
-| Property | Specification |
-| --- |--- |
-Path | `/validator/aggregate_attestation`
-Method | GET
-JSON Encoding | Object
-Query Parameters | `attestation_data`
-Typical Responses | 200
-
-### Returns
-
-Returns a null object if the attestation data passed is not known to the beacon
-node.
-
-### Example
-
-### Request Body
-
-```json
-{
-  "aggregation_bits": "0x03",
-  "data": {
-    "slot": 3,
-    "index": 0,
-    "beacon_block_root": "0x0b6a1f7a9baa38d00ef079ba861b7587662565ca2502fb9901741c1feb8bb3c9",
-    "source": {
-      "epoch": 0,
-      "root": "0x0000000000000000000000000000000000000000000000000000000000000000"
-    },
-    "target": {
-      "epoch": 0,
-      "root": "0xad2c360ab8c8523db278a7d7ced22f3810800f2fdc282defb6db216689d376bd"
-    }
-  },
-  "signature": "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03649bd3c6c7e8a39cf2ffb99e07b4964d52854559f"
-}
-```
-
-
-## `/validator/attestations`
-
-Accepts a list of `Attestation` objects for verification. If they are valid, they will be imported
-into the local database and published to the network. Invalid attestations will
-not be published to the network.
-
-An attestation may be considered invalid because it is fundamentally incorrect
-or because the beacon node has not imported the relevant blocks required to
-verify it.
-
-### HTTP Specification
-
-| Property | Specification |
-| --- |--- |
-Path | `/validator/attestations`
-Method | POST
-JSON Encoding | Object
-Query Parameters | None
-Typical Responses | 200/202
-
-### Request Body
-
-Expects a JSON encoded list of signed `Attestation` objects in the POST request body. In
-accordance with the naive aggregation scheme, each attestation _must_ have
-exactly one of the `attestation.aggregation_bits` fields set.
-
-### Returns
-
-Returns a null object if the attestations passed all validation and were published to the network.
-Else, returns a processing error description.
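-
-As a sketch, a JSON list of signed attestations saved in a hypothetical
-`attestations.json` file could be published as follows (assuming the API is
-served on the default `localhost:5052`):
-
-```bash
-curl -X POST "http://localhost:5052/validator/attestations" -H "Content-Type: application/json" --data @attestations.json
-```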
-
-### Example
-
-### Request Body
-
-```json
-[
-  {
-    "aggregation_bits": "0x03",
-    "data": {
-      "slot": 3,
-      "index": 0,
-      "beacon_block_root": "0x0b6a1f7a9baa38d00ef079ba861b7587662565ca2502fb9901741c1feb8bb3c9",
-      "source": {
-        "epoch": 0,
-        "root": "0x0000000000000000000000000000000000000000000000000000000000000000"
-      },
-      "target": {
-        "epoch": 0,
-        "root": "0xad2c360ab8c8523db278a7d7ced22f3810800f2fdc282defb6db216689d376bd"
-      }
-    },
-    "signature": "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03649bd3c6c7e8a39cf2ffb99e07b4964d52854559f"
-  }
-]
-```
-
-## `/validator/aggregate_and_proofs`
-
-Accepts a list of `SignedAggregateAndProof` for publication. If they are valid
-(the validator is an aggregator and the signatures can be verified) these
-are published to the network on the global aggregate gossip topic.
-
-### HTTP Specification
-
-| Property | Specification |
-| --- |--- |
-Path | `/validator/aggregate_and_proofs`
-Method | POST
-JSON Encoding | Object
-Query Parameters | None
-Typical Responses | 200/202
-
-### Request Body
-
-Expects a JSON encoded list of `SignedAggregateAndProof` objects in the POST request body.
-
-### Returns
-
-Returns a null object if the aggregates passed all validation and were published to the network.
-Else, returns a processing error description.
-
-### Example
-
-### Request Body
-
-```json
-[
-  {
-    "message": {
-      "aggregator_index": 12,
-      "aggregate": {
-        "aggregation_bits": "0x03",
-        "data": {
-          "slot": 3,
-          "index": 0,
-          "beacon_block_root": "0x0b6a1f7a9baa38d00ef079ba861b7587662565ca2502fb9901741c1feb8bb3c9",
-          "source": {
-            "epoch": 0,
-            "root": "0x0000000000000000000000000000000000000000000000000000000000000000"
-          },
-          "target": {
-            "epoch": 0,
-            "root": "0xad2c360ab8c8523db278a7d7ced22f3810800f2fdc282defb6db216689d376bd"
-          }
-        },
-        "signature": "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03649bd3c6c7e8a39cf2ffb99e07b4964d52854559f"
-      },
-      "selection_proof": "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03649bd3c6c7e8a39cf2ffb99e07b4964d52854559f"
-    },
-    "signature": "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03649bd3c6c7e8a39cf2ffb99e07b4964d52854559f"
-  }
-]
-```
-_Note: The data in this request is for demonstrating types and does not
-contain real data_
diff --git a/book/src/installation-binaries.md b/book/src/installation-binaries.md
new file mode 100644
index 000000000..2c01f9496
--- /dev/null
+++ b/book/src/installation-binaries.md
@@ -0,0 +1,38 @@
+# Pre-built Binaries
+
+Each Lighthouse release contains several downloadable binaries in the "Assets"
+section of the release. You can find the [releases
+on GitHub](https://github.com/sigp/lighthouse/releases).
+
+> Note: binaries are not yet provided for macOS or native Windows.
+
+## Platforms
+
+Binaries are supplied for two platforms:
+
+- `x86_64-unknown-linux-gnu`: AMD/Intel 64-bit processors (most desktops, laptops, servers)
+- `aarch64-unknown-linux-gnu`: 64-bit ARM processors (Raspberry Pi 4)
+
+Additionally, a `-portable` suffix indicates that the binary was built with the `portable` feature:
+
+- Without `portable`: uses modern CPU instructions to provide the fastest signature verification times (may cause an `Illegal instruction` error on older CPUs)
+- With `portable`: approx. 20% slower, but should work on all modern 64-bit processors.
+
+## Usage
+
+Each binary is contained in a `.tar.gz` archive. For this example, let's use the
+`v0.2.13` release and assume the user needs a portable `x86_64` binary.
+
+> Whilst this example uses `v0.2.13` we recommend always using the latest release.
+
+### Steps
+
+1. Go to the [Releases](https://github.com/sigp/lighthouse/releases) page and
+   select the latest release.
+1. Download the `lighthouse-${VERSION}-x86_64-unknown-linux-gnu-portable.tar.gz` binary.
+1. Extract the archive:
+    1. `cd Downloads`
+    1. `tar -xvf lighthouse-${VERSION}-x86_64-unknown-linux-gnu-portable.tar.gz`
+1. Test the binary with `./lighthouse --version` (it should print the version).
+1. (Optional) Move the `lighthouse` binary to a location in your `PATH`, so the `lighthouse` command can be called from anywhere.
+   - E.g., `cp lighthouse /usr/bin`
diff --git a/book/src/installation-source.md b/book/src/installation-source.md
new file mode 100644
index 000000000..1f05c3e8d
--- /dev/null
+++ b/book/src/installation-source.md
@@ -0,0 +1,82 @@
+# Installation: Build from Source
+
+Lighthouse builds on Linux, macOS, and Windows (via [WSL][] only).
+
+Compilation should be easy. In fact, if you already have Rust installed all you
+need is:
+
+- `git clone https://github.com/sigp/lighthouse.git`
+- `cd lighthouse`
+- `make`
+
+If this doesn't work or is not clear enough, see the [Detailed
+Instructions](#detailed-instructions) below. If you have further issues, see
+[Troubleshooting](#troubleshooting). If you'd prefer to use Docker, see the
+[Docker Guide](./docker.md).
+
+## Detailed Instructions
+
+1. Install Rust and Cargo with [rustup](https://rustup.rs/).
+   - Use the `stable` toolchain (it's the default).
+   - Check the [Troubleshooting](#troubleshooting) section for additional
+     dependencies (e.g., `cmake`).
+1. Clone the Lighthouse repository.
+   - Run `$ git clone https://github.com/sigp/lighthouse.git`
+   - Change into the newly created directory with `$ cd lighthouse`
+1. Build Lighthouse with `$ make`.
+1. Installation was successful if `$ lighthouse --help` displays the
+   command-line documentation.
+
+> First time compilation may take several minutes. If you experience any
+> failures, please reach out on [discord](https://discord.gg/cyAszAh) or
+> [create an issue](https://github.com/sigp/lighthouse/issues/new).
+
+## Windows Support
+
+Compiling or running Lighthouse natively on Windows is not currently supported. However,
+Lighthouse can run successfully under the [Windows Subsystem for Linux (WSL)][WSL]. If using
+Ubuntu under WSL, you should install the Ubuntu dependencies listed in the [Dependencies
+(Ubuntu)](#dependencies-ubuntu) section.
+
+[WSL]: https://docs.microsoft.com/en-us/windows/wsl/about
+
+## Troubleshooting
+
+### Dependencies
+
+#### Ubuntu
+
+Several dependencies may be required to compile Lighthouse. The following
+packages may be required in addition to a base Ubuntu Server installation:
+
+```bash
+sudo apt install -y git gcc g++ make cmake pkg-config libssl-dev
+```
+
+#### macOS
+
+You will need `cmake` and `openssl`. You can install them via Homebrew:
+
+    brew install openssl cmake
+
+### Command is not found
+
+Lighthouse will be installed to `CARGO_HOME` or `$HOME/.cargo`. This directory
+needs to be on your `PATH` before you can run `$ lighthouse`.
+
+See ["Configuring the `PATH` environment variable"
+(rust-lang.org)](https://www.rust-lang.org/tools/install) for more information.
+
+### Compilation error
+
+Make sure you are running the latest version of Rust. If you have installed Rust using rustup, simply type `$ rustup update`.
+
+### OpenSSL
+
+If you get a build failure relating to OpenSSL, try installing `openssl-dev` or
+`libssl-dev` using your OS package manager.
+
+- Ubuntu: `$ apt-get install libssl-dev`.
+- Amazon Linux: `$ yum install openssl-devel`.
+
+[WSL]: https://docs.microsoft.com/en-us/windows/wsl/about
diff --git a/book/src/installation.md b/book/src/installation.md
index 94c6bcdd3..25aa8040c 100644
--- a/book/src/installation.md
+++ b/book/src/installation.md
@@ -1,73 +1,16 @@
 # 📦 Installation
 
-Lighthouse runs on Linux, macOS, and Windows via [WSL][].
-Installation should be easy. In fact, if you already have Rust installed all you need is:
+Lighthouse runs on Linux, macOS, and Windows (via [WSL][] only).
 
-- `git clone https://github.com/sigp/lighthouse.git`
-- `cd lighthouse`
-- `make`
+There are three core methods to obtain the Lighthouse application:
 
-If this doesn't work or is not clear enough, see the [Detailed Instructions](#detailed-instructions). If you have further issues, see [Troubleshooting](#troubleshooting). If you'd prefer to use Docker, see the [Docker Guide](./docker.md).
+- [Pre-built binaries](./installation-binaries.md).
+- [Docker images](./docker.md).
+- [Building from source](./installation-source.md).
 
-## Detailed Instructions
+Additionally, there are two extra guides for specific uses:
 
-1. Install Rust and Cargo with [rustup](https://rustup.rs/).
-   - Use the `stable` toolchain (it's the default).
-1. Clone the Lighthouse repository.
-   - Run `$ git clone https://github.com/sigp/lighthouse.git`
-   - Change into the newly created directory with `$ cd lighthouse`
-1. Build Lighthouse with `$ make`.
-1. Installation was successful if `$ lighthouse --help` displays the
-   command-line documentation.
-
-> First time compilation may take several minutes. If you experience any
-> failures, please reach out on [discord](https://discord.gg/cyAszAh) or
-> [create an issue](https://github.com/sigp/lighthouse/issues/new).
-
-## Windows Support
-
-Compiling or running Lighthouse natively on Windows is not currently supported. However,
-Lighthouse can run successfully under the [Windows Subsystem for Linux (WSL)][WSL]. If using
-Ubuntu under WSL, you can should install the Ubuntu dependencies listed in the [Dependencies
-(Ubuntu)](#dependencies-ubuntu) section.
-
-## Troubleshooting
-
-### Dependencies
-
-#### Ubuntu
-
-Several dependencies may be required to compile Lighthouse. The following
-packages may be required in addition a base Ubuntu Server installation:
-
-```bash
-sudo apt install -y git gcc g++ make cmake pkg-config libssl-dev
-```
-
-#### macOS
-
-You will need `cmake`. You can install via homebrew:
-
-    brew install cmake
-
-### Command is not found
-
-Lighthouse will be installed to `CARGO_HOME` or `$HOME/.cargo`. This directory
-needs to be on your `PATH` before you can run `$ lighthouse`.
-
-See ["Configuring the `PATH` environment variable"
-(rust-lang.org)](https://www.rust-lang.org/tools/install) for more information.
-
-### Compilation error
-
-Make sure you are running the latest version of Rust. If you have installed Rust using rustup, simply type `$ rustup update`.
-
-### OpenSSL
-
-If you get a build failure relating to OpenSSL, try installing `openssl-dev` or
-`libssl-dev` using your OS package manager.
-
-- Ubuntu: `$ apt-get install libssl-dev`.
-- Amazon Linux: `$ yum install openssl-devel`.
+- [Raspberry Pi 4 guide](./pi.md).
+- [Cross-compiling guide for developers](./cross-compiling.md).
 
 [WSL]: https://docs.microsoft.com/en-us/windows/wsl/about
diff --git a/book/src/key-management.md b/book/src/key-management.md
index 412bcf990..1275057d8 100644
--- a/book/src/key-management.md
+++ b/book/src/key-management.md
@@ -42,12 +42,12 @@ keypairs. Creating a single validator looks like this:
 
 - `lighthouse account validator create --wallet-name wally --wallet-password wally.pass --count 1`
 
-In step (1), we created a wallet in `~/.lighthouse/wallets` with the name
+In step (1), we created a wallet in `~/.lighthouse/{testnet}/wallets` with the name
 `wally`. We encrypted this using a pre-defined password in the
 `wally.pass` file. Then, in step (2), we created one new validator in the
-`~/.lighthouse/validators` directory using `wally` (unlocking it with
+`~/.lighthouse/{testnet}/validators` directory using `wally` (unlocking it with
 `wally.pass`) and storing the passwords to the validators voting key in
-`~/.lighthouse/secrets`.
+`~/.lighthouse/{testnet}/secrets`.
 
 Thanks to the hierarchical key derivation scheme, we can delete all of the
 aforementioned directories and then regenerate them as long as we remembered
@@ -65,14 +65,16 @@ There are three important directories in Lighthouse validator key management:
 
 - `wallets/`: contains encrypted wallets which are used for hierarchical
   key derivation.
-  - Defaults to `~/.lighthouse/wallets`
+  - Defaults to `~/.lighthouse/{testnet}/wallets`
 - `validators/`: contains a directory for each validator containing
   encrypted keystores and other validator-specific data.
-  - Defaults to `~/.lighthouse/validators`
+  - Defaults to `~/.lighthouse/{testnet}/validators`
 - `secrets/`: since the validator signing keys are "hot", the validator process
   needs access to the passwords to decrypt the keystores in the validators
   dir. These passwords are stored here.
-  - Defaults to `~/.lighthouse/secrets`
+  - Defaults to `~/.lighthouse/{testnet}/secrets`
+
+where `testnet` is the name of the testnet passed in the `--testnet` parameter (default is `medalla`).
 
 When the validator client boots, it searches the `validators/` for directories
 containing voting keystores. When it discovers a keystore, it searches the
diff --git a/book/src/testnet-validator.md b/book/src/testnet-validator.md
new file mode 100644
index 000000000..59b05f881
--- /dev/null
+++ b/book/src/testnet-validator.md
@@ -0,0 +1,162 @@
+# Become a Testnet Validator
+
+Joining an Eth2 testnet is a great way to get familiar with staking in Phase 0.
+All users should experiment with a testnet prior to staking mainnet ETH.
+
+## Supported Testnets
+
+Lighthouse supports four testnets:
+
+- [Medalla](https://github.com/goerli/medalla/tree/master/medalla) (default)
+- [Zinken](https://github.com/goerli/medalla/tree/master/zinken)
+- [Spadina](https://github.com/goerli/medalla/tree/master/spadina) (deprecated)
+- [Altona](https://github.com/goerli/medalla/tree/master/altona) (deprecated)
+
+When using Lighthouse, the `--testnet` flag selects a testnet. E.g.,
+
+- `lighthouse` (no flag): Medalla.
+- `lighthouse --testnet medalla`: Medalla.
+- `lighthouse --testnet zinken`: Zinken.
+
+Using the correct `--testnet` flag is very important; using the wrong flag can
+result in penalties, slashings or lost deposits. As a rule of thumb, always
+provide a `--testnet` flag instead of relying on the default.
+
+> Note: In these documents we use `--testnet MY_TESTNET` for demonstration. You
+> must replace `MY_TESTNET` with a valid testnet name.
+
+## Joining a Testnet
+
+There are six primary steps to become a testnet validator:
+
+1. Create validator keys and submit deposits.
+1. Start an Eth1 client.
+1. Install Lighthouse.
+1. Import the validator keys into Lighthouse.
+1. Start Lighthouse.
+1. Leave Lighthouse running.
+
+Each of these primary steps has several intermediate steps, so we recommend
+setting aside one or two hours for this process.
+
+### Step 1. Create validator keys
+
+The Ethereum Foundation provides an "Eth2 launch pad" for each active testnet:
+
+- [Medalla launchpad](https://medalla.launchpad.ethereum.org/)
+- [Zinken launchpad](https://zinken.launchpad.ethereum.org/)
+
+Please follow the steps on the appropriate launch pad site to generate
+validator keys and submit deposits. Make sure you select "Lighthouse" as your
+client.
+
+Move to the next step once you have completed the steps on the launch pad,
+including generating keys via the Python CLI and submitting gETH/ETH deposits.
+
+### Step 2. Start an Eth1 client
+
+Since Eth2 relies upon the Eth1 chain for validator on-boarding, all Eth2 validators must have a connection to an Eth1 node.
+
+We provide instructions for using Geth (the Eth1 client that, by chance, we ended up testing with), but you could use any client that implements the JSON RPC via HTTP. A fast-synced node should be sufficient.
+
+#### Installing Geth
+
+If you're using a Mac, follow the instructions [listed here](https://github.com/ethereum/go-ethereum/wiki/Installation-Instructions-for-Mac) to install geth. Otherwise [see here](https://github.com/ethereum/go-ethereum/wiki/Installing-Geth).
+
+#### Starting Geth
+
+Once you have geth installed, use this command to start your Eth1 node:
+
+```bash
+ geth --goerli --http
+```
+
+### Step 3. Install Lighthouse
+
+*Note: Lighthouse only supports Windows via WSL.*
+
+Follow the [Lighthouse Installation Instructions](./installation.md) to install
+Lighthouse from one of the available options.
+
+Proceed to the next step once you've successfully installed Lighthouse and can
+view its `--version` info.
+
+> Note: Some of the instructions vary when using Docker; ensure you follow the
+> appropriate sections later in this guide.
+
+### Step 4. Import validator keys to Lighthouse
+
+When Lighthouse is installed, follow the [Importing from the Ethereum 2.0 Launch
+pad](./validator-import-launchpad.md) instructions so the validator client can
+perform your validator duties.
+
+Proceed to the next step once you've successfully imported all validators.
+
+### Step 5. Start Lighthouse
+
+For staking, one needs to run two Lighthouse processes:
+
+- `lighthouse bn`: the "beacon node" which connects to the P2P network and
+  verifies blocks.
+- `lighthouse vc`: the "validator client" which manages validators, using data
+  obtained from the beacon node via an HTTP API.
+
+Starting these processes is different for binary and docker users:
+
+#### Binary users
+
+Those using the pre- or custom-built binaries can start the two processes with:
+
+```bash
+lighthouse --testnet MY_TESTNET bn --staking
+```
+
+```bash
+lighthouse --testnet MY_TESTNET vc
+```
+
+#### Docker users
+
+Those using Docker images can start the processes with:
+
+```bash
+$ docker run \
+    --network host \
+    -v $HOME/.lighthouse:/root/.lighthouse sigp/lighthouse \
+    lighthouse --testnet MY_TESTNET bn --staking --http-address 0.0.0.0
+```
+
+```bash
+$ docker run \
+    --network host \
+    -v $HOME/.lighthouse:/root/.lighthouse \
+    sigp/lighthouse \
+    lighthouse --testnet MY_TESTNET vc
+```
+
+### Step 6. Leave Lighthouse running
+
+Leave your beacon node and validator client running and you'll see logs as the
+beacon node stays synced with the network while the validator client produces
+blocks and attestations.
+
+It will take 4-8+ hours for the beacon chain to process and activate your
+validator; however, you'll know you're active when the validator client starts
+successfully publishing attestations each epoch:
+
+```
+Dec 03 08:49:40.053 INFO Successfully published attestation slot: 98, committee_index: 0, head_block: 0xa208…7fd5,
+```
+
+Although you'll produce an attestation each epoch, it's less common to produce a
+block. Watch for the block production logs too:
+
+```
+Dec 03 08:49:36.225 INFO Successfully published block slot: 98, attestations: 2, deposits: 0, service: block
+```
+
+If you see any `ERRO` (error) logs, please reach out on
+[Discord](https://discord.gg/cyAszAh) or [create an
+issue](https://github.com/sigp/lighthouse/issues/new).
+
+Happy staking!
diff --git a/book/src/validator-create.md b/book/src/validator-create.md
index 25112e748..9d73cdf80 100644
--- a/book/src/validator-create.md
+++ b/book/src/validator-create.md
@@ -41,7 +41,7 @@ OPTIONS:
             The GWEI value of the deposit amount. Defaults to the minimum amount required for an active validator
             (MAX_EFFECTIVE_BALANCE)
         --secrets-dir
-            The path where the validator keystore passwords will be stored. Defaults to ~/.lighthouse/secrets
+            The path where the validator keystore passwords will be stored. Defaults to ~/.lighthouse/{testnet}/secrets
    -s, --spec
             Specifies the default eth2 spec type. [default: mainnet] [possible values: mainnet, minimal, interop]
@@ -53,7 +53,7 @@ OPTIONS:
            Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective
            if there is no existing database.
        --validator-dir <VALIDATOR_DIRECTORY>
-            The path where the validator directories will be created. Defaults to ~/.lighthouse/validators
+            The path where the validator directories will be created. Defaults to ~/.lighthouse/{testnet}/validators
        --wallet-name <WALLET_NAME>
            Use the wallet identified by this name
        --wallet-password <WALLET_PASSWORD_PATH>
@@ -73,10 +73,12 @@ This command will:
 
 - Derive a single new BLS keypair from `wally`, updating it so that it
   generates a new key next time.
-- Create a new directory in `~/.lighthouse/validators` containing:
+- Create a new directory in `~/.lighthouse/{testnet}/validators` containing:
     - An encrypted keystore containing the validators voting keypair.
- An `eth1_deposit_data.rlp` assuming the default deposit amount (`32 ETH` for most testnets and mainnet) which can be submitted to the deposit contract for the medalla testnet. Other testnets can be set via the `--testnet` CLI param. -- Store a password to the validators voting keypair in `~/.lighthouse/secrets`. +- Store a password to the validators voting keypair in `~/.lighthouse/{testnet}/secrets`. + +where `testnet` is the name of the testnet passed in the `--testnet` parameter (default is `medalla`). \ No newline at end of file diff --git a/book/src/validator-import-launchpad.md b/book/src/validator-import-launchpad.md index 3688b139c..b9d444b86 100644 --- a/book/src/validator-import-launchpad.md +++ b/book/src/validator-import-launchpad.md @@ -1,4 +1,4 @@ -# Importing from the Ethereum 2.0 Launchpad +# Importing from the Ethereum 2.0 Launch pad The [Eth2 Lauchpad](https://github.com/ethereum/eth2.0-deposit) is a website from the Ethereum Foundation which guides users how to use the @@ -20,7 +20,7 @@ Whilst following the steps on the website, users are instructed to download the repository. This `eth2-deposit-cli` script will generate the validator BLS keys into a `validator_keys` directory. We assume that the user's present-working-directory is the `eth2-deposit-cli` repository (this is where -you will be if you just ran the `./deposit.sh` script from the Eth2 Launchpad +you will be if you just ran the `./deposit.sh` script from the Eth2 Launch pad website). If this is not the case, simply change the `--directory` to point to the `validator_keys` directory. @@ -30,6 +30,9 @@ using the standard `validators` directory (specify a different one using ### 1. Run the `lighthouse account validator import` command. +Docker users should use the command from the [Docker](#docker) +section, all other users can use: + ```bash lighthouse account validator import --directory validator_keys @@ -85,3 +88,22 @@ INFO Enabled validator voting_pubkey: 0xa5e8702533f6d66422e042a0bf3471ab9b Once this log appears (and there are no errors) the `lighthouse vc` application will ensure that the validator starts performing its duties and being rewarded by the protocol. There is no more input required from the user. + +## Docker + +The `import` command is a little more complex for Docker users, but the example +in this document can be substituted with: + +```bash +docker run -it \ + -v $HOME/.lighthouse:/root/.lighthouse \ + -v $(pwd)/validator_keys:/root/validator_keys \ + sigp/lighthouse \ + lighthouse --testnet medalla account validator import --directory /root/validator_keys +``` + +Here we use two `-v` volumes to attach: + +- `~/.lighthouse` on the host to `/root/.lighthouse` in the Docker container. +- The `validator_keys` directory in the present working directory of the host + to the `/root/validator_keys` directory of the Docker container. diff --git a/book/src/http/consensus.md b/book/src/validator-inclusion.md similarity index 52% rename from book/src/http/consensus.md rename to book/src/validator-inclusion.md index c71b78ce3..ce8e61caf 100644 --- a/book/src/http/consensus.md +++ b/book/src/validator-inclusion.md @@ -1,16 +1,21 @@ -# Lighthouse REST API: `/consensus` +# Validator Inclusion APIs -The `/consensus` endpoints provide information on results of the proof-of-stake -voting process used for finality/justification under Casper FFG. 
+The `/lighthouse/validator_inclusion` API endpoints provide information on +results of the proof-of-stake voting process used for finality/justification +under Casper FFG. + +These endpoints are not stable or included in the Eth2 standard API. As such, +they are subject to change or removal without a change in major release +version. ## Endpoints HTTP Path | Description | | --- | -- | -[`/consensus/global_votes`](#consensusglobal_votes) | A global vote count for a given epoch. -[`/consensus/individual_votes`](#consensusindividual_votes) | A per-validator breakdown of votes in a given epoch. +[`/lighthouse/validator_inclusion/{epoch}/global`](#global) | A global vote count for a given epoch. +[`/lighthouse/validator_inclusion/{epoch}/{validator_id}`](#individual) | A per-validator breakdown of votes in a given epoch. -## `/consensus/global_votes` +## Global Returns a global count of votes for some given `epoch`. The results are included both for the current and previous (`epoch - 1`) epochs since both are required @@ -75,40 +80,27 @@ voting upon the previous epoch included in a block. When this value is greater than or equal to `2/3` it is possible that the beacon chain may justify and/or finalize the epoch. -### HTTP Specification +### HTTP Example -| Property | Specification | -| --- |--- | -Path | `/consensus/global_votes` -Method | GET -JSON Encoding | Object -Query Parameters | `epoch` -Typical Responses | 200 - -### Parameters - -Requires the `epoch` (`Epoch`) query parameter to determine which epoch will be -considered the current epoch. - -### Returns - -A report on global validator voting participation. - -### Example +```bash +curl -X GET "http://localhost:5052/lighthouse/validator_inclusion/0/global" -H "accept: application/json" | jq +``` ```json { - "current_epoch_active_gwei": 52377600000000, - "previous_epoch_active_gwei": 52377600000000, - "current_epoch_attesting_gwei": 50740900000000, - "current_epoch_target_attesting_gwei": 49526000000000, - "previous_epoch_attesting_gwei": 52377600000000, - "previous_epoch_target_attesting_gwei": 51063400000000, - "previous_epoch_head_attesting_gwei": 9248600000000 + "data": { + "current_epoch_active_gwei": 642688000000000, + "previous_epoch_active_gwei": 642688000000000, + "current_epoch_attesting_gwei": 366208000000000, + "current_epoch_target_attesting_gwei": 366208000000000, + "previous_epoch_attesting_gwei": 1000000000, + "previous_epoch_target_attesting_gwei": 1000000000, + "previous_epoch_head_attesting_gwei": 1000000000 + } } ``` -## `/consensus/individual_votes` +## Individual Returns a per-validator summary of how that validator performed during the current epoch. @@ -117,73 +109,26 @@ The [Global Votes](#consensusglobal_votes) endpoint is the summation of all of t individual values, please see it for definitions of terms like "current_epoch", "previous_epoch" and "target_attester". -### HTTP Specification -| Property | Specification | -| --- |--- | -Path | `/consensus/individual_votes` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Request Body - -Expects the following object in the POST request body: +### HTTP Example +```bash +curl -X GET "http://localhost:5052/lighthouse/validator_inclusion/0/42" -H "accept: application/json" | jq ``` -{ - epoch: Epoch, - pubkeys: [PublicKey] -} -``` - -### Returns - -A report on the validators voting participation. 
- -### Example - -#### Request Body ```json { - "epoch": 1203, - "pubkeys": [ - "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42" - ] + "data": { + "is_slashed": false, + "is_withdrawable_in_current_epoch": false, + "is_active_in_current_epoch": true, + "is_active_in_previous_epoch": true, + "current_epoch_effective_balance_gwei": 32000000000, + "is_current_epoch_attester": false, + "is_current_epoch_target_attester": false, + "is_previous_epoch_attester": false, + "is_previous_epoch_target_attester": false, + "is_previous_epoch_head_attester": false + } } ``` - -_Note: for demonstration purposes the second pubkey is some unknown pubkey._ - -#### Response Body - -```json -[ - { - "epoch": 1203, - "pubkey": "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "validator_index": 14935, - "vote": { - "is_slashed": false, - "is_withdrawable_in_current_epoch": false, - "is_active_in_current_epoch": true, - "is_active_in_previous_epoch": true, - "current_epoch_effective_balance_gwei": 3200000000, - "is_current_epoch_attester": true, - "is_current_epoch_target_attester": true, - "is_previous_epoch_attester": true, - "is_previous_epoch_target_attester": true, - "is_previous_epoch_head_attester": false - } - }, - { - "epoch": 1203, - "pubkey": "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42", - "validator_index": null, - "vote": null - } -] -``` diff --git a/book/src/validator-management.md b/book/src/validator-management.md index fbb76c9b4..df0e7243d 100644 --- a/book/src/validator-management.md +++ b/book/src/validator-management.md @@ -16,7 +16,7 @@ useful. ## Introducing the `validator_definitions.yml` file The `validator_definitions.yml` file is located in the `validator-dir`, which -defaults to `~/.lighthouse/validators`. It is a +defaults to `~/.lighthouse/{testnet}/validators`. It is a [YAML](https://en.wikipedia.org/wiki/YAML) encoded file defining exactly which validators the validator client will (and won't) act for. @@ -92,7 +92,7 @@ name identical to the `voting_public_key` value. Lets assume the following directory structure: ``` -~/.lighthouse/validators +~/.lighthouse/{testnet}/validators ├── john │   └── voting-keystore.json ├── sally @@ -135,7 +135,7 @@ In order for the validator client to decrypt the validators, they will need to ensure their `secrets-dir` is organised as below: ``` -~/.lighthouse/secrets +~/.lighthouse/{testnet}/secrets ├── 0xa5566f9ec3c6e1fdf362634ebec9ef7aceb0e460e5079714808388e5d48f4ae1e12897fed1bea951c17fa389d511e477 ├── 0xaa440c566fcf34dedf233baf56cf5fb05bb420d9663b4208272545608c27c13d5b08174518c758ecd814f158f2b4a337 └── 0x87a580d31d7bc69069b55f5a01995a610dd391a26dc9e36e81057a17211983a79266800ab8531f21f1083d7d84085007 diff --git a/book/src/websockets.md b/book/src/websockets.md deleted file mode 100644 index 69cf0e18d..000000000 --- a/book/src/websockets.md +++ /dev/null @@ -1,111 +0,0 @@ -# Websocket API - -**Note: the WebSocket server _only_ emits events. It does not accept any -requests. Use the [HTTP API](./http.md) for requests.** - -By default, a Lighthouse `beacon_node` exposes a websocket server on `localhost:5053`. - -The following CLI flags control the websocket server: - -- `--no-ws`: disable the websocket server. -- `--ws-port`: specify the listen port of the server. 
-- `--ws-address`: specify the listen address of the server. - -All clients connected to the websocket server will receive the same stream of events, all triggered -by the `BeaconChain`. Each event is a JSON object with the following schema: - -```json -{ - "event": "string", - "data": "object" -} -``` - -## Events - -The following events may be emitted: - -### Beacon Head Changed - -Occurs whenever the canonical head of the beacon chain changes. - -```json -{ - "event": "beacon_head_changed", - "data": { - "reorg": "boolean", - "current_head_beacon_block_root": "string", - "previous_head_beacon_block_root": "string" - } -} -``` - -### Beacon Finalization - -Occurs whenever the finalized checkpoint of the canonical head changes. - -```json -{ - "event": "beacon_finalization", - "data": { - "epoch": "number", - "root": "string" - } -} -``` - -### Beacon Block Imported - -Occurs whenever the beacon node imports a valid block. - -```json -{ - "event": "beacon_block_imported", - "data": { - "block": "object" - } -} -``` - -### Beacon Block Rejected - -Occurs whenever the beacon node rejects a block because it is invalid or an -error occurred during validation. - -```json -{ - "event": "beacon_block_rejected", - "data": { - "reason": "string", - "block": "object" - } -} -``` - -### Beacon Attestation Imported - -Occurs whenever the beacon node imports a valid attestation. - -```json -{ - "event": "beacon_attestation_imported", - "data": { - "attestation": "object" - } -} -``` - -### Beacon Attestation Rejected - -Occurs whenever the beacon node rejects an attestation because it is invalid or -an error occurred during validation. - -```json -{ - "event": "beacon_attestation_rejected", - "data": { - "reason": "string", - "attestation": "object" - } -} -``` diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index a5895f4c3..d73f9cc13 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,20 +1,20 @@ [package] name = "boot_node" -version = "0.2.13" +version = "0.3.0" authors = ["Sigma Prime <contact@sigmaprime.io>"] edition = "2018" [dependencies] beacon_node = { path = "../beacon_node" } -clap = "2.33.0" +clap = "2.33.3" eth2_libp2p = { path = "../beacon_node/eth2_libp2p" } types = { path = "../consensus/types" } eth2_testnet_config = { path = "../common/eth2_testnet_config" } -eth2_ssz = { path = "../consensus/ssz" } +eth2_ssz = "0.1.2" slog = "2.5.2" sloggers = "1.0.1" tokio = "0.2.22" -log = "0.4.8" +log = "0.4.11" slog-term = "2.6.0" logging = { path = "../common/logging" } slog-async = "2.5.0" diff --git a/boot_node/src/config.rs b/boot_node/src/config.rs index 2a4bba51d..edc3e3b21 100644 --- a/boot_node/src/config.rs +++ b/boot_node/src/config.rs @@ -103,7 +103,7 @@ impl<T: EthSpec> TryFrom<&ArgMatches<'_>> for BootNodeConfig<T> { ); // add to the local_enr - if let Err(e) = local_enr.insert("eth2", enr_fork.as_ssz_bytes(), &local_key) { + if let Err(e) = local_enr.insert("eth2", &enr_fork.as_ssz_bytes(), &local_key) { slog::warn!(logger, "Could not update eth2 field"; "error" => ?e); } } else { diff --git a/common/account_utils/Cargo.toml b/common/account_utils/Cargo.toml index feb8f6467..1b7580ded 100644 --- a/common/account_utils/Cargo.toml +++ b/common/account_utils/Cargo.toml @@ -7,15 +7,16 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -rand = "0.7.2" +rand = "0.7.3" eth2_wallet = { path = "../../crypto/eth2_wallet" } eth2_keystore = { path = "../../crypto/eth2_keystore" } -zeroize = 
{ version = "1.0.0", features = ["zeroize_derive"] } -serde = "1.0.110" -serde_derive = "1.0.110" +zeroize = { version = "1.1.1", features = ["zeroize_derive"] } +serde = "1.0.116" +serde_derive = "1.0.116" serde_yaml = "0.8.13" slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } types = { path = "../../consensus/types" } validator_dir = { path = "../validator_dir" } regex = "1.3.9" -rpassword = "4.0.5" +rpassword = "5.0.0" +directory = { path = "../directory" } diff --git a/common/account_utils/src/lib.rs b/common/account_utils/src/lib.rs index 77351a7b9..d74ed71ed 100644 --- a/common/account_utils/src/lib.rs +++ b/common/account_utils/src/lib.rs @@ -2,7 +2,10 @@ //! Lighthouse project. use eth2_keystore::Keystore; -use eth2_wallet::Wallet; +use eth2_wallet::{ + bip39::{Language, Mnemonic, MnemonicType}, + Wallet, +}; use rand::{distributions::Alphanumeric, Rng}; use serde_derive::{Deserialize, Serialize}; use std::fs::{self, File}; @@ -15,6 +18,7 @@ use zeroize::Zeroize; pub mod validator_definitions; pub use eth2_keystore; +pub use eth2_wallet; pub use eth2_wallet::PlainText; /// The minimum number of characters required for a wallet password. @@ -150,6 +154,16 @@ pub fn is_password_sufficiently_complex(password: &[u8]) -> Result<(), String> { } } +/// Returns a random 24-word english mnemonic. +pub fn random_mnemonic() -> Mnemonic { + Mnemonic::new(MnemonicType::Words24, Language::English) +} + +/// Attempts to parse a mnemonic phrase. +pub fn mnemonic_from_phrase(phrase: &str) -> Result<Mnemonic, String> { + Mnemonic::from_phrase(phrase, Language::English).map_err(|e| e.to_string()) +} + /// Provides a new-type wrapper around `String` that is zeroized on `Drop`. /// /// Useful for ensuring that password memory is zeroed-out on drop. @@ -164,6 +178,12 @@ impl From<String> for ZeroizeString { } } +impl ZeroizeString { + pub fn as_str(&self) -> &str { + &self.0 + } +} + impl AsRef<[u8]> for ZeroizeString { fn as_ref(&self) -> &[u8] { self.0.as_bytes() diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index 733c771be..11cfa1c14 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -4,6 +4,7 @@ //! attempt) to load into the `crate::intialized_validators::InitializedValidators` struct. use crate::{create_with_600_perms, default_keystore_password_path, ZeroizeString}; +use directory::ensure_dir_exists; use eth2_keystore::Keystore; use regex::Regex; use serde_derive::{Deserialize, Serialize}; @@ -35,6 +36,8 @@ pub enum Error { InvalidKeystorePubkey, /// The keystore was unable to be opened. UnableToOpenKeystore(eth2_keystore::Error), + /// The validator directory could not be created. + UnableToCreateValidatorDir(PathBuf), } /// Defines how the validator client should attempt to sign messages for this validator. 
@@ -63,6 +66,8 @@ pub enum SigningDefinition { pub struct ValidatorDefinition { pub enabled: bool, pub voting_public_key: PublicKey, + #[serde(default)] + pub description: String, #[serde(flatten)] pub signing_definition: SigningDefinition, } @@ -88,6 +93,7 @@ impl ValidatorDefinition { Ok(ValidatorDefinition { enabled: true, voting_public_key, + description: keystore.description().unwrap_or_else(|| "").to_string(), signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, voting_keystore_password_path: None, @@ -105,6 +111,9 @@ pub struct ValidatorDefinitions(Vec<ValidatorDefinition>); impl ValidatorDefinitions { /// Open an existing file or create a new, empty one if it does not exist. pub fn open_or_create<P: AsRef<Path>>(validators_dir: P) -> Result<Self, Error> { + ensure_dir_exists(validators_dir.as_ref()).map_err(|_| { + Error::UnableToCreateValidatorDir(PathBuf::from(validators_dir.as_ref())) + })?; let config_path = validators_dir.as_ref().join(CONFIG_FILENAME); if !config_path.exists() { let this = Self::default(); @@ -205,6 +214,7 @@ impl ValidatorDefinitions { Some(ValidatorDefinition { enabled: true, voting_public_key, + description: keystore.description().unwrap_or_else(|| "").to_string(), signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, voting_keystore_password_path, diff --git a/common/clap_utils/Cargo.toml b/common/clap_utils/Cargo.toml index 0f84869ad..85c562a50 100644 --- a/common/clap_utils/Cargo.toml +++ b/common/clap_utils/Cargo.toml @@ -7,9 +7,9 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -clap = "2.33.0" +clap = "2.33.3" hex = "0.4.2" -dirs = "2.0.2" +dirs = "3.0.1" types = { path = "../../consensus/types" } eth2_testnet_config = { path = "../eth2_testnet_config" } eth2_ssz = "0.1.2" diff --git a/common/compare_fields_derive/Cargo.toml b/common/compare_fields_derive/Cargo.toml index 550615b14..256af2767 100644 --- a/common/compare_fields_derive/Cargo.toml +++ b/common/compare_fields_derive/Cargo.toml @@ -8,5 +8,5 @@ edition = "2018" proc-macro = true [dependencies] -syn = "1.0.18" -quote = "1.0.4" +syn = "1.0.42" +quote = "1.0.7" diff --git a/common/deposit_contract/Cargo.toml b/common/deposit_contract/Cargo.toml index 02014305d..1a6f84395 100644 --- a/common/deposit_contract/Cargo.toml +++ b/common/deposit_contract/Cargo.toml @@ -7,13 +7,13 @@ edition = "2018" build = "build.rs" [build-dependencies] -reqwest = { version = "0.10.4", features = ["blocking", "json", "native-tls-vendored"] } -serde_json = "1.0.52" +reqwest = { version = "0.10.8", features = ["blocking", "json", "native-tls-vendored"] } +serde_json = "1.0.58" sha2 = "0.9.1" hex = "0.4.2" [dependencies] types = { path = "../../consensus/types"} eth2_ssz = "0.1.2" -tree_hash = "0.1.0" +tree_hash = "0.1.1" ethabi = "12.0.0" diff --git a/common/directory/Cargo.toml b/common/directory/Cargo.toml new file mode 100644 index 000000000..1687bb48b --- /dev/null +++ b/common/directory/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "directory" +version = "0.1.0" +authors = ["pawan <pawandhananjay@gmail.com>"] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +clap = "2.33.3" +clap_utils = {path = "../clap_utils"} +dirs = "3.0.1" +eth2_testnet_config = { path = "../eth2_testnet_config" } diff --git a/common/directory/src/lib.rs b/common/directory/src/lib.rs new file mode 100644 index 
000000000..765fdabd6
--- /dev/null
+++ b/common/directory/src/lib.rs
@@ -0,0 +1,60 @@
+use clap::ArgMatches;
+pub use eth2_testnet_config::DEFAULT_HARDCODED_TESTNET;
+use std::fs::create_dir_all;
+use std::path::{Path, PathBuf};
+
+/// Names for the default directories.
+pub const DEFAULT_ROOT_DIR: &str = ".lighthouse";
+pub const DEFAULT_BEACON_NODE_DIR: &str = "beacon";
+pub const DEFAULT_NETWORK_DIR: &str = "network";
+pub const DEFAULT_VALIDATOR_DIR: &str = "validators";
+pub const DEFAULT_SECRET_DIR: &str = "secrets";
+pub const DEFAULT_WALLET_DIR: &str = "wallets";
+
+/// Base directory name for unnamed testnets passed through the `--testnet-dir` flag.
+pub const CUSTOM_TESTNET_DIR: &str = "custom";
+
+/// Gets the testnet directory name.
+///
+/// Tries the "testnet" flag first; if it is not present, checks the "testnet-dir" flag and
+/// returns a custom name. If neither flag is present, returns the default hardcoded network
+/// name.
+pub fn get_testnet_name(matches: &ArgMatches) -> String {
+    if let Some(testnet_name) = matches.value_of("testnet") {
+        testnet_name.to_string()
+    } else if matches.value_of("testnet-dir").is_some() {
+        CUSTOM_TESTNET_DIR.to_string()
+    } else {
+        eth2_testnet_config::DEFAULT_HARDCODED_TESTNET.to_string()
+    }
+}
+
+/// Creates the directory at the given path if it does not already exist.
+pub fn ensure_dir_exists<P: AsRef<Path>>(path: P) -> Result<(), String> {
+    let path = path.as_ref();
+
+    if !path.exists() {
+        create_dir_all(path).map_err(|e| format!("Unable to create {:?}: {:?}", path, e))?;
+    }
+
+    Ok(())
+}
+
+/// If `arg` is in `matches`, parses the value as a path.
+///
+/// Otherwise, attempts to find the default directory for the `testnet` from the `matches`
+/// and appends `flag` to it.
+pub fn parse_path_or_default_with_flag(
+    matches: &ArgMatches,
+    arg: &'static str,
+    flag: &str,
+) -> Result<PathBuf, String> {
+    clap_utils::parse_path_with_default_in_home_dir(
+        matches,
+        arg,
+        PathBuf::new()
+            .join(DEFAULT_ROOT_DIR)
+            .join(get_testnet_name(matches))
+            .join(flag),
+    )
+}
diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml
new file mode 100644
index 000000000..0c1528e9b
--- /dev/null
+++ b/common/eth2/Cargo.toml
@@ -0,0 +1,31 @@
+[package]
+name = "eth2"
+version = "0.1.0"
+authors = ["Paul Hauner <paul@paulhauner.com>"]
+edition = "2018"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+serde = { version = "1.0.116", features = ["derive"] }
+serde_json = "1.0.58"
+types = { path = "../../consensus/types" }
+hex = "0.4.2"
+reqwest = { version = "0.10.8", features = ["json"] }
+eth2_libp2p = { path = "../../beacon_node/eth2_libp2p" }
+proto_array = { path = "../../consensus/proto_array", optional = true }
+serde_utils = { path = "../../consensus/serde_utils" }
+zeroize = { version = "1.1.1", features = ["zeroize_derive"] }
+eth2_keystore = { path = "../../crypto/eth2_keystore" }
+libsecp256k1 = "0.3.5"
+ring = "0.16.12"
+bytes = "0.5.6"
+account_utils = { path = "../../common/account_utils" }
+
+[target.'cfg(target_os = "linux")'.dependencies]
+psutil = { version = "3.2.0", optional = true }
+procinfo = { version = "0.4.2", optional = true }
+
+[features]
+default = ["lighthouse"]
+lighthouse = ["proto_array", "psutil", "procinfo"]
diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs
new file mode 100644
index 000000000..8e9a18b47
--- /dev/null
+++ b/common/eth2/src/lib.rs
@@ -0,0 +1,797 @@
+//! This crate provides two major things:
+//!
+//! 1. The types served by the `http_api` crate.
+//! 2. A wrapper around `reqwest` that forms an HTTP client capable of consuming the endpoints
+//!    served by the `http_api` crate.
+//!
+//! Eventually it would be ideal to publish this crate on crates.io; however, some local
+//! dependencies presently prevent this.
+
+#[cfg(feature = "lighthouse")]
+pub mod lighthouse;
+pub mod lighthouse_vc;
+pub mod types;
+
+use self::types::*;
+use reqwest::{IntoUrl, Response};
+use serde::{de::DeserializeOwned, Serialize};
+use std::convert::TryFrom;
+use std::fmt;
+
+pub use reqwest;
+pub use reqwest::{StatusCode, Url};
+
+#[derive(Debug)]
+pub enum Error {
+    /// The `reqwest` client raised an error.
+    Reqwest(reqwest::Error),
+    /// The server returned an error message whose body could be parsed.
+    ServerMessage(ErrorMessage),
+    /// The server returned an error message whose body could not be parsed.
+    StatusCode(StatusCode),
+    /// The supplied URL is badly formatted. It should look something like `http://127.0.0.1:5052`.
+    InvalidUrl(Url),
+    /// The supplied validator client secret is invalid.
+    InvalidSecret(String),
+    /// The server returned a response with an invalid signature. It may be an impostor.
+    InvalidSignatureHeader,
+    /// The server returned a response without a signature header. It may be an impostor.
+    MissingSignatureHeader,
+    /// The server returned an invalid JSON response.
+    InvalidJson(serde_json::Error),
+}
+
+impl Error {
+    /// If the error has a HTTP status code, return it.
+    pub fn status(&self) -> Option<StatusCode> {
+        match self {
+            Error::Reqwest(error) => error.status(),
+            Error::ServerMessage(msg) => StatusCode::try_from(msg.code).ok(),
+            Error::StatusCode(status) => Some(*status),
+            Error::InvalidUrl(_) => None,
+            Error::InvalidSecret(_) => None,
+            Error::InvalidSignatureHeader => None,
+            Error::MissingSignatureHeader => None,
+            Error::InvalidJson(_) => None,
+        }
+    }
+}
+
+impl fmt::Display for Error {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
+
+/// A wrapper around `reqwest::Client` which provides convenience methods for interfacing with a
+/// Lighthouse Beacon Node HTTP server (`http_api`).
+#[derive(Clone)]
+pub struct BeaconNodeHttpClient {
+    client: reqwest::Client,
+    server: Url,
+}
+
+impl BeaconNodeHttpClient {
+    pub fn new(server: Url) -> Self {
+        Self {
+            client: reqwest::Client::new(),
+            server,
+        }
+    }
+
+    pub fn from_components(server: Url, client: reqwest::Client) -> Self {
+        Self { client, server }
+    }
+
+    /// Return the path with the standard `/eth/v1` prefix applied.
+    fn eth_path(&self) -> Result<Url, Error> {
+        let mut path = self.server.clone();
+
+        path.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+            .push("eth")
+            .push("v1");
+
+        Ok(path)
+    }
+
+    /// Perform a HTTP GET request.
+    async fn get<T: DeserializeOwned, U: IntoUrl>(&self, url: U) -> Result<T, Error> {
+        let response = self.client.get(url).send().await.map_err(Error::Reqwest)?;
+        ok_or_error(response)
+            .await?
+            .json()
+            .await
+            .map_err(Error::Reqwest)
+    }
+
+    /// Perform a HTTP GET request, returning `None` on a 404 error.
+ async fn get_opt<T: DeserializeOwned, U: IntoUrl>(&self, url: U) -> Result<Option<T>, Error> { + let response = self.client.get(url).send().await.map_err(Error::Reqwest)?; + match ok_or_error(response).await { + Ok(resp) => resp.json().await.map(Option::Some).map_err(Error::Reqwest), + Err(err) => { + if err.status() == Some(StatusCode::NOT_FOUND) { + Ok(None) + } else { + Err(err) + } + } + } + } + + /// Perform a HTTP POST request. + async fn post<T: Serialize, U: IntoUrl>(&self, url: U, body: &T) -> Result<(), Error> { + let response = self + .client + .post(url) + .json(body) + .send() + .await + .map_err(Error::Reqwest)?; + ok_or_error(response).await?; + Ok(()) + } + + /// `GET beacon/genesis` + /// + /// ## Errors + /// + /// May return a `404` if beacon chain genesis has not yet occurred. + pub async fn get_beacon_genesis(&self) -> Result<GenericResponse<GenesisData>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("genesis"); + + self.get(path).await + } + + /// `GET beacon/states/{state_id}/root` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_states_root( + &self, + state_id: StateId, + ) -> Result<Option<GenericResponse<RootData>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("root"); + + self.get_opt(path).await + } + + /// `GET beacon/states/{state_id}/fork` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_states_fork( + &self, + state_id: StateId, + ) -> Result<Option<GenericResponse<Fork>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("fork"); + + self.get_opt(path).await + } + + /// `GET beacon/states/{state_id}/finality_checkpoints` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_states_finality_checkpoints( + &self, + state_id: StateId, + ) -> Result<Option<GenericResponse<FinalityCheckpointsData>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("finality_checkpoints"); + + self.get_opt(path).await + } + + /// `GET beacon/states/{state_id}/validators` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_states_validators( + &self, + state_id: StateId, + ) -> Result<Option<GenericResponse<Vec<ValidatorData>>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("validators"); + + self.get_opt(path).await + } + + /// `GET beacon/states/{state_id}/committees?slot,index` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_states_committees( + &self, + state_id: StateId, + epoch: Epoch, + slot: Option<Slot>, + index: Option<u64>, + ) -> Result<Option<GenericResponse<Vec<CommitteeData>>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("committees") + .push(&epoch.to_string()); + + if let Some(slot) = slot { + path.query_pairs_mut() + .append_pair("slot", &slot.to_string()); + } + + if let Some(index) = index { + path.query_pairs_mut() + .append_pair("index", &index.to_string()); + } + + self.get_opt(path).await + } + + /// `GET beacon/states/{state_id}/validators/{validator_id}` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_states_validator_id( + &self, + state_id: StateId, + validator_id: &ValidatorId, + ) -> Result<Option<GenericResponse<ValidatorData>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("states") + .push(&state_id.to_string()) + .push("validators") + .push(&validator_id.to_string()); + + self.get_opt(path).await + } + + /// `GET beacon/headers?slot,parent_root` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_headers( + &self, + slot: Option<Slot>, + parent_root: Option<Hash256>, + ) -> Result<Option<GenericResponse<Vec<BlockHeaderData>>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("headers"); + + if let Some(slot) = slot { + path.query_pairs_mut() + .append_pair("slot", &slot.to_string()); + } + + if let Some(root) = parent_root { + path.query_pairs_mut() + .append_pair("parent_root", &format!("{:?}", root)); + } + + self.get_opt(path).await + } + + /// `GET beacon/headers/{block_id}` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_headers_block_id( + &self, + block_id: BlockId, + ) -> Result<Option<GenericResponse<BlockHeaderData>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("headers") + .push(&block_id.to_string()); + + self.get_opt(path).await + } + + /// `POST beacon/blocks` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn post_beacon_blocks<T: EthSpec>( + &self, + block: &SignedBeaconBlock<T>, + ) -> Result<(), Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("blocks"); + + self.post(path, block).await?; + + Ok(()) + } + + /// `GET beacon/blocks` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_blocks<T: EthSpec>( + &self, + block_id: BlockId, + ) -> Result<Option<GenericResponse<SignedBeaconBlock<T>>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("blocks") + .push(&block_id.to_string()); + + self.get_opt(path).await + } + + /// `GET beacon/blocks/{block_id}/root` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_blocks_root( + &self, + block_id: BlockId, + ) -> Result<Option<GenericResponse<RootData>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("blocks") + .push(&block_id.to_string()) + .push("root"); + + self.get_opt(path).await + } + + /// `GET beacon/blocks/{block_id}/attestations` + /// + /// Returns `Ok(None)` on a 404 error. 
+ pub async fn get_beacon_blocks_attestations<T: EthSpec>( + &self, + block_id: BlockId, + ) -> Result<Option<GenericResponse<Vec<Attestation<T>>>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("blocks") + .push(&block_id.to_string()) + .push("attestations"); + + self.get_opt(path).await + } + + /// `POST beacon/pool/attestations` + pub async fn post_beacon_pool_attestations<T: EthSpec>( + &self, + attestation: &Attestation<T>, + ) -> Result<(), Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("attestations"); + + self.post(path, attestation).await?; + + Ok(()) + } + + /// `GET beacon/pool/attestations` + pub async fn get_beacon_pool_attestations<T: EthSpec>( + &self, + ) -> Result<GenericResponse<Vec<Attestation<T>>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("attestations"); + + self.get(path).await + } + + /// `POST beacon/pool/attester_slashings` + pub async fn post_beacon_pool_attester_slashings<T: EthSpec>( + &self, + slashing: &AttesterSlashing<T>, + ) -> Result<(), Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("attester_slashings"); + + self.post(path, slashing).await?; + + Ok(()) + } + + /// `GET beacon/pool/attester_slashings` + pub async fn get_beacon_pool_attester_slashings<T: EthSpec>( + &self, + ) -> Result<GenericResponse<Vec<AttesterSlashing<T>>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("attester_slashings"); + + self.get(path).await + } + + /// `POST beacon/pool/proposer_slashings` + pub async fn post_beacon_pool_proposer_slashings( + &self, + slashing: &ProposerSlashing, + ) -> Result<(), Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("proposer_slashings"); + + self.post(path, slashing).await?; + + Ok(()) + } + + /// `GET beacon/pool/proposer_slashings` + pub async fn get_beacon_pool_proposer_slashings( + &self, + ) -> Result<GenericResponse<Vec<ProposerSlashing>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("proposer_slashings"); + + self.get(path).await + } + + /// `POST beacon/pool/voluntary_exits` + pub async fn post_beacon_pool_voluntary_exits( + &self, + exit: &SignedVoluntaryExit, + ) -> Result<(), Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("pool") + .push("voluntary_exits"); + + self.post(path, exit).await?; + + Ok(()) + } + + /// `GET beacon/pool/voluntary_exits` + pub async fn get_beacon_pool_voluntary_exits( + &self, + ) -> Result<GenericResponse<Vec<SignedVoluntaryExit>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("beacon") + .push("pool") + .push("voluntary_exits"); + + self.get(path).await + } + + /// `GET config/fork_schedule` + pub async fn get_config_fork_schedule(&self) -> Result<GenericResponse<Vec<Fork>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("config") + .push("fork_schedule"); + + self.get(path).await + } + + /// `GET config/spec` + pub async fn get_config_spec(&self) -> Result<GenericResponse<YamlConfig>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("config") + .push("spec"); + + self.get(path).await + } + + /// `GET config/deposit_contract` + pub async fn get_config_deposit_contract( + &self, + ) -> Result<GenericResponse<DepositContractData>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("config") + .push("deposit_contract"); + + self.get(path).await + } + + /// `GET node/version` + pub async fn get_node_version(&self) -> Result<GenericResponse<VersionData>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("node") + .push("version"); + + self.get(path).await + } + + /// `GET node/syncing` + pub async fn get_node_syncing(&self) -> Result<GenericResponse<SyncingData>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("node") + .push("syncing"); + + self.get(path).await + } + + /// `GET debug/beacon/states/{state_id}` + pub async fn get_debug_beacon_states<T: EthSpec>( + &self, + state_id: StateId, + ) -> Result<Option<GenericResponse<BeaconState<T>>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("debug") + .push("beacon") + .push("states") + .push(&state_id.to_string()); + + self.get_opt(path).await + } + + /// `GET debug/beacon/heads` + pub async fn get_debug_beacon_heads( + &self, + ) -> Result<GenericResponse<Vec<ChainHeadData>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("debug") + .push("beacon") + .push("heads"); + + self.get(path).await + } + + /// `GET validator/duties/attester/{epoch}?index` + /// + /// ## Note + /// + /// The `index` query parameter accepts a list of validator indices. + pub async fn get_validator_duties_attester( + &self, + epoch: Epoch, + index: Option<&[u64]>, + ) -> Result<GenericResponse<Vec<AttesterData>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("validator") + .push("duties") + .push("attester") + .push(&epoch.to_string()); + + if let Some(index) = index { + let string = index + .iter() + .map(|i| i.to_string()) + .collect::<Vec<_>>() + .join(","); + path.query_pairs_mut().append_pair("index", &string); + } + + self.get(path).await + } + + /// `GET validator/duties/proposer/{epoch}` + pub async fn get_validator_duties_proposer( + &self, + epoch: Epoch, + ) -> Result<GenericResponse<Vec<ProposerData>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("validator") + .push("duties") + .push("proposer") + .push(&epoch.to_string()); + + self.get(path).await + } + + /// `GET validator/duties/attester/{epoch}?index` + /// + /// ## Note + /// + /// The `index` query parameter accepts a list of validator indices. + pub async fn get_validator_blocks<T: EthSpec>( + &self, + slot: Slot, + randao_reveal: SignatureBytes, + graffiti: Option<&Graffiti>, + ) -> Result<GenericResponse<BeaconBlock<T>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("validator") + .push("blocks") + .push(&slot.to_string()); + + path.query_pairs_mut() + .append_pair("randao_reveal", &randao_reveal.to_string()); + + if let Some(graffiti) = graffiti { + path.query_pairs_mut() + .append_pair("graffiti", &graffiti.to_string()); + } + + self.get(path).await + } + + /// `GET validator/attestation_data?slot,committee_index` + pub async fn get_validator_attestation_data( + &self, + slot: Slot, + committee_index: CommitteeIndex, + ) -> Result<GenericResponse<AttestationData>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("validator") + .push("attestation_data"); + + path.query_pairs_mut() + .append_pair("slot", &slot.to_string()) + .append_pair("committee_index", &committee_index.to_string()); + + self.get(path).await + } + + /// `GET validator/attestation_attestation?slot,attestation_data_root` + pub async fn get_validator_aggregate_attestation<T: EthSpec>( + &self, + slot: Slot, + attestation_data_root: Hash256, + ) -> Result<Option<GenericResponse<Attestation<T>>>, Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("validator") + .push("aggregate_attestation"); + + path.query_pairs_mut() + .append_pair("slot", &slot.to_string()) + .append_pair( + "attestation_data_root", + &format!("{:?}", attestation_data_root), + ); + + self.get_opt(path).await + } + + /// `POST validator/aggregate_and_proofs` + pub async fn post_validator_aggregate_and_proof<T: EthSpec>( + &self, + aggregate: &SignedAggregateAndProof<T>, + ) -> Result<(), Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("validator") + .push("aggregate_and_proofs"); + + self.post(path, aggregate).await?; + + Ok(()) + } + + /// `POST validator/beacon_committee_subscriptions` + pub async fn post_validator_beacon_committee_subscriptions( + &self, + subscriptions: &[BeaconCommitteeSubscription], + ) -> Result<(), Error> { + let mut path = self.eth_path()?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("validator") + .push("beacon_committee_subscriptions"); + + self.post(path, &subscriptions).await?; + + Ok(()) + } +} + +/// Returns `Ok(response)` if the response is a `200 OK` response. Otherwise, creates an +/// appropriate error message. +async fn ok_or_error(response: Response) -> Result<Response, Error> { + let status = response.status(); + + if status == StatusCode::OK { + Ok(response) + } else if let Ok(message) = response.json().await { + Err(Error::ServerMessage(message)) + } else { + Err(Error::StatusCode(status)) + } +} diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs new file mode 100644 index 000000000..8bfbad84e --- /dev/null +++ b/common/eth2/src/lighthouse.rs @@ -0,0 +1,224 @@ +//! 
This module contains endpoints that are non-standard and only available on Lighthouse servers. + +use crate::{ + types::{Epoch, EthSpec, GenericResponse, ValidatorId}, + BeaconNodeHttpClient, Error, +}; +use proto_array::core::ProtoArray; +use serde::{Deserialize, Serialize}; + +pub use eth2_libp2p::{types::SyncState, PeerInfo}; + +/// Information returned by `peers` and `connected_peers`. +// TODO: this should be deserializable.. +#[derive(Debug, Clone, Serialize)] +#[serde(bound = "T: EthSpec")] +pub struct Peer<T: EthSpec> { + /// The Peer's ID + pub peer_id: String, + /// The PeerInfo associated with the peer. + pub peer_info: PeerInfo<T>, +} + +/// The results of validators voting during an epoch. +/// +/// Provides information about the current and previous epochs. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct GlobalValidatorInclusionData { + /// The total effective balance of all active validators during the _current_ epoch. + pub current_epoch_active_gwei: u64, + /// The total effective balance of all active validators during the _previous_ epoch. + pub previous_epoch_active_gwei: u64, + /// The total effective balance of all validators who attested during the _current_ epoch. + pub current_epoch_attesting_gwei: u64, + /// The total effective balance of all validators who attested during the _current_ epoch and + /// agreed with the state about the beacon block at the first slot of the _current_ epoch. + pub current_epoch_target_attesting_gwei: u64, + /// The total effective balance of all validators who attested during the _previous_ epoch. + pub previous_epoch_attesting_gwei: u64, + /// The total effective balance of all validators who attested during the _previous_ epoch and + /// agreed with the state about the beacon block at the first slot of the _previous_ epoch. + pub previous_epoch_target_attesting_gwei: u64, + /// The total effective balance of all validators who attested during the _previous_ epoch and + /// agreed with the state about the beacon block at the time of attestation. + pub previous_epoch_head_attesting_gwei: u64, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ValidatorInclusionData { + /// True if the validator has been slashed, ever. + pub is_slashed: bool, + /// True if the validator can withdraw in the current epoch. + pub is_withdrawable_in_current_epoch: bool, + /// True if the validator was active in the state's _current_ epoch. + pub is_active_in_current_epoch: bool, + /// True if the validator was active in the state's _previous_ epoch. + pub is_active_in_previous_epoch: bool, + /// The validator's effective balance in the _current_ epoch. + pub current_epoch_effective_balance_gwei: u64, + /// True if the validator had an attestation included in the _current_ epoch. + pub is_current_epoch_attester: bool, + /// True if the validator's beacon block root attestation for the first slot of the _current_ + /// epoch matches the block root known to the state. + pub is_current_epoch_target_attester: bool, + /// True if the validator had an attestation included in the _previous_ epoch. + pub is_previous_epoch_attester: bool, + /// True if the validator's beacon block root attestation for the first slot of the _previous_ + /// epoch matches the block root known to the state. + pub is_previous_epoch_target_attester: bool, + /// True if the validator's beacon block root attestation in the _previous_ epoch at the + /// attestation's slot (`attestation_data.slot`) matches the block root known to the state. 
+ pub is_previous_epoch_head_attester: bool, +} + +#[cfg(target_os = "linux")] +use {procinfo::pid, psutil::process::Process}; + +/// Reports on the health of the Lighthouse instance. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Health { + /// The pid of this process. + pub pid: u32, + /// The number of threads used by this pid. + pub pid_num_threads: i32, + /// The total resident memory used by this pid. + pub pid_mem_resident_set_size: u64, + /// The total virtual memory used by this pid. + pub pid_mem_virtual_memory_size: u64, + /// Total virtual memory on the system + pub sys_virt_mem_total: u64, + /// Total virtual memory available for new processes. + pub sys_virt_mem_available: u64, + /// Total virtual memory used on the system + pub sys_virt_mem_used: u64, + /// Total virtual memory not used on the system + pub sys_virt_mem_free: u64, + /// Percentage of virtual memory used on the system + pub sys_virt_mem_percent: f32, + /// System load average over 1 minute. + pub sys_loadavg_1: f64, + /// System load average over 5 minutes. + pub sys_loadavg_5: f64, + /// System load average over 15 minutes. + pub sys_loadavg_15: f64, +} + +impl Health { + #[cfg(not(target_os = "linux"))] + pub fn observe() -> Result<Self, String> { + Err("Health is only available on Linux".into()) + } + + #[cfg(target_os = "linux")] + pub fn observe() -> Result<Self, String> { + let process = + Process::current().map_err(|e| format!("Unable to get current process: {:?}", e))?; + + let process_mem = process + .memory_info() + .map_err(|e| format!("Unable to get process memory info: {:?}", e))?; + + let stat = pid::stat_self().map_err(|e| format!("Unable to get stat: {:?}", e))?; + + let vm = psutil::memory::virtual_memory() + .map_err(|e| format!("Unable to get virtual memory: {:?}", e))?; + let loadavg = + psutil::host::loadavg().map_err(|e| format!("Unable to get loadavg: {:?}", e))?; + + Ok(Self { + pid: process.pid(), + pid_num_threads: stat.num_threads, + pid_mem_resident_set_size: process_mem.rss(), + pid_mem_virtual_memory_size: process_mem.vms(), + sys_virt_mem_total: vm.total(), + sys_virt_mem_available: vm.available(), + sys_virt_mem_used: vm.used(), + sys_virt_mem_free: vm.free(), + sys_virt_mem_percent: vm.percent(), + sys_loadavg_1: loadavg.one, + sys_loadavg_5: loadavg.five, + sys_loadavg_15: loadavg.fifteen, + }) + } +} + +impl BeaconNodeHttpClient { + /// `GET lighthouse/health` + pub async fn get_lighthouse_health(&self) -> Result<GenericResponse<Health>, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("health"); + + self.get(path).await + } + + /// `GET lighthouse/syncing` + pub async fn get_lighthouse_syncing(&self) -> Result<GenericResponse<SyncState>, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("syncing"); + + self.get(path).await + } + + /* + * Note: + * + * The `lighthouse/peers` endpoints do not have functions here. We are yet to implement + * `Deserialize` on the `PeerInfo` struct since it contains use of `Instant`. This could be + * fairly simply achieved, if desired. + */ + + /// `GET lighthouse/proto_array` + pub async fn get_lighthouse_proto_array(&self) -> Result<GenericResponse<ProtoArray>, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("lighthouse") + .push("proto_array"); + + self.get(path).await + } + + /// `GET lighthouse/validator_inclusion/{epoch}/global` + pub async fn get_lighthouse_validator_inclusion_global( + &self, + epoch: Epoch, + ) -> Result<GenericResponse<GlobalValidatorInclusionData>, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("validator_inclusion") + .push(&epoch.to_string()) + .push("global"); + + self.get(path).await + } + + /// `GET lighthouse/validator_inclusion/{epoch}/{validator_id}` + pub async fn get_lighthouse_validator_inclusion( + &self, + epoch: Epoch, + validator_id: ValidatorId, + ) -> Result<GenericResponse<Option<ValidatorInclusionData>>, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("validator_inclusion") + .push(&epoch.to_string()) + .push(&validator_id.to_string()); + + self.get(path).await + } +} diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs new file mode 100644 index 000000000..b08ceabb2 --- /dev/null +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -0,0 +1,331 @@ +use super::{types::*, PK_LEN, SECRET_PREFIX}; +use crate::Error; +use account_utils::ZeroizeString; +use bytes::Bytes; +use reqwest::{ + header::{HeaderMap, HeaderValue}, + IntoUrl, +}; +use ring::digest::{digest, SHA256}; +use secp256k1::{Message, PublicKey, Signature}; +use serde::{de::DeserializeOwned, Serialize}; + +pub use reqwest; +pub use reqwest::{Response, StatusCode, Url}; + +/// A wrapper around `reqwest::Client` which provides convenience methods for interfacing with a +/// Lighthouse Validator Client HTTP server (`validator_client/src/http_api`). +#[derive(Clone)] +pub struct ValidatorClientHttpClient { + client: reqwest::Client, + server: Url, + secret: ZeroizeString, + server_pubkey: PublicKey, +} + +/// Parse an API token and return a secp256k1 public key. +pub fn parse_pubkey(secret: &str) -> Result<PublicKey, Error> { + let secret = if !secret.starts_with(SECRET_PREFIX) { + return Err(Error::InvalidSecret(format!( + "secret does not start with {}", + SECRET_PREFIX + ))); + } else { + &secret[SECRET_PREFIX.len()..] + }; + + serde_utils::hex::decode(&secret) + .map_err(|e| Error::InvalidSecret(format!("invalid hex: {:?}", e))) + .and_then(|bytes| { + if bytes.len() != PK_LEN { + return Err(Error::InvalidSecret(format!( + "expected {} bytes not {}", + PK_LEN, + bytes.len() + ))); + } + + let mut arr = [0; PK_LEN]; + arr.copy_from_slice(&bytes); + PublicKey::parse_compressed(&arr) + .map_err(|e| Error::InvalidSecret(format!("invalid secp256k1 pubkey: {:?}", e))) + }) +} + +impl ValidatorClientHttpClient { + pub fn new(server: Url, secret: String) -> Result<Self, Error> { + Ok(Self { + client: reqwest::Client::new(), + server, + server_pubkey: parse_pubkey(&secret)?, + secret: secret.into(), + }) + } + + pub fn from_components( + server: Url, + client: reqwest::Client, + secret: String, + ) -> Result<Self, Error> { + Ok(Self { + client, + server, + server_pubkey: parse_pubkey(&secret)?, + secret: secret.into(), + }) + } + + async fn signed_body(&self, response: Response) -> Result<Bytes, Error> { + let sig = response + .headers() + .get("Signature") + .ok_or_else(|| Error::MissingSignatureHeader)? + .to_str() + .map_err(|_| Error::InvalidSignatureHeader)? 
+ .to_string(); + + let body = response.bytes().await.map_err(Error::Reqwest)?; + + let message = + Message::parse_slice(digest(&SHA256, &body).as_ref()).expect("sha256 is 32 bytes"); + + serde_utils::hex::decode(&sig) + .ok() + .and_then(|bytes| { + let sig = Signature::parse_der(&bytes).ok()?; + Some(secp256k1::verify(&message, &sig, &self.server_pubkey)) + }) + .filter(|is_valid| *is_valid) + .ok_or_else(|| Error::InvalidSignatureHeader)?; + + Ok(body) + } + + async fn signed_json<T: DeserializeOwned>(&self, response: Response) -> Result<T, Error> { + let body = self.signed_body(response).await?; + serde_json::from_slice(&body).map_err(Error::InvalidJson) + } + + fn headers(&self) -> Result<HeaderMap, Error> { + let header_value = HeaderValue::from_str(&format!("Basic {}", self.secret.as_str())) + .map_err(|e| { + Error::InvalidSecret(format!("secret is invalid as a header value: {}", e)) + })?; + + let mut headers = HeaderMap::new(); + headers.insert("Authorization", header_value); + + Ok(headers) + } + + /// Perform a HTTP GET request. + async fn get<T: DeserializeOwned, U: IntoUrl>(&self, url: U) -> Result<T, Error> { + let response = self + .client + .get(url) + .headers(self.headers()?) + .send() + .await + .map_err(Error::Reqwest)?; + let response = ok_or_error(response).await?; + self.signed_json(response).await + } + + /// Perform a HTTP GET request, returning `None` on a 404 error. + async fn get_opt<T: DeserializeOwned, U: IntoUrl>(&self, url: U) -> Result<Option<T>, Error> { + let response = self + .client + .get(url) + .headers(self.headers()?) + .send() + .await + .map_err(Error::Reqwest)?; + match ok_or_error(response).await { + Ok(resp) => self.signed_json(resp).await.map(Option::Some), + Err(err) => { + if err.status() == Some(StatusCode::NOT_FOUND) { + Ok(None) + } else { + Err(err) + } + } + } + } + + /// Perform a HTTP POST request. + async fn post<T: Serialize, U: IntoUrl, V: DeserializeOwned>( + &self, + url: U, + body: &T, + ) -> Result<V, Error> { + let response = self + .client + .post(url) + .headers(self.headers()?) + .json(body) + .send() + .await + .map_err(Error::Reqwest)?; + let response = ok_or_error(response).await?; + self.signed_json(response).await + } + + /// Perform a HTTP PATCH request. + async fn patch<T: Serialize, U: IntoUrl>(&self, url: U, body: &T) -> Result<(), Error> { + let response = self + .client + .patch(url) + .headers(self.headers()?) + .json(body) + .send() + .await + .map_err(Error::Reqwest)?; + let response = ok_or_error(response).await?; + self.signed_body(response).await?; + Ok(()) + } + + /// `GET lighthouse/version` + pub async fn get_lighthouse_version(&self) -> Result<GenericResponse<VersionData>, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("version"); + + self.get(path).await + } + + /// `GET lighthouse/health` + pub async fn get_lighthouse_health(&self) -> Result<GenericResponse<Health>, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("health"); + + self.get(path).await + } + + /// `GET lighthouse/spec` + pub async fn get_lighthouse_spec(&self) -> Result<GenericResponse<YamlConfig>, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("lighthouse") + .push("spec"); + + self.get(path).await + } + + /// `GET lighthouse/validators` + pub async fn get_lighthouse_validators( + &self, + ) -> Result<GenericResponse<Vec<ValidatorData>>, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("validators"); + + self.get(path).await + } + + /// `GET lighthouse/validators/{validator_pubkey}` + pub async fn get_lighthouse_validators_pubkey( + &self, + validator_pubkey: &PublicKeyBytes, + ) -> Result<Option<GenericResponse<ValidatorData>>, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("validators") + .push(&validator_pubkey.to_string()); + + self.get_opt(path).await + } + + /// `POST lighthouse/validators` + pub async fn post_lighthouse_validators( + &self, + validators: Vec<ValidatorRequest>, + ) -> Result<GenericResponse<PostValidatorsResponseData>, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("validators"); + + self.post(path, &validators).await + } + + /// `POST lighthouse/validators/mnemonic` + pub async fn post_lighthouse_validators_mnemonic( + &self, + request: &CreateValidatorsMnemonicRequest, + ) -> Result<GenericResponse<Vec<CreatedValidator>>, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("validators") + .push("mnemonic"); + + self.post(path, &request).await + } + + /// `POST lighthouse/validators/keystore` + pub async fn post_lighthouse_validators_keystore( + &self, + request: &KeystoreValidatorsPostRequest, + ) -> Result<GenericResponse<ValidatorData>, Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("validators") + .push("keystore"); + + self.post(path, &request).await + } + + /// `PATCH lighthouse/validators/{validator_pubkey}` + pub async fn patch_lighthouse_validators( + &self, + voting_pubkey: &PublicKeyBytes, + enabled: bool, + ) -> Result<(), Error> { + let mut path = self.server.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("validators") + .push(&voting_pubkey.to_string()); + + self.patch(path, &ValidatorPatchRequest { enabled }).await + } +} + +/// Returns `Ok(response)` if the response is a `200 OK` response. Otherwise, creates an +/// appropriate error message. +async fn ok_or_error(response: Response) -> Result<Response, Error> { + let status = response.status(); + + if status == StatusCode::OK { + Ok(response) + } else if let Ok(message) = response.json().await { + Err(Error::ServerMessage(message)) + } else { + Err(Error::StatusCode(status)) + } +} diff --git a/common/eth2/src/lighthouse_vc/mod.rs b/common/eth2/src/lighthouse_vc/mod.rs new file mode 100644 index 000000000..b7de7c715 --- /dev/null +++ b/common/eth2/src/lighthouse_vc/mod.rs @@ -0,0 +1,9 @@ +pub mod http_client; +pub mod types; + +/// The number of bytes in the secp256k1 public key used as the authorization token for the VC API. +pub const PK_LEN: usize = 33; + +/// The prefix for the secp256k1 public key when it is used as the authorization token for the VC +/// API. 
+pub const SECRET_PREFIX: &str = "api-token-"; diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs new file mode 100644 index 000000000..64674e6fc --- /dev/null +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -0,0 +1,58 @@ +use account_utils::ZeroizeString; +use eth2_keystore::Keystore; +use serde::{Deserialize, Serialize}; + +pub use crate::lighthouse::Health; +pub use crate::types::{GenericResponse, VersionData}; +pub use types::*; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ValidatorData { + pub enabled: bool, + pub description: String, + pub voting_pubkey: PublicKeyBytes, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ValidatorRequest { + pub enable: bool, + pub description: String, + #[serde(with = "serde_utils::quoted_u64")] + pub deposit_gwei: u64, +} + +#[derive(Clone, PartialEq, Serialize, Deserialize)] +pub struct CreateValidatorsMnemonicRequest { + pub mnemonic: ZeroizeString, + #[serde(with = "serde_utils::quoted_u32")] + pub key_derivation_path_offset: u32, + pub validators: Vec<ValidatorRequest>, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct CreatedValidator { + pub enabled: bool, + pub description: String, + pub voting_pubkey: PublicKeyBytes, + pub eth1_deposit_tx_data: String, + #[serde(with = "serde_utils::quoted_u64")] + pub deposit_gwei: u64, +} + +#[derive(Clone, PartialEq, Serialize, Deserialize)] +pub struct PostValidatorsResponseData { + pub mnemonic: ZeroizeString, + pub validators: Vec<CreatedValidator>, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ValidatorPatchRequest { + pub enabled: bool, +} + +#[derive(Clone, PartialEq, Serialize, Deserialize)] +pub struct KeystoreValidatorsPostRequest { + pub password: ZeroizeString, + pub enable: bool, + pub keystore: Keystore, +} diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs new file mode 100644 index 000000000..c3a8d240c --- /dev/null +++ b/common/eth2/src/types.rs @@ -0,0 +1,432 @@ +//! This module exposes a superset of the `types` crate. It adds additional types that are only +//! required for the HTTP API. + +use eth2_libp2p::{Enr, Multiaddr}; +use serde::{Deserialize, Serialize}; +use std::convert::TryFrom; +use std::fmt; +use std::str::FromStr; + +pub use types::*; + +/// An API error serializable to JSON. 
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ErrorMessage { + pub code: u16, + pub message: String, + #[serde(default)] + pub stacktraces: Vec<String>, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct GenesisData { + #[serde(with = "serde_utils::quoted_u64")] + pub genesis_time: u64, + pub genesis_validators_root: Hash256, + #[serde(with = "serde_utils::bytes_4_hex")] + pub genesis_fork_version: [u8; 4], +} + +#[derive(Debug, Copy, Clone, PartialEq)] +pub enum BlockId { + Head, + Genesis, + Finalized, + Justified, + Slot(Slot), + Root(Hash256), +} + +impl FromStr for BlockId { + type Err = String; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + match s { + "head" => Ok(BlockId::Head), + "genesis" => Ok(BlockId::Genesis), + "finalized" => Ok(BlockId::Finalized), + "justified" => Ok(BlockId::Justified), + other => { + if other.starts_with("0x") { + Hash256::from_str(&s[2..]) + .map(BlockId::Root) + .map_err(|e| format!("{} cannot be parsed as a root", e)) + } else { + u64::from_str(s) + .map(Slot::new) + .map(BlockId::Slot) + .map_err(|_| format!("{} cannot be parsed as a parameter", s)) + } + } + } + } +} + +impl fmt::Display for BlockId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + BlockId::Head => write!(f, "head"), + BlockId::Genesis => write!(f, "genesis"), + BlockId::Finalized => write!(f, "finalized"), + BlockId::Justified => write!(f, "justified"), + BlockId::Slot(slot) => write!(f, "{}", slot), + BlockId::Root(root) => write!(f, "{:?}", root), + } + } +} + +#[derive(Debug, Copy, Clone, PartialEq)] +pub enum StateId { + Head, + Genesis, + Finalized, + Justified, + Slot(Slot), + Root(Hash256), +} + +impl FromStr for StateId { + type Err = String; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + match s { + "head" => Ok(StateId::Head), + "genesis" => Ok(StateId::Genesis), + "finalized" => Ok(StateId::Finalized), + "justified" => Ok(StateId::Justified), + other => { + if other.starts_with("0x") { + Hash256::from_str(&s[2..]) + .map(StateId::Root) + .map_err(|e| format!("{} cannot be parsed as a root", e)) + } else { + u64::from_str(s) + .map(Slot::new) + .map(StateId::Slot) + .map_err(|_| format!("{} cannot be parsed as a slot", s)) + } + } + } + } +} + +impl fmt::Display for StateId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + StateId::Head => write!(f, "head"), + StateId::Genesis => write!(f, "genesis"), + StateId::Finalized => write!(f, "finalized"), + StateId::Justified => write!(f, "justified"), + StateId::Slot(slot) => write!(f, "{}", slot), + StateId::Root(root) => write!(f, "{:?}", root), + } + } +} + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[serde(bound = "T: Serialize + serde::de::DeserializeOwned")] +pub struct GenericResponse<T: Serialize + serde::de::DeserializeOwned> { + pub data: T, +} + +impl<T: Serialize + serde::de::DeserializeOwned> From<T> for GenericResponse<T> { + fn from(data: T) -> Self { + Self { data } + } +} + +#[derive(Debug, PartialEq, Clone, Serialize)] +#[serde(bound = "T: Serialize")] +pub struct GenericResponseRef<'a, T: Serialize> { + pub data: &'a T, +} + +impl<'a, T: Serialize> From<&'a T> for GenericResponseRef<'a, T> { + fn from(data: &'a T) -> Self { + Self { data } + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] +pub struct RootData { + pub root: Hash256, +} + +impl From<Hash256> for RootData { + fn from(root: Hash256) -> Self { + Self { root } + } +} + 
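Every success body from these endpoints arrives wrapped in the `{"data": ...}` envelope modelled by `GenericResponse` above. A minimal decoding sketch, not part of the patch, assuming the crate is consumed as `eth2` and using a dummy all-zero root:

```rust
use eth2::types::{GenericResponse, RootData};

fn main() -> Result<(), serde_json::Error> {
    // The exact body shape that `get_beacon_states_root` decodes.
    let json = format!(r#"{{"data":{{"root":"0x{}"}}}}"#, "00".repeat(32));
    let resp: GenericResponse<RootData> = serde_json::from_str(&json)?;
    assert!(resp.data.root.is_zero());
    Ok(())
}
```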
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct FinalityCheckpointsData {
+    pub previous_justified: Checkpoint,
+    pub current_justified: Checkpoint,
+    pub finalized: Checkpoint,
+}
+
+#[derive(Debug, Clone, PartialEq)]
+pub enum ValidatorId {
+    PublicKey(PublicKeyBytes),
+    Index(u64),
+}
+
+impl FromStr for ValidatorId {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        if s.starts_with("0x") {
+            PublicKeyBytes::from_str(s)
+                .map(ValidatorId::PublicKey)
+                .map_err(|e| format!("{} cannot be parsed as a public key: {}", s, e))
+        } else {
+            u64::from_str(s)
+                .map(ValidatorId::Index)
+                .map_err(|e| format!("{} cannot be parsed as a validator index: {}", s, e))
+        }
+    }
+}
+
+impl fmt::Display for ValidatorId {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            ValidatorId::PublicKey(pubkey) => write!(f, "{:?}", pubkey),
+            ValidatorId::Index(index) => write!(f, "{}", index),
+        }
+    }
+}
+
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct ValidatorData {
+    #[serde(with = "serde_utils::quoted_u64")]
+    pub index: u64,
+    #[serde(with = "serde_utils::quoted_u64")]
+    pub balance: u64,
+    pub status: ValidatorStatus,
+    pub validator: Validator,
+}
+
+// TODO: This does not currently match the spec, but I'm going to try and change the spec using
+// this proposal:
+//
+// https://hackmd.io/bQxMDRt1RbS1TLno8K4NPg?view
+#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
+pub enum ValidatorStatus {
+    Unknown,
+    WaitingForEligibility,
+    WaitingForFinality,
+    WaitingInQueue,
+    StandbyForActive(Epoch),
+    Active,
+    ActiveAwaitingVoluntaryExit(Epoch),
+    ActiveAwaitingSlashedExit(Epoch),
+    ExitedVoluntarily(Epoch),
+    ExitedSlashed(Epoch),
+    Withdrawable,
+    Withdrawn,
+}
+
+impl ValidatorStatus {
+    pub fn from_validator(
+        validator_opt: Option<&Validator>,
+        epoch: Epoch,
+        finalized_epoch: Epoch,
+        far_future_epoch: Epoch,
+    ) -> Self {
+        if let Some(validator) = validator_opt {
+            if validator.is_withdrawable_at(epoch) {
+                ValidatorStatus::Withdrawable
+            } else if validator.is_exited_at(epoch) {
+                if validator.slashed {
+                    ValidatorStatus::ExitedSlashed(validator.withdrawable_epoch)
+                } else {
+                    ValidatorStatus::ExitedVoluntarily(validator.withdrawable_epoch)
+                }
+            } else if validator.is_active_at(epoch) {
+                if validator.exit_epoch < far_future_epoch {
+                    if validator.slashed {
+                        ValidatorStatus::ActiveAwaitingSlashedExit(validator.exit_epoch)
+                    } else {
+                        ValidatorStatus::ActiveAwaitingVoluntaryExit(validator.exit_epoch)
+                    }
+                } else {
+                    ValidatorStatus::Active
+                }
+            } else if validator.activation_epoch < far_future_epoch {
+                ValidatorStatus::StandbyForActive(validator.activation_epoch)
+            } else if validator.activation_eligibility_epoch < far_future_epoch {
+                if finalized_epoch < validator.activation_eligibility_epoch {
+                    ValidatorStatus::WaitingForFinality
+                } else {
+                    ValidatorStatus::WaitingInQueue
+                }
+            } else {
+                ValidatorStatus::WaitingForEligibility
+            }
+        } else {
+            ValidatorStatus::Unknown
+        }
+    }
+}
+
+#[derive(Serialize, Deserialize)]
+pub struct CommitteesQuery {
+    pub slot: Option<Slot>,
+    pub index: Option<u64>,
+}
+
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct CommitteeData {
+    #[serde(with = "serde_utils::quoted_u64")]
+    pub index: u64,
+    pub slot: Slot,
+    #[serde(with = "serde_utils::quoted_u64_vec")]
+    pub validators: Vec<u64>,
+}
+
+#[derive(Serialize, Deserialize)]
+pub struct HeadersQuery {
+    pub slot: Option<Slot>,
+    pub parent_root: Option<Hash256>,
+}
+
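The `from_validator` cascade above orders its checks from the most terminal state downwards. A rough illustration of how the branches resolve (a sketch only; `validator` and `spec` are hypothetical bindings, not part of the patch):

```rust
// A validator that is active at the epoch, not slashed, and whose exit_epoch
// is still the far-future sentinel falls through to plain `Active`.
let status = ValidatorStatus::from_validator(
    Some(&validator),      // a `Validator` taken from the beacon state
    Epoch::new(100),       // current epoch
    Epoch::new(98),        // finalized epoch
    spec.far_future_epoch, // sentinel meaning "no exit scheduled"
);
assert_eq!(status, ValidatorStatus::Active);

// With no matching validator at all, the status is simply `Unknown`.
let far_future = spec.far_future_epoch;
assert_eq!(
    ValidatorStatus::from_validator(None, Epoch::new(100), Epoch::new(98), far_future),
    ValidatorStatus::Unknown,
);
```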
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct BlockHeaderAndSignature {
+    pub message: BeaconBlockHeader,
+    pub signature: SignatureBytes,
+}
+
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct BlockHeaderData {
+    pub root: Hash256,
+    pub canonical: bool,
+    pub header: BlockHeaderAndSignature,
+}
+
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct DepositContractData {
+    #[serde(with = "serde_utils::quoted_u64")]
+    pub chain_id: u64,
+    pub address: Address,
+}
+
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct ChainHeadData {
+    pub slot: Slot,
+    pub root: Hash256,
+}
+
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct IdentityData {
+    pub peer_id: String,
+    pub enr: Enr,
+    pub p2p_addresses: Vec<Multiaddr>,
+    // TODO: missing the following fields:
+    //
+    // - discovery_addresses
+    // - metadata
+    //
+    // Tracked here: https://github.com/sigp/lighthouse/issues/1434
+}
+
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct VersionData {
+    pub version: String,
+}
+
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct SyncingData {
+    pub is_syncing: bool,
+    pub head_slot: Slot,
+    pub sync_distance: Slot,
+}
+
+#[derive(Clone, PartialEq, Debug, Deserialize)]
+#[serde(try_from = "String", bound = "T: FromStr")]
+pub struct QueryVec<T: FromStr>(pub Vec<T>);
+
+impl<T: FromStr> TryFrom<String> for QueryVec<T> {
+    type Error = String;
+
+    fn try_from(string: String) -> Result<Self, Self::Error> {
+        if string.is_empty() {
+            return Ok(Self(vec![]));
+        }
+
+        string
+            .split(',')
+            .map(|s| s.parse().map_err(|_| "unable to parse".to_string()))
+            .collect::<Result<Vec<T>, String>>()
+            .map(Self)
+    }
+}
+
+#[derive(Clone, Deserialize)]
+pub struct ValidatorDutiesQuery {
+    pub index: Option<QueryVec<u64>>,
+}
+
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct AttesterData {
+    pub pubkey: PublicKeyBytes,
+    #[serde(with = "serde_utils::quoted_u64")]
+    pub validator_index: u64,
+    #[serde(with = "serde_utils::quoted_u64")]
+    pub committees_at_slot: u64,
+    #[serde(with = "serde_utils::quoted_u64")]
+    pub committee_index: CommitteeIndex,
+    #[serde(with = "serde_utils::quoted_u64")]
+    pub committee_length: u64,
+    #[serde(with = "serde_utils::quoted_u64")]
+    pub validator_committee_index: u64,
+    pub slot: Slot,
+}
+
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct ProposerData {
+    pub pubkey: PublicKeyBytes,
+    pub slot: Slot,
+}
+
+#[derive(Clone, Serialize, Deserialize)]
+pub struct ValidatorBlocksQuery {
+    pub randao_reveal: SignatureBytes,
+    pub graffiti: Option<Graffiti>,
+}
+
+#[derive(Clone, Serialize, Deserialize)]
+pub struct ValidatorAttestationDataQuery {
+    pub slot: Slot,
+    pub committee_index: CommitteeIndex,
+}
+
+#[derive(Clone, Serialize, Deserialize)]
+pub struct ValidatorAggregateAttestationQuery {
+    pub attestation_data_root: Hash256,
+    pub slot: Slot,
+}
+
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct BeaconCommitteeSubscription {
+    #[serde(with = "serde_utils::quoted_u64")]
+    pub validator_index: u64,
+    #[serde(with = "serde_utils::quoted_u64")]
+    pub committee_index: u64,
+    #[serde(with = "serde_utils::quoted_u64")]
+    pub committees_at_slot: u64,
+    pub slot: Slot,
+    pub is_aggregator: bool,
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn query_vec() {
+        assert_eq!(
+            QueryVec::try_from("0,1,2".to_string()).unwrap(),
+            QueryVec(vec![0_u64, 1, 2])
+        );
+    }
+}
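Worth noting: the `serde_utils::quoted_u64` attributes used throughout these types serialize integers as JSON strings, which keeps values above 2^53 exact for JavaScript consumers. A sketch of the resulting wire format (illustrative; assumes `serde_json` is available as a dev-dependency):

```rust
let subscription = BeaconCommitteeSubscription {
    validator_index: 300,
    committee_index: 20,
    committees_at_slot: 30,
    slot: Slot::new(20),
    is_aggregator: false,
};

// The quoted fields come out as strings, the rest as native JSON types.
let json = serde_json::to_string(&subscription).unwrap();
assert!(json.contains(r#""validator_index":"300""#));
assert!(json.contains(r#""is_aggregator":false"#));
```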
diff --git a/common/eth2_config/Cargo.toml b/common/eth2_config/Cargo.toml
index b551a2349..cb5ee888d 100644
--- a/common/eth2_config/Cargo.toml
+++ b/common/eth2_config/Cargo.toml
@@ -5,7 +5,7 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
 edition = "2018"
 
 [dependencies]
-serde = "1.0.110"
-serde_derive = "1.0.110"
+serde = "1.0.116"
+serde_derive = "1.0.116"
 toml = "0.5.6"
 types = { path = "../../consensus/types" }
diff --git a/common/eth2_config/src/lib.rs b/common/eth2_config/src/lib.rs
index 6ec5a2162..31258a2ad 100644
--- a/common/eth2_config/src/lib.rs
+++ b/common/eth2_config/src/lib.rs
@@ -111,6 +111,8 @@ define_net!(medalla, include_medalla_file, "medalla", true);
 
 define_net!(spadina, include_spadina_file, "spadina", true);
 
+define_net!(zinken, include_zinken_file, "zinken", true);
+
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/common/eth2_interop_keypairs/Cargo.toml b/common/eth2_interop_keypairs/Cargo.toml
index 6062257cd..c451e068c 100644
--- a/common/eth2_interop_keypairs/Cargo.toml
+++ b/common/eth2_interop_keypairs/Cargo.toml
@@ -11,10 +11,10 @@ lazy_static = "1.4.0"
 num-bigint = "0.3.0"
 eth2_hashing = "0.1.0"
 hex = "0.4.2"
-serde_yaml = "0.8.11"
-serde = "1.0.110"
-serde_derive = "1.0.110"
+serde_yaml = "0.8.13"
+serde = "1.0.116"
+serde_derive = "1.0.116"
 bls = { path = "../../crypto/bls" }
 
 [dev-dependencies]
-base64 = "0.12.1"
+base64 = "0.13.0"
diff --git a/common/eth2_testnet_config/Cargo.toml b/common/eth2_testnet_config/Cargo.toml
index f5351875c..7ee8eb801 100644
--- a/common/eth2_testnet_config/Cargo.toml
+++ b/common/eth2_testnet_config/Cargo.toml
@@ -7,16 +7,16 @@ edition = "2018"
 build = "build.rs"
 
 [build-dependencies]
-zip = "0.5"
+zip = "0.5.8"
 eth2_config = { path = "../eth2_config"}
 
 [dev-dependencies]
 tempdir = "0.3.7"
 
 [dependencies]
-serde = "1.0.110"
-serde_yaml = "0.8.11"
+serde = "1.0.116"
+serde_yaml = "0.8.13"
 types = { path = "../../consensus/types"}
-enr = { version = "0.1.0", features = ["libsecp256k1", "ed25519"] }
 eth2_ssz = "0.1.2"
 eth2_config = { path = "../eth2_config"}
+enr = "0.3.0"
diff --git a/common/eth2_testnet_config/build.rs b/common/eth2_testnet_config/build.rs
index 588ec90a0..e9f4794d4 100644
--- a/common/eth2_testnet_config/build.rs
+++ b/common/eth2_testnet_config/build.rs
@@ -1,6 +1,6 @@
 //! Downloads a testnet configuration from Github.
 
-use eth2_config::{altona, medalla, spadina, Eth2NetArchiveAndDirectory};
+use eth2_config::{altona, medalla, spadina, zinken, Eth2NetArchiveAndDirectory};
 use std::fs;
 use std::fs::File;
 use std::io;
@@ -10,6 +10,7 @@ const ETH2_NET_DIRS: &[Eth2NetArchiveAndDirectory<'static>] = &[
     altona::ETH2_NET_DIR,
     medalla::ETH2_NET_DIR,
     spadina::ETH2_NET_DIR,
+    zinken::ETH2_NET_DIR,
 ];
 
 fn main() {
diff --git a/common/eth2_testnet_config/src/lib.rs b/common/eth2_testnet_config/src/lib.rs
index 1b0d4a933..37d532145 100644
--- a/common/eth2_testnet_config/src/lib.rs
+++ b/common/eth2_testnet_config/src/lib.rs
@@ -7,7 +7,9 @@
 //!
 //! https://github.com/sigp/lighthouse/pull/605
 //!
-use eth2_config::{include_altona_file, include_medalla_file, include_spadina_file, unique_id};
+use eth2_config::{
+    include_altona_file, include_medalla_file, include_spadina_file, include_zinken_file, unique_id,
+};
 use enr::{CombinedKey, Enr};
 use ssz::{Decode, Encode};
@@ -54,8 +56,9 @@ macro_rules! define_net {
 const ALTONA: HardcodedNet = define_net!(altona, include_altona_file);
 const MEDALLA: HardcodedNet = define_net!(medalla, include_medalla_file);
 const SPADINA: HardcodedNet = define_net!(spadina, include_spadina_file);
+const ZINKEN: HardcodedNet = define_net!(zinken, include_zinken_file);
 
-const HARDCODED_NETS: &[HardcodedNet] = &[ALTONA, MEDALLA, SPADINA];
+const HARDCODED_NETS: &[HardcodedNet] = &[ALTONA, MEDALLA, SPADINA, ZINKEN];
 
 pub const DEFAULT_HARDCODED_TESTNET: &str = "medalla";
 
 /// Specifies an Eth2 testnet.
diff --git a/common/eth2_testnet_config/testnet_zinken.zip b/common/eth2_testnet_config/testnet_zinken.zip
new file mode 100644
index 000000000..1457638d1
Binary files /dev/null and b/common/eth2_testnet_config/testnet_zinken.zip differ
diff --git a/common/lighthouse_metrics/Cargo.toml b/common/lighthouse_metrics/Cargo.toml
index c6f11391f..a8380b85c 100644
--- a/common/lighthouse_metrics/Cargo.toml
+++ b/common/lighthouse_metrics/Cargo.toml
@@ -8,4 +8,4 @@ edition = "2018"
 
 [dependencies]
 lazy_static = "1.4.0"
-prometheus = "0.9.0"
+prometheus = "0.10.0"
diff --git a/common/lighthouse_metrics/src/lib.rs b/common/lighthouse_metrics/src/lib.rs
index 0a4251e06..0637b973c 100644
--- a/common/lighthouse_metrics/src/lib.rs
+++ b/common/lighthouse_metrics/src/lib.rs
@@ -55,6 +55,7 @@
 //! ```
 
 use prometheus::{HistogramOpts, HistogramTimer, Opts};
+use std::time::Duration;
 
 pub use prometheus::{
     Encoder, Gauge, GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterVec, IntGauge,
@@ -221,6 +222,19 @@ pub fn start_timer(histogram: &Result<Histogram>) -> Option<HistogramTimer> {
     }
 }
 
+/// Observes `duration` on the histogram in `vec` with the given `name`.
+pub fn observe_timer_vec(vec: &Result<HistogramVec>, name: &[&str], duration: Duration) {
+    // This conversion was taken from here:
+    //
+    // https://docs.rs/prometheus/0.5.0/src/prometheus/histogram.rs.html#550-555
+    let nanos = f64::from(duration.subsec_nanos()) / 1e9;
+    let secs = duration.as_secs() as f64 + nanos;
+
+    if let Some(h) = get_histogram(vec, name) {
+        h.observe(secs)
+    }
+}
+
 /// Stops a timer created with `start_timer(..)`.
pub fn stop_timer(timer: Option<HistogramTimer>) { if let Some(t) = timer { diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index d88369793..c9abcf099 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -10,7 +10,7 @@ use target_info::Target; /// `Lighthouse/v0.2.0-1419501f2+` pub const VERSION: &str = git_version!( args = ["--always", "--dirty=+"], - prefix = "Lighthouse/v0.2.13-", + prefix = "Lighthouse/v0.3.0-", fallback = "unknown" ); diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index b3c50c6d6..8e19c1f11 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -6,6 +6,6 @@ edition = "2018" [dependencies] slog = "2.5.2" -slog-term = "2.5.0" +slog-term = "2.6.0" lighthouse_metrics = { path = "../lighthouse_metrics" } lazy_static = "1.4.0" diff --git a/common/remote_beacon_node/Cargo.toml b/common/remote_beacon_node/Cargo.toml deleted file mode 100644 index 38ee8c7ca..000000000 --- a/common/remote_beacon_node/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "remote_beacon_node" -version = "0.2.0" -authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2018" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -reqwest = { version = "0.10.4", features = ["json", "native-tls-vendored"] } -url = "2.1.1" -serde = "1.0.110" -futures = "0.3.5" -types = { path = "../../consensus/types" } -rest_types = { path = "../rest_types" } -hex = "0.4.2" -eth2_ssz = "0.1.2" -serde_json = "1.0.52" -eth2_config = { path = "../eth2_config" } -proto_array = { path = "../../consensus/proto_array" } -operation_pool = { path = "../../beacon_node/operation_pool" } diff --git a/common/remote_beacon_node/src/lib.rs b/common/remote_beacon_node/src/lib.rs deleted file mode 100644 index 199efefd9..000000000 --- a/common/remote_beacon_node/src/lib.rs +++ /dev/null @@ -1,732 +0,0 @@ -//! Provides a `RemoteBeaconNode` which interacts with a HTTP API on another Lighthouse (or -//! compatible) instance. -//! -//! Presently, this is only used for testing but it _could_ become a user-facing library. - -use eth2_config::Eth2Config; -use reqwest::{Client, ClientBuilder, Response, StatusCode}; -use serde::{de::DeserializeOwned, Deserialize, Serialize}; -use ssz::Encode; -use std::marker::PhantomData; -use std::time::Duration; -use types::{ - Attestation, AttestationData, AttesterSlashing, BeaconBlock, BeaconState, CommitteeIndex, - Epoch, EthSpec, Fork, Graffiti, Hash256, ProposerSlashing, PublicKey, PublicKeyBytes, - Signature, SignedAggregateAndProof, SignedBeaconBlock, Slot, SubnetId, -}; -use url::Url; - -pub use operation_pool::PersistedOperationPool; -pub use proto_array::core::ProtoArray; -pub use rest_types::{ - CanonicalHeadResponse, Committee, HeadBeaconBlock, Health, IndividualVotesRequest, - IndividualVotesResponse, SyncingResponse, ValidatorDutiesRequest, ValidatorDutyBytes, - ValidatorRequest, ValidatorResponse, ValidatorSubscription, -}; - -// Setting a long timeout for debug ensures that crypto-heavy operations can still succeed. -#[cfg(debug_assertions)] -pub const REQUEST_TIMEOUT_SECONDS: u64 = 15; - -#[cfg(not(debug_assertions))] -pub const REQUEST_TIMEOUT_SECONDS: u64 = 5; - -#[derive(Clone)] -/// Connects to a remote Lighthouse (or compatible) node via HTTP. 
-pub struct RemoteBeaconNode<E: EthSpec> { - pub http: HttpClient<E>, -} - -impl<E: EthSpec> RemoteBeaconNode<E> { - /// Uses the default HTTP timeout. - pub fn new(http_endpoint: String) -> Result<Self, String> { - Self::new_with_timeout(http_endpoint, Duration::from_secs(REQUEST_TIMEOUT_SECONDS)) - } - - pub fn new_with_timeout(http_endpoint: String, timeout: Duration) -> Result<Self, String> { - Ok(Self { - http: HttpClient::new(http_endpoint, timeout) - .map_err(|e| format!("Unable to create http client: {:?}", e))?, - }) - } -} - -#[derive(Debug)] -pub enum Error { - /// Unable to parse a URL. Check the server URL. - UrlParseError(url::ParseError), - /// The `reqwest` library returned an error. - ReqwestError(reqwest::Error), - /// There was an error when encoding/decoding an object using serde. - SerdeJsonError(serde_json::Error), - /// The server responded to the request, however it did not return a 200-type success code. - DidNotSucceed { status: StatusCode, body: String }, - /// The request input was invalid. - InvalidInput, -} - -#[derive(Clone)] -pub struct HttpClient<E> { - client: Client, - url: Url, - timeout: Duration, - _phantom: PhantomData<E>, -} - -impl<E: EthSpec> HttpClient<E> { - /// Creates a new instance (without connecting to the node). - pub fn new(server_url: String, timeout: Duration) -> Result<Self, Error> { - Ok(Self { - client: ClientBuilder::new() - .timeout(timeout) - .build() - .expect("should build from static configuration"), - url: Url::parse(&server_url)?, - timeout: Duration::from_secs(15), - _phantom: PhantomData, - }) - } - - pub fn beacon(&self) -> Beacon<E> { - Beacon(self.clone()) - } - - pub fn validator(&self) -> Validator<E> { - Validator(self.clone()) - } - - pub fn spec(&self) -> Spec<E> { - Spec(self.clone()) - } - - pub fn node(&self) -> Node<E> { - Node(self.clone()) - } - - pub fn advanced(&self) -> Advanced<E> { - Advanced(self.clone()) - } - - pub fn consensus(&self) -> Consensus<E> { - Consensus(self.clone()) - } - - fn url(&self, path: &str) -> Result<Url, Error> { - self.url.join(path).map_err(|e| e.into()) - } - - pub async fn json_post<T: Serialize>(&self, url: Url, body: T) -> Result<Response, Error> { - self.client - .post(&url.to_string()) - .json(&body) - .send() - .await - .map_err(Error::from) - } - - pub async fn json_get<T: DeserializeOwned>( - &self, - mut url: Url, - query_pairs: Vec<(String, String)>, - ) -> Result<T, Error> { - query_pairs.into_iter().for_each(|(key, param)| { - url.query_pairs_mut().append_pair(&key, ¶m); - }); - - let response = self - .client - .get(&url.to_string()) - .send() - .await - .map_err(Error::from)?; - - let success = error_for_status(response).await.map_err(Error::from)?; - success.json::<T>().await.map_err(Error::from) - } -} - -/// Returns an `Error` (with a description) if the `response` was not a 200-type success response. -/// -/// Distinct from `Response::error_for_status` because it includes the body of the response as -/// text. This ensures the error message from the server is not discarded. -async fn error_for_status(response: Response) -> Result<Response, Error> { - let status = response.status(); - - if status.is_success() { - Ok(response) - } else { - let text_result = response.text().await; - match text_result { - Err(e) => Err(Error::ReqwestError(e)), - Ok(body) => Err(Error::DidNotSucceed { status, body }), - } - } -} - -#[derive(Debug, PartialEq, Clone)] -pub enum PublishStatus { - /// The object was valid and has been published to the network. 
- Valid, - /// The object was not valid and may or may not have been published to the network. - Invalid(String), - /// The server responded with an unknown status code. The object may or may not have been - /// published to the network. - Unknown, -} - -impl PublishStatus { - /// Returns `true` if `*self == PublishStatus::Valid`. - pub fn is_valid(&self) -> bool { - *self == PublishStatus::Valid - } -} - -/// Provides the functions on the `/validator` endpoint of the node. -#[derive(Clone)] -pub struct Validator<E>(HttpClient<E>); - -impl<E: EthSpec> Validator<E> { - fn url(&self, path: &str) -> Result<Url, Error> { - self.0 - .url("validator/") - .and_then(move |url| url.join(path).map_err(Error::from)) - .map_err(Into::into) - } - - /// Produces an unsigned attestation. - pub async fn produce_attestation( - &self, - slot: Slot, - committee_index: CommitteeIndex, - ) -> Result<Attestation<E>, Error> { - let query_params = vec![ - ("slot".into(), format!("{}", slot)), - ("committee_index".into(), format!("{}", committee_index)), - ]; - - let client = self.0.clone(); - let url = self.url("attestation")?; - client.json_get(url, query_params).await - } - - /// Produces an aggregate attestation. - pub async fn produce_aggregate_attestation( - &self, - attestation_data: &AttestationData, - ) -> Result<Attestation<E>, Error> { - let query_params = vec![( - "attestation_data".into(), - as_ssz_hex_string(attestation_data), - )]; - - let client = self.0.clone(); - let url = self.url("aggregate_attestation")?; - client.json_get(url, query_params).await - } - - /// Posts a list of attestations to the beacon node, expecting it to verify it and publish it to the network. - pub async fn publish_attestations( - &self, - attestation: Vec<(Attestation<E>, SubnetId)>, - ) -> Result<PublishStatus, Error> { - let client = self.0.clone(); - let url = self.url("attestations")?; - let response = client.json_post::<_>(url, attestation).await?; - - match response.status() { - StatusCode::OK => Ok(PublishStatus::Valid), - StatusCode::ACCEPTED => Ok(PublishStatus::Invalid( - response.text().await.map_err(Error::from)?, - )), - _ => response - .error_for_status() - .map_err(Error::from) - .map(|_| PublishStatus::Unknown), - } - } - - /// Posts a list of signed aggregates and proofs to the beacon node, expecting it to verify it and publish it to the network. - pub async fn publish_aggregate_and_proof( - &self, - signed_aggregate_and_proofs: Vec<SignedAggregateAndProof<E>>, - ) -> Result<PublishStatus, Error> { - let client = self.0.clone(); - let url = self.url("aggregate_and_proofs")?; - let response = client - .json_post::<_>(url, signed_aggregate_and_proofs) - .await?; - - match response.status() { - StatusCode::OK => Ok(PublishStatus::Valid), - StatusCode::ACCEPTED => Ok(PublishStatus::Invalid( - response.text().await.map_err(Error::from)?, - )), - _ => response - .error_for_status() - .map_err(Error::from) - .map(|_| PublishStatus::Unknown), - } - } - - /// Returns the duties required of the given validator pubkeys in the given epoch. 
- pub async fn get_duties( - &self, - epoch: Epoch, - validator_pubkeys: &[PublicKey], - ) -> Result<Vec<ValidatorDutyBytes>, Error> { - let client = self.0.clone(); - - let bulk_request = ValidatorDutiesRequest { - epoch, - pubkeys: validator_pubkeys - .iter() - .map(|pubkey| pubkey.clone().into()) - .collect(), - }; - - let url = self.url("duties")?; - let response = client.json_post::<_>(url, bulk_request).await?; - let success = error_for_status(response).await.map_err(Error::from)?; - success.json().await.map_err(Error::from) - } - - /// Posts a block to the beacon node, expecting it to verify it and publish it to the network. - pub async fn publish_block(&self, block: SignedBeaconBlock<E>) -> Result<PublishStatus, Error> { - let client = self.0.clone(); - let url = self.url("block")?; - let response = client.json_post::<_>(url, block).await?; - - match response.status() { - StatusCode::OK => Ok(PublishStatus::Valid), - StatusCode::ACCEPTED => Ok(PublishStatus::Invalid( - response.text().await.map_err(Error::from)?, - )), - _ => response - .error_for_status() - .map_err(Error::from) - .map(|_| PublishStatus::Unknown), - } - } - - /// Requests a new (unsigned) block from the beacon node. - pub async fn produce_block( - &self, - slot: Slot, - randao_reveal: Signature, - graffiti: Option<Graffiti>, - ) -> Result<BeaconBlock<E>, Error> { - let client = self.0.clone(); - let url = self.url("block")?; - - let mut query_pairs = vec![ - ("slot".into(), format!("{}", slot.as_u64())), - ("randao_reveal".into(), as_ssz_hex_string(&randao_reveal)), - ]; - - if let Some(graffiti_bytes) = graffiti { - query_pairs.push(("graffiti".into(), as_ssz_hex_string(&graffiti_bytes))); - } - - client.json_get::<BeaconBlock<E>>(url, query_pairs).await - } - - /// Subscribes a list of validators to particular slots for attestation production/publication. - pub async fn subscribe( - &self, - subscriptions: Vec<ValidatorSubscription>, - ) -> Result<PublishStatus, Error> { - let client = self.0.clone(); - let url = self.url("subscribe")?; - let response = client.json_post::<_>(url, subscriptions).await?; - - match response.status() { - StatusCode::OK => Ok(PublishStatus::Valid), - StatusCode::ACCEPTED => Ok(PublishStatus::Invalid( - response.text().await.map_err(Error::from)?, - )), - _ => response - .error_for_status() - .map_err(Error::from) - .map(|_| PublishStatus::Unknown), - } - } -} - -/// Provides the functions on the `/beacon` endpoint of the node. -#[derive(Clone)] -pub struct Beacon<E>(HttpClient<E>); - -impl<E: EthSpec> Beacon<E> { - fn url(&self, path: &str) -> Result<Url, Error> { - self.0 - .url("beacon/") - .and_then(move |url| url.join(path).map_err(Error::from)) - .map_err(Into::into) - } - - /// Returns the genesis time. - pub async fn get_genesis_time(&self) -> Result<u64, Error> { - let client = self.0.clone(); - let url = self.url("genesis_time")?; - client.json_get(url, vec![]).await - } - - /// Returns the genesis validators root. - pub async fn get_genesis_validators_root(&self) -> Result<Hash256, Error> { - let client = self.0.clone(); - let url = self.url("genesis_validators_root")?; - client.json_get(url, vec![]).await - } - - /// Returns the fork at the head of the beacon chain. - pub async fn get_fork(&self) -> Result<Fork, Error> { - let client = self.0.clone(); - let url = self.url("fork")?; - client.json_get(url, vec![]).await - } - - /// Returns info about the head of the canonical beacon chain. 
- pub async fn get_head(&self) -> Result<CanonicalHeadResponse, Error> { - let client = self.0.clone(); - let url = self.url("head")?; - client.json_get::<CanonicalHeadResponse>(url, vec![]).await - } - - /// Returns the set of known beacon chain head blocks. One of these will be the canonical head. - pub async fn get_heads(&self) -> Result<Vec<HeadBeaconBlock>, Error> { - let client = self.0.clone(); - let url = self.url("heads")?; - client.json_get(url, vec![]).await - } - - /// Returns the block and block root at the given slot. - pub async fn get_block_by_slot( - &self, - slot: Slot, - ) -> Result<(SignedBeaconBlock<E>, Hash256), Error> { - self.get_block("slot".to_string(), format!("{}", slot.as_u64())) - .await - } - - /// Returns the block and block root at the given root. - pub async fn get_block_by_root( - &self, - root: Hash256, - ) -> Result<(SignedBeaconBlock<E>, Hash256), Error> { - self.get_block("root".to_string(), root_as_string(root)) - .await - } - - /// Returns the block and block root at the given slot. - async fn get_block( - &self, - query_key: String, - query_param: String, - ) -> Result<(SignedBeaconBlock<E>, Hash256), Error> { - let client = self.0.clone(); - let url = self.url("block")?; - client - .json_get::<BlockResponse<E>>(url, vec![(query_key, query_param)]) - .await - .map(|response| (response.beacon_block, response.root)) - } - - /// Returns the state and state root at the given slot. - pub async fn get_state_by_slot(&self, slot: Slot) -> Result<(BeaconState<E>, Hash256), Error> { - self.get_state("slot".to_string(), format!("{}", slot.as_u64())) - .await - } - - /// Returns the state and state root at the given root. - pub async fn get_state_by_root( - &self, - root: Hash256, - ) -> Result<(BeaconState<E>, Hash256), Error> { - self.get_state("root".to_string(), root_as_string(root)) - .await - } - - /// Returns the root of the state at the given slot. - pub async fn get_state_root(&self, slot: Slot) -> Result<Hash256, Error> { - let client = self.0.clone(); - let url = self.url("state_root")?; - client - .json_get(url, vec![("slot".into(), format!("{}", slot.as_u64()))]) - .await - } - - /// Returns the root of the block at the given slot. - pub async fn get_block_root(&self, slot: Slot) -> Result<Hash256, Error> { - let client = self.0.clone(); - let url = self.url("block_root")?; - client - .json_get(url, vec![("slot".into(), format!("{}", slot.as_u64()))]) - .await - } - - /// Returns the state and state root at the given slot. - async fn get_state( - &self, - query_key: String, - query_param: String, - ) -> Result<(BeaconState<E>, Hash256), Error> { - let client = self.0.clone(); - let url = self.url("state")?; - client - .json_get::<StateResponse<E>>(url, vec![(query_key, query_param)]) - .await - .map(|response| (response.beacon_state, response.root)) - } - - /// Returns the block and block root at the given slot. - /// - /// If `state_root` is `Some`, the query will use the given state instead of the default - /// canonical head state. 
- pub async fn get_validators( - &self, - validator_pubkeys: Vec<PublicKey>, - state_root: Option<Hash256>, - ) -> Result<Vec<ValidatorResponse>, Error> { - let client = self.0.clone(); - - let bulk_request = ValidatorRequest { - state_root, - pubkeys: validator_pubkeys - .iter() - .map(|pubkey| pubkey.clone().into()) - .collect(), - }; - - let url = self.url("validators")?; - let response = client.json_post::<_>(url, bulk_request).await?; - let success = error_for_status(response).await.map_err(Error::from)?; - success.json().await.map_err(Error::from) - } - - /// Returns all validators. - /// - /// If `state_root` is `Some`, the query will use the given state instead of the default - /// canonical head state. - pub async fn get_all_validators( - &self, - state_root: Option<Hash256>, - ) -> Result<Vec<ValidatorResponse>, Error> { - let client = self.0.clone(); - - let query_params = if let Some(state_root) = state_root { - vec![("state_root".into(), root_as_string(state_root))] - } else { - vec![] - }; - - let url = self.url("validators/all")?; - client.json_get(url, query_params).await - } - - /// Returns the active validators. - /// - /// If `state_root` is `Some`, the query will use the given state instead of the default - /// canonical head state. - pub async fn get_active_validators( - &self, - state_root: Option<Hash256>, - ) -> Result<Vec<ValidatorResponse>, Error> { - let client = self.0.clone(); - - let query_params = if let Some(state_root) = state_root { - vec![("state_root".into(), root_as_string(state_root))] - } else { - vec![] - }; - - let url = self.url("validators/active")?; - client.json_get(url, query_params).await - } - - /// Returns committees at the given epoch. - pub async fn get_committees(&self, epoch: Epoch) -> Result<Vec<Committee>, Error> { - let client = self.0.clone(); - - let url = self.url("committees")?; - client - .json_get(url, vec![("epoch".into(), format!("{}", epoch.as_u64()))]) - .await - } - - pub async fn proposer_slashing( - &self, - proposer_slashing: ProposerSlashing, - ) -> Result<bool, Error> { - let client = self.0.clone(); - - let url = self.url("proposer_slashing")?; - let response = client.json_post::<_>(url, proposer_slashing).await?; - let success = error_for_status(response).await.map_err(Error::from)?; - success.json().await.map_err(Error::from) - } - - pub async fn attester_slashing( - &self, - attester_slashing: AttesterSlashing<E>, - ) -> Result<bool, Error> { - let client = self.0.clone(); - - let url = self.url("attester_slashing")?; - let response = client.json_post::<_>(url, attester_slashing).await?; - let success = error_for_status(response).await.map_err(Error::from)?; - success.json().await.map_err(Error::from) - } -} - -/// Provides the functions on the `/spec` endpoint of the node. -#[derive(Clone)] -pub struct Spec<E>(HttpClient<E>); - -impl<E: EthSpec> Spec<E> { - fn url(&self, path: &str) -> Result<Url, Error> { - self.0 - .url("spec/") - .and_then(move |url| url.join(path).map_err(Error::from)) - .map_err(Into::into) - } - - pub async fn get_eth2_config(&self) -> Result<Eth2Config, Error> { - let client = self.0.clone(); - let url = self.url("eth2_config")?; - client.json_get(url, vec![]).await - } -} - -/// Provides the functions on the `/node` endpoint of the node. 
-#[derive(Clone)] -pub struct Node<E>(HttpClient<E>); - -impl<E: EthSpec> Node<E> { - fn url(&self, path: &str) -> Result<Url, Error> { - self.0 - .url("node/") - .and_then(move |url| url.join(path).map_err(Error::from)) - .map_err(Into::into) - } - - pub async fn get_version(&self) -> Result<String, Error> { - let client = self.0.clone(); - let url = self.url("version")?; - client.json_get(url, vec![]).await - } - - pub async fn get_health(&self) -> Result<Health, Error> { - let client = self.0.clone(); - let url = self.url("health")?; - client.json_get(url, vec![]).await - } - - pub async fn syncing_status(&self) -> Result<SyncingResponse, Error> { - let client = self.0.clone(); - let url = self.url("syncing")?; - client.json_get(url, vec![]).await - } -} - -/// Provides the functions on the `/advanced` endpoint of the node. -#[derive(Clone)] -pub struct Advanced<E>(HttpClient<E>); - -impl<E: EthSpec> Advanced<E> { - fn url(&self, path: &str) -> Result<Url, Error> { - self.0 - .url("advanced/") - .and_then(move |url| url.join(path).map_err(Error::from)) - .map_err(Into::into) - } - - /// Gets the core `ProtoArray` struct from the node. - pub async fn get_fork_choice(&self) -> Result<ProtoArray, Error> { - let client = self.0.clone(); - let url = self.url("fork_choice")?; - client.json_get(url, vec![]).await - } - - /// Gets the core `PersistedOperationPool` struct from the node. - pub async fn get_operation_pool(&self) -> Result<PersistedOperationPool<E>, Error> { - let client = self.0.clone(); - let url = self.url("operation_pool")?; - client.json_get(url, vec![]).await - } -} - -/// Provides the functions on the `/consensus` endpoint of the node. -#[derive(Clone)] -pub struct Consensus<E>(HttpClient<E>); - -impl<E: EthSpec> Consensus<E> { - fn url(&self, path: &str) -> Result<Url, Error> { - self.0 - .url("consensus/") - .and_then(move |url| url.join(path).map_err(Error::from)) - .map_err(Into::into) - } - - /// Gets a `IndividualVote` for each of the given `pubkeys`. - pub async fn get_individual_votes( - &self, - epoch: Epoch, - pubkeys: Vec<PublicKeyBytes>, - ) -> Result<IndividualVotesResponse, Error> { - let client = self.0.clone(); - let req_body = IndividualVotesRequest { epoch, pubkeys }; - - let url = self.url("individual_votes")?; - let response = client.json_post::<_>(url, req_body).await?; - let success = error_for_status(response).await.map_err(Error::from)?; - success.json().await.map_err(Error::from) - } - - /// Gets a `VoteCount` for the given `epoch`. 
- pub async fn get_vote_count(&self, epoch: Epoch) -> Result<IndividualVotesResponse, Error> { - let client = self.0.clone(); - let query_params = vec![("epoch".into(), format!("{}", epoch.as_u64()))]; - let url = self.url("vote_count")?; - client.json_get(url, query_params).await - } -} - -#[derive(Deserialize)] -#[serde(bound = "T: EthSpec")] -pub struct BlockResponse<T: EthSpec> { - pub beacon_block: SignedBeaconBlock<T>, - pub root: Hash256, -} - -#[derive(Deserialize)] -#[serde(bound = "T: EthSpec")] -pub struct StateResponse<T: EthSpec> { - pub beacon_state: BeaconState<T>, - pub root: Hash256, -} - -fn root_as_string(root: Hash256) -> String { - format!("0x{:?}", root) -} - -fn as_ssz_hex_string<T: Encode>(item: &T) -> String { - format!("0x{}", hex::encode(item.as_ssz_bytes())) -} - -impl From<reqwest::Error> for Error { - fn from(e: reqwest::Error) -> Error { - Error::ReqwestError(e) - } -} - -impl From<url::ParseError> for Error { - fn from(e: url::ParseError) -> Error { - Error::UrlParseError(e) - } -} - -impl From<serde_json::Error> for Error { - fn from(e: serde_json::Error) -> Error { - Error::SerdeJsonError(e) - } -} diff --git a/common/rest_types/src/api_error.rs b/common/rest_types/src/api_error.rs deleted file mode 100644 index 1eac8d4a4..000000000 --- a/common/rest_types/src/api_error.rs +++ /dev/null @@ -1,99 +0,0 @@ -use hyper::{Body, Response, StatusCode}; -use std::error::Error as StdError; - -#[derive(PartialEq, Debug, Clone)] -pub enum ApiError { - MethodNotAllowed(String), - ServerError(String), - NotImplemented(String), - BadRequest(String), - NotFound(String), - UnsupportedType(String), - ImATeapot(String), // Just in case. - ProcessingError(String), // A 202 error, for when a block/attestation cannot be processed, but still transmitted. 
- InvalidHeaderValue(String), -} - -pub type ApiResult = Result<Response<Body>, ApiError>; - -impl ApiError { - pub fn status_code(self) -> (StatusCode, String) { - match self { - ApiError::MethodNotAllowed(desc) => (StatusCode::METHOD_NOT_ALLOWED, desc), - ApiError::ServerError(desc) => (StatusCode::INTERNAL_SERVER_ERROR, desc), - ApiError::NotImplemented(desc) => (StatusCode::NOT_IMPLEMENTED, desc), - ApiError::BadRequest(desc) => (StatusCode::BAD_REQUEST, desc), - ApiError::NotFound(desc) => (StatusCode::NOT_FOUND, desc), - ApiError::UnsupportedType(desc) => (StatusCode::UNSUPPORTED_MEDIA_TYPE, desc), - ApiError::ImATeapot(desc) => (StatusCode::IM_A_TEAPOT, desc), - ApiError::ProcessingError(desc) => (StatusCode::ACCEPTED, desc), - ApiError::InvalidHeaderValue(desc) => (StatusCode::INTERNAL_SERVER_ERROR, desc), - } - } -} - -impl Into<Response<Body>> for ApiError { - fn into(self) -> Response<Body> { - let (status_code, desc) = self.status_code(); - Response::builder() - .status(status_code) - .header("content-type", "text/plain; charset=utf-8") - .body(Body::from(desc)) - .expect("Response should always be created.") - } -} - -impl From<store::Error> for ApiError { - fn from(e: store::Error) -> ApiError { - ApiError::ServerError(format!("Database error: {:?}", e)) - } -} - -impl From<types::BeaconStateError> for ApiError { - fn from(e: types::BeaconStateError) -> ApiError { - ApiError::ServerError(format!("BeaconState error: {:?}", e)) - } -} - -impl From<beacon_chain::BeaconChainError> for ApiError { - fn from(e: beacon_chain::BeaconChainError) -> ApiError { - ApiError::ServerError(format!("BeaconChainError error: {:?}", e)) - } -} - -impl From<state_processing::per_slot_processing::Error> for ApiError { - fn from(e: state_processing::per_slot_processing::Error) -> ApiError { - ApiError::ServerError(format!("PerSlotProcessing error: {:?}", e)) - } -} - -impl From<hyper::error::Error> for ApiError { - fn from(e: hyper::error::Error) -> ApiError { - ApiError::ServerError(format!("Networking error: {:?}", e)) - } -} - -impl From<std::io::Error> for ApiError { - fn from(e: std::io::Error) -> ApiError { - ApiError::ServerError(format!("IO error: {:?}", e)) - } -} - -impl From<hyper::header::InvalidHeaderValue> for ApiError { - fn from(e: hyper::header::InvalidHeaderValue) -> ApiError { - ApiError::InvalidHeaderValue(format!("Invalid CORS header value: {:?}", e)) - } -} - -impl StdError for ApiError { - fn cause(&self) -> Option<&dyn StdError> { - None - } -} - -impl std::fmt::Display for ApiError { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let status = self.clone().status_code(); - write!(f, "{:?}: {:?}", status.0, status.1) - } -} diff --git a/common/rest_types/src/beacon.rs b/common/rest_types/src/beacon.rs deleted file mode 100644 index 0a141ea28..000000000 --- a/common/rest_types/src/beacon.rs +++ /dev/null @@ -1,65 +0,0 @@ -//! A collection of REST API types for interaction with the beacon node. - -use bls::PublicKeyBytes; -use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; -use types::beacon_state::EthSpec; -use types::{BeaconState, CommitteeIndex, Hash256, SignedBeaconBlock, Slot, Validator}; - -/// Information about a block that is at the head of a chain. May or may not represent the -/// canonical head. 
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -pub struct HeadBeaconBlock { - pub beacon_block_root: Hash256, - pub beacon_block_slot: Slot, -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -#[serde(bound = "T: EthSpec")] -pub struct BlockResponse<T: EthSpec> { - pub root: Hash256, - pub beacon_block: SignedBeaconBlock<T>, -} - -/// Information about the block and state that are at head of the beacon chain. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -pub struct CanonicalHeadResponse { - pub slot: Slot, - pub block_root: Hash256, - pub state_root: Hash256, - pub finalized_slot: Slot, - pub finalized_block_root: Hash256, - pub justified_slot: Slot, - pub justified_block_root: Hash256, - pub previous_justified_slot: Slot, - pub previous_justified_block_root: Hash256, -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -pub struct ValidatorResponse { - pub pubkey: PublicKeyBytes, - pub validator_index: Option<usize>, - pub balance: Option<u64>, - pub validator: Option<Validator>, -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -pub struct ValidatorRequest { - /// If set to `None`, uses the canonical head state. - pub state_root: Option<Hash256>, - pub pubkeys: Vec<PublicKeyBytes>, -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -pub struct Committee { - pub slot: Slot, - pub index: CommitteeIndex, - pub committee: Vec<usize>, -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -#[serde(bound = "T: EthSpec")] -pub struct StateResponse<T: EthSpec> { - pub root: Hash256, - pub beacon_state: BeaconState<T>, -} diff --git a/common/rest_types/src/consensus.rs b/common/rest_types/src/consensus.rs deleted file mode 100644 index 519b1ae24..000000000 --- a/common/rest_types/src/consensus.rs +++ /dev/null @@ -1,66 +0,0 @@ -use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; -use state_processing::per_epoch_processing::ValidatorStatus; -use types::{Epoch, PublicKeyBytes}; - -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] -pub struct IndividualVotesRequest { - pub epoch: Epoch, - pub pubkeys: Vec<PublicKeyBytes>, -} - -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] -pub struct IndividualVote { - /// True if the validator has been slashed, ever. - pub is_slashed: bool, - /// True if the validator can withdraw in the current epoch. - pub is_withdrawable_in_current_epoch: bool, - /// True if the validator was active in the state's _current_ epoch. - pub is_active_in_current_epoch: bool, - /// True if the validator was active in the state's _previous_ epoch. - pub is_active_in_previous_epoch: bool, - /// The validator's effective balance in the _current_ epoch. - pub current_epoch_effective_balance_gwei: u64, - /// True if the validator had an attestation included in the _current_ epoch. - pub is_current_epoch_attester: bool, - /// True if the validator's beacon block root attestation for the first slot of the _current_ - /// epoch matches the block root known to the state. - pub is_current_epoch_target_attester: bool, - /// True if the validator had an attestation included in the _previous_ epoch. - pub is_previous_epoch_attester: bool, - /// True if the validator's beacon block root attestation for the first slot of the _previous_ - /// epoch matches the block root known to the state. 
- pub is_previous_epoch_target_attester: bool, - /// True if the validator's beacon block root attestation in the _previous_ epoch at the - /// attestation's slot (`attestation_data.slot`) matches the block root known to the state. - pub is_previous_epoch_head_attester: bool, -} - -impl Into<IndividualVote> for ValidatorStatus { - fn into(self) -> IndividualVote { - IndividualVote { - is_slashed: self.is_slashed, - is_withdrawable_in_current_epoch: self.is_withdrawable_in_current_epoch, - is_active_in_current_epoch: self.is_active_in_current_epoch, - is_active_in_previous_epoch: self.is_active_in_previous_epoch, - current_epoch_effective_balance_gwei: self.current_epoch_effective_balance, - is_current_epoch_attester: self.is_current_epoch_attester, - is_current_epoch_target_attester: self.is_current_epoch_target_attester, - is_previous_epoch_attester: self.is_previous_epoch_attester, - is_previous_epoch_target_attester: self.is_previous_epoch_target_attester, - is_previous_epoch_head_attester: self.is_previous_epoch_head_attester, - } - } -} - -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] -pub struct IndividualVotesResponse { - /// The epoch which is considered the "current" epoch. - pub epoch: Epoch, - /// The validators public key. - pub pubkey: PublicKeyBytes, - /// The index of the validator in state.validators. - pub validator_index: Option<usize>, - /// Voting statistics for the validator, if they voted in the given epoch. - pub vote: Option<IndividualVote>, -} diff --git a/common/rest_types/src/handler.rs b/common/rest_types/src/handler.rs deleted file mode 100644 index cbbcd73b1..000000000 --- a/common/rest_types/src/handler.rs +++ /dev/null @@ -1,247 +0,0 @@ -use crate::{ApiError, ApiResult}; -use environment::TaskExecutor; -use hyper::header; -use hyper::{Body, Request, Response, StatusCode}; -use serde::Deserialize; -use serde::Serialize; -use ssz::Encode; - -/// Defines the encoding for the API. -#[derive(Clone, Serialize, Deserialize, Copy)] -pub enum ApiEncodingFormat { - JSON, - YAML, - SSZ, -} - -impl ApiEncodingFormat { - pub fn get_content_type(&self) -> &str { - match self { - ApiEncodingFormat::JSON => "application/json", - ApiEncodingFormat::YAML => "application/yaml", - ApiEncodingFormat::SSZ => "application/ssz", - } - } -} - -impl From<&str> for ApiEncodingFormat { - fn from(f: &str) -> ApiEncodingFormat { - match f { - "application/yaml" => ApiEncodingFormat::YAML, - "application/ssz" => ApiEncodingFormat::SSZ, - _ => ApiEncodingFormat::JSON, - } - } -} - -/// Provides a HTTP request handler with Lighthouse-specific functionality. -pub struct Handler<T> { - executor: TaskExecutor, - req: Request<()>, - body: Body, - ctx: T, - encoding: ApiEncodingFormat, - allow_body: bool, -} - -impl<T: Clone + Send + Sync + 'static> Handler<T> { - /// Start handling a new request. - pub fn new(req: Request<Body>, ctx: T, executor: TaskExecutor) -> Result<Self, ApiError> { - let (req_parts, body) = req.into_parts(); - let req = Request::from_parts(req_parts, ()); - - let accept_header: String = req - .headers() - .get(header::ACCEPT) - .map_or(Ok(""), |h| h.to_str()) - .map_err(|e| { - ApiError::BadRequest(format!( - "The Accept header contains invalid characters: {:?}", - e - )) - }) - .map(String::from)?; - - Ok(Self { - executor, - req, - body, - ctx, - allow_body: false, - encoding: ApiEncodingFormat::from(accept_header.as_str()), - }) - } - - /// The default behaviour is to return an error if any body is supplied in the request. 
Calling - /// this function disables that error. - pub fn allow_body(mut self) -> Self { - self.allow_body = true; - self - } - - /// Return a simple static value. - /// - /// Does not use the blocking executor. - pub async fn static_value<V>(self, value: V) -> Result<HandledRequest<V>, ApiError> { - // Always check and disallow a body for a static value. - let _ = Self::get_body(self.body, false).await?; - - Ok(HandledRequest { - value, - encoding: self.encoding, - }) - } - - /// Calls `func` in-line, on the core executor. - /// - /// This should only be used for very fast tasks. - pub async fn in_core_task<F, V>(self, func: F) -> Result<HandledRequest<V>, ApiError> - where - V: Send + Sync + 'static, - F: Fn(Request<Vec<u8>>, T) -> Result<V, ApiError> + Send + Sync + 'static, - { - let body = Self::get_body(self.body, self.allow_body).await?; - let (req_parts, _) = self.req.into_parts(); - let req = Request::from_parts(req_parts, body); - - let value = func(req, self.ctx)?; - - Ok(HandledRequest { - value, - encoding: self.encoding, - }) - } - - /// Spawns `func` on the blocking executor. - /// - /// This method is suitable for handling long-running or intensive tasks. - pub async fn in_blocking_task<F, V>(self, func: F) -> Result<HandledRequest<V>, ApiError> - where - V: Send + Sync + 'static, - F: Fn(Request<Vec<u8>>, T) -> Result<V, ApiError> + Send + Sync + 'static, - { - let ctx = self.ctx; - let body = Self::get_body(self.body, self.allow_body).await?; - let (req_parts, _) = self.req.into_parts(); - let req = Request::from_parts(req_parts, body); - - let value = self - .executor - .clone() - .handle - .spawn_blocking(move || func(req, ctx)) - .await - .map_err(|e| { - ApiError::ServerError(format!( - "Failed to get blocking join handle: {}", - e.to_string() - )) - })??; - - Ok(HandledRequest { - value, - encoding: self.encoding, - }) - } - - /// Call `func`, then return a response that is suitable for an SSE stream. - pub async fn sse_stream<F>(self, func: F) -> ApiResult - where - F: Fn(Request<()>, T) -> Result<Body, ApiError>, - { - let body = func(self.req, self.ctx)?; - - Response::builder() - .status(200) - .header("Content-Type", "text/event-stream") - .header("Connection", "Keep-Alive") - .header("Cache-Control", "no-cache") - .header("Access-Control-Allow-Origin", "*") - .body(body) - .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) - } - - /// Downloads the bytes for `body`. - async fn get_body(body: Body, allow_body: bool) -> Result<Vec<u8>, ApiError> { - let bytes = hyper::body::to_bytes(body) - .await - .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?; - - if !allow_body && !bytes[..].is_empty() { - Err(ApiError::BadRequest( - "The request body must be empty".to_string(), - )) - } else { - Ok(bytes.into_iter().collect()) - } - } -} - -/// A request that has been "handled" and now a result (`value`) needs to be serialize and -/// returned. -pub struct HandledRequest<V> { - encoding: ApiEncodingFormat, - value: V, -} - -impl HandledRequest<String> { - /// Simple encode a string as utf-8. - pub fn text_encoding(self) -> ApiResult { - Response::builder() - .status(StatusCode::OK) - .header("content-type", "text/plain; charset=utf-8") - .body(Body::from(self.value)) - .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) - } -} - -impl<V: Serialize + Encode> HandledRequest<V> { - /// Suitable for all items which implement `serde` and `ssz`. 
- pub fn all_encodings(self) -> ApiResult { - match self.encoding { - ApiEncodingFormat::SSZ => Response::builder() - .status(StatusCode::OK) - .header("content-type", "application/ssz") - .body(Body::from(self.value.as_ssz_bytes())) - .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))), - _ => self.serde_encodings(), - } - } -} - -impl<V: Serialize> HandledRequest<V> { - /// Suitable for items which only implement `serde`. - pub fn serde_encodings(self) -> ApiResult { - let (body, content_type) = match self.encoding { - ApiEncodingFormat::JSON => ( - Body::from(serde_json::to_string(&self.value).map_err(|e| { - ApiError::ServerError(format!( - "Unable to serialize response body as JSON: {:?}", - e - )) - })?), - "application/json", - ), - ApiEncodingFormat::SSZ => { - return Err(ApiError::UnsupportedType( - "Response cannot be encoded as SSZ.".into(), - )); - } - ApiEncodingFormat::YAML => ( - Body::from(serde_yaml::to_string(&self.value).map_err(|e| { - ApiError::ServerError(format!( - "Unable to serialize response body as YAML: {:?}", - e - )) - })?), - "application/yaml", - ), - }; - - Response::builder() - .status(StatusCode::OK) - .header("content-type", content_type) - .body(body) - .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) - } -} diff --git a/common/rest_types/src/lib.rs b/common/rest_types/src/lib.rs deleted file mode 100644 index 1bedd1cad..000000000 --- a/common/rest_types/src/lib.rs +++ /dev/null @@ -1,22 +0,0 @@ -//! A collection of types used to pass data across the rest HTTP API. -//! -//! This is primarily used by the validator client and the beacon node rest API. - -mod api_error; -mod beacon; -mod consensus; -mod handler; -mod node; -mod validator; - -pub use api_error::{ApiError, ApiResult}; -pub use beacon::{ - BlockResponse, CanonicalHeadResponse, Committee, HeadBeaconBlock, StateResponse, - ValidatorRequest, ValidatorResponse, -}; -pub use consensus::{IndividualVote, IndividualVotesRequest, IndividualVotesResponse}; -pub use handler::{ApiEncodingFormat, Handler}; -pub use node::{Health, SyncingResponse, SyncingStatus}; -pub use validator::{ - ValidatorDutiesRequest, ValidatorDuty, ValidatorDutyBytes, ValidatorSubscription, -}; diff --git a/common/rest_types/src/node.rs b/common/rest_types/src/node.rs deleted file mode 100644 index ca98645cc..000000000 --- a/common/rest_types/src/node.rs +++ /dev/null @@ -1,103 +0,0 @@ -//! Collection of types for the /node HTTP -use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; -use types::Slot; - -#[cfg(target_os = "linux")] -use {procinfo::pid, psutil::process::Process}; - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -/// The current syncing status of the node. -pub struct SyncingStatus { - /// The starting slot of sync. - /// - /// For a finalized sync, this is the start slot of the current finalized syncing - /// chain. - /// - /// For head sync this is the last finalized slot. - pub starting_slot: Slot, - /// The current slot. - pub current_slot: Slot, - /// The highest known slot. For the current syncing chain. - /// - /// For a finalized sync, the target finalized slot. - /// For head sync, this is the highest known slot of all head chains. - pub highest_slot: Slot, -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -/// The response for the /node/syncing HTTP GET. -pub struct SyncingResponse { - /// Is the node syncing. - pub is_syncing: bool, - /// The current sync status. 
- pub sync_status: SyncingStatus, -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -/// Reports on the health of the Lighthouse instance. -pub struct Health { - /// The pid of this process. - pub pid: u32, - /// The number of threads used by this pid. - pub pid_num_threads: i32, - /// The total resident memory used by this pid. - pub pid_mem_resident_set_size: u64, - /// The total virtual memory used by this pid. - pub pid_mem_virtual_memory_size: u64, - /// Total virtual memory on the system - pub sys_virt_mem_total: u64, - /// Total virtual memory available for new processes. - pub sys_virt_mem_available: u64, - /// Total virtual memory used on the system - pub sys_virt_mem_used: u64, - /// Total virtual memory not used on the system - pub sys_virt_mem_free: u64, - /// Percentage of virtual memory used on the system - pub sys_virt_mem_percent: f32, - /// System load average over 1 minute. - pub sys_loadavg_1: f64, - /// System load average over 5 minutes. - pub sys_loadavg_5: f64, - /// System load average over 15 minutes. - pub sys_loadavg_15: f64, -} - -impl Health { - #[cfg(not(target_os = "linux"))] - pub fn observe() -> Result<Self, String> { - Err("Health is only available on Linux".into()) - } - - #[cfg(target_os = "linux")] - pub fn observe() -> Result<Self, String> { - let process = - Process::current().map_err(|e| format!("Unable to get current process: {:?}", e))?; - - let process_mem = process - .memory_info() - .map_err(|e| format!("Unable to get process memory info: {:?}", e))?; - - let stat = pid::stat_self().map_err(|e| format!("Unable to get stat: {:?}", e))?; - - let vm = psutil::memory::virtual_memory() - .map_err(|e| format!("Unable to get virtual memory: {:?}", e))?; - let loadavg = - psutil::host::loadavg().map_err(|e| format!("Unable to get loadavg: {:?}", e))?; - - Ok(Self { - pid: process.pid(), - pid_num_threads: stat.num_threads, - pid_mem_resident_set_size: process_mem.rss(), - pid_mem_virtual_memory_size: process_mem.vms(), - sys_virt_mem_total: vm.total(), - sys_virt_mem_available: vm.available(), - sys_virt_mem_used: vm.used(), - sys_virt_mem_free: vm.free(), - sys_virt_mem_percent: vm.percent(), - sys_loadavg_1: loadavg.one, - sys_loadavg_5: loadavg.five, - sys_loadavg_15: loadavg.fifteen, - }) - } -} diff --git a/common/rest_types/src/validator.rs b/common/rest_types/src/validator.rs deleted file mode 100644 index 2b0f07729..000000000 --- a/common/rest_types/src/validator.rs +++ /dev/null @@ -1,103 +0,0 @@ -use bls::{PublicKey, PublicKeyBytes}; -use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; -use types::{CommitteeIndex, Epoch, Slot}; - -/// A Validator duty with the validator public key represented a `PublicKeyBytes`. -pub type ValidatorDutyBytes = ValidatorDutyBase<PublicKeyBytes>; -/// A validator duty with the pubkey represented as a `PublicKey`. -pub type ValidatorDuty = ValidatorDutyBase<PublicKey>; - -// NOTE: if you add or remove fields, please adjust `eq_ignoring_proposal_slots` -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] -pub struct ValidatorDutyBase<T> { - /// The validator's BLS public key, uniquely identifying them. - pub validator_pubkey: T, - /// The validator's index in `state.validators` - pub validator_index: Option<u64>, - /// The slot at which the validator must attest. - pub attestation_slot: Option<Slot>, - /// The index of the committee within `slot` of which the validator is a member. 
- pub attestation_committee_index: Option<CommitteeIndex>, - /// The position of the validator in the committee. - pub attestation_committee_position: Option<usize>, - /// The committee count at `attestation_slot`. - pub committee_count_at_slot: Option<u64>, - /// The slots in which a validator must propose a block (can be empty). - /// - /// Should be set to `None` when duties are not yet known (before the current epoch). - pub block_proposal_slots: Option<Vec<Slot>>, - /// This provides the modulo: `max(1, len(committee) // TARGET_AGGREGATORS_PER_COMMITTEE)` - /// which allows the validator client to determine if this duty requires the validator to be - /// aggregate attestations. - pub aggregator_modulo: Option<u64>, -} - -impl<T> ValidatorDutyBase<T> { - /// Return `true` if these validator duties are equal, ignoring their `block_proposal_slots`. - pub fn eq_ignoring_proposal_slots(&self, other: &Self) -> bool - where - T: PartialEq, - { - self.validator_pubkey == other.validator_pubkey - && self.validator_index == other.validator_index - && self.attestation_slot == other.attestation_slot - && self.attestation_committee_index == other.attestation_committee_index - && self.attestation_committee_position == other.attestation_committee_position - && self.committee_count_at_slot == other.committee_count_at_slot - && self.aggregator_modulo == other.aggregator_modulo - } -} - -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] -pub struct ValidatorDutiesRequest { - pub epoch: Epoch, - pub pubkeys: Vec<PublicKeyBytes>, -} - -/// A validator subscription, created when a validator subscribes to a slot to perform optional aggregation -/// duties. -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] -pub struct ValidatorSubscription { - /// The validators index. - pub validator_index: u64, - /// The index of the committee within `slot` of which the validator is a member. Used by the - /// beacon node to quickly evaluate the associated `SubnetId`. - pub attestation_committee_index: CommitteeIndex, - /// The slot in which to subscribe. - pub slot: Slot, - /// Committee count at slot to subscribe. - pub committee_count_at_slot: u64, - /// If true, the validator is an aggregator and the beacon node should aggregate attestations - /// for this slot. - pub is_aggregator: bool, -} - -#[cfg(test)] -mod test { - use super::*; - use bls::SecretKey; - - #[test] - fn eq_ignoring_proposal_slots() { - let validator_pubkey = SecretKey::deserialize(&[1; 32]).unwrap().public_key(); - - let duty1 = ValidatorDuty { - validator_pubkey, - validator_index: Some(10), - attestation_slot: Some(Slot::new(50)), - attestation_committee_index: Some(2), - attestation_committee_position: Some(6), - committee_count_at_slot: Some(4), - block_proposal_slots: None, - aggregator_modulo: Some(99), - }; - let duty2 = ValidatorDuty { - block_proposal_slots: Some(vec![Slot::new(42), Slot::new(45)]), - ..duty1.clone() - }; - assert_ne!(duty1, duty2); - assert!(duty1.eq_ignoring_proposal_slots(&duty2)); - assert!(duty2.eq_ignoring_proposal_slots(&duty1)); - } -} diff --git a/common/slot_clock/src/lib.rs b/common/slot_clock/src/lib.rs index 41c847498..0fe1bedfe 100644 --- a/common/slot_clock/src/lib.rs +++ b/common/slot_clock/src/lib.rs @@ -24,6 +24,16 @@ pub trait SlotClock: Send + Sync + Sized { /// Returns the slot at this present time. fn now(&self) -> Option<Slot>; + /// Returns the slot at this present time if genesis has happened. Otherwise, returns the + /// genesis slot. 
Returns `None` if there is an error reading the clock. + fn now_or_genesis(&self) -> Option<Slot> { + if self.is_prior_to_genesis()? { + Some(self.genesis_slot()) + } else { + self.now() + } + } + /// Indicates if the current time is prior to genesis time. /// /// Returns `None` if the system clock cannot be read. diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml new file mode 100644 index 000000000..ec0f2cfbf --- /dev/null +++ b/common/task_executor/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "task_executor" +version = "0.1.0" +authors = ["Sigma Prime <contact@sigmaprime.io>"] +edition = "2018" + +[dependencies] +tokio = "0.2.22" +slog = "2.5.2" +futures = "0.3.5" +exit-future = "0.2.0" +lazy_static = "1.4.0" +lighthouse_metrics = { path = "../lighthouse_metrics" } diff --git a/lighthouse/environment/src/executor.rs b/common/task_executor/src/lib.rs similarity index 91% rename from lighthouse/environment/src/executor.rs rename to common/task_executor/src/lib.rs index b5f415187..9f819dd2a 100644 --- a/lighthouse/environment/src/executor.rs +++ b/common/task_executor/src/lib.rs @@ -1,23 +1,24 @@ -use crate::metrics; +mod metrics; + use futures::channel::mpsc::Sender; use futures::prelude::*; -use slog::{debug, trace}; +use slog::{debug, o, trace}; use tokio::runtime::Handle; /// A wrapper over a runtime handle which can spawn async and blocking tasks. #[derive(Clone)] pub struct TaskExecutor { /// The handle to the runtime on which tasks are spawned - pub handle: Handle, + handle: Handle, /// The receiver exit future which on receiving shuts down the task - pub(crate) exit: exit_future::Exit, + exit: exit_future::Exit, /// Sender given to tasks, so that if they encounter a state in which execution cannot /// continue they can request that everything shuts down. /// /// The task must provide a reason for shutting down. - pub(crate) signal_tx: Sender<&'static str>, + signal_tx: Sender<&'static str>, - pub(crate) log: slog::Logger, + log: slog::Logger, } impl TaskExecutor { @@ -39,6 +40,16 @@ impl TaskExecutor { } } + /// Clones the task executor adding a service name. + pub fn clone_with_name(&self, service_name: String) -> Self { + TaskExecutor { + handle: self.handle.clone(), + exit: self.exit.clone(), + signal_tx: self.signal_tx.clone(), + log: self.log.new(o!("service" => service_name)), + } + } + /// Spawn a future on the tokio runtime wrapped in an `exit_future::Exit`. The task is canceled /// when the corresponding exit_future `Signal` is fired/dropped. 
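The new `clone_with_name` on `TaskExecutor` is the standard slog child-logger pattern: loggers clone cheaply, and `Logger::new` attaches extra key-value pairs to every record. A self-contained sketch (the `Discard` drain and names are illustrative):

```rust
use slog::{o, Discard, Logger};

fn main() {
    let root = Logger::root(Discard, o!("app" => "lighthouse"));
    // Everything logged through this handle carries `service: "http_api"`,
    // mirroring what `clone_with_name("http_api".to_string())` produces.
    let http_api_log = root.new(o!("service" => "http_api"));
    slog::info!(http_api_log, "started");
}
```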
/// @@ -148,9 +159,3 @@ impl TaskExecutor { &self.log } } - -impl discv5::Executor for TaskExecutor { - fn spawn(&self, future: std::pin::Pin<Box<dyn Future<Output = ()> + Send>>) { - self.spawn(future, "discv5") - } -} diff --git a/lighthouse/environment/src/metrics.rs b/common/task_executor/src/metrics.rs similarity index 100% rename from lighthouse/environment/src/metrics.rs rename to common/task_executor/src/metrics.rs diff --git a/common/test_random_derive/Cargo.toml b/common/test_random_derive/Cargo.toml index a02cb7fda..0186ab326 100644 --- a/common/test_random_derive/Cargo.toml +++ b/common/test_random_derive/Cargo.toml @@ -9,5 +9,5 @@ description = "Procedural derive macros for implementation of TestRandom trait" proc-macro = true [dependencies] -syn = "1.0.18" -quote = "1.0.4" +syn = "1.0.42" +quote = "1.0.7" diff --git a/common/validator_dir/Cargo.toml b/common/validator_dir/Cargo.toml index 9a833d2f5..df8eed3d0 100644 --- a/common/validator_dir/Cargo.toml +++ b/common/validator_dir/Cargo.toml @@ -13,10 +13,10 @@ insecure_keys = [] bls = { path = "../../crypto/bls" } eth2_keystore = { path = "../../crypto/eth2_keystore" } types = { path = "../../consensus/types" } -rand = "0.7.2" +rand = "0.7.3" deposit_contract = { path = "../deposit_contract" } -rayon = "1.3.0" -tree_hash = { path = "../../consensus/tree_hash" } +rayon = "1.4.1" +tree_hash = "0.1.1" slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } hex = "0.4.2" diff --git a/common/validator_dir/src/builder.rs b/common/validator_dir/src/builder.rs index 4cdb75a5f..ad1b01ee4 100644 --- a/common/validator_dir/src/builder.rs +++ b/common/validator_dir/src/builder.rs @@ -40,6 +40,7 @@ pub enum Error { UninitializedWithdrawalKeystore, #[cfg(feature = "insecure_keys")] InsecureKeysError(String), + MissingPasswordDir, } impl From<KeystoreError> for Error { @@ -51,7 +52,7 @@ impl From<KeystoreError> for Error { /// A builder for creating a `ValidatorDir`. pub struct Builder<'a> { base_validators_dir: PathBuf, - password_dir: PathBuf, + password_dir: Option<PathBuf>, pub(crate) voting_keystore: Option<(Keystore, PlainText)>, pub(crate) withdrawal_keystore: Option<(Keystore, PlainText)>, store_withdrawal_keystore: bool, @@ -60,10 +61,10 @@ pub struct Builder<'a> { impl<'a> Builder<'a> { /// Instantiate a new builder. - pub fn new(base_validators_dir: PathBuf, password_dir: PathBuf) -> Self { + pub fn new(base_validators_dir: PathBuf) -> Self { Self { base_validators_dir, - password_dir, + password_dir: None, voting_keystore: None, withdrawal_keystore: None, store_withdrawal_keystore: true, @@ -71,6 +72,12 @@ impl<'a> Builder<'a> { } } + /// Supply a directory in which to store the passwords for the validator keystores. + pub fn password_dir<P: Into<PathBuf>>(mut self, password_dir: P) -> Self { + self.password_dir = Some(password_dir.into()); + self + } + /// Build the `ValidatorDir` use the given `keystore` which can be unlocked with `password`. /// /// The builder will not necessarily check that `password` can unlock `keystore`. @@ -215,26 +222,35 @@ impl<'a> Builder<'a> { } } - // Only the withdrawal keystore if explicitly required. - if self.store_withdrawal_keystore { - // Write the withdrawal password to file. 
- write_password_to_file( - self.password_dir - .join(withdrawal_keypair.pk.to_hex_string()), - withdrawal_password.as_bytes(), - )?; + if self.password_dir.is_none() && self.store_withdrawal_keystore { + return Err(Error::MissingPasswordDir); + } - // Write the withdrawal keystore to file. - write_keystore_to_file(dir.join(WITHDRAWAL_KEYSTORE_FILE), &withdrawal_keystore)?; + if let Some(password_dir) = self.password_dir.as_ref() { + // Only store the withdrawal keystore if explicitly required. + if self.store_withdrawal_keystore { + // Write the withdrawal password to file. + write_password_to_file( + password_dir.join(withdrawal_keypair.pk.to_hex_string()), + withdrawal_password.as_bytes(), + )?; + + // Write the withdrawal keystore to file. + write_keystore_to_file( + dir.join(WITHDRAWAL_KEYSTORE_FILE), + &withdrawal_keystore, + )?; + } } } - // Write the voting password to file. - write_password_to_file( - self.password_dir - .join(format!("0x{}", voting_keystore.pubkey())), - voting_password.as_bytes(), - )?; + if let Some(password_dir) = self.password_dir.as_ref() { + // Write the voting password to file. + write_password_to_file( + password_dir.join(format!("0x{}", voting_keystore.pubkey())), + voting_password.as_bytes(), + )?; + } // Write the voting keystore to file. write_keystore_to_file(dir.join(VOTING_KEYSTORE_FILE), &voting_keystore)?; diff --git a/common/validator_dir/src/insecure_keys.rs b/common/validator_dir/src/insecure_keys.rs index 65bf036d1..8043db749 100644 --- a/common/validator_dir/src/insecure_keys.rs +++ b/common/validator_dir/src/insecure_keys.rs @@ -73,7 +73,8 @@ pub fn build_deterministic_validator_dirs( indices: &[usize], ) -> Result<(), String> { for &i in indices { - Builder::new(validators_dir.clone(), password_dir.clone()) + Builder::new(validators_dir.clone()) + .password_dir(password_dir.clone()) .insecure_voting_keypair(i) .map_err(|e| format!("Unable to generate insecure keypair: {:?}", e))? .store_withdrawal_keystore(false) diff --git a/common/validator_dir/src/validator_dir.rs b/common/validator_dir/src/validator_dir.rs index 23cb3a8c1..109566f66 100644 --- a/common/validator_dir/src/validator_dir.rs +++ b/common/validator_dir/src/validator_dir.rs @@ -129,6 +129,11 @@ impl ValidatorDir { &self.dir } + /// Returns the path to the voting keystore JSON file. + pub fn voting_keystore_path(&self) -> PathBuf { + self.dir.join(VOTING_KEYSTORE_FILE) + } + /// Attempts to read the keystore in `self.dir` and decrypt the keypair using a password file /// in `password_dir`. /// diff --git a/common/validator_dir/tests/tests.rs b/common/validator_dir/tests/tests.rs index 6e9bdc2b9..fd1b79f14 100644 --- a/common/validator_dir/tests/tests.rs +++ b/common/validator_dir/tests/tests.rs @@ -78,13 +78,11 @@ impl Harness { * Build the `ValidatorDir`. */ - let builder = Builder::new( - self.validators_dir.path().into(), - self.password_dir.path().into(), - ) - // Note: setting the withdrawal keystore here ensure that it can get overriden by later - // calls to `random_withdrawal_keystore`. - .store_withdrawal_keystore(config.store_withdrawal_keystore); + let builder = Builder::new(self.validators_dir.path().into()) + .password_dir(self.password_dir.path()) + // Note: setting the withdrawal keystore here ensures that it can get replaced by + // further calls to `random_withdrawal_keystore`.
+ .store_withdrawal_keystore(config.store_withdrawal_keystore); let builder = if config.random_voting_keystore { builder.random_voting_keystore().unwrap() @@ -208,13 +206,11 @@ fn without_voting_keystore() { let harness = Harness::new(); assert!(matches!( - Builder::new( - harness.validators_dir.path().into(), - harness.password_dir.path().into(), - ) - .random_withdrawal_keystore() - .unwrap() - .build(), + Builder::new(harness.validators_dir.path().into(),) + .password_dir(harness.password_dir.path()) + .random_withdrawal_keystore() + .unwrap() + .build(), Err(BuilderError::UninitializedVotingKeystore) )) } @@ -225,26 +221,22 @@ fn without_withdrawal_keystore() { let spec = &MainnetEthSpec::default_spec(); // Should build without withdrawal keystore if not storing the it or creating eth1 data. - Builder::new( - harness.validators_dir.path().into(), - harness.password_dir.path().into(), - ) - .random_voting_keystore() - .unwrap() - .store_withdrawal_keystore(false) - .build() - .unwrap(); + Builder::new(harness.validators_dir.path().into()) + .password_dir(harness.password_dir.path()) + .random_voting_keystore() + .unwrap() + .store_withdrawal_keystore(false) + .build() + .unwrap(); assert!( matches!( - Builder::new( - harness.validators_dir.path().into(), - harness.password_dir.path().into(), - ) - .random_voting_keystore() - .unwrap() - .store_withdrawal_keystore(true) - .build(), + Builder::new(harness.validators_dir.path().into(),) + .password_dir(harness.password_dir.path()) + .random_voting_keystore() + .unwrap() + .store_withdrawal_keystore(true) + .build(), Err(BuilderError::UninitializedWithdrawalKeystore) ), "storing the keystore requires keystore" @@ -252,14 +244,12 @@ fn without_withdrawal_keystore() { assert!( matches!( - Builder::new( - harness.validators_dir.path().into(), - harness.password_dir.path().into(), - ) - .random_voting_keystore() - .unwrap() - .create_eth1_tx_data(42, spec) - .build(), + Builder::new(harness.validators_dir.path().into(),) + .password_dir(harness.password_dir.path()) + .random_voting_keystore() + .unwrap() + .create_eth1_tx_data(42, spec) + .build(), Err(BuilderError::UninitializedWithdrawalKeystore) ), "storing the keystore requires keystore" diff --git a/common/warp_utils/Cargo.toml b/common/warp_utils/Cargo.toml new file mode 100644 index 000000000..dd029e3c2 --- /dev/null +++ b/common/warp_utils/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "warp_utils" +version = "0.1.0" +authors = ["Paul Hauner <paul@paulhauner.com>"] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +warp = "0.2.5" +eth2 = { path = "../eth2" } +types = { path = "../../consensus/types" } +beacon_chain = { path = "../../beacon_node/beacon_chain" } +state_processing = { path = "../../consensus/state_processing" } +safe_arith = { path = "../../consensus/safe_arith" } +serde = { version = "1.0.116", features = ["derive"] } +tokio = { version = "0.2.22", features = ["sync"] } diff --git a/common/warp_utils/src/lib.rs b/common/warp_utils/src/lib.rs new file mode 100644 index 000000000..ba02273e6 --- /dev/null +++ b/common/warp_utils/src/lib.rs @@ -0,0 +1,6 @@ +//! This crate contains functions that are common across multiple `warp` HTTP servers in the +//! Lighthouse project. E.g., the `http_api` and `http_metrics` crates. 
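Putting the `validator_dir::Builder` changes above together: `password_dir` moves from a constructor argument to an optional builder method, and asking for a stored withdrawal keystore without one now fails with `Error::MissingPasswordDir` at `build()` time. A hedged usage sketch (the error mapping is illustrative; the methods are the ones exercised in the hunks above):

```rust
use std::path::PathBuf;
use validator_dir::Builder;

fn build_validator_without_withdrawal_keystore(
    validators: PathBuf,
    passwords: PathBuf,
) -> Result<(), String> {
    let _validator_dir = Builder::new(validators)
        .password_dir(passwords)
        .random_voting_keystore()
        .map_err(|e| format!("Unable to create voting keystore: {:?}", e))?
        .store_withdrawal_keystore(false)
        .build()
        .map_err(|e| format!("Unable to build validator dir: {:?}", e))?;
    Ok(())
}
```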
+ +pub mod reject; +pub mod reply; +pub mod task; diff --git a/common/warp_utils/src/reject.rs b/common/warp_utils/src/reject.rs new file mode 100644 index 000000000..020fa19d8 --- /dev/null +++ b/common/warp_utils/src/reject.rs @@ -0,0 +1,186 @@ +use eth2::types::ErrorMessage; +use std::convert::Infallible; +use warp::{http::StatusCode, reject::Reject}; + +#[derive(Debug)] +pub struct BeaconChainError(pub beacon_chain::BeaconChainError); + +impl Reject for BeaconChainError {} + +pub fn beacon_chain_error(e: beacon_chain::BeaconChainError) -> warp::reject::Rejection { + warp::reject::custom(BeaconChainError(e)) +} + +#[derive(Debug)] +pub struct BeaconStateError(pub types::BeaconStateError); + +impl Reject for BeaconStateError {} + +pub fn beacon_state_error(e: types::BeaconStateError) -> warp::reject::Rejection { + warp::reject::custom(BeaconStateError(e)) +} + +#[derive(Debug)] +pub struct ArithError(pub safe_arith::ArithError); + +impl Reject for ArithError {} + +pub fn arith_error(e: safe_arith::ArithError) -> warp::reject::Rejection { + warp::reject::custom(ArithError(e)) +} + +#[derive(Debug)] +pub struct SlotProcessingError(pub state_processing::SlotProcessingError); + +impl Reject for SlotProcessingError {} + +pub fn slot_processing_error(e: state_processing::SlotProcessingError) -> warp::reject::Rejection { + warp::reject::custom(SlotProcessingError(e)) +} + +#[derive(Debug)] +pub struct BlockProductionError(pub beacon_chain::BlockProductionError); + +impl Reject for BlockProductionError {} + +pub fn block_production_error(e: beacon_chain::BlockProductionError) -> warp::reject::Rejection { + warp::reject::custom(BlockProductionError(e)) +} + +#[derive(Debug)] +pub struct CustomNotFound(pub String); + +impl Reject for CustomNotFound {} + +pub fn custom_not_found(msg: String) -> warp::reject::Rejection { + warp::reject::custom(CustomNotFound(msg)) +} + +#[derive(Debug)] +pub struct CustomBadRequest(pub String); + +impl Reject for CustomBadRequest {} + +pub fn custom_bad_request(msg: String) -> warp::reject::Rejection { + warp::reject::custom(CustomBadRequest(msg)) +} + +#[derive(Debug)] +pub struct CustomServerError(pub String); + +impl Reject for CustomServerError {} + +pub fn custom_server_error(msg: String) -> warp::reject::Rejection { + warp::reject::custom(CustomServerError(msg)) +} + +#[derive(Debug)] +pub struct BroadcastWithoutImport(pub String); + +impl Reject for BroadcastWithoutImport {} + +pub fn broadcast_without_import(msg: String) -> warp::reject::Rejection { + warp::reject::custom(BroadcastWithoutImport(msg)) +} + +#[derive(Debug)] +pub struct ObjectInvalid(pub String); + +impl Reject for ObjectInvalid {} + +pub fn object_invalid(msg: String) -> warp::reject::Rejection { + warp::reject::custom(ObjectInvalid(msg)) +} + +#[derive(Debug)] +pub struct NotSynced(pub String); + +impl Reject for NotSynced {} + +pub fn not_synced(msg: String) -> warp::reject::Rejection { + warp::reject::custom(NotSynced(msg)) +} + +#[derive(Debug)] +pub struct InvalidAuthorization(pub String); + +impl Reject for InvalidAuthorization {} + +pub fn invalid_auth(msg: String) -> warp::reject::Rejection { + warp::reject::custom(InvalidAuthorization(msg)) +} + +/// This function receives a `Rejection` and tries to return a custom +/// value, otherwise simply passes the rejection along. 
+pub async fn handle_rejection(err: warp::Rejection) -> Result<impl warp::Reply, Infallible> { + let code; + let message; + + if err.is_not_found() { + code = StatusCode::NOT_FOUND; + message = "NOT_FOUND".to_string(); + } else if let Some(e) = err.find::<warp::filters::body::BodyDeserializeError>() { + message = format!("BAD_REQUEST: body deserialize error: {}", e); + code = StatusCode::BAD_REQUEST; + } else if let Some(e) = err.find::<warp::reject::InvalidQuery>() { + code = StatusCode::BAD_REQUEST; + message = format!("BAD_REQUEST: invalid query: {}", e); + } else if let Some(e) = err.find::<crate::reject::BeaconChainError>() { + code = StatusCode::INTERNAL_SERVER_ERROR; + message = format!("UNHANDLED_ERROR: {:?}", e.0); + } else if let Some(e) = err.find::<crate::reject::BeaconStateError>() { + code = StatusCode::INTERNAL_SERVER_ERROR; + message = format!("UNHANDLED_ERROR: {:?}", e.0); + } else if let Some(e) = err.find::<crate::reject::SlotProcessingError>() { + code = StatusCode::INTERNAL_SERVER_ERROR; + message = format!("UNHANDLED_ERROR: {:?}", e.0); + } else if let Some(e) = err.find::<crate::reject::BlockProductionError>() { + code = StatusCode::INTERNAL_SERVER_ERROR; + message = format!("UNHANDLED_ERROR: {:?}", e.0); + } else if let Some(e) = err.find::<crate::reject::CustomNotFound>() { + code = StatusCode::NOT_FOUND; + message = format!("NOT_FOUND: {}", e.0); + } else if let Some(e) = err.find::<crate::reject::CustomBadRequest>() { + code = StatusCode::BAD_REQUEST; + message = format!("BAD_REQUEST: {}", e.0); + } else if let Some(e) = err.find::<crate::reject::CustomServerError>() { + code = StatusCode::INTERNAL_SERVER_ERROR; + message = format!("INTERNAL_SERVER_ERROR: {}", e.0); + } else if let Some(e) = err.find::<crate::reject::BroadcastWithoutImport>() { + code = StatusCode::ACCEPTED; + message = format!( + "ACCEPTED: the object was broadcast to the network without being \ + fully imported to the local database: {}", + e.0 + ); + } else if let Some(e) = err.find::<crate::reject::ObjectInvalid>() { + code = StatusCode::BAD_REQUEST; + message = format!("BAD_REQUEST: Invalid object: {}", e.0); + } else if let Some(e) = err.find::<crate::reject::NotSynced>() { + code = StatusCode::SERVICE_UNAVAILABLE; + message = format!("SERVICE_UNAVAILABLE: beacon node is syncing: {}", e.0); + } else if let Some(e) = err.find::<crate::reject::InvalidAuthorization>() { + code = StatusCode::FORBIDDEN; + message = format!("FORBIDDEN: Invalid auth token: {}", e.0); + } else if let Some(e) = err.find::<warp::reject::MissingHeader>() { + code = StatusCode::BAD_REQUEST; + message = format!("BAD_REQUEST: missing {} header", e.name()); + } else if let Some(e) = err.find::<warp::reject::InvalidHeader>() { + code = StatusCode::BAD_REQUEST; + message = format!("BAD_REQUEST: invalid {} header", e.name()); + } else if err.find::<warp::reject::MethodNotAllowed>().is_some() { + code = StatusCode::METHOD_NOT_ALLOWED; + message = "METHOD_NOT_ALLOWED".to_string(); + } else { + code = StatusCode::INTERNAL_SERVER_ERROR; + message = "UNHANDLED_REJECTION".to_string(); + } + + let json = warp::reply::json(&ErrorMessage { + code: code.as_u16(), + message, + stacktraces: vec![], + }); + + Ok(warp::reply::with_status(json, code)) +} diff --git a/common/warp_utils/src/reply.rs b/common/warp_utils/src/reply.rs new file mode 100644 index 000000000..dcec6214f --- /dev/null +++ b/common/warp_utils/src/reply.rs @@ -0,0 +1,15 @@ +/// Add CORS headers to `reply` only if `allow_origin.is_some()`. 
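A sketch of how a warp server composes with these helpers (the route and port are assumptions, not taken from this diff): a filter raises one of the custom rejections defined above, and `handle_rejection`, installed via `Filter::recover`, converts it into a JSON `ErrorMessage` with the matching status code.

```rust
use warp::Filter;

#[tokio::main]
async fn main() {
    let route = warp::path!("demo" / u64)
        .and_then(|n: u64| async move {
            if n == 0 {
                // Surfaces as HTTP 400 with a "BAD_REQUEST: ..." message.
                Err(warp_utils::reject::custom_bad_request(
                    "n must be non-zero".to_string(),
                ))
            } else {
                Ok(format!("got {}", n))
            }
        })
        .recover(warp_utils::reject::handle_rejection);

    warp::serve(route).run(([127, 0, 0, 1], 5052)).await;
}
```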
+pub fn maybe_cors<T: warp::Reply + 'static>( + reply: T, + allow_origin: Option<&String>, +) -> Box<dyn warp::Reply> { + if let Some(allow_origin) = allow_origin { + Box::new(warp::reply::with_header( + reply, + "Access-Control-Allow-Origin", + allow_origin, + )) + } else { + Box::new(reply) + } +} diff --git a/common/warp_utils/src/task.rs b/common/warp_utils/src/task.rs new file mode 100644 index 000000000..da4cf91be --- /dev/null +++ b/common/warp_utils/src/task.rs @@ -0,0 +1,21 @@ +use serde::Serialize; + +/// Execute some task in a tokio "blocking thread". These threads are ideal for long-running +/// (blocking) tasks since they don't jam up the core executor. +pub async fn blocking_task<F, T>(func: F) -> T +where + F: Fn() -> T, +{ + tokio::task::block_in_place(func) +} + +/// A convenience wrapper around `blocking_task` for use with `warp` JSON responses. +pub async fn blocking_json_task<F, T>(func: F) -> Result<warp::reply::Json, warp::Rejection> +where + F: Fn() -> Result<T, warp::Rejection>, + T: Serialize, +{ + blocking_task(func) + .await + .map(|resp| warp::reply::json(&resp)) +} diff --git a/consensus/cached_tree_hash/Cargo.toml b/consensus/cached_tree_hash/Cargo.toml index 8cb796edd..5de54fe69 100644 --- a/consensus/cached_tree_hash/Cargo.toml +++ b/consensus/cached_tree_hash/Cargo.toml @@ -5,13 +5,13 @@ authors = ["Michael Sproul <michael@sigmaprime.io>"] edition = "2018" [dependencies] -ethereum-types = "0.9.1" +ethereum-types = "0.9.2" eth2_ssz_types = { path = "../ssz_types" } eth2_hashing = "0.1.0" eth2_ssz_derive = "0.1.0" eth2_ssz = "0.1.2" -tree_hash = "0.1.0" -smallvec = "1.4.1" +tree_hash = "0.1.1" +smallvec = "1.4.2" [dev-dependencies] quickcheck = "0.9.2" diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index e07111407..a5724bc1a 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -9,13 +9,13 @@ edition = "2018" [dependencies] types = { path = "../types" } proto_array = { path = "../proto_array" } -eth2_ssz = { path = "../ssz" } -eth2_ssz_derive = { path = "../ssz_derive" } +eth2_ssz = "0.1.2" +eth2_ssz_derive = "0.1.0" [dev-dependencies] state_processing = { path = "../../consensus/state_processing" } beacon_chain = { path = "../../beacon_node/beacon_chain" } store = { path = "../../beacon_node/store" } -tree_hash = { path = "../../consensus/tree_hash" } +tree_hash = "0.1.1" slot_clock = { path = "../../common/slot_clock" } hex = "0.4.2" diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 99f998e55..f6c43ae42 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -4,7 +4,7 @@ use proto_array::{Block as ProtoBlock, ProtoArrayForkChoice}; use ssz_derive::{Decode, Encode}; use types::{ BeaconBlock, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, Hash256, - IndexedAttestation, Slot, + IndexedAttestation, RelativeEpoch, ShufflingId, Slot, }; use crate::ForkChoiceStore; @@ -240,10 +240,18 @@ where /// Instantiates `Self` from the genesis parameters. 
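Before the diff moves on to fork choice, a usage sketch for the `task::blocking_json_task` helper above: the synchronous closure runs via `tokio::task::block_in_place`, so CPU-heavy work does not stall the async executor. The route and workload here are assumptions:

```rust
use warp::Filter;

fn heavy_route() -> impl Filter<Extract = (warp::reply::Json,), Error = warp::Rejection> + Clone {
    warp::path("heavy").and_then(|| {
        warp_utils::task::blocking_json_task(move || {
            // Stand-in for an expensive, blocking computation.
            let sum: u64 = (0..1_000_000u64).sum();
            Ok::<_, warp::Rejection>(vec![sum])
        })
    })
}
```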
pub fn from_genesis( fc_store: T, + genesis_block_root: Hash256, genesis_block: &BeaconBlock<E>, + genesis_state: &BeaconState<E>, ) -> Result<Self, Error<T::Error>> { let finalized_block_slot = genesis_block.slot; let finalized_block_state_root = genesis_block.state_root; + let current_epoch_shuffling_id = + ShufflingId::new(genesis_block_root, genesis_state, RelativeEpoch::Current) + .map_err(Error::BeaconStateError)?; + let next_epoch_shuffling_id = + ShufflingId::new(genesis_block_root, genesis_state, RelativeEpoch::Next) + .map_err(Error::BeaconStateError)?; let proto_array = ProtoArrayForkChoice::new( finalized_block_slot, @@ -251,6 +259,8 @@ where fc_store.justified_checkpoint().epoch, fc_store.finalized_checkpoint().epoch, fc_store.finalized_checkpoint().root, + current_epoch_shuffling_id, + next_epoch_shuffling_id, )?; Ok(Self { @@ -534,6 +544,10 @@ where root: block_root, parent_root: Some(block.parent_root), target_root, + current_epoch_shuffling_id: ShufflingId::new(block_root, state, RelativeEpoch::Current) + .map_err(Error::BeaconStateError)?, + next_epoch_shuffling_id: ShufflingId::new(block_root, state, RelativeEpoch::Next) + .map_err(Error::BeaconStateError)?, state_root: block.state_root, justified_epoch: state.current_justified_checkpoint.epoch, finalized_epoch: state.finalized_checkpoint.epoch, diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index 78c7534cd..7b508afd4 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -6,3 +6,4 @@ pub use crate::fork_choice::{ SAFE_SLOTS_TO_UPDATE_JUSTIFIED, }; pub use fork_choice_store::ForkChoiceStore; +pub use proto_array::Block as ProtoBlock; diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 712b653b7..21cdfbc4a 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -407,7 +407,7 @@ impl ForkChoiceTest { let mut verified_attestation = self .harness .chain - .verify_unaggregated_attestation_for_gossip(attestation, subnet_id) + .verify_unaggregated_attestation_for_gossip(attestation, Some(subnet_id)) .expect("precondition: should gossip verify attestation"); if let MutationDelay::Blocks(slots) = delay { diff --git a/consensus/int_to_bytes/Cargo.toml b/consensus/int_to_bytes/Cargo.toml index 87839ccaa..736fed9c8 100644 --- a/consensus/int_to_bytes/Cargo.toml +++ b/consensus/int_to_bytes/Cargo.toml @@ -5,8 +5,8 @@ authors = ["Paul Hauner <paul@paulhauner.com>"] edition = "2018" [dependencies] -bytes = "0.5.4" +bytes = "0.5.6" [dev-dependencies] -yaml-rust = "0.4.3" +yaml-rust = "0.4.4" hex = "0.4.2" diff --git a/consensus/merkle_proof/Cargo.toml b/consensus/merkle_proof/Cargo.toml index 84745d224..7c0ff5207 100644 --- a/consensus/merkle_proof/Cargo.toml +++ b/consensus/merkle_proof/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Michael Sproul <michael@sigmaprime.io>"] edition = "2018" [dependencies] -ethereum-types = "0.9.1" +ethereum-types = "0.9.2" eth2_hashing = "0.1.0" lazy_static = "1.4.0" safe_arith = { path = "../safe_arith" } diff --git a/consensus/proto_array/Cargo.toml b/consensus/proto_array/Cargo.toml index 63f2d7fd7..111f24a3b 100644 --- a/consensus/proto_array/Cargo.toml +++ b/consensus/proto_array/Cargo.toml @@ -12,6 +12,6 @@ path = "src/bin.rs" types = { path = "../types" } eth2_ssz = "0.1.2" eth2_ssz_derive = "0.1.0" -serde = "1.0.110" -serde_derive = "1.0.110" -serde_yaml = "0.8.11" +serde = "1.0.116" +serde_derive = "1.0.116" +serde_yaml = "0.8.13" diff --git 
a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 6e1bd970b..9cac0bafb 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -4,7 +4,7 @@ mod votes; use crate::proto_array_fork_choice::{Block, ProtoArrayForkChoice}; use serde_derive::{Deserialize, Serialize}; -use types::{Epoch, Hash256, Slot}; +use types::{Epoch, Hash256, ShufflingId, Slot}; pub use ffg_updates::*; pub use no_votes::*; @@ -55,12 +55,15 @@ pub struct ForkChoiceTestDefinition { impl ForkChoiceTestDefinition { pub fn run(self) { + let junk_shuffling_id = ShufflingId::from_components(Epoch::new(0), Hash256::zero()); let mut fork_choice = ProtoArrayForkChoice::new( self.finalized_block_slot, Hash256::zero(), self.justified_epoch, self.finalized_epoch, self.finalized_root, + junk_shuffling_id.clone(), + junk_shuffling_id, ) .expect("should create fork choice struct"); @@ -125,6 +128,14 @@ impl ForkChoiceTestDefinition { parent_root: Some(parent_root), state_root: Hash256::zero(), target_root: Hash256::zero(), + current_epoch_shuffling_id: ShufflingId::from_components( + Epoch::new(0), + Hash256::zero(), + ), + next_epoch_shuffling_id: ShufflingId::from_components( + Epoch::new(0), + Hash256::zero(), + ), justified_epoch, finalized_epoch, }; diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 18db8d340..c89a96628 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -2,7 +2,7 @@ use crate::{error::Error, Block}; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use std::collections::HashMap; -use types::{Epoch, Hash256, Slot}; +use types::{Epoch, Hash256, ShufflingId, Slot}; #[derive(Clone, PartialEq, Debug, Encode, Decode, Serialize, Deserialize)] pub struct ProtoNode { @@ -18,6 +18,8 @@ pub struct ProtoNode { /// The `target_root` is not necessary for `ProtoArray` either, it also just exists for upstream /// components (namely fork choice attestation verification). 
pub target_root: Hash256, + pub current_epoch_shuffling_id: ShufflingId, + pub next_epoch_shuffling_id: ShufflingId, pub root: Hash256, pub parent: Option<usize>, pub justified_epoch: Epoch, @@ -142,6 +144,8 @@ impl ProtoArray { slot: block.slot, root: block.root, target_root: block.target_root, + current_epoch_shuffling_id: block.current_epoch_shuffling_id, + next_epoch_shuffling_id: block.next_epoch_shuffling_id, state_root: block.state_root, parent: block .parent_root diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 451f39993..e4cf5bbc6 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -4,7 +4,7 @@ use crate::ssz_container::SszContainer; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use std::collections::HashMap; -use types::{Epoch, Hash256, Slot}; +use types::{Epoch, Hash256, ShufflingId, Slot}; pub const DEFAULT_PRUNE_THRESHOLD: usize = 256; @@ -25,6 +25,8 @@ pub struct Block { pub parent_root: Option<Hash256>, pub state_root: Hash256, pub target_root: Hash256, + pub current_epoch_shuffling_id: ShufflingId, + pub next_epoch_shuffling_id: ShufflingId, pub justified_epoch: Epoch, pub finalized_epoch: Epoch, } @@ -70,6 +72,8 @@ impl ProtoArrayForkChoice { justified_epoch: Epoch, finalized_epoch: Epoch, finalized_root: Hash256, + current_epoch_shuffling_id: ShufflingId, + next_epoch_shuffling_id: ShufflingId, ) -> Result<Self, String> { let mut proto_array = ProtoArray { prune_threshold: DEFAULT_PRUNE_THRESHOLD, @@ -87,6 +91,8 @@ impl ProtoArrayForkChoice { // We are using the finalized_root as the target_root, since it always lies on an // epoch boundary. target_root: finalized_root, + current_epoch_shuffling_id, + next_epoch_shuffling_id, justified_epoch, finalized_epoch, }; @@ -194,6 +200,8 @@ impl ProtoArrayForkChoice { parent_root, state_root: block.state_root, target_root: block.target_root, + current_epoch_shuffling_id: block.current_epoch_shuffling_id.clone(), + next_epoch_shuffling_id: block.next_epoch_shuffling_id.clone(), justified_epoch: block.justified_epoch, finalized_epoch: block.finalized_epoch, }) @@ -341,6 +349,7 @@ mod test_compute_deltas { let finalized_desc = Hash256::from_low_u64_be(2); let not_finalized_desc = Hash256::from_low_u64_be(3); let unknown = Hash256::from_low_u64_be(4); + let junk_shuffling_id = ShufflingId::from_components(Epoch::new(0), Hash256::zero()); let mut fc = ProtoArrayForkChoice::new( genesis_slot, @@ -348,6 +357,8 @@ mod test_compute_deltas { genesis_epoch, genesis_epoch, finalized_root, + junk_shuffling_id.clone(), + junk_shuffling_id.clone(), ) .unwrap(); @@ -359,6 +370,8 @@ mod test_compute_deltas { parent_root: Some(finalized_root), state_root, target_root: finalized_root, + current_epoch_shuffling_id: junk_shuffling_id.clone(), + next_epoch_shuffling_id: junk_shuffling_id.clone(), justified_epoch: genesis_epoch, finalized_epoch: genesis_epoch, }) @@ -372,6 +385,8 @@ mod test_compute_deltas { parent_root: None, state_root, target_root: finalized_root, + current_epoch_shuffling_id: junk_shuffling_id.clone(), + next_epoch_shuffling_id: junk_shuffling_id.clone(), justified_epoch: genesis_epoch, finalized_epoch: genesis_epoch, }) diff --git a/consensus/serde_hex/Cargo.toml b/consensus/serde_hex/Cargo.toml deleted file mode 100644 index 2df5ff02a..000000000 --- a/consensus/serde_hex/Cargo.toml +++ /dev/null @@ -1,9 +0,0 @@ -[package] -name = "serde_hex" -version = "0.2.0" 
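For context on the new `current_epoch_shuffling_id`/`next_epoch_shuffling_id` fields threaded through `ProtoNode` and `Block`: production callers derive them from a `BeaconState` via `ShufflingId::new`, while the tests use a fixed placeholder. A sketch of the test-style value (workspace `types` crate assumed):

```rust
use types::{Epoch, Hash256, ShufflingId};

/// The "junk" shuffling id used by the fork choice test definitions above:
/// epoch 0 paired with the zero hash.
fn junk_shuffling_id() -> ShufflingId {
    ShufflingId::from_components(Epoch::new(0), Hash256::zero())
}
```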
-authors = ["Paul Hauner <paul@paulhauner.com>"] -edition = "2018" - -[dependencies] -serde = "1.0.110" -hex = "0.4.2" diff --git a/consensus/serde_utils/Cargo.toml b/consensus/serde_utils/Cargo.toml index 1fb35736b..060179846 100644 --- a/consensus/serde_utils/Cargo.toml +++ b/consensus/serde_utils/Cargo.toml @@ -5,8 +5,9 @@ authors = ["Paul Hauner <paul@paulhauner.com", "Michael Sproul <michael@sigmapri edition = "2018" [dependencies] -serde = { version = "1.0.110", features = ["derive"] } -serde_derive = "1.0.110" +serde = { version = "1.0.116", features = ["derive"] } +serde_derive = "1.0.116" +hex = "0.4.2" [dev-dependencies] -serde_json = "1.0.52" +serde_json = "1.0.58" diff --git a/consensus/serde_utils/src/bytes_4_hex.rs b/consensus/serde_utils/src/bytes_4_hex.rs new file mode 100644 index 000000000..e057d1a12 --- /dev/null +++ b/consensus/serde_utils/src/bytes_4_hex.rs @@ -0,0 +1,38 @@ +//! Formats `[u8; 4]` as a 0x-prefixed hex string. +//! +//! E.g., `[0, 1, 2, 3]` serializes as `"0x00010203"`. + +use crate::hex::PrefixedHexVisitor; +use serde::de::Error; +use serde::{Deserializer, Serializer}; + +const BYTES_LEN: usize = 4; + +pub fn serialize<S>(bytes: &[u8; BYTES_LEN], serializer: S) -> Result<S::Ok, S::Error> +where + S: Serializer, +{ + let mut hex_string: String = "0x".to_string(); + hex_string.push_str(&hex::encode(&bytes)); + + serializer.serialize_str(&hex_string) +} + +pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; BYTES_LEN], D::Error> +where + D: Deserializer<'de>, +{ + let decoded = deserializer.deserialize_str(PrefixedHexVisitor)?; + + if decoded.len() != BYTES_LEN { + return Err(D::Error::custom(format!( + "expected {} bytes for array, got {}", + BYTES_LEN, + decoded.len() + ))); + } + + let mut array = [0; BYTES_LEN]; + array.copy_from_slice(&decoded); + Ok(array) +} diff --git a/consensus/serde_hex/src/lib.rs b/consensus/serde_utils/src/hex.rs similarity index 81% rename from consensus/serde_hex/src/lib.rs rename to consensus/serde_utils/src/hex.rs index db8422275..79dfaa506 100644 --- a/consensus/serde_hex/src/lib.rs +++ b/consensus/serde_utils/src/hex.rs @@ -1,6 +1,9 @@ +//! Provides utilities for parsing 0x-prefixed hex strings. + use serde::de::{self, Visitor}; use std::fmt; +/// Encode `data` as a 0x-prefixed hex string. pub fn encode<T: AsRef<[u8]>>(data: T) -> String { let hex = hex::encode(data); let mut s = "0x".to_string(); @@ -8,6 +11,15 @@ pub fn encode<T: AsRef<[u8]>>(data: T) -> String { s } +/// Decode `data` from a 0x-prefixed hex string. +pub fn decode(s: &str) -> Result<Vec<u8>, String> { + if s.starts_with("0x") { + hex::decode(&s[2..]).map_err(|e| format!("invalid hex: {:?}", e)) + } else { + Err("hex must have 0x prefix".to_string()) + } +} + pub struct PrefixedHexVisitor; impl<'de> Visitor<'de> for PrefixedHexVisitor { diff --git a/consensus/serde_utils/src/lib.rs b/consensus/serde_utils/src/lib.rs index df2b44b62..0016e67a3 100644 --- a/consensus/serde_utils/src/lib.rs +++ b/consensus/serde_utils/src/lib.rs @@ -1,2 +1,9 @@ -pub mod quoted_u64; +mod quoted_int; + +pub mod bytes_4_hex; +pub mod hex; pub mod quoted_u64_vec; +pub mod u32_hex; +pub mod u8_hex; + +pub use quoted_int::{quoted_u32, quoted_u64, quoted_u8}; diff --git a/consensus/serde_utils/src/quoted_int.rs b/consensus/serde_utils/src/quoted_int.rs new file mode 100644 index 000000000..24edf1ebe --- /dev/null +++ b/consensus/serde_utils/src/quoted_int.rs @@ -0,0 +1,144 @@ +//! Formats some integer types using quotes. +//! +//! E.g., `1` serializes as `"1"`. +//! +//! 
Quotes can be optional during decoding. + +use serde::{Deserializer, Serializer}; +use serde_derive::{Deserialize, Serialize}; +use std::convert::TryFrom; +use std::marker::PhantomData; + +macro_rules! define_mod { + ($int: ty, $visit_fn: ident) => { + /// Serde support for deserializing quoted integers. + /// + /// Configurable so that quotes are either required or optional. + pub struct QuotedIntVisitor<T> { + require_quotes: bool, + _phantom: PhantomData<T>, + } + + impl<'a, T> serde::de::Visitor<'a> for QuotedIntVisitor<T> + where + T: From<$int> + Into<$int> + Copy + TryFrom<u64>, + { + type Value = T; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + if self.require_quotes { + write!(formatter, "a quoted integer") + } else { + write!(formatter, "a quoted or unquoted integer") + } + } + + fn visit_str<E>(self, s: &str) -> Result<Self::Value, E> + where + E: serde::de::Error, + { + s.parse::<$int>() + .map(T::from) + .map_err(serde::de::Error::custom) + } + + fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E> + where + E: serde::de::Error, + { + if self.require_quotes { + Err(serde::de::Error::custom( + "received unquoted integer when quotes are required", + )) + } else { + T::try_from(v).map_err(|_| serde::de::Error::custom("invalid integer")) + } + } + } + + /// Wrapper type for requiring quotes on a `$int`-like type. + /// + /// Unlike using `serde(with = "quoted_$int::require_quotes")` this is composable, and can be nested + /// inside types like `Option`, `Result` and `Vec`. + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize)] + #[serde(transparent)] + pub struct Quoted<T> + where + T: From<$int> + Into<$int> + Copy + TryFrom<u64>, + { + #[serde(with = "require_quotes")] + pub value: T, + } + + /// Serialize with quotes. + pub fn serialize<S, T>(value: &T, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + T: From<$int> + Into<$int> + Copy, + { + let v: $int = (*value).into(); + serializer.serialize_str(&format!("{}", v)) + } + + /// Deserialize with or without quotes. + pub fn deserialize<'de, D, T>(deserializer: D) -> Result<T, D::Error> + where + D: Deserializer<'de>, + T: From<$int> + Into<$int> + Copy + TryFrom<u64>, + { + deserializer.deserialize_any(QuotedIntVisitor { + require_quotes: false, + _phantom: PhantomData, + }) + } + + /// Requires quotes when deserializing. + /// + /// Usage: `#[serde(with = "quoted_u64::require_quotes")]`. 
+ pub mod require_quotes { + pub use super::serialize; + use super::*; + + pub fn deserialize<'de, D, T>(deserializer: D) -> Result<T, D::Error> + where + D: Deserializer<'de>, + T: From<$int> + Into<$int> + Copy + TryFrom<u64>, + { + deserializer.deserialize_any(QuotedIntVisitor { + require_quotes: true, + _phantom: PhantomData, + }) + } + } + + #[cfg(test)] + mod test { + use super::*; + + #[test] + fn require_quotes() { + let x = serde_json::from_str::<Quoted<$int>>("\"8\"").unwrap(); + assert_eq!(x.value, 8); + serde_json::from_str::<Quoted<$int>>("8").unwrap_err(); + } + } + }; +} + +pub mod quoted_u8 { + use super::*; + + define_mod!(u8, visit_u8); +} + +pub mod quoted_u32 { + use super::*; + + define_mod!(u32, visit_u32); +} + +pub mod quoted_u64 { + use super::*; + + define_mod!(u64, visit_u64); +} diff --git a/consensus/serde_utils/src/quoted_u64.rs b/consensus/serde_utils/src/quoted_u64.rs deleted file mode 100644 index 2e73a104f..000000000 --- a/consensus/serde_utils/src/quoted_u64.rs +++ /dev/null @@ -1,115 +0,0 @@ -use serde::{Deserializer, Serializer}; -use serde_derive::{Deserialize, Serialize}; -use std::marker::PhantomData; - -/// Serde support for deserializing quoted integers. -/// -/// Configurable so that quotes are either required or optional. -pub struct QuotedIntVisitor<T> { - require_quotes: bool, - _phantom: PhantomData<T>, -} - -impl<'a, T> serde::de::Visitor<'a> for QuotedIntVisitor<T> -where - T: From<u64> + Into<u64> + Copy, -{ - type Value = T; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - if self.require_quotes { - write!(formatter, "a quoted integer") - } else { - write!(formatter, "a quoted or unquoted integer") - } - } - - fn visit_str<E>(self, s: &str) -> Result<Self::Value, E> - where - E: serde::de::Error, - { - s.parse::<u64>() - .map(T::from) - .map_err(serde::de::Error::custom) - } - - fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E> - where - E: serde::de::Error, - { - if self.require_quotes { - Err(serde::de::Error::custom( - "received unquoted integer when quotes are required", - )) - } else { - Ok(T::from(v)) - } - } -} - -/// Wrapper type for requiring quotes on a `u64`-like type. -/// -/// Unlike using `serde(with = "quoted_u64::require_quotes")` this is composable, and can be nested -/// inside types like `Option`, `Result` and `Vec`. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize)] -#[serde(transparent)] -pub struct Quoted<T> -where - T: From<u64> + Into<u64> + Copy, -{ - #[serde(with = "require_quotes")] - pub value: T, -} - -/// Serialize with quotes. -pub fn serialize<S, T>(value: &T, serializer: S) -> Result<S::Ok, S::Error> -where - S: Serializer, - T: From<u64> + Into<u64> + Copy, -{ - let v: u64 = (*value).into(); - serializer.serialize_str(&format!("{}", v)) -} - -/// Deserialize with or without quotes. -pub fn deserialize<'de, D, T>(deserializer: D) -> Result<T, D::Error> -where - D: Deserializer<'de>, - T: From<u64> + Into<u64> + Copy, -{ - deserializer.deserialize_any(QuotedIntVisitor { - require_quotes: false, - _phantom: PhantomData, - }) -} - -/// Requires quotes when deserializing. -/// -/// Usage: `#[serde(with = "quoted_u64::require_quotes")]`. 
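A self-contained usage sketch for the macro-generated quoted-integer modules above (with `serde_json` as the dev-dependency, as in the crate's own tests): the field serializes as a JSON string, and because quotes are optional on decode, a bare number is accepted too.

```rust
use serde_derive::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Example {
    #[serde(with = "serde_utils::quoted_u64")]
    slot: u64,
}

fn main() {
    let quoted: Example = serde_json::from_str(r#"{"slot":"42"}"#).unwrap();
    let bare: Example = serde_json::from_str(r#"{"slot":42}"#).unwrap();
    assert_eq!(quoted, bare);
    // Serialization always quotes.
    assert_eq!(serde_json::to_string(&quoted).unwrap(), r#"{"slot":"42"}"#);
}
```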
-pub mod require_quotes { - pub use super::serialize; - use super::*; - - pub fn deserialize<'de, D, T>(deserializer: D) -> Result<T, D::Error> - where - D: Deserializer<'de>, - T: From<u64> + Into<u64> + Copy, - { - deserializer.deserialize_any(QuotedIntVisitor { - require_quotes: true, - _phantom: PhantomData, - }) - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn require_quotes() { - let x = serde_json::from_str::<Quoted<u64>>("\"8\"").unwrap(); - assert_eq!(x.value, 8); - serde_json::from_str::<Quoted<u64>>("8").unwrap_err(); - } -} diff --git a/consensus/serde_utils/src/quoted_u64_vec.rs b/consensus/serde_utils/src/quoted_u64_vec.rs index c5badee50..f124c9890 100644 --- a/consensus/serde_utils/src/quoted_u64_vec.rs +++ b/consensus/serde_utils/src/quoted_u64_vec.rs @@ -1,3 +1,9 @@ +//! Formats `Vec<u64>` using quotes. +//! +//! E.g., `vec![0, 1, 2]` serializes as `["0", "1", "2"]`. +//! +//! Quotes can be optional during decoding. + use serde::ser::SerializeSeq; use serde::{Deserializer, Serializer}; use serde_derive::{Deserialize, Serialize}; @@ -6,7 +12,7 @@ use serde_derive::{Deserialize, Serialize}; #[serde(transparent)] pub struct QuotedIntWrapper { #[serde(with = "crate::quoted_u64")] - int: u64, + pub int: u64, } pub struct QuotedIntVecVisitor; diff --git a/consensus/serde_utils/src/u32_hex.rs b/consensus/serde_utils/src/u32_hex.rs new file mode 100644 index 000000000..c1ab3537b --- /dev/null +++ b/consensus/serde_utils/src/u32_hex.rs @@ -0,0 +1,21 @@ +//! Formats `u32` as a 0x-prefixed, little-endian hex string. +//! +//! E.g., `0` serializes as `"0x00000000"`. + +use crate::bytes_4_hex; +use serde::{Deserializer, Serializer}; + +pub fn serialize<S>(num: &u32, serializer: S) -> Result<S::Ok, S::Error> +where + S: Serializer, +{ + let hex = format!("0x{}", hex::encode(num.to_le_bytes())); + serializer.serialize_str(&hex) +} + +pub fn deserialize<'de, D>(deserializer: D) -> Result<u32, D::Error> +where + D: Deserializer<'de>, +{ + bytes_4_hex::deserialize(deserializer).map(u32::from_le_bytes) +} diff --git a/consensus/serde_utils/src/u8_hex.rs b/consensus/serde_utils/src/u8_hex.rs new file mode 100644 index 000000000..8083e1d12 --- /dev/null +++ b/consensus/serde_utils/src/u8_hex.rs @@ -0,0 +1,29 @@ +//! Formats `u8` as a 0x-prefixed hex string. +//! +//! E.g., `0` serializes as `"0x00"`. 
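One detail of the `u32_hex` module above worth calling out: the encoding is little-endian (`to_le_bytes`), so the integer 1 renders as `"0x01000000"`, not `"0x00000001"`. A quick check:

```rust
use serde_derive::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct Domain {
    #[serde(with = "serde_utils::u32_hex")]
    domain: u32,
}

fn main() {
    let d = Domain { domain: 1 };
    // Least-significant byte first.
    assert_eq!(
        serde_json::to_string(&d).unwrap(),
        r#"{"domain":"0x01000000"}"#
    );
}
```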
+ +use crate::hex::PrefixedHexVisitor; +use serde::de::Error; +use serde::{Deserializer, Serializer}; + +pub fn serialize<S>(byte: &u8, serializer: S) -> Result<S::Ok, S::Error> +where + S: Serializer, +{ + let hex = format!("0x{}", hex::encode([*byte])); + serializer.serialize_str(&hex) +} + +pub fn deserialize<'de, D>(deserializer: D) -> Result<u8, D::Error> +where + D: Deserializer<'de>, +{ + let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; + if bytes.len() != 1 { + return Err(D::Error::custom(format!( + "expected 1 byte for u8, got {}", + bytes.len() + ))); + } + Ok(bytes[0]) +} diff --git a/consensus/ssz/Cargo.toml b/consensus/ssz/Cargo.toml index c71593301..63f98588a 100644 --- a/consensus/ssz/Cargo.toml +++ b/consensus/ssz/Cargo.toml @@ -13,8 +13,8 @@ name = "ssz" eth2_ssz_derive = "0.1.0" [dependencies] -ethereum-types = "0.9.1" -smallvec = "1.4.1" +ethereum-types = "0.9.2" +smallvec = "1.4.2" [features] arbitrary = ["ethereum-types/arbitrary"] diff --git a/consensus/ssz_derive/Cargo.toml b/consensus/ssz_derive/Cargo.toml index e074f001a..fb0de111c 100644 --- a/consensus/ssz_derive/Cargo.toml +++ b/consensus/ssz_derive/Cargo.toml @@ -11,5 +11,5 @@ name = "ssz_derive" proc-macro = true [dependencies] -syn = "1.0.18" -quote = "1.0.4" +syn = "1.0.42" +quote = "1.0.7" diff --git a/consensus/ssz_types/Cargo.toml b/consensus/ssz_types/Cargo.toml index 144b3ce31..a0ea0dbcd 100644 --- a/consensus/ssz_types/Cargo.toml +++ b/consensus/ssz_types/Cargo.toml @@ -8,13 +8,13 @@ edition = "2018" name = "ssz_types" [dependencies] -tree_hash = "0.1.0" -serde = "1.0.110" -serde_derive = "1.0.110" -serde_hex = { path = "../serde_hex" } +tree_hash = "0.1.1" +serde = "1.0.116" +serde_derive = "1.0.116" +serde_utils = { path = "../serde_utils" } eth2_ssz = "0.1.2" typenum = "1.12.0" -arbitrary = { version = "0.4.4", features = ["derive"], optional = true } +arbitrary = { version = "0.4.6", features = ["derive"], optional = true } [dev-dependencies] tree_hash_derive = "0.2.0" diff --git a/consensus/ssz_types/src/bitfield.rs b/consensus/ssz_types/src/bitfield.rs index 1b6dce3ec..09fa9fc2d 100644 --- a/consensus/ssz_types/src/bitfield.rs +++ b/consensus/ssz_types/src/bitfield.rs @@ -3,7 +3,7 @@ use crate::Error; use core::marker::PhantomData; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; -use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; +use serde_utils::hex::{encode as hex_encode, PrefixedHexVisitor}; use ssz::{Decode, Encode}; use tree_hash::Hash256; use typenum::Unsigned; diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index bd0de6c19..98ce4ff55 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -9,29 +9,29 @@ name = "benches" harness = false [dev-dependencies] -criterion = "0.3.2" +criterion = "0.3.3" env_logger = "0.7.1" -serde = "1.0.110" -serde_derive = "1.0.110" +serde = "1.0.116" +serde_derive = "1.0.116" lazy_static = "1.4.0" -serde_yaml = "0.8.11" +serde_yaml = "0.8.13" [dependencies] bls = { path = "../../crypto/bls" } -integer-sqrt = "0.1.3" +integer-sqrt = "0.1.5" itertools = "0.9.0" eth2_ssz = "0.1.2" eth2_ssz_types = { path = "../ssz_types" } merkle_proof = { path = "../merkle_proof" } -log = "0.4.8" +log = "0.4.11" safe_arith = { path = "../safe_arith" } -tree_hash = "0.1.0" +tree_hash = "0.1.1" tree_hash_derive = "0.2.0" types = { path = "../types", default-features = false } -rayon = "1.3.0" +rayon = "1.4.1" eth2_hashing = "0.1.0" 
int_to_bytes = { path = "../int_to_bytes" } -arbitrary = { version = "0.4.4", features = ["derive"], optional = true } +arbitrary = { version = "0.4.6", features = ["derive"], optional = true } [features] default = ["legacy-arith"] diff --git a/consensus/swap_or_not_shuffle/Cargo.toml b/consensus/swap_or_not_shuffle/Cargo.toml index 12af74aff..118e471b0 100644 --- a/consensus/swap_or_not_shuffle/Cargo.toml +++ b/consensus/swap_or_not_shuffle/Cargo.toml @@ -9,11 +9,11 @@ name = "benches" harness = false [dev-dependencies] -criterion = "0.3.2" +criterion = "0.3.3" [dependencies] eth2_hashing = "0.1.0" -ethereum-types = "0.9.1" +ethereum-types = "0.9.2" [features] arbitrary = ["ethereum-types/arbitrary"] diff --git a/consensus/tree_hash/Cargo.toml b/consensus/tree_hash/Cargo.toml index 436805558..5837afd85 100644 --- a/consensus/tree_hash/Cargo.toml +++ b/consensus/tree_hash/Cargo.toml @@ -11,16 +11,16 @@ name = "benches" harness = false [dev-dependencies] -criterion = "0.3.2" +criterion = "0.3.3" rand = "0.7.3" tree_hash_derive = "0.2.0" types = { path = "../types" } lazy_static = "1.4.0" [dependencies] -ethereum-types = "0.9.1" +ethereum-types = "0.9.2" eth2_hashing = "0.1.0" -smallvec = "1.4.1" +smallvec = "1.4.2" [features] arbitrary = ["ethereum-types/arbitrary"] diff --git a/consensus/tree_hash_derive/Cargo.toml b/consensus/tree_hash_derive/Cargo.toml index 11caabe07..10c743cb3 100644 --- a/consensus/tree_hash_derive/Cargo.toml +++ b/consensus/tree_hash_derive/Cargo.toml @@ -10,5 +10,5 @@ license = "Apache-2.0" proc-macro = true [dependencies] -syn = "1.0.18" -quote = "1.0.4" +syn = "1.0.42" +quote = "1.0.7" diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 8f6fed4b4..8984beeda 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -12,38 +12,39 @@ harness = false bls = { path = "../../crypto/bls" } compare_fields = { path = "../../common/compare_fields" } compare_fields_derive = { path = "../../common/compare_fields_derive" } -dirs = "2.0.2" eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } -ethereum-types = "0.9.1" +ethereum-types = "0.9.2" eth2_hashing = "0.1.0" hex = "0.4.2" int_to_bytes = { path = "../int_to_bytes" } -log = "0.4.8" +log = "0.4.11" merkle_proof = { path = "../merkle_proof" } -rayon = "1.3.0" +rayon = "1.4.1" rand = "0.7.3" safe_arith = { path = "../safe_arith" } -serde = "1.0.110" -serde_derive = "1.0.110" +serde = "1.0.116" +serde_derive = "1.0.116" slog = "2.5.2" eth2_ssz = "0.1.2" eth2_ssz_derive = "0.1.0" eth2_ssz_types = { path = "../ssz_types" } swap_or_not_shuffle = { path = "../swap_or_not_shuffle" } test_random_derive = { path = "../../common/test_random_derive" } -tree_hash = "0.1.0" +tree_hash = "0.1.1" tree_hash_derive = "0.2.0" rand_xorshift = "0.2.0" cached_tree_hash = { path = "../cached_tree_hash" } -serde_yaml = "0.8.11" +serde_yaml = "0.8.13" tempfile = "3.1.0" derivative = "2.1.1" -rusqlite = { version = "0.23.1", features = ["bundled"], optional = true } -arbitrary = { version = "0.4.4", features = ["derive"], optional = true } +rusqlite = { version = "0.24.0", features = ["bundled"], optional = true } +arbitrary = { version = "0.4.6", features = ["derive"], optional = true } +serde_utils = { path = "../serde_utils" } +regex = "1.3.9" [dev-dependencies] -serde_json = "1.0.52" -criterion = "0.3.2" +serde_json = "1.0.58" +criterion = "0.3.3" [features] default = ["sqlite", "legacy-arith"] diff --git a/consensus/types/src/aggregate_and_proof.rs 
b/consensus/types/src/aggregate_and_proof.rs index 737c891c9..528712261 100644 --- a/consensus/types/src/aggregate_and_proof.rs +++ b/consensus/types/src/aggregate_and_proof.rs @@ -16,6 +16,7 @@ use tree_hash_derive::TreeHash; #[serde(bound = "T: EthSpec")] pub struct AggregateAndProof<T: EthSpec> { /// The index of the validator that created the attestation. + #[serde(with = "serde_utils::quoted_u64")] pub aggregator_index: u64, /// The aggregate attestation. pub aggregate: Attestation<T>, diff --git a/consensus/types/src/attestation_data.rs b/consensus/types/src/attestation_data.rs index 67fb28002..07fa529e0 100644 --- a/consensus/types/src/attestation_data.rs +++ b/consensus/types/src/attestation_data.rs @@ -26,6 +26,7 @@ use tree_hash_derive::TreeHash; )] pub struct AttestationData { pub slot: Slot, + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, // LMD GHOST vote diff --git a/consensus/types/src/attestation_duty.rs b/consensus/types/src/attestation_duty.rs index c32e4683e..613d7fd1c 100644 --- a/consensus/types/src/attestation_duty.rs +++ b/consensus/types/src/attestation_duty.rs @@ -12,4 +12,7 @@ pub struct AttestationDuty { pub committee_position: usize, /// The total number of attesters in the committee. pub committee_len: usize, + /// The committee count at `attestation_slot`. + #[serde(with = "serde_utils::quoted_u64")] + pub committees_at_slot: u64, } diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index eeb10458b..d3a916070 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -16,6 +16,7 @@ use tree_hash_derive::TreeHash; #[serde(bound = "T: EthSpec")] pub struct BeaconBlock<T: EthSpec> { pub slot: Slot, + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, pub parent_root: Hash256, pub state_root: Hash256, diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 489c5bc9d..ef28307ed 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -1,5 +1,4 @@ use crate::test_utils::TestRandom; -use crate::utils::{graffiti_from_hex_str, graffiti_to_hex_str, Graffiti}; use crate::*; use serde_derive::{Deserialize, Serialize}; @@ -17,10 +16,6 @@ use tree_hash_derive::TreeHash; pub struct BeaconBlockBody<T: EthSpec> { pub randao_reveal: Signature, pub eth1_data: Eth1Data, - #[serde( - serialize_with = "graffiti_to_hex_str", - deserialize_with = "graffiti_from_hex_str" - )] pub graffiti: Graffiti, pub proposer_slashings: VariableList<ProposerSlashing, T::MaxProposerSlashings>, pub attester_slashings: VariableList<AttesterSlashing<T>, T::MaxAttesterSlashings>, diff --git a/consensus/types/src/beacon_block_header.rs b/consensus/types/src/beacon_block_header.rs index 04a20e56d..708c0e16f 100644 --- a/consensus/types/src/beacon_block_header.rs +++ b/consensus/types/src/beacon_block_header.rs @@ -14,6 +14,7 @@ use tree_hash_derive::TreeHash; #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct BeaconBlockHeader { pub slot: Slot, + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, pub parent_root: Hash256, pub state_root: Hash256, diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index a2d923da9..25cb85ce8 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -157,6 +157,7 @@ where T: EthSpec, { // Versioning + #[serde(with = 
"serde_utils::quoted_u64")] pub genesis_time: u64, pub genesis_validators_root: Hash256, pub slot: Slot, @@ -173,6 +174,7 @@ where // Ethereum 1.0 chain data pub eth1_data: Eth1Data, pub eth1_data_votes: VariableList<Eth1Data, T::SlotsPerEth1VotingPeriod>, + #[serde(with = "serde_utils::quoted_u64")] pub eth1_deposit_index: u64, // Registry @@ -913,6 +915,13 @@ impl<T: EthSpec> BeaconState<T> { self.exit_cache = ExitCache::default(); } + /// Returns `true` if the committee cache for `relative_epoch` is built and ready to use. + pub fn committee_cache_is_initialized(&self, relative_epoch: RelativeEpoch) -> bool { + let i = Self::committee_cache_index(relative_epoch); + + self.committee_caches[i].is_initialized_at(relative_epoch.into_epoch(self.current_epoch())) + } + /// Build an epoch cache, unless it is has already been built. pub fn build_committee_cache( &mut self, diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs index 6ee24cd2b..728c9cf02 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/beacon_state/committee_cache.rs @@ -186,6 +186,7 @@ impl CommitteeCache { index, committee_position, committee_len, + committees_at_slot: self.committees_per_slot(), }) }) } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index c621acb81..7327895ee 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -4,10 +4,6 @@ use serde_derive::{Deserialize, Serialize}; use std::fs::File; use std::path::Path; use tree_hash::TreeHash; -use utils::{ - fork_from_hex_str, fork_to_hex_str, u32_from_hex_str, u32_to_hex_str, u8_from_hex_str, - u8_to_hex_str, -}; /// Each of the BLS signature domains. /// @@ -65,12 +61,9 @@ pub struct ChainSpec { /* * Initial Values */ - #[serde( - serialize_with = "fork_to_hex_str", - deserialize_with = "fork_from_hex_str" - )] + #[serde(with = "serde_utils::bytes_4_hex")] pub genesis_fork_version: [u8; 4], - #[serde(deserialize_with = "u8_from_hex_str", serialize_with = "u8_to_hex_str")] + #[serde(with = "serde_utils::u8_hex")] pub bls_withdrawal_prefix_byte: u8, /* @@ -115,6 +108,7 @@ pub struct ChainSpec { */ pub eth1_follow_distance: u64, pub seconds_per_eth1_block: u64, + pub deposit_contract_address: Address, /* * Networking @@ -326,6 +320,9 @@ impl ChainSpec { */ eth1_follow_distance: 1_024, seconds_per_eth1_block: 14, + deposit_contract_address: "1234567890123456789012345678901234567890" + .parse() + .expect("chain spec deposit contract address"), /* * Network specific @@ -448,104 +445,127 @@ pub struct YamlConfig { #[serde(default)] config_name: String, // ChainSpec - max_committees_per_slot: usize, - target_committee_size: usize, + #[serde(with = "serde_utils::quoted_u64")] + max_committees_per_slot: u64, + #[serde(with = "serde_utils::quoted_u64")] + target_committee_size: u64, + #[serde(with = "serde_utils::quoted_u64")] min_per_epoch_churn_limit: u64, + #[serde(with = "serde_utils::quoted_u64")] churn_limit_quotient: u64, + #[serde(with = "serde_utils::quoted_u8")] shuffle_round_count: u8, + #[serde(with = "serde_utils::quoted_u64")] min_genesis_active_validator_count: u64, + #[serde(with = "serde_utils::quoted_u64")] min_genesis_time: u64, + #[serde(with = "serde_utils::quoted_u64")] genesis_delay: u64, + #[serde(with = "serde_utils::quoted_u64")] min_deposit_amount: u64, + #[serde(with = "serde_utils::quoted_u64")] max_effective_balance: u64, + #[serde(with = "serde_utils::quoted_u64")] 
ejection_balance: u64, + #[serde(with = "serde_utils::quoted_u64")] effective_balance_increment: u64, + #[serde(with = "serde_utils::quoted_u64")] hysteresis_quotient: u64, + #[serde(with = "serde_utils::quoted_u64")] hysteresis_downward_multiplier: u64, + #[serde(with = "serde_utils::quoted_u64")] hysteresis_upward_multiplier: u64, // Proportional slashing multiplier defaults to 3 for compatibility with Altona and Medalla. #[serde(default = "default_proportional_slashing_multiplier")] + #[serde(with = "serde_utils::quoted_u64")] proportional_slashing_multiplier: u64, - #[serde( - serialize_with = "fork_to_hex_str", - deserialize_with = "fork_from_hex_str" - )] + #[serde(with = "serde_utils::bytes_4_hex")] genesis_fork_version: [u8; 4], - #[serde(deserialize_with = "u8_from_hex_str", serialize_with = "u8_to_hex_str")] + #[serde(with = "serde_utils::u8_hex")] bls_withdrawal_prefix: u8, + #[serde(with = "serde_utils::quoted_u64")] seconds_per_slot: u64, + #[serde(with = "serde_utils::quoted_u64")] min_attestation_inclusion_delay: u64, + #[serde(with = "serde_utils::quoted_u64")] min_seed_lookahead: u64, + #[serde(with = "serde_utils::quoted_u64")] max_seed_lookahead: u64, + #[serde(with = "serde_utils::quoted_u64")] min_epochs_to_inactivity_penalty: u64, + #[serde(with = "serde_utils::quoted_u64")] min_validator_withdrawability_delay: u64, + #[serde(with = "serde_utils::quoted_u64")] shard_committee_period: u64, + #[serde(with = "serde_utils::quoted_u64")] base_reward_factor: u64, + #[serde(with = "serde_utils::quoted_u64")] whistleblower_reward_quotient: u64, + #[serde(with = "serde_utils::quoted_u64")] proposer_reward_quotient: u64, + #[serde(with = "serde_utils::quoted_u64")] inactivity_penalty_quotient: u64, + #[serde(with = "serde_utils::quoted_u64")] min_slashing_penalty_quotient: u64, + #[serde(with = "serde_utils::quoted_u64")] safe_slots_to_update_justified: u64, - #[serde( - deserialize_with = "u32_from_hex_str", - serialize_with = "u32_to_hex_str" - )] + #[serde(with = "serde_utils::u32_hex")] domain_beacon_proposer: u32, - #[serde( - deserialize_with = "u32_from_hex_str", - serialize_with = "u32_to_hex_str" - )] + #[serde(with = "serde_utils::u32_hex")] domain_beacon_attester: u32, - #[serde( - deserialize_with = "u32_from_hex_str", - serialize_with = "u32_to_hex_str" - )] + #[serde(with = "serde_utils::u32_hex")] domain_randao: u32, - #[serde( - deserialize_with = "u32_from_hex_str", - serialize_with = "u32_to_hex_str" - )] + #[serde(with = "serde_utils::u32_hex")] domain_deposit: u32, - #[serde( - deserialize_with = "u32_from_hex_str", - serialize_with = "u32_to_hex_str" - )] + #[serde(with = "serde_utils::u32_hex")] domain_voluntary_exit: u32, - #[serde( - deserialize_with = "u32_from_hex_str", - serialize_with = "u32_to_hex_str" - )] + #[serde(with = "serde_utils::u32_hex")] domain_selection_proof: u32, - #[serde( - deserialize_with = "u32_from_hex_str", - serialize_with = "u32_to_hex_str" - )] + #[serde(with = "serde_utils::u32_hex")] domain_aggregate_and_proof: u32, // EthSpec + #[serde(with = "serde_utils::quoted_u32")] max_validators_per_committee: u32, + #[serde(with = "serde_utils::quoted_u64")] slots_per_epoch: u64, + #[serde(with = "serde_utils::quoted_u64")] epochs_per_eth1_voting_period: u64, - slots_per_historical_root: usize, - epochs_per_historical_vector: usize, - epochs_per_slashings_vector: usize, + #[serde(with = "serde_utils::quoted_u64")] + slots_per_historical_root: u64, + #[serde(with = "serde_utils::quoted_u64")] + epochs_per_historical_vector: u64, + 
#[serde(with = "serde_utils::quoted_u64")] + epochs_per_slashings_vector: u64, + #[serde(with = "serde_utils::quoted_u64")] historical_roots_limit: u64, + #[serde(with = "serde_utils::quoted_u64")] validator_registry_limit: u64, + #[serde(with = "serde_utils::quoted_u32")] max_proposer_slashings: u32, + #[serde(with = "serde_utils::quoted_u32")] max_attester_slashings: u32, + #[serde(with = "serde_utils::quoted_u32")] max_attestations: u32, + #[serde(with = "serde_utils::quoted_u32")] max_deposits: u32, + #[serde(with = "serde_utils::quoted_u32")] max_voluntary_exits: u32, // Validator + #[serde(with = "serde_utils::quoted_u64")] eth1_follow_distance: u64, + #[serde(with = "serde_utils::quoted_u64")] target_aggregators_per_committee: u64, + #[serde(with = "serde_utils::quoted_u64")] random_subnets_per_validator: u64, + #[serde(with = "serde_utils::quoted_u64")] epochs_per_random_subnet_subscription: u64, + #[serde(with = "serde_utils::quoted_u64")] seconds_per_eth1_block: u64, + deposit_contract_address: Address, /* TODO: incorporate these into ChainSpec and turn on `serde(deny_unknown_fields)` deposit_chain_id: u64, deposit_network_id: u64, - deposit_contract_address: String, */ } @@ -568,8 +588,8 @@ impl YamlConfig { Self { config_name: T::spec_name().to_string(), // ChainSpec - max_committees_per_slot: spec.max_committees_per_slot, - target_committee_size: spec.target_committee_size, + max_committees_per_slot: spec.max_committees_per_slot as u64, + target_committee_size: spec.target_committee_size as u64, min_per_epoch_churn_limit: spec.min_per_epoch_churn_limit, churn_limit_quotient: spec.churn_limit_quotient, shuffle_round_count: spec.shuffle_round_count, @@ -611,9 +631,9 @@ impl YamlConfig { max_validators_per_committee: T::MaxValidatorsPerCommittee::to_u32(), slots_per_epoch: T::slots_per_epoch(), epochs_per_eth1_voting_period: T::EpochsPerEth1VotingPeriod::to_u64(), - slots_per_historical_root: T::slots_per_historical_root(), - epochs_per_historical_vector: T::epochs_per_historical_vector(), - epochs_per_slashings_vector: T::EpochsPerSlashingsVector::to_usize(), + slots_per_historical_root: T::slots_per_historical_root() as u64, + epochs_per_historical_vector: T::epochs_per_historical_vector() as u64, + epochs_per_slashings_vector: T::EpochsPerSlashingsVector::to_u64(), historical_roots_limit: T::HistoricalRootsLimit::to_u64(), validator_registry_limit: T::ValidatorRegistryLimit::to_u64(), max_proposer_slashings: T::MaxProposerSlashings::to_u32(), @@ -628,6 +648,7 @@ impl YamlConfig { random_subnets_per_validator: spec.random_subnets_per_validator, epochs_per_random_subnet_subscription: spec.epochs_per_random_subnet_subscription, seconds_per_eth1_block: spec.seconds_per_eth1_block, + deposit_contract_address: spec.deposit_contract_address, } } @@ -643,9 +664,9 @@ impl YamlConfig { if self.max_validators_per_committee != T::MaxValidatorsPerCommittee::to_u32() || self.slots_per_epoch != T::slots_per_epoch() || self.epochs_per_eth1_voting_period != T::EpochsPerEth1VotingPeriod::to_u64() - || self.slots_per_historical_root != T::slots_per_historical_root() - || self.epochs_per_historical_vector != T::epochs_per_historical_vector() - || self.epochs_per_slashings_vector != T::EpochsPerSlashingsVector::to_usize() + || self.slots_per_historical_root != T::slots_per_historical_root() as u64 + || self.epochs_per_historical_vector != T::epochs_per_historical_vector() as u64 + || self.epochs_per_slashings_vector != T::EpochsPerSlashingsVector::to_u64() || self.historical_roots_limit != 
T::HistoricalRootsLimit::to_u64() || self.validator_registry_limit != T::ValidatorRegistryLimit::to_u64() || self.max_proposer_slashings != T::MaxProposerSlashings::to_u32() @@ -662,8 +683,8 @@ impl YamlConfig { /* * Misc */ - max_committees_per_slot: self.max_committees_per_slot, - target_committee_size: self.target_committee_size, + max_committees_per_slot: self.max_committees_per_slot as usize, + target_committee_size: self.target_committee_size as usize, min_per_epoch_churn_limit: self.min_per_epoch_churn_limit, churn_limit_quotient: self.churn_limit_quotient, shuffle_round_count: self.shuffle_round_count, @@ -685,6 +706,7 @@ impl YamlConfig { random_subnets_per_validator: self.random_subnets_per_validator, epochs_per_random_subnet_subscription: self.epochs_per_random_subnet_subscription, seconds_per_eth1_block: self.seconds_per_eth1_block, + deposit_contract_address: self.deposit_contract_address, /* * Gwei values */ diff --git a/consensus/types/src/deposit_data.rs b/consensus/types/src/deposit_data.rs index ce72c362e..8e2050a0b 100644 --- a/consensus/types/src/deposit_data.rs +++ b/consensus/types/src/deposit_data.rs @@ -15,6 +15,7 @@ use tree_hash_derive::TreeHash; pub struct DepositData { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, + #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, pub signature: SignatureBytes, } diff --git a/consensus/types/src/deposit_message.rs b/consensus/types/src/deposit_message.rs index fe283a17f..92f6b66bf 100644 --- a/consensus/types/src/deposit_message.rs +++ b/consensus/types/src/deposit_message.rs @@ -15,6 +15,7 @@ use tree_hash_derive::TreeHash; pub struct DepositMessage { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, + #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, } diff --git a/consensus/types/src/enr_fork_id.rs b/consensus/types/src/enr_fork_id.rs index e10744368..008b7933f 100644 --- a/consensus/types/src/enr_fork_id.rs +++ b/consensus/types/src/enr_fork_id.rs @@ -1,5 +1,4 @@ use crate::test_utils::TestRandom; -use crate::utils::{fork_from_hex_str, fork_to_hex_str}; use crate::Epoch; use serde_derive::{Deserialize, Serialize}; @@ -16,15 +15,9 @@ use tree_hash_derive::TreeHash; Debug, Clone, PartialEq, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] pub struct EnrForkId { - #[serde( - serialize_with = "fork_to_hex_str", - deserialize_with = "fork_from_hex_str" - )] + #[serde(with = "serde_utils::bytes_4_hex")] pub fork_digest: [u8; 4], - #[serde( - serialize_with = "fork_to_hex_str", - deserialize_with = "fork_from_hex_str" - )] + #[serde(with = "serde_utils::bytes_4_hex")] pub next_fork_version: [u8; 4], pub next_fork_epoch: Epoch, } diff --git a/consensus/types/src/eth1_data.rs b/consensus/types/src/eth1_data.rs index dcc1ea098..e3b74cc49 100644 --- a/consensus/types/src/eth1_data.rs +++ b/consensus/types/src/eth1_data.rs @@ -26,6 +26,7 @@ use tree_hash_derive::TreeHash; )] pub struct Eth1Data { pub deposit_root: Hash256, + #[serde(with = "serde_utils::quoted_u64")] pub deposit_count: u64, pub block_hash: Hash256, } diff --git a/consensus/types/src/fork.rs b/consensus/types/src/fork.rs index 8e95710c4..b129271ba 100644 --- a/consensus/types/src/fork.rs +++ b/consensus/types/src/fork.rs @@ -1,5 +1,4 @@ use crate::test_utils::TestRandom; -use crate::utils::{fork_from_hex_str, fork_to_hex_str}; use crate::Epoch; use serde_derive::{Deserialize, Serialize}; @@ -25,15 +24,9 @@ use tree_hash_derive::TreeHash; TestRandom, )] pub struct Fork { - #[serde( - 
serialize_with = "fork_to_hex_str", - deserialize_with = "fork_from_hex_str" - )] + #[serde(with = "serde_utils::bytes_4_hex")] pub previous_version: [u8; 4], - #[serde( - serialize_with = "fork_to_hex_str", - deserialize_with = "fork_from_hex_str" - )] + #[serde(with = "serde_utils::bytes_4_hex")] pub current_version: [u8; 4], pub epoch: Epoch, } diff --git a/consensus/types/src/fork_data.rs b/consensus/types/src/fork_data.rs index bad6f6219..092102f77 100644 --- a/consensus/types/src/fork_data.rs +++ b/consensus/types/src/fork_data.rs @@ -1,5 +1,4 @@ use crate::test_utils::TestRandom; -use crate::utils::{fork_from_hex_str, fork_to_hex_str}; use crate::{Hash256, SignedRoot}; use serde_derive::{Deserialize, Serialize}; @@ -15,10 +14,7 @@ use tree_hash_derive::TreeHash; Debug, Clone, PartialEq, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] pub struct ForkData { - #[serde( - serialize_with = "fork_to_hex_str", - deserialize_with = "fork_from_hex_str" - )] + #[serde(with = "serde_utils::bytes_4_hex")] pub current_version: [u8; 4], pub genesis_validators_root: Hash256, } diff --git a/consensus/types/src/free_attestation.rs b/consensus/types/src/free_attestation.rs index 6215fb0cd..79bc149e4 100644 --- a/consensus/types/src/free_attestation.rs +++ b/consensus/types/src/free_attestation.rs @@ -9,5 +9,6 @@ use serde_derive::Serialize; pub struct FreeAttestation { pub data: AttestationData, pub signature: Signature, + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, } diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs new file mode 100644 index 000000000..f35df9383 --- /dev/null +++ b/consensus/types/src/graffiti.rs @@ -0,0 +1,132 @@ +use crate::{ + test_utils::{RngCore, TestRandom}, + Hash256, +}; +use regex::bytes::Regex; +use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer}; +use ssz::{Decode, DecodeError, Encode}; +use std::fmt; +use tree_hash::TreeHash; + +pub const GRAFFITI_BYTES_LEN: usize = 32; + +/// The 32-byte `graffiti` field on a beacon block. 
+#[derive(Default, Debug, PartialEq, Clone, Copy, Serialize, Deserialize)] +#[serde(transparent)] +#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] +pub struct Graffiti(#[serde(with = "serde_graffiti")] pub [u8; GRAFFITI_BYTES_LEN]); + +impl Graffiti { + pub fn as_utf8_lossy(&self) -> String { + #[allow(clippy::invalid_regex)] + let re = Regex::new("\\p{C}").expect("graffiti regex is valid"); + String::from_utf8_lossy(&re.replace_all(&self.0[..], &b""[..])).to_string() + } +} + +impl fmt::Display for Graffiti { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", serde_utils::hex::encode(&self.0)) + } +} + +impl From<[u8; GRAFFITI_BYTES_LEN]> for Graffiti { + fn from(bytes: [u8; GRAFFITI_BYTES_LEN]) -> Self { + Self(bytes) + } +} + +impl Into<[u8; GRAFFITI_BYTES_LEN]> for Graffiti { + fn into(self) -> [u8; GRAFFITI_BYTES_LEN] { + self.0 + } +} + +pub mod serde_graffiti { + use super::*; + + pub fn serialize<S>(bytes: &[u8; GRAFFITI_BYTES_LEN], serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + serializer.serialize_str(&serde_utils::hex::encode(bytes)) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; GRAFFITI_BYTES_LEN], D::Error> + where + D: Deserializer<'de>, + { + let s: String = Deserialize::deserialize(deserializer)?; + + let bytes = serde_utils::hex::decode(&s).map_err(D::Error::custom)?; + + if bytes.len() != GRAFFITI_BYTES_LEN { + return Err(D::Error::custom(format!( + "incorrect byte length {}, expected {}", + bytes.len(), + GRAFFITI_BYTES_LEN + ))); + } + + let mut array = [0; GRAFFITI_BYTES_LEN]; + array[..].copy_from_slice(&bytes); + + Ok(array) + } +} + +impl Encode for Graffiti { + fn is_ssz_fixed_len() -> bool { + <[u8; GRAFFITI_BYTES_LEN] as Encode>::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + <[u8; GRAFFITI_BYTES_LEN] as Encode>::ssz_fixed_len() + } + + fn ssz_bytes_len(&self) -> usize { + self.0.ssz_bytes_len() + } + + fn ssz_append(&self, buf: &mut Vec<u8>) { + self.0.ssz_append(buf) + } +} + +impl Decode for Graffiti { + fn is_ssz_fixed_len() -> bool { + <[u8; GRAFFITI_BYTES_LEN] as Decode>::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + <[u8; GRAFFITI_BYTES_LEN] as Decode>::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError> { + <[u8; GRAFFITI_BYTES_LEN]>::from_ssz_bytes(bytes).map(Self) + } +} + +impl TreeHash for Graffiti { + fn tree_hash_type() -> tree_hash::TreeHashType { + <[u8; GRAFFITI_BYTES_LEN]>::tree_hash_type() + } + + fn tree_hash_packed_encoding(&self) -> Vec<u8> { + self.0.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + <[u8; GRAFFITI_BYTES_LEN]>::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> tree_hash::Hash256 { + self.0.tree_hash_root() + } +} + +impl TestRandom for Graffiti { + fn random_for_test(rng: &mut impl RngCore) -> Self { + Self::from(Hash256::random_for_test(rng).to_fixed_bytes()) + } +} diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/indexed_attestation.rs index 341db1807..eaae75de8 100644 --- a/consensus/types/src/indexed_attestation.rs +++ b/consensus/types/src/indexed_attestation.rs @@ -18,6 +18,7 @@ use tree_hash_derive::TreeHash; #[serde(bound = "T: EthSpec")] pub struct IndexedAttestation<T: EthSpec> { /// Lists validator registry indices, not committee indices. 
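// Why the quoting below matters: JSON numbers are commonly decoded as IEEE-754
// doubles, which cannot represent every `u64` above 2^53, so the standard
// beacon APIs carry `u64` values (and lists of them, as here) as decimal strings.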
+ #[serde(with = "quoted_variable_list_u64")] pub attesting_indices: VariableList<u64, T::MaxValidatorsPerCommittee>, pub data: AttestationData, pub signature: AggregateSignature, @@ -53,6 +54,43 @@ impl<T: EthSpec> Hash for IndexedAttestation<T> { } } +/// Serialize a variable list of `u64` such that each int is quoted. Deserialize a variable +/// list supporting both quoted and un-quoted ints. +/// +/// E.g.,`["0", "1", "2"]` +mod quoted_variable_list_u64 { + use super::*; + use crate::Unsigned; + use serde::ser::SerializeSeq; + use serde::{Deserializer, Serializer}; + use serde_utils::quoted_u64_vec::{QuotedIntVecVisitor, QuotedIntWrapper}; + + pub fn serialize<S, T>(value: &VariableList<u64, T>, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + T: Unsigned, + { + let mut seq = serializer.serialize_seq(Some(value.len()))?; + for &int in value.iter() { + seq.serialize_element(&QuotedIntWrapper { int })?; + } + seq.end() + } + + pub fn deserialize<'de, D, T>(deserializer: D) -> Result<VariableList<u64, T>, D::Error> + where + D: Deserializer<'de>, + T: Unsigned, + { + deserializer + .deserialize_any(QuotedIntVecVisitor) + .and_then(|vec| { + VariableList::new(vec) + .map_err(|e| serde::de::Error::custom(format!("invalid length: {:?}", e))) + }) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 19697118a..65c1290d7 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -29,19 +29,21 @@ pub mod eth_spec; pub mod fork; pub mod fork_data; pub mod free_attestation; +pub mod graffiti; pub mod historical_batch; pub mod indexed_attestation; pub mod pending_attestation; pub mod proposer_slashing; pub mod relative_epoch; pub mod selection_proof; +pub mod shuffling_id; pub mod signed_aggregate_and_proof; pub mod signed_beacon_block; pub mod signed_beacon_block_header; pub mod signed_voluntary_exit; pub mod signing_data; -pub mod utils; pub mod validator; +pub mod validator_subscription; pub mod voluntary_exit; #[macro_use] pub mod slot_epoch_macros; @@ -74,12 +76,14 @@ pub use crate::eth1_data::Eth1Data; pub use crate::fork::Fork; pub use crate::fork_data::ForkData; pub use crate::free_attestation::FreeAttestation; +pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN}; pub use crate::historical_batch::HistoricalBatch; pub use crate::indexed_attestation::IndexedAttestation; pub use crate::pending_attestation::PendingAttestation; pub use crate::proposer_slashing::ProposerSlashing; pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; pub use crate::selection_proof::SelectionProof; +pub use crate::shuffling_id::ShufflingId; pub use crate::signed_aggregate_and_proof::SignedAggregateAndProof; pub use crate::signed_beacon_block::{SignedBeaconBlock, SignedBeaconBlockHash}; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; @@ -88,6 +92,7 @@ pub use crate::signing_data::{SignedRoot, SigningData}; pub use crate::slot_epoch::{Epoch, Slot}; pub use crate::subnet_id::SubnetId; pub use crate::validator::Validator; +pub use crate::validator_subscription::ValidatorSubscription; pub use crate::voluntary_exit::VoluntaryExit; pub type CommitteeIndex = u64; @@ -99,4 +104,3 @@ pub use bls::{ AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, Signature, SignatureBytes, }; pub use ssz_types::{typenum, typenum::Unsigned, BitList, BitVector, FixedVector, VariableList}; -pub use utils::{Graffiti, GRAFFITI_BYTES_LEN}; diff --git 
a/consensus/types/src/pending_attestation.rs b/consensus/types/src/pending_attestation.rs index 70ebb1bbd..f4b0fd9b1 100644 --- a/consensus/types/src/pending_attestation.rs +++ b/consensus/types/src/pending_attestation.rs @@ -13,7 +13,9 @@ use tree_hash_derive::TreeHash; pub struct PendingAttestation<T: EthSpec> { pub aggregation_bits: BitList<T::MaxValidatorsPerCommittee>, pub data: AttestationData, + #[serde(with = "serde_utils::quoted_u64")] pub inclusion_delay: u64, + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, } diff --git a/consensus/types/src/shuffling_id.rs b/consensus/types/src/shuffling_id.rs new file mode 100644 index 000000000..d54b5fa64 --- /dev/null +++ b/consensus/types/src/shuffling_id.rs @@ -0,0 +1,61 @@ +use crate::*; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use std::hash::Hash; + +/// Can be used to key (ID) the shuffling in some chain, in some epoch. +/// +/// ## Reasoning +/// +/// We say that the ID of some shuffling is always equal to a 2-tuple: +/// +/// - The epoch for which the shuffling should be effective. +/// - A block root, where this is the root at the *last* slot of the penultimate epoch. I.e., the +/// final block which contributed a randao reveal to the seed for the shuffling. +/// +/// The struct stores exactly that 2-tuple. +#[derive(Debug, PartialEq, Eq, Clone, Hash, Serialize, Deserialize, Encode, Decode)] +pub struct ShufflingId { + pub shuffling_epoch: Epoch, + shuffling_decision_block: Hash256, +} + +impl ShufflingId { + /// Using the given `state`, return the shuffling id for the shuffling at the given + /// `relative_epoch`. + /// + /// The `block_root` provided should be either: + /// + /// - The root of the block which produced this state. + /// - If the state is from a skip slot, the root of the latest block in that state. + pub fn new<E: EthSpec>( + block_root: Hash256, + state: &BeaconState<E>, + relative_epoch: RelativeEpoch, + ) -> Result<Self, BeaconStateError> { + let shuffling_epoch = relative_epoch.into_epoch(state.current_epoch()); + + let shuffling_decision_slot = shuffling_epoch + .saturating_sub(1_u64) + .start_slot(E::slots_per_epoch()) + .saturating_sub(1_u64); + + let shuffling_decision_block = if state.slot == shuffling_decision_slot { + block_root + } else { + *state.get_block_root(shuffling_decision_slot)? + }; + + Ok(Self { + shuffling_epoch, + shuffling_decision_block, + }) + } + + pub fn from_components(shuffling_epoch: Epoch, shuffling_decision_block: Hash256) -> Self { + Self { + shuffling_epoch, + shuffling_decision_block, + } + } +} diff --git a/consensus/types/src/slot_epoch_macros.rs b/consensus/types/src/slot_epoch_macros.rs index 26b80692c..caf31417d 100644 --- a/consensus/types/src/slot_epoch_macros.rs +++ b/consensus/types/src/slot_epoch_macros.rs @@ -313,6 +313,18 @@ macro_rules! impl_ssz { }; } +macro_rules! impl_from_str { + ($type: ident) => { + impl std::str::FromStr for $type { + type Err = std::num::ParseIntError; + + fn from_str(s: &str) -> Result<$type, Self::Err> { + u64::from_str(s).map($type) + } + } + }; +} + macro_rules! impl_common { ($type: ident) => { impl_from_into_u64!($type); @@ -328,6 +340,7 @@ macro_rules! 
impl_common { impl_display!($type); impl_debug!($type); impl_ssz!($type); + impl_from_str!($type); }; } diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index 80cc24977..667e2c9b7 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -6,7 +6,8 @@ use std::ops::{Deref, DerefMut}; #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub struct SubnetId(u64); +#[serde(transparent)] +pub struct SubnetId(#[serde(with = "serde_utils::quoted_u64")] u64); impl SubnetId { pub fn new(id: u64) -> Self { diff --git a/consensus/types/src/test_utils/builders/testing_beacon_state_builder.rs b/consensus/types/src/test_utils/builders/testing_beacon_state_builder.rs index 67a3dae26..922d4017f 100644 --- a/consensus/types/src/test_utils/builders/testing_beacon_state_builder.rs +++ b/consensus/types/src/test_utils/builders/testing_beacon_state_builder.rs @@ -4,21 +4,9 @@ use crate::*; use bls::get_withdrawal_credentials; use log::debug; use rayon::prelude::*; -use std::path::PathBuf; pub const KEYPAIRS_FILE: &str = "keypairs.raw_keypairs"; -/// Returns the directory where the generated keypairs should be stored. -/// -/// It is either `$HOME/.lighthouse/keypairs.raw_keypairs` or, if `$HOME` is not available, -/// `./keypairs.raw_keypairs`. -pub fn keypairs_path() -> PathBuf { - let dir = dirs::home_dir() - .map(|home| (home.join(".lighthouse"))) - .unwrap_or_else(|| PathBuf::from("")); - dir.join(KEYPAIRS_FILE) -} - /// Builds a beacon state to be used for testing purposes. /// /// This struct should **never be used for production purposes.** diff --git a/consensus/types/src/utils.rs b/consensus/types/src/utils.rs deleted file mode 100644 index a527fc18f..000000000 --- a/consensus/types/src/utils.rs +++ /dev/null @@ -1,3 +0,0 @@ -mod serde_utils; - -pub use self::serde_utils::*; diff --git a/consensus/types/src/utils/serde_utils.rs b/consensus/types/src/utils/serde_utils.rs deleted file mode 100644 index 36b719646..000000000 --- a/consensus/types/src/utils/serde_utils.rs +++ /dev/null @@ -1,134 +0,0 @@ -use serde::de::Error; -use serde::{Deserialize, Deserializer, Serializer}; - -pub const FORK_BYTES_LEN: usize = 4; -pub const GRAFFITI_BYTES_LEN: usize = 32; - -/// Type for a slice of `GRAFFITI_BYTES_LEN` bytes. -/// -/// Gets included inside each `BeaconBlockBody`. -pub type Graffiti = [u8; GRAFFITI_BYTES_LEN]; - -pub fn u8_from_hex_str<'de, D>(deserializer: D) -> Result<u8, D::Error> -where - D: Deserializer<'de>, -{ - let s: String = Deserialize::deserialize(deserializer)?; - - let start = match s.as_str().get(2..) { - Some(start) => start, - None => return Err(D::Error::custom("string length too small")), - }; - u8::from_str_radix(&start, 16).map_err(D::Error::custom) -} - -#[allow(clippy::trivially_copy_pass_by_ref)] // Serde requires the `byte` to be a ref. -pub fn u8_to_hex_str<S>(byte: &u8, serializer: S) -> Result<S::Ok, S::Error> -where - S: Serializer, -{ - let mut hex: String = "0x".to_string(); - hex.push_str(&hex::encode(&[*byte])); - - serializer.serialize_str(&hex) -} - -pub fn u32_from_hex_str<'de, D>(deserializer: D) -> Result<u32, D::Error> -where - D: Deserializer<'de>, -{ - let s: String = Deserialize::deserialize(deserializer)?; - let start = s - .as_str() - .get(2..) 
- .ok_or_else(|| D::Error::custom("string length too small"))?; - - u32::from_str_radix(&start, 16) - .map_err(D::Error::custom) - .map(u32::from_be) -} - -#[allow(clippy::trivially_copy_pass_by_ref)] // Serde requires the `num` to be a ref. -pub fn u32_to_hex_str<S>(num: &u32, serializer: S) -> Result<S::Ok, S::Error> -where - S: Serializer, -{ - let mut hex: String = "0x".to_string(); - let bytes = num.to_le_bytes(); - hex.push_str(&hex::encode(&bytes)); - - serializer.serialize_str(&hex) -} - -pub fn fork_from_hex_str<'de, D>(deserializer: D) -> Result<[u8; FORK_BYTES_LEN], D::Error> -where - D: Deserializer<'de>, -{ - let s: String = Deserialize::deserialize(deserializer)?; - let mut array = [0 as u8; FORK_BYTES_LEN]; - - let start = s - .as_str() - .get(2..) - .ok_or_else(|| D::Error::custom("string length too small"))?; - let decoded: Vec<u8> = hex::decode(&start).map_err(D::Error::custom)?; - - if decoded.len() != FORK_BYTES_LEN { - return Err(D::Error::custom("Fork length too long")); - } - - for (i, item) in array.iter_mut().enumerate() { - if i > decoded.len() { - break; - } - *item = decoded[i]; - } - Ok(array) -} - -#[allow(clippy::trivially_copy_pass_by_ref)] -pub fn fork_to_hex_str<S>(bytes: &[u8; FORK_BYTES_LEN], serializer: S) -> Result<S::Ok, S::Error> -where - S: Serializer, -{ - let mut hex_string: String = "0x".to_string(); - hex_string.push_str(&hex::encode(&bytes)); - - serializer.serialize_str(&hex_string) -} - -pub fn graffiti_to_hex_str<S>(bytes: &Graffiti, serializer: S) -> Result<S::Ok, S::Error> -where - S: Serializer, -{ - let mut hex_string: String = "0x".to_string(); - hex_string.push_str(&hex::encode(&bytes)); - - serializer.serialize_str(&hex_string) -} - -pub fn graffiti_from_hex_str<'de, D>(deserializer: D) -> Result<Graffiti, D::Error> -where - D: Deserializer<'de>, -{ - let s: String = Deserialize::deserialize(deserializer)?; - let mut array = Graffiti::default(); - - let start = s - .as_str() - .get(2..) - .ok_or_else(|| D::Error::custom("string length too small"))?; - let decoded: Vec<u8> = hex::decode(&start).map_err(D::Error::custom)?; - - if decoded.len() > GRAFFITI_BYTES_LEN { - return Err(D::Error::custom("Fork length too long")); - } - - for (i, item) in array.iter_mut().enumerate() { - if i > decoded.len() { - break; - } - *item = decoded[i]; - } - Ok(array) -} diff --git a/consensus/types/src/validator_subscription.rs b/consensus/types/src/validator_subscription.rs new file mode 100644 index 000000000..fd48660c5 --- /dev/null +++ b/consensus/types/src/validator_subscription.rs @@ -0,0 +1,21 @@ +use crate::*; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; + +/// A validator subscription, created when a validator subscribes to a slot to perform optional aggregation +/// duties. +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] +pub struct ValidatorSubscription { + /// The validator's index. + pub validator_index: u64, + /// The index of the committee within `slot` of which the validator is a member. Used by the + /// beacon node to quickly evaluate the associated `SubnetId`. + pub attestation_committee_index: CommitteeIndex, + /// The slot in which to subscribe. + pub slot: Slot, + /// The committee count at `slot`. + pub committee_count_at_slot: u64, + /// If true, the validator is an aggregator and the beacon node should aggregate attestations + /// for this slot.
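    // Note: aggregator status is computed elsewhere, from the hash of the
    // validator's slot signature (see `SelectionProof`); this flag only
    // forwards the result of that check to the beacon node.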
+ pub is_aggregator: bool, +} diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/voluntary_exit.rs index a9509d7af..c33ea7e79 100644 --- a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/voluntary_exit.rs @@ -16,6 +16,7 @@ use tree_hash_derive::TreeHash; pub struct VoluntaryExit { /// Earliest epoch when voluntary exit can be processed. pub epoch: Epoch, + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, } diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index e1cb1fde3..7461a70b6 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -6,17 +6,17 @@ edition = "2018" [dependencies] eth2_ssz = "0.1.2" -tree_hash = "0.1.0" +tree_hash = "0.1.1" milagro_bls = { git = "https://github.com/sigp/milagro_bls", branch = "paulh" } -rand = "0.7.2" -serde = "1.0.102" -serde_derive = "1.0.102" -serde_hex = { path = "../../consensus/serde_hex" } -hex = "0.3" +rand = "0.7.3" +serde = "1.0.116" +serde_derive = "1.0.116" +serde_utils = { path = "../../consensus/serde_utils" } +hex = "0.4.2" eth2_hashing = "0.1.0" -ethereum-types = "0.9.1" -arbitrary = { version = "0.4.4", features = ["derive"], optional = true } -zeroize = { version = "1.0.0", features = ["zeroize_derive"] } +ethereum-types = "0.9.2" +arbitrary = { version = "0.4.6", features = ["derive"], optional = true } +zeroize = { version = "1.1.1", features = ["zeroize_derive"] } blst = { git = "https://github.com/sigp/blst.git", rev = "284f7059642851c760a09fb1708bcb59c7ca323c" } [features] diff --git a/crypto/bls/src/generic_aggregate_signature.rs b/crypto/bls/src/generic_aggregate_signature.rs index 240b7d188..0517512f8 100644 --- a/crypto/bls/src/generic_aggregate_signature.rs +++ b/crypto/bls/src/generic_aggregate_signature.rs @@ -6,7 +6,7 @@ use crate::{ }; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; -use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::fmt; use std::marker::PhantomData; @@ -245,6 +245,23 @@ where impl_tree_hash!(SIGNATURE_BYTES_LEN); } +impl<Pub, AggPub, Sig, AggSig> fmt::Display for GenericAggregateSignature<Pub, AggPub, Sig, AggSig> +where + Sig: TSignature<Pub>, + AggSig: TAggregateSignature<Pub, AggPub, Sig>, +{ + impl_display!(); +} + +impl<Pub, AggPub, Sig, AggSig> std::str::FromStr + for GenericAggregateSignature<Pub, AggPub, Sig, AggSig> +where + Sig: TSignature<Pub>, + AggSig: TAggregateSignature<Pub, AggPub, Sig>, +{ + impl_from_str!(); +} + impl<Pub, AggPub, Sig, AggSig> Serialize for GenericAggregateSignature<Pub, AggPub, Sig, AggSig> where Sig: TSignature<Pub>, diff --git a/crypto/bls/src/generic_public_key.rs b/crypto/bls/src/generic_public_key.rs index 29814d24a..7b22d2729 100644 --- a/crypto/bls/src/generic_public_key.rs +++ b/crypto/bls/src/generic_public_key.rs @@ -1,7 +1,7 @@ use crate::Error; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; -use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::fmt; use std::hash::{Hash, Hasher}; @@ -97,6 +97,14 @@ impl<Pub: TPublicKey> TreeHash for GenericPublicKey<Pub> { impl_tree_hash!(PUBLIC_KEY_BYTES_LEN); } +impl<Pub: TPublicKey> fmt::Display for GenericPublicKey<Pub> { + impl_display!(); +} + +impl<Pub: TPublicKey> std::str::FromStr for GenericPublicKey<Pub> { + impl_from_str!(); +} + impl<Pub: TPublicKey> Serialize for GenericPublicKey<Pub> { 
impl_serde_serialize!(); } diff --git a/crypto/bls/src/generic_public_key_bytes.rs b/crypto/bls/src/generic_public_key_bytes.rs index beceac1c9..387eb91c9 100644 --- a/crypto/bls/src/generic_public_key_bytes.rs +++ b/crypto/bls/src/generic_public_key_bytes.rs @@ -4,7 +4,7 @@ use crate::{ }; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; -use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::convert::TryInto; use std::fmt; @@ -101,6 +101,16 @@ where Pub: TPublicKey, { fn from(pk: GenericPublicKey<Pub>) -> Self { + Self::from(&pk) + } +} + +/// Serializes the `PublicKey` in compressed form, storing the bytes in the newly created `Self`. +impl<Pub> From<&GenericPublicKey<Pub>> for GenericPublicKeyBytes<Pub> +where + Pub: TPublicKey, +{ + fn from(pk: &GenericPublicKey<Pub>) -> Self { Self { bytes: pk.serialize(), _phantom: PhantomData, @@ -132,6 +142,14 @@ impl<Pub> TreeHash for GenericPublicKeyBytes<Pub> { impl_tree_hash!(PUBLIC_KEY_BYTES_LEN); } +impl<Pub> fmt::Display for GenericPublicKeyBytes<Pub> { + impl_display!(); +} + +impl<Pub> std::str::FromStr for GenericPublicKeyBytes<Pub> { + impl_from_str!(); +} + impl<Pub> Serialize for GenericPublicKeyBytes<Pub> { impl_serde_serialize!(); } diff --git a/crypto/bls/src/generic_signature.rs b/crypto/bls/src/generic_signature.rs index 28a936195..44250d4a6 100644 --- a/crypto/bls/src/generic_signature.rs +++ b/crypto/bls/src/generic_signature.rs @@ -4,7 +4,7 @@ use crate::{ }; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; -use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::fmt; use std::marker::PhantomData; @@ -149,6 +149,14 @@ impl<PublicKey, T: TSignature<PublicKey>> TreeHash for GenericSignature<PublicKe impl_tree_hash!(SIGNATURE_BYTES_LEN); } +impl<PublicKey, T: TSignature<PublicKey>> fmt::Display for GenericSignature<PublicKey, T> { + impl_display!(); +} + +impl<PublicKey, T: TSignature<PublicKey>> std::str::FromStr for GenericSignature<PublicKey, T> { + impl_from_str!(); +} + impl<PublicKey, T: TSignature<PublicKey>> Serialize for GenericSignature<PublicKey, T> { impl_serde_serialize!(); } diff --git a/crypto/bls/src/generic_signature_bytes.rs b/crypto/bls/src/generic_signature_bytes.rs index 1f987ecd3..bc7e7f111 100644 --- a/crypto/bls/src/generic_signature_bytes.rs +++ b/crypto/bls/src/generic_signature_bytes.rs @@ -5,7 +5,7 @@ use crate::{ }; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; -use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::convert::TryInto; use std::fmt; @@ -124,6 +124,14 @@ impl<Pub, Sig> TreeHash for GenericSignatureBytes<Pub, Sig> { impl_tree_hash!(SIGNATURE_BYTES_LEN); } +impl<Pub, Sig> fmt::Display for GenericSignatureBytes<Pub, Sig> { + impl_display!(); +} + +impl<Pub, Sig> std::str::FromStr for GenericSignatureBytes<Pub, Sig> { + impl_from_str!(); +} + impl<Pub, Sig> Serialize for GenericSignatureBytes<Pub, Sig> { impl_serde_serialize!(); } diff --git a/crypto/bls/src/impls/blst.rs b/crypto/bls/src/impls/blst.rs index 3700c40f7..6a35637b4 100644 --- a/crypto/bls/src/impls/blst.rs +++ b/crypto/bls/src/impls/blst.rs @@ -66,7 +66,7 @@ pub fn verify_signature_sets<'a>( // TODO: remove this `unsafe` code-block once we get a safe option from `blst`. 
// - // See https://github.com/supranational/blst/issues/13 + // https://github.com/sigp/lighthouse/issues/1720 unsafe { blst::blst_scalar_from_uint64(rand_i.as_mut_ptr(), vals.as_ptr()); rands.push(rand_i.assume_init()); diff --git a/crypto/bls/src/macros.rs b/crypto/bls/src/macros.rs index ca103da6d..136faeb44 100644 --- a/crypto/bls/src/macros.rs +++ b/crypto/bls/src/macros.rs @@ -76,6 +76,35 @@ macro_rules! impl_ssz_decode { }; } +/// Contains the functions required for a `fmt::Display` implementation. +/// +/// Does not include the `Impl` section since it gets very complicated when it comes to generics. +macro_rules! impl_display { + () => { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", hex_encode(self.serialize().to_vec())) + } + }; +} + +/// Contains the functions required for a `std::str::FromStr` implementation. +/// +/// Does not include the `Impl` section since it gets very complicated when it comes to generics. +macro_rules! impl_from_str { + () => { + type Err = String; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + if s.starts_with("0x") { + let bytes = hex::decode(&s[2..]).map_err(|e| e.to_string())?; + Self::deserialize(&bytes[..]).map_err(|e| format!("{:?}", e)) + } else { + Err("must start with 0x".to_string()) + } + } + }; +} + /// Contains the functions required for a `serde::Serialize` implementation. /// /// Does not include the `Impl` section since it gets very complicated when it comes to generics. @@ -85,7 +114,7 @@ macro_rules! impl_serde_serialize { where S: Serializer, { - serializer.serialize_str(&hex_encode(self.serialize().to_vec())) + serializer.serialize_str(&self.to_string()) } }; } @@ -99,9 +128,25 @@ macro_rules! impl_serde_deserialize { where D: Deserializer<'de>, { - let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; - Self::deserialize(&bytes[..]) - .map_err(|e| serde::de::Error::custom(format!("invalid pubkey ({:?})", e))) + pub struct StringVisitor; + + impl<'de> serde::de::Visitor<'de> for StringVisitor { + type Value = String; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a hex string with 0x prefix") + } + + fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> + where + E: serde::de::Error, + { + Ok(value.to_string()) + } + } + + let string = deserializer.deserialize_str(StringVisitor)?; + <Self as std::str::FromStr>::from_str(&string).map_err(serde::de::Error::custom) } }; } diff --git a/crypto/bls/src/zeroize_hash.rs b/crypto/bls/src/zeroize_hash.rs index 3d81df1d8..41136f97a 100644 --- a/crypto/bls/src/zeroize_hash.rs +++ b/crypto/bls/src/zeroize_hash.rs @@ -1,9 +1,11 @@ use super::SECRET_KEY_BYTES_LEN; +use serde_derive::{Deserialize, Serialize}; use zeroize::Zeroize; /// Provides a wrapper around a `[u8; SECRET_KEY_BYTES_LEN]` that implements `Zeroize` on `Drop`.
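// With the `#[serde(transparent)]` added below, `ZeroizeHash` serializes and
// deserializes exactly like the inner `[u8; SECRET_KEY_BYTES_LEN]`, adding no
// wrapper layer to the serialized form while keeping the zeroize-on-drop guarantee.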
-#[derive(Zeroize)] +#[derive(Zeroize, Serialize, Deserialize)] #[zeroize(drop)] +#[serde(transparent)] pub struct ZeroizeHash([u8; SECRET_KEY_BYTES_LEN]); impl ZeroizeHash { diff --git a/crypto/eth2_hashing/Cargo.toml b/crypto/eth2_hashing/Cargo.toml index 6f265b64d..21d1a9e9f 100644 --- a/crypto/eth2_hashing/Cargo.toml +++ b/crypto/eth2_hashing/Cargo.toml @@ -19,7 +19,7 @@ sha2 = "0.9.1" rustc-hex = "2.1.0" [target.'cfg(target_arch = "wasm32")'.dev-dependencies] -wasm-bindgen-test = "0.3.12" +wasm-bindgen-test = "0.3.18" [features] default = ["zero_hash_cache"] diff --git a/crypto/eth2_key_derivation/Cargo.toml b/crypto/eth2_key_derivation/Cargo.toml index 610f7a566..fea5eb67e 100644 --- a/crypto/eth2_key_derivation/Cargo.toml +++ b/crypto/eth2_key_derivation/Cargo.toml @@ -7,10 +7,10 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -sha2 = "0.9.0" -zeroize = { version = "1.0.0", features = ["zeroize_derive"] } +sha2 = "0.9.1" +zeroize = { version = "1.1.1", features = ["zeroize_derive"] } num-bigint-dig = { version = "0.6.0", features = ["zeroize"] } -ring = "0.16.9" +ring = "0.16.12" bls = { path = "../bls" } [dev-dependencies] diff --git a/crypto/eth2_key_derivation/src/derived_key.rs b/crypto/eth2_key_derivation/src/derived_key.rs index 74dfcfbf3..8ed6c9bd4 100644 --- a/crypto/eth2_key_derivation/src/derived_key.rs +++ b/crypto/eth2_key_derivation/src/derived_key.rs @@ -2,6 +2,7 @@ use crate::{lamport_secret_key::LamportSecretKey, secret_bytes::SecretBytes, Zer use num_bigint_dig::BigUint; use ring::hkdf::{KeyType, Prk, Salt, HKDF_SHA256}; use sha2::{Digest, Sha256}; +use std::convert::TryFrom; use zeroize::Zeroize; /// The byte size of a SHA256 hash. @@ -21,7 +22,7 @@ pub const R: &str = "52435875175126190479447740508185965837690552500527637822603 /// /// In EIP-2333 this value is defined as: /// -/// `ceil((1.5 * ceil(log2(r))) / 8)` +/// `ceil((3 * ceil(log2(r))) / 16)` pub const MOD_R_L: usize = 48; /// A BLS secret key that is derived from some `seed`, or generated as a child from some other @@ -81,9 +82,30 @@ fn derive_child_sk(parent_sk: &[u8], index: u32) -> ZeroizeHash { /// /// Equivalent to `HKDF_mod_r` in EIP-2333. fn hkdf_mod_r(ikm: &[u8]) -> ZeroizeHash { - let prk = hkdf_extract(b"BLS-SIG-KEYGEN-SALT-", ikm); - let okm = &hkdf_expand(prk, MOD_R_L); - mod_r(okm.as_bytes()) + // ikm = ikm + I2OSP(0,1) + let mut ikm_with_postfix = SecretBytes::zero(ikm.len() + 1); + ikm_with_postfix.as_mut_bytes()[..ikm.len()].copy_from_slice(ikm); + + // info = "" + I2OSP(L, 2) + let info = u16::try_from(MOD_R_L) + .expect("MOD_R_L too large") + .to_be_bytes(); + + let mut output = ZeroizeHash::zero(); + let zero_hash = ZeroizeHash::zero(); + + let mut salt = b"BLS-SIG-KEYGEN-SALT-".to_vec(); + while output.as_bytes() == zero_hash.as_bytes() { + let mut hasher = Sha256::new(); + hasher.update(salt.as_slice()); + salt = hasher.finalize().to_vec(); + + let prk = hkdf_extract(&salt, ikm_with_postfix.as_bytes()); + let okm = &hkdf_expand(prk, &info, MOD_R_L); + + output = mod_r(okm.as_bytes()); + } + output } /// Interprets `bytes` as a big-endian integer and returns that integer modulo the order of the @@ -145,7 +167,7 @@ fn parent_sk_to_lamport_pk(ikm: &[u8], index: u32) -> ZeroizeHash { /// Equivalent to `IKM_to_lamport_SK` in EIP-2333. 
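// Size check, assuming the usual EIP-2333 parameters (HASH_SIZE = 32,
// LAMPORT_ARRAY_SIZE = 255): the expand below yields 255 * 32 = 8160 bytes of
// output key material, one 32-byte chunk per element of the Lamport secret key.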
fn ikm_to_lamport_sk(salt: &[u8], ikm: &[u8]) -> LamportSecretKey { let prk = hkdf_extract(salt, ikm); - let okm = hkdf_expand(prk, HASH_SIZE * LAMPORT_ARRAY_SIZE as usize); + let okm = hkdf_expand(prk, &[], HASH_SIZE * LAMPORT_ARRAY_SIZE as usize); LamportSecretKey::from_bytes(okm.as_bytes()) } @@ -159,7 +181,7 @@ fn hkdf_extract(salt: &[u8], ikm: &[u8]) -> Prk { /// Performs a `HKDF-Expand` on the `prk` (pseudo-random key), returning `l` bytes. /// /// Defined in [RFC5869](https://tools.ietf.org/html/rfc5869). -fn hkdf_expand(prk: Prk, l: usize) -> SecretBytes { +fn hkdf_expand(prk: Prk, info: &[u8], l: usize) -> SecretBytes { struct ExpandLen(usize); impl KeyType for ExpandLen { @@ -169,7 +191,7 @@ fn hkdf_expand(prk: Prk, l: usize) -> SecretBytes { } let mut okm = SecretBytes::zero(l); - prk.expand(&[], ExpandLen(l)) + prk.expand(&[info], ExpandLen(l)) .expect("expand len is constant and cannot be too large") .fill(okm.as_mut_bytes()) .expect("fill len is constant and cannot be too large"); @@ -307,528 +329,528 @@ mod test { /// Returns the copy-paste values from the spec. fn get_raw_vector() -> RawTestVector { RawTestVector { - seed: "0xc55257c360c07c72029aebc1b53c05ed0362ada38ead3e3e9efa3708e53495531f09a6987599d18264c1e1c92f2cf141630c7a3c4ab7c81b2f001698e7463b04", - master_sk: - "12513733877922233913083619867448865075222526338446857121953625441395088009793", - child_index: 0, - lamport_0: vec![ - "0x7b4a587eac94d7f56843e718a04965d4832ef826419b4001a3ad0ba77eb44a3b", - "0x90f45a712112122429412921ece5c30eb2a6daf739dc9034fc79424daeb5eff6", - "0xd061c2799de00b2be90eb1cc295f4c31e22d4b45c59a9b9b2554379bea7783cb", - "0x3ad17e4cda2913b5180557fbe7db04b5ba440ce8bb035ae27878d66fbfa50d2c", - "0xf5b954490933ad47f8bf612d4a4f329b3aa8914b1b83d59e15e271e2a087e002", - "0x95d68d505bf4ff3e5149bc5499cf4b2f00686c674a29a8d903f70e569557d867", - "0x1b59c76d9bb2170b220a87833582ede5970d4a336d91c99a812825afe963e056", - "0x4310ff73cfbbf7b81c39ecbf1412da33e9388c1a95d71a75e51fe12256551ceb", - "0xee696343f823e5716e16747f3bbae2fc6de233fe10eea8e45b4579018da0874f", - "0xae12a437aaa7ae59f7d8328944b6a2b973a43565c55d5807dc2faf223a33aa73", - "0x2a3ae0b47f145bab629452661ff7741f111272e33ec571030d0eb222e1ed1390", - "0x1a3ea396e8cbd1d97733ef4753d6840b42c0795d2d693f18e6f0e7b3fff2beb2", - "0x472429d0643c888bfdfe6e6ccfdeee6d345d60c6710859ac29fc289fd3656347", - "0xa32d4d955949b8bed0eb20f586d8fd516d6ddec84fbbc36998d692633c349822", - "0xe5ac8ac5ee1d40e53a7abf36e8269d5d5fce450a87feae8e59f432a44bcc7666", - "0xddf9e497ed78032fbd72d9b8abd5204d81c3475f29afa44cdf1ded8ea72dd1dc", - "0x945c62e88fb1e5f3c15ff57cd5eb1586ee93ec5ec80154c5a9c50241c5adae0a", - "0xc8868b50fc8423c96b7efa1ede4d3203a6b835dbeb6b2ababc58397e6b31d9dd", - "0x66de9bd86b50e2b6a755310520af655759c1753bff34b79a5cd63d6811fc8c65", - "0x5b13786c6068df7735343e5591393bea8aee92ac5826d6132bf4f5ebf1098776", - "0xa2038fc7d8e3cb2eda2bd303cfa76a9e5d8b88293918bec8b2fc03be75684f14", - "0x47a13f6b2308a50eded830fdee7c504bf49d1fe6a95e337b0825d0d77a520129", - "0xb534cdddcf1aa1c6b4cbba46d1db31b766d958e0a0306450bc031d1e3ed79d97", - "0x54aa051b754c31658377f7bff00b7deaa861e74cb12e1eb84216666e19b23d69", - "0x0220d57f63435948818eb376367b113c188e37451c216380f65d1ad55f73f527", - "0xf9dd2e391565534a4db84980433bf5a56250f45fe294fce2679bcf115522c081", - "0x1166591ee2ca59b9f4e525900f085141be8879c66ef18529968babeb87c44814", - "0xf4fa2e8de39bdbeb29b64d8b440d3a6c9a6ca5bdce543877eaee93c11bd70ab8", - "0x07f466d73b93db283b3f7bfaf9c39ae296adc376ab307ef12312631d0926790e", -
"0xb2ecff93acb4fa44c1dbf8464b81734a863b6d7142b02f5c008907ea4dc9aaa1", - "0xa1d9c342f6c293ac6ef8b5013cba82c4bad6ed7024d782948cb23cd490039ba1", - "0xc7d04a639ba00517ece4dbc5ef4aaf20e0ccde6e4a24c28936fabe93dec594db", - "0xe3cbb9810472d9dd1cdb5eed2f74b67ea60e973d2d2e897bd64728c9b1aa0679", - "0xe36884703413958ff2aba7a1f138a26d0ac0a371270f0169219beb00a5add5f0", - "0xe5ea300a09895b3f98de5232d92a36d5611cbcf9aaf9e7bb20cf6d1696ad1cb4", - "0xc136cda884e18175ab45148ed4f9d0d1a3c5e11ad0275058e61ae48eb151a81f", - "0x3ee1101e944c040021187e93b6e0beb1048c75fb74f3fdd67756b1c8517a311f", - "0x016964fd6fc32b9ad07a630949596715dee84d78230640368ff0929a280cf3a2", - "0xe33865fc03120b94333bb754fd097dc0f90e69ff6fd221d6aae59fcf2d762d76", - "0xe80bb3515a09ac6ecb4ec59de22701cdf954b1ae8a677fd85508c5b041f28058", - "0x3889af7cd325141ec288021ede136652a0411d20364005b9d3ca9102cb368f57", - "0x18dad0bc975cf8800addd54c7867389d3f7fe1b97d348bd8412a6cbfb75c520a", - "0x09035218686061ee91bd2ad57dc6fb6da7243b8177a153484524b2b228da5314", - "0x688fd7a97551c64eae33f91abb073a46eafbbacd5595c6bac2e57dd536acdfe2", - "0x1fc164dce565a1d0da59cc8048b334cc5eb84bf04de2399ddb847c22a7e32ab7", - "0xa2a340ba05c8a30dd1cab886a926b761758eba0e41b5c4c5dfd4a42f249655c1", - "0xc43dffe01479db836a6a1a74564b297fad0d69c6b06cf593f6db9f26b4f307d5", - "0x73cef7f3ff724a30a79e1dca74cef74954afeefa2e476c4dec65afe50c16c5c4", - "0xa54002253ab7b95cc5b664b3f08976400475cc56f170b939f6792e730ff5170b", - "0x9ade43053d41afebc002f09476dffd1b13ecbf67f810791540b92ca56d5e63e4", - "0x234e7cbfbe45b22a871db26738fa05de09213a925439d7f3e5108132e521b280", - "0x066b712417332c7cfca871fb1bb5839f0341acf9266229603a3eddbc8a93b59f", - "0xb5857acdcf636330da2cfcc99c81d9fdbd20c506a3c0e4f4f6a139d2a64f051c", - "0xe119908a150a49704b6bbba2c470cd619a0ae10dd9736e8d491890e3c8509fff", - "0xb8a5c5dbb51e6cb73cca95b4ad63ea3c7399cd16b05ab6261535495b3af2ca51", - "0x05624a1d4d2d2a31160bc48a6314bbf13eaddf56cddb0f0aa4ed3fb87f8b479f", - "0x483daceff1c3baa0ed0f3be7e534eebf5f4aed424ecd804edfbf5c56b3476b50", - "0x424d04694e7ae673707c77eb1c6d0996d250cfab6832ee3506a12e0384a3c5c9", - "0xa11fed0ed8057966bfe7136a15a814d06a516fbc9d44aeef87c509137a26190e", - "0x3694d22d1bc64658f3adbe2cc9f1716aee889066e0950e0b7a2fd576ed36bb76", - "0x49a13000a87f39f93d0ae9c3a4cfccbf440c0a75cce4c9d70dac627b6d6958b3", - "0xb3ff0cdd878d5ac1cb12e7d0b300d649fdd008800d498ae4f9fbf9510c74249a", - "0xe52a867cfb87d2fe7102d23d8d64925f7b75ca3f7d6bb763f7337352c255e0be", - "0x6513b372e4e557cca59979e48ec27620e9d7cdb238fcf4a9f19c3ba502963be0", - "0x9f69d82d4d51736902a987c8b5c30c2b25a895f2af5d2c846667ff6768bcc774", - "0x049a220dbe3340749f94643a429cb3cba3c92b561dc756a733d652d838728ab3", - "0x4fa2cd877aa115b476082b11053309f3537fa03d9158085f5f3f4bab6083e6da", - "0xed12db4069eb9f347735816afcee3fe43d4a6999fef8240b91bf4b05447d734f", - "0x3ecbe5eda469278f68548c450836a05cc500864664c7dda9b7526f084a891032", - "0x690d8f928fc61949c22e18cceaa2a446f8e1b65bd2e7af9e0a8e8284134ab3d2", - "0x99e09167a09f8261e7e8571d19148b7d7a75990d0702d9d582a2e4a96ac34f8e", - "0x6d33931693ed7c2e1d080b6a37da52c279a06cec5f534305819f7adf7db0afe3", - "0xc4b735462a9a656e28a52b1d4992ea9dea826b858971d698453a4be534d6bb70", - "0xedf92b10302dc41f8d362b360f4c2ef551d50e2ded012312c964002d2afc46d7", - "0x58f6691cca081ae5c3661dd171b87cc49c90359bb03cc0e57e503f7fcf14aefc", - "0x5d29b8b4ee295a73c4a8618927b3d14b76c7da049133a2257192b10be8c17a6a", - "0x646802fa42801e0ae24011fb4f62e87219ef1da01f7fc14bf8d6bd2d9e7c21f1", - "0x23abf45eee65cc4c1e95ccab42ad280a00bb3b14d243e2021a684075f900141e", - 
"0x2b1ae95c975bf9c387eae506fdb5e58afd2d198f00a21cd3fddb5855e8021e4d", - "0x0ef9f6e1c0583493d343e75f9c0c557fa6da0dc12b17a96c5757292916b72ee3", - "0x04c7fc76195c64a3285af14161077c045ff6ddbb67c0ff91b080f98eb6781e5c", - "0xba12679b97027d0e7076e6d19086c07792eaa7f78350842fbef8ddf5bcd3ecc0", - "0xcead458e6799df4d2f6cbf7f13cb3afec3441a354816e3071856ed49cbdbb1a7", - "0xbe6c56256556bb5c6727a1d9cb641d969677f56bb5ad7f8f7a7c9cfd128427b4", - "0xc80f11963ff40cb1888054b83c0463d32f737f2e7d42098e639023db0dfc84d4", - "0xac80006c1296bcfde86697efebb87fb0fddfb70dd34dd2ee4c152482af4687eb", - "0xbb7d13ce184249df4576fc3d13351e1683500e48726cd4198423f14f9094068b", - "0x1b2d9c40c55bd7362664fa46c1268e094d56c8193e3d991c08dc7a6e4ca14fa1", - "0x9bd236254d0565f5b2d24552d4b4d732de43b0adaa64ecd8be3efc6508577591", - "0x38078cefccc04e8312d79e0636e0e3157434c50a2ad4e3e87cc6584c41eec8b5", - "0xb5d15a8527ff3fa254ba61ffceb02d2570b53361894f351a9e839c0bb716857d", - "0x6763dad684bf2e914f40ae0a7ee0cdf12c97f41fc05a485d5991b4daad21a3f8", - "0xc80363c20df589333ecbe05bd5f2c19942ebc2593626dc50d00835c40fb8d005", - "0x48502b56ae93acd2794f847cbe825525d5d5f59f0f75c67aff84e5338776b3af", - "0xfd8e033493ba8af264a855a78ab07f37d936351d2879b95928909ed8df1b4f91", - "0x11f75bee9eac7356e65ebc7f004ccdc1da80807380d69143293d1421f50b1c97", - "0x903a88a3ebe84ca1c52a752b1faffa9ca1daedac9cbf1aa70942efc9beb44b79", - "0x2c0dcd68837f32a69da651045ad836b8cd6b48f2c8c5d73a3bd3bba6148d345a", - "0x0aa0f49b3476f3fdb6393f2ab601e0009586090b72ee54a525734f51598960d5", - "0xf7a789f013f702731656c562caa15b04cb7c9957376c4d80b8839167bb7fa626", - "0x4e0be1b19e305d82db3fd8affd67b0d2559da3edbfb08d19632a5cc46a90ed07", - "0x3caaccfc546d84d543eaf4f4c50c9c8fd831c12a8de56fdb9dfd04cc082882fe", - "0x894f6a01fd34f0642077e22981752011678548eb70eb55e8072c1caffc16fe02", - "0xae7eb54adaa68679348ea3537a49be669d1d61001fbab9fac259ba727dbc9a1a", - "0x291a1cbdceff957b5a65440ab67fb8672de881230fe3108a15ca487c2662c2c7", - "0x891d43b867137bf8beb9df4da2d951b5984a266a8cd74ec1593801d005f83f08", - "0xc558407f6491b37a10835e0ad7ce74f4e368aa49157a28873f7229310cb2d7fd", - "0x9ce061b0a072e1fe645f3479dac089b5bfb78cfa6cfbe5fd603bcdb504711315", - "0xa8e30d07b09275115dd96472ecf9bc316581caf307735176ca226d4cd9022925", - "0x918ee6d2efba7757266577691203f973cf4f4cac10f7d5f86acd2a797ff66583", - "0xfa31ba95e15d1635d087522f3d0da9cf7acac4ed6d0ac672654032a3c39244a6", - "0xf2952b58f015d6733af06938cd1f82fbddb3b796823bee7a3dbffa04efc117c2", - "0x46f8f742d3683de010ede528128d1181e8819f4252474f51371a177bfa518fa4", - "0x4ca1cc80094f2910cf83a9e65ad70e234690ffb9142793911ec7cf71663545b3", - "0x381965037b5725c71bfa6989d4c432f6611de8e8ec387f3cfc0dcb1a15191b73", - "0x2562b88ed3b86ba188be056805a3b7a47cb1a3f630d0e2f39647b0792ec6b7d8", - "0x565f6d14e7f22724f06d40f54465ad40d265b6de072b34a09d6e37a97a118cd8", - "0xc2982c861ad3278063b4a5f584eaf866db684cc4e712d64230fc9ee33bb4253b", - "0xfd806c91927e549d8d400ab7aa68dbe60af988fbabf228483ab0c8de7dab7eee", - "0xafae6ff16c168a3a3b5c2f1742d3f89fa4777c4bd0108f174014debf8f4d629c", - "0xaf5a4be694de5e53632be9f1a49bd582bf76002259460719197079c8c4be7e66", - "0xa8df4a4b4c5bf7a4498a11186f8bb7679137395f28e5c2179589e1c1f26504b5", - "0xce8b77c64c646bb6023f3efaed21ca2e928e21517422b124362cf8f4d9667405", - "0x62e67a8c423bc6c6c73e6cd8939c5c1b110f1a38b2ab75566988823762087693", - "0x7e778f29937daaa272d06c62d6bf3c9c0112d45a3df1689c602d828b5a315a9f", - "0xe9b5abd46c2377e602ff329050afa08afe152f4b0861db8a887be910ff1570bf", - "0xa267b1b2ccd5d96ae8a916b0316f06fafb886b3bb41286b20763a656e3ca0052", - 
"0xb8ed85a67a64b3453888a10dedf4705bd27719664deff0996a51bb82bc07194f", - "0x57907c3c88848f9e27bc21dd8e7b9d61de48765f64d0e943e7a6bb94cc2021ab", - "0xd2f6f1141a3b76bf9bf581d49091142944c7f9f323578f5bdd5522ba32291243", - "0xc89f104200ed4c5d5f7046d99e68ae6f8ec31e2eeceb568eb05087e3aa546a74", - "0xc9f367fae45c39299693b134229bb6dd0da112fd1a7d19b7f4772c01e5cbe479", - "0x64e2d4ad51948764dd578d26357e29e8e4d076d65c05cffdf8211b624fefe9ac", - "0xf9a9b4e6d5be7fc051df8ecd9c389d16b1af86c749308e6a23f7ff4871f0ba9a", - "0x0d2b2a228b86ebf9499e1bf7674335087ced2eb35ce0eb90954a0f75751a2bf4", - "0xff8531b45420a960d6e48ca75d77758c25733abde83cd4a6160beae978aa735e", - "0xd6d412bd1cb96a2b568d30e7986b7e8994ca92fd65756a758295499e11ea52b6", - "0xad8533fccbecdd4a0b00d648bfe992360d265f7be70c41d9631cefad5d4fe2f6", - "0x31fbf2afb8d5cc896d517cfc5201ee24527e8d283f9c37ca10233bef01000a20", - "0x2fd67b7365efc258131eb410f46bf3b1cbd3e9c76fd6e9c3e86c9ff1054116ff", - "0xab6aa29f33d18244be26b23abadb39679a8aa56dafc0dd7b87b672df5f5f5db6", - "0xbad3b0f401ca0a53a3d465de5cecd57769ec9d4df2c04b78f8c342a7ed35bbee", - "0xbdc24d46e471835d83ce8c5b9ecbe675aab2fd8f7831c548e8efd268c2ee2232", - "0x87265fabd7397d08f0729f13a2f3a25bbc8c874b6b50f65715c92b62f665f925", - "0xa379fd268e7ff392c067c2dd823996f72714bf3f936d5eeded71298859f834cb", - "0xf3ab452c9599ebfbb234f72a86f3062aed12ae1f634abbe542ff60f5cefc1fcf", - "0x2b17ebb053a3034c07da36ed2ba42c25ad8e61dec87b5527f5e1c755eb55405a", - "0x305b40321bd67bf48bfd121ee4d5d347268578bd4b8344560046594771a11129", - "0xe7029c9bea020770d77fe06ca53b521b180ad6a9e747545aadc1c74beef7241c", - "0xabc357cec0f4351a5ada22483d3b103890392f8d8f9cb8073a61969ed1be4e08", - "0x97f88c301946508428044d05584dc41af2e6a0de946de7d7f5269c05468afe20", - "0xbdc08fe8d6f9a05ad8350626b622ad8eec80c52331d154a3860c98676719cfbd", - "0x161590fc9f7fcf4eaba2f950cf588e6da79e921f139d3c2d7ebe017003a4799e", - "0x91b658db75bc3d1954bfde2ef4bc12980ff1688e09d0537f170c9ab47c162320", - "0x76d995f121406a63ce26502e7ec2b653c221cda357694a8d53897a99e6ce731e", - "0x3d6b2009586aceb7232c01259bb9428523c02b0f42c2100ec0d392418260c403", - "0x14ca74ecbc8ec0c67444c6cb661a2bce907aa2a1453b11f16002b815b94a1c49", - "0x553b4dc88554ebe7b0a3bd0813104fd1165a1f950ceace11f5841aa74b756d85", - "0x4025bf4ad86751a156d447ce3cabafde9b688efcdafd8aa4be69e670f8a06d9e", - "0x74260cf266997d19225e9a0351a9acfa17471fccdf5edc9ccc3bb0d23ef551c5", - "0xf9dbca3e16d234e448cf03877746baeb62a8a25c261eff42498b1813565c752a", - "0x2652ec98e05c1b6920fb6ddc3b57e366d514ffa4b35d068f73b5603c47f68f2f", - "0x83f090efeb36db91eb3d4dfbb17335c733fce7c64317d0d3324d7caaaf880af5", - "0x1e86257f1151fb7022ed9ed00fb961a9a9989e58791fb72043bb63ed0811791c", - "0xd59e4dcc97cba88a48c2a9a2b29f79125099a39f74f4fb418547de8389cd5d15", - "0x875a19b152fe1eb3fe1de288fa9a84864a84a79bac30b1dbd70587b519a9770e", - "0x9c9dc2d3c8f2f6814cfc61b42ee0852bbaf3f523e0409dd5df3081b750a5b301", - "0xf6f7f81c51581c2e5861a00b66c476862424151dd750efeb20b7663d552a2e94", - "0x723fcb7ca43a42483b31443d4be9b756b34927176f91a391c71d0b774c73a299", - "0x2b02d8acf63bc8f528706ed4d5463a58e9428d5b71d577fd5daa13ba48ac56cf", - "0x2ff6911f574c0f0498fc6199da129446b40fca35ccbf362bc76534ba71c7ca22", - "0x1ef4b959b11bc87b11e4a5f84b4d757c6bdcfad874acec9a6c9eee23dc4bbe1b", - "0x68e2df9f512be9f64b7e3a2dee462149dac50780073d78b569a20256aea5f751", - "0xd1a3682e12b90ae1eab27fc5dc2aef3b8e4dbb813925e9a91e58d6c9832767b6", - "0x75778ccc102d98c5e0b4b83f7d4ef7fe8bc7263cc3317723001cb0b314d1e9e8", - "0xc7f44e2cead108dc167f0036ac8a278d3549cc3dd5cc067d074ccad9b1d9f8d4", - 
"0x4cba0223c5df2796b0ee9fbc084d69f10e6aedda8f0cf86171bebb156ede676c", - "0x628deda825661f586a5713e43c806fdd55e1a53fbe90a4ddb5f3786570740954", - "0xfc82a253bc7e0ac96252b238fbb411a54e0adf78d089f804a7fc83a4959b401e", - "0x72a6491f5daae0ceb85b61a5ed69009dd2a167c64cb35cabf38b846e27268e9d", - "0xee139a913d4fcf25ba54bb36fc8051b91f2ec73ba820cc193c46fb2f7c37a106", - "0x7f75021f2b1d0c78859478e27f6f40646b5776c060f1a5f6f0944c840a0121f8", - "0x5b60a1b78feca1d2602ac8110d263ad6b3663cbf49e6bdc1077b4b80af2feb6f", - "0xd61f15d80b1e88469b6a76ed6a6a2b94143b6acc3bd717357264818f9f2d5c6d", - "0xea85da1780b3879a4d81b685ba40b91c060866abd5080b30fbbb41730724a7dd", - "0xb9b9da9461e83153f3ae0af59fbd61febfde39eb6ac72db5ed014797495d4c26", - "0xf737762fe8665df8475ff341b3762aaeb90e52974fe5612f5efd0fc1c409d7f8", - "0xaaa25d934a1d5aa6b2a1863704d7a7f04794ed210883582c1f798be5ca046cf7", - "0x932f46d0b6444145221b647f9d3801b6cb8b1450a1a531a959abdaacf2b5656b", - "0xf4a8b0e52f843ad27635c4f5a467fbf98ba06ba9a2b93a8a97170b5c41bf4958", - "0x196ed380785ee2925307ec904161dc02a4596a55499e5b0a3897f95485b3e74a", - "0x772e829a405219e4f8cd93a1ef15c250be85c828c1e29ef6b3f7b46958a85b44", - "0xd66cfc9af9941515d788f9f5e3b56fddb92464173ddb67b83bf265e7ea502170", - "0xf5b040bfc246425278e2423b1953d8ad518de911cf04d16c67d8580a09f90e62", - "0xd2d18b2ae8a53dde14b4000e5e7e414505825f50401a3797dd8820cf510dc448", - "0xc01dcc064e644266739cd0ec7edf92fc2ef8e92e0beedf0e8aa30efcff1644fe", - "0x24720d325913ba137daf031924ad3bfaa1c8c00a53a2d048fe5667aef45efce3", - "0x70a24e1c89b3ea78d76ef458d498dcb5b8561d484853b2a8b2adcd61869857df", - "0x0ff3313997f14e1b1dcd80f1d62c58aaefb19efd7c0ea15dde21aa4e2a516e80", - "0x960c1f50062a4df851638f42c0259b6e0a0217300884f13a3c5c8d94adb34f21", - "0xb71ca7cc8578149da556131268f4625b51620dfc3a6e9fbd47f5df03afbd410e", - "0xa1a3eeec0addec7b9e15f416a07608a1b5d94f0b42d5c203b8ced03a07484f5b", - "0xa4bb8b059aa122ca4652115b83b17af80cfbea0d3e1e8979a396a667f94e85f3", - "0x31c4d2f252167fe2a4d41944224a80b2f1afaf76f8dd6a3d52d71751849e44bb", - "0x79642dd6a255f96c9efe569304d58c327a441448db0431aa81fe072d0d359b52", - "0x42a4b504714aba1b67defe9458fff0c8cb1f216dcab28263cef67a65693b2036", - "0xe3d2f6a9d882d0f026ef316940dfcbf131342060ea28944475fe1f56392c9ad2", - "0x986af9aeff236394a0afa83823e643e76f7624e9bfd47d5468f9b83758a86caa", - "0xafe2de6ede50ee351d63ed38d1f2ae5203174c731f41bbed95db467461ad5492", - "0x9ad40f0785fe1c8a5e4c3342b3c91987cd47a862ece6573674b52fa0456f697a", - "0xde4cde6d0fc6def3a89b79da0e01accdbec049f1c9471d13a5d59286bd679af1", - "0xecd0d1f70116d6b3ae21c57fb06ad90eed33d040e2c5c3d12714b3be934fa5ce", - "0x3c53c5bf2d1b1d4038e1f0e8a2e6d12e0d4613d5cd12562578b6909921224c10", - "0x36087382b37e9e306642cc6e867e0fb2971b6b2b28b6caf2f9c96b790e8db70a", - "0xa957496d6a4218a19998f90282d05bd93e6baabf55e55e8a5f74a933a4dec045", - "0x077d6f094e8467a21f02c67753565ec5755156015d4e86f1f82a22f9cf21c869", - "0x12dd3b1f29e1462ca392c12388a77c58044151154cf86f23873f92a99b6bb762", - "0x7fdbcdedcc02ecf16657792bd8ef4fa4adeee497f30207d4cc060eb0d528b26b", - "0x245554b12bf8edf9e9732d6e2fa50958376e355cb695515c94676e64c6e97009", - "0xccd3b1841b517f7853e35f85471710777e437a8665e352a0b61c7d7083c3babc", - "0xd970545a326dcd92e31310d1fdce3703dff8ef7c0f3411dfa74fab8b4b0763ac", - "0xd24163068918e2783f9e79c8f2dcc1c5ebac7796ce63070c364837aac91ee239", - "0x256a330055357e20691e53ca5be846507c2f02cfde09cafb5809106f0af9180e", - "0xfa446a5d1876c2051811af2a341a35dbcd3f7f8e2e4f816f501139d27dd7cd82", - "0xbafbc7a8f871d95736a41e5721605d37e7532e41eb1426897e33a72ed2f0bf1d", - 
"0x8055af9a105b6cf17cfeb3f5320e7dab1a6480500ff03a16c437dfec0724c290", - "0x1de6ee3e989497c1cc7ca1d16b7b01b2f336524aa2f75a823eaa1716c3a1a294", - "0x12bb9508d646dda515745d104199f71276d188b3e164083ad27dfdcdc68e290b", - "0x7ea9f9939ad4f3b44fe7b780e0587da4417c34459b2996b3a449bb5b3ff8c8cb", - "0xa88d2f8f35bc669aa6480ce82571df65fea366834670b4084910c7bb6a735dde", - "0x9486e045adb387a550b3c7a603c30e07ed8625d322d1158f4c424d30befe4a65", - "0xb283a70ba539fe1945be096cb90edb993fac77e8bf53616bde35cdcaa04ab732", - "0xab39a81558e9309831a2caf03e9df22e8233e20b1769f16e613debcdb8e2610f", - "0x1fc12540473fbbad97c08770c41f517ce19dc7106aa2be2e9b77867046627509", - "0xec33dbec9d655c4c581e07d1c40a587cf3217bc8168a81521b2d0021bd0ec133", - "0xc8699e3b41846bc291209bbb9c06f565f66c6ccecbf03ebc27593e798c21fe94", - "0x240d7eae209c19d453b666c669190db22db06279386aa30710b6edb885f6df94", - "0xb181c07071a750fc7638dd67e868dddbeeee8e8e0dcbc862539ee2084674a89e", - "0xb8792555c891b3cbfddda308749122a105938a80909c2013637289e115429625", - "0xfe3e9e5b4a5271d19a569fee6faee31814e55f156ba843b6e8f8dc439d60e67a", - "0x912e9ba3b996717f89d58f1e64243d9cca133614394e6ae776e2936cf1a9a859", - "0xa0671c91a21fdfd50e877afa9fe3974aa3913855a2a478ae2c242bcdb71c73d7", - "0x5b55d171b346db9ba27b67105b2b4800ca5ba06931ed6bd1bafb89d31e6472e6", - "0x68438458f1af7bd0103ef33f8bc5853fa857b8c1f84b843882d8c328c595940d", - "0x21fe319fe8c08c1d00f977d33d4a6f18aecaa1fc7855b157b653d2d3cbd8357f", - "0x23cce560bc31f68e699ece60f21dd7951c53c292b3f5522b9683eb2b3c85fc53", - "0x917fa32d172c352e5a77ac079df84401cdd960110c93aa9df51046d1525a9b49", - "0x3fc397180b65585305b88fe500f2ec17bc4dccb2ec254dbb72ffb40979f14641", - "0xf35fb569e7a78a1443b673251ac70384abea7f92432953ca9c0f31c356be9bd9", - "0x7955afa3cd34deb909cd031415e1079f44b76f3d6b0aaf772088445aaff77d08", - "0x45c0ca029356bf6ecfc845065054c06024977786b6fbfaea74b773d9b26f0e6c", - "0xe5c1dac2a6181f7c46ab77f2e99a719504cb1f3e3c89d720428d019cb142c156", - "0x677b0e575afcccf9ddefc9470e96a6cfff155e626600b660247b7121b17b030a", - "0xbeed763e9a38277efe57b834a946d05964844b1f51dba2c92a5f3b8d0b7c67d0", - "0x962b17ed1a9343d8ebfae3873162eef13734985f528ca06c90b0c1e68adfdd89", - ], - lamport_1: vec![ - "0xb3a3a79f061862f46825c00fec4005fb8c8c3462a1eb0416d0ebe9028436d3a9", - "0x6692676ce3b07f4c5ad4c67dc2cf1dfa784043a0e95dd6965e59dc00b9eaff2d", - "0xbf7b849feb312db230e6e2383681b9e35c064e2d037cbc3c9cc9cd49220e80c9", - "0xa54e391dd3b717ea818f5954eec17b4a393a12830e28fabd62cbcecf509c17dc", - "0x8d26d800ac3d4453c211ef35e9e5bb23d3b9ede74f26c1c417d6549c3110314d", - "0xbb8153e24a52398d92480553236850974576876c7da561651bc551498f184d10", - "0x0d30e0e203dc4197f01f0c1aba409321fbf94ec7216e47ab89a66fb45e295eff", - "0x01dc81417e36e527776bf37a3f9d74a4cf01a7fb8e1f407f6bd525743865791d", - "0xa6318e8a57bec438245a6834f44eb9b7fb77def1554d137ea12320fc572f42c9", - "0xd25db9df4575b595130b6159a2e8040d3879c1d877743d960bf9aa88363fbf9f", - "0x61bb8baeb2b92a4f47bb2c8569a1c68df31b3469e634d5e74221bc7065f07a96", - "0xb18962aee4db140c237c24fec7fd073b400b2e56b0d503f8bc74a9114bf183bf", - "0x205473cc0cdab4c8d0c6aeceda9262c225b9db2b7033babfe48b7e919751a2c6", - "0xc5aa7df7552e5bb17a08497b82d8b119f93463ccb67282960aee306e0787f228", - "0x36da99e7d38ce6d7eab90ea109ba26615ad75233f65b3ae5056fba79c0c6682a", - "0xd68b71bba6266b68aec0df39b7c2311e54d46a3eab35f07a9fe60d70f52eec58", - "0xbbe56f1274ada484277add5cb8c90ef687d0b69a4c95da29e32730d90a2d059f", - "0x0982d1d1c15a560339d9151dae5c05e995647624261022bbedce5dce8a220a31", - "0x8ef54ad546d2c6144fc26e1e2ef92919c676d7a76cfdfb5c6a64f09a54e82e71", - 
"0x1e3ac0133eef9cdbeb590f14685ce86180d02b0eea3ef600fd515c38992b1f26", - "0x642e6b1c4bec3d4ba0ff2f15fbd69dcb57e4ba8785582e1bc2b452f0c139b590", - "0xca713c8cf4afa9c5d0c2db4fc684a8a233b3b01c219b577f0a053548bedf8201", - "0xd0569ba4e1f6c02c69018b9877d6a409659cb5e0aa086df107c2cc57aaba62da", - "0x4ebe68755e14b74973e7f0fa374b87cee9c370439318f5783c734f00bb13e4b5", - "0x788b5292dc5295ae4d0ea0be345034af97a61eec206fda885bbc0f049678c574", - "0x0ebd88acd4ae195d1d3982038ced5af1b6f32a07349cf7fffbff3ce410c10df2", - "0xc7faf0a49234d149036c151381d38427b74bae9bd1601fc71663e603bc15a690", - "0xc5247bf09ebe9fa4e1013240a1f88c703f25a1437196c71ee02ca3033a61f946", - "0x719f8c68113d9f9118b4281e1f42c16060def3e3eeef15f0a10620e886dc988f", - "0x28da4f8d9051a8b4d6158503402bdb6c49ba2fb1174344f97b569c8f640504e6", - "0x96f6773576af69f7888b40b0a15bc18cc9ec8ca5e1bb88a5de58795c6ddf678e", - "0x8d80d188a4e7b85607deccf654a58616b6607a0299dd8c3f1165c453fd33d2e4", - "0x9c08dcc4f914486d33aa24d10b89fd0aabcc635aa2f1715dfb1a18bf4e66692a", - "0x0ff7045b5f6584cc22c140f064dec0692762aa7b9dfa1defc7535e9a76a83e35", - "0x8e2dae66fa93857b39929b8fc531a230a7cfdd2c449f9f52675ab5b5176461d5", - "0xf449017c5d429f9a671d9cc6983aafd0c70dd39b26a142a1d7f0773de091ac41", - "0xed3d4cab2d44fec0d5125a97b3e365a77620db671ecdda1b3c429048e2ebdae6", - "0x836a332a84ee2f4f5bf24697df79ed4680b4f3a9d87c50665f46edaeed309144", - "0x7a79278754a4788e5c1cf3b9145edb55a2ba0428ac1c867912b5406bb7c4ce96", - "0x51e6e2ba81958328b38fd0f052208178cec82a9c9abd403311234e93aff7fa70", - "0x217ec3ec7021599e4f34410d2c14a8552fff0bc8f6894ebb52ec79bf6ec80dc9", - "0x8a95bf197d8e359edabab1a77f5a6d04851263352aa46830f287d4e0564f0be0", - "0x60d0cbfb87340b7c92831872b48997ce715da91c576296df215070c6c20046d4", - "0x1739fbca476c540d081b3f699a97387b68af5d14be52a0768d5185bc9b26961b", - "0xac277974f945a02d89a0f8275e02de9353e960e319879a4ef137676b537a7240", - "0x959b7640821904ba10efe8561e442fbdf137ccb030aee7472d10095223e320ba", - "0xdba61c8785a64cb332342ab0510126c92a7d61f6a8178c5860d018d3dad571c6", - "0xc191fb6a92eb1f1fb9e7eb2bdecd7ec3b2380dd79c3198b3620ea00968f2bd74", - "0x16ef4e88e182dfc03e17dc9efaa4a9fbf4ff8cb143304a4a7a9c75d306729832", - "0x39080e4124ca577ff2718dfbcb3415a4220c5a7a4108729e0d87bd05adda5970", - "0xa29a740eef233956baff06e5b11c90ed7500d7947bada6da1c6b5d9336fc37b6", - "0x7fda7050e6be2675251d35376bacc895813620d245397ab57812391d503716ee", - "0x401e0bf36af9992deb87efb6a64aaf0a4bc9f5ad7b9241456b3d5cd650418337", - "0x814e70c57410e62593ebc351fdeb91522fe011db310fcf07e54ac3f6fefe6be5", - "0x03c1e52ecbef0d79a4682af142f012dc6b037a51f972a284fc7973b1b2c66dcf", - "0x57b22fb091447c279f8d47bdcc6a801a946ce78339e8cd2665423dfcdd58c671", - "0x53aeb39ab6d7d4375dc4880985233cba6a1be144289e13cf0bd04c203257d51b", - "0x795e5d1af4becbca66c8f1a2e751dcc8e15d7055b6fc09d0e053fa026f16f48f", - "0x1cd02dcd183103796f7961add835a7ad0ba636842f412643967c58fe9545bee4", - "0x55fc1550be9abf92cacb630acf58bad11bf734114ebe502978a261cc38a4dd70", - "0x6a044e0ea5c361d3fb2ca1ba795301e7eb63db4e8a0314638f42e358ea9cfc3e", - "0x57d9f15d4db199cbcb7cbd6524c52a1b799d52b0277b5a270d2985fcee1e2acb", - "0x66c78c412e586bd01febc3e4d909cc278134e74d51d6f60e0a55b35df6fb5b09", - "0x1076799e15a49d6b15c2486032f5e0b50f43c11bc076c401e0779d224e33f6fc", - "0x5f70e3a2714d8b4483cf3155865ba792197e957f5b3a6234e4c408bf2e55119d", - "0x9b105b0f89a05eb1ff7caed74cf9573dc55ac8bc4881529487b3700f5842de16", - "0x1753571b3cfadca4277c59aee89f607d1b1e3a6aa515d9051bafb2f0d8ce0daa", - "0x4014fff940b0950706926a19906a370ccbd652836dab678c82c539c00989201a", - 
"0x0423fa59ee58035a0beb9653841036101b2d5903ddeabddabf697dbc6f168e61", - "0x78f6781673d991f9138aa1f5142214232d6e3d6986acb6cc7fb000e1a055f425", - "0x21b8a1f6733b5762499bf2de90c9ef06af1c6c8b3ddb3a04cce949caad723197", - "0x83847957e909153312b5bd9a1a37db0bd6c72a417024a69df3e18512973a18b4", - "0x948addf423afd0c813647cfe32725bc55773167d5065539e6a3b50e6ebbdab38", - "0x0b0485d1bec07504a2e5e3a89addd6f25d497cd37a0c04bc38355f8bdb01cd48", - "0x31be8bda5143d39ea2655e9eca6a294791ca7854a829904d8574bedc5057ddc4", - "0x16a0d2d657fadce0d81264320e42e504f4d39b931dff9888f861f3cc78753f99", - "0xb43786061420c5231bf1ff638cb210f89bf4cd2d3e8bafbf34f497c9a298a13b", - "0x1f5986cbd7107d2a3cbc1826ec6908d976addbf9ae78f647c1d159cd5397e1bd", - "0xa883ccdbfd91fad436be7a4e2e74b7796c0aadfe03b7eea036d492eaf74a1a6f", - "0x5bc9eb77bbbf589db48bca436360d5fc1d74b9195237f11946349951f2a9f7f6", - "0xb6bc86de74a887a5dceb012d58c62399897141cbcc51bad9cb882f53991f499c", - "0xa6c3260e7c2dd13f26cf22bf4cd667688142ff7a3511ec895bc8f92ebfa694b6", - "0xb97da27e17d26608ef3607d83634d6e55736af10cc7e4744940a3e35d926c2ad", - "0x9df44067c2dc947c2f8e07ecc90ba54db11eac891569061a8a8821f8f9773694", - "0x865cc98e373800825e2b5ead6c21ac9112ff25a0dc2ab0ed61b16dc30a4a7cd7", - "0xe06a5b157570c5e010a52f332cacd4e131b7aed9555a5f4b5a1c9c4606caca75", - "0x824eccb5cf079b5943c4d17771d7f77555a964a106245607cedac33b7a14922e", - "0xe86f721d7a3b52524057862547fc72de58d88728868f395887057153bccaa566", - "0x3344e76d79f019459188344fb1744c93565c7a35799621d7f4505f5b6119ac82", - "0x401b3589bdd1b0407854565329e3f22251657912e27e1fb2d978bf41c435c3ac", - "0xb12fd0b2567eb14a562e710a6e46eef5e280187bf1411f5573bb86ecbe05e328", - "0xe6dc27bab027cbd9fbb5d80054a3f25b576bd0b4902527a0fc6d0de0e45a3f9f", - "0x1de222f0e731001c60518fc8d2be7d7a48cc84e0570f03516c70975fdf7dc882", - "0xb8ff6563e719fc182e15bbe678cf045696711244aacc7ce4833c72d2d108b1b9", - "0x53e28ac2df219bcbbc9b90272e623d3f6ca3221e57113023064426eff0e2f4f2", - "0x8a4e0776f03819e1f35b3325f20f793d026ccae9a769d6e0f987466e00bd1ce7", - "0x2f65f20089a31f79c2c0ce668991f4440b576ecf05776c1f6abea5e9b14b570f", - "0x448e124079a48f62d0d79b96d5ed1ffb86610561b10d5c4236280b01f8f1f406", - "0x419b34eca1440c847f7bff9e948c9913075d8e13c270e67f64380a3f31de9bb2", - "0x2f6e4fee667acaa81ba8e51172b8329ed936d57e9756fb31f635632dbc2709b7", - "0xdd5afc79e8540fcee6a896c43887bd59c9de5d61b3d1b86539faeb41a14b251d", - "0xc707bed926a46cc451a6b05e642b6098368dbdbf14528c4c28733d5d005af516", - "0x153e850b606eb8a05eacecc04db4b560d007305e664bbfe01595cb69d26b8597", - "0x1b91cc07570c812bb329d025e85ef520132981337d7ffc3d84003f81a90bf7a7", - "0x4ca32e77a12951a95356ca348639ebc451170280d979e91b13316844f65ed42a", - "0xe49ea1998e360bd68771bd69c3cd4cf406b41ccca4386378bec66ea210c40084", - "0x01aaffbde1a672d253e0e317603c2dc1d0f752100d9e853f840bca96e57f314c", - "0x170d0befcbbaafb317c8684213a4989368332f66e889824cc4becf148f808146", - "0x56f973308edf5732a60aa3e7899ae1162c7a2c7b528c3315237e20f9125b34e0", - "0x66c54fd5f6d480cab0640e9f3ec1a4eafbafc0501528f57bb0d5c78fd03068ef", - "0xaca6c83f665c64d76fbc4858da9f264ead3b6ecdc3d7437bb800ef7240abffb9", - "0xf1d4e02e7c85a92d634d16b12dc99e1d6ec9eae3d8dfbca77e7c609e226d0ce7", - "0x094352545250e843ced1d3c6c7957e78c7d8ff80c470974778930adbe9a4ed1a", - "0x76efa93070d78b73e12eb1efa7f36d49e7944ddcc3a043b916466ee83dca52ce", - "0x1772a2970588ddb584eadf02178cdb52a98ab6ea8a4036d29e59f179d7ba0543", - "0xe4bbf2d97d65331ac9f680f864208a9074d1def3c2433458c808427e0d1d3167", - "0x8ccfb5252b22c77ea631e03d491ea76eb9b74bc02072c3749f3e9d63323b44df", - 
"0x9e212a9bdf4e7ac0730a0cecd0f6cc49afc7e3eca7a15d0f5f5a68f72e45363b", - "0x52e548ea6445aae3f75509782a7ab1f4f02c2a85cdd0dc928370f8c76ae8802d", - "0xb62e7d73bf76c07e1a6f822a8544b78c96a6ba4f5c9b792546d94b56ca12c8b9", - "0x595cb0e985bae9c59af151bc748a50923921a195bbec226a02157f3b2e066f5b", - "0x1c7aa6b36f402cec990bafefbdbb845fc6c185c7e08b6114a71dd388fe236d32", - "0x01ee2ff1a1e88858934a420258e9478585b059c587024e5ec0a77944821f798c", - "0x420a963a139637bffa43cb007360b9f7d305ee46b6a694b0db91db09618fc2e5", - "0x5a8e2ad20f8da35f7c885e9af93e50009929357f1f4b38a6c3073e8f58fae49e", - "0x52a405fdd84c9dd01d1da5e9d1c4ba95cb261b53bf714c651767ffa2f9e9ad81", - "0xa1a334c901a6d5adc8bac20b7df025e906f7c4cfc0996bfe2c62144691c21990", - "0xb789a00252f0b34bded3cb14ae969effcf3eb29d97b05a578c3be8a9e479c213", - "0xb9dbf7e9ddb638a515da245845bea53d07becdf3f8d1ec17de11d495624c8eab", - "0xaf566b41f5ed0c026fa8bc709533d3fa7a5c5d69b03c39971f32e14ab523fa3d", - "0x8121e0b2d9b106bb2aefd364fd6a450d88b88ee1f5e4aad7c0fcd8508653a112", - "0x8581c1be74279216b93e0a0d7272f4d6385f6f68be3eef3758d5f68b62ee7b6c", - "0x85386f009278f9a1f828404fa1bbfa02dfb9d896554f0a52678eb6ec8feadc55", - "0xf483ed167d92a0035ac65a1cfdb7906e4952f74ae3a1d86324d21f241daffcb7", - "0x3872485e2a520a350884accd990a1860e789dd0d0664ad14f50186a92c7be7be", - "0xc6c1a3301933019105f5650cabcb22bfbf221965ffcfc1329315b24ea3d77fd4", - "0xcee901330a60d212a867805ce0c28f53c6cc718f52156c9e74390d18f5df6280", - "0xa67ae793b1cd1a828a607bae418755c84dbb61adf00833d4c61a94665363284f", - "0x80d8159873b517aa6815ccd7c8ed7cfb74f84298d703a6c5a2f9d7d4d984ddde", - "0x1de5a8b915f2d9b45c97a8e134871e2effb576d05f4922b577ade8e3cd747a79", - "0x6ea17c5ece9b97dddb8b2101b923941a91e4b35e33d536ab4ff15b647579e1f5", - "0xcb78631e09bc1d79908ce1d3e0b6768c54b272a1a5f8b3b52485f98d6bba9245", - "0xd7c38f9d3ffdc626fe996218c008f5c69498a8a899c7fd1d63fbb03e1d2a073f", - "0x72cdef54267088d466244a92e4e6f10742ae5e6f7f6a615eef0da049a82068f9", - "0x60b3c490ba8c502656f9c0ed37c47283e74fe1bc7f0e9f651cbc76552a0d88eb", - "0x56bd0c66987a6f3761d677097be9440ea192c1cb0f5ec38f42789abe347e0ea9", - "0x3caac3e480f62320028f6f938ee147b4c78e88a183c464a0c9fb0df937ae30c1", - "0x7a4d2f11bddda1281aba5a160df4b814d23aef07669affe421a861fac2b4ec0f", - "0x9bb4d11299922dc309a4523959298a666ebe4063a9ee3bad1b93988ed59fb933", - "0x957323fffbaf8f938354662452115ae5acba1290f0d3f7b2a671f0359c109292", - "0x877624e31497d32e83559e67057c7a605fb888ed8e31ba68e89e02220eac7096", - "0x8456546ae97470ff6ea98daf8ae632e59b309bd3ff8e9211f7d21728620ed1e5", - "0xbacb26f574a00f466ce354e846718ffe3f3a64897d14d5ffb01afcf22f95e72b", - "0x0228743a6e543004c6617bf2c9a7eba1f92ebd0072fb0383cb2700c3aed38ba0", - "0x04f093f0f93c594549436860058371fb44e8daf78d6e5f563ba63a46b61ddbf0", - "0x0ba17c1ec93429ceaff08eb81195c9844821b64f2b5363926c2a6662f83fb930", - "0xd71605d8446878c677f146837090797e888416cfc9dc4e79ab11776cc6639d3f", - "0x33dde958dc5a6796138c453224d4d6e7f2ae740cceef3b52a8b669eb4b9691a1", - "0x3c39838295d1495e90e61ce59f6fcc693b31c292d02d31759719df6fe3214559", - "0x8aecc66f38644296cf0e6693863d57a243a31a4929130e22ab44cb6157b1af41", - "0xdf7153a7eab9521f2b37124067166c72de8f342249ac0e0f5350bd32f1251053", - "0xa498840b58897cf3bed3981b94c86d85536dfebbc437d276031ebd9352e171eb", - "0xb1df15a081042ab665458223a0449ffc71a10f85f3d977beb20380958fd92262", - "0x15d3bdbdee2a61b01d7a6b72a5482f6714358eedf4bece7bb8458e100caf8fba", - "0x0c96b7a0ea09c3ef758424ffb93654ce1520571e32e1f83aecbeded2388c3a7a", - "0xb4a3a8023266d141ecd7c8a7ca5282a825410b263bc11c7d6cab0587c9b5446e", - 
"0xf38f535969d9592416d8329932b3a571c6eacf1763de10fb7b309d3078b9b8d4", - "0x5a1e7b1c3b3943158341ce6d7f9f74ae481975250d89ae4d69b2fcd4c092eb4e", - "0xdad31e707d352f6cca78840f402f2ac9292094b51f55048abf0d2badfeff5463", - "0x097e290170068e014ceda3dd47b28ede57ff7f916940294a13c9d4aa2dc98aad", - "0x22e2dcedb6bb7f8ace1e43facaa502daa7513e523be98daf82163d2a76a1e0be", - "0x7ef2b211ab710137e3e8c78b72744bf9de81c2adde007aef6e9ce92a05e7a2c5", - "0x49b427805fc5186f31fdd1df9d4c3f51962ab74e15229e813072ec481c18c717", - "0xe60f6caa09fa803d97613d58762e4ff7f22f47d5c30b9d0116cdc6a357de4464", - "0xab3507b37ee92f026c72cc1559331630bc1c7335b374e4418d0d02687df1a9dd", - "0x50825ae74319c9adebc8909ed7fc461702db8230c59975e8add09ad5e7a647ab", - "0x0ee8e9c1d8a527a42fb8c2c8e9e51faf727cffc23ee22b5a95828f2790e87a29", - "0x675c21c290ddb40bec0302f36fbcd2d1832717a4bc05d113c6118a62bc8f9aca", - "0x580bafab24f673317b533148d7226d485e211eaa3d6e2be2529a83ca842b58a7", - "0x540e474776cae597af24c147dc1ae0f70a6233e98cf5c3ce31f38b830b75c99a", - "0x36eaf9f286e0f356eaaf8d81f71cc52c81d9ebc838c3b4859009f8567a224d16", - "0x0e2cbbb40954be047d02b1450a3dbd2350506448425dc25fd5faf3a66ee8f5c4", - "0x7eb0390cfe4c4eb120bbe693e87adc8ecab51d5fd8ce8f911c8ff07fad8cbe20", - "0xbf77589f5c2ebb465b8d7936f6260a18a243f59bd87390ee22cf579f6f020285", - "0x695b96bb28693f6928777591ef64146466d27521280a295936a52ec60707c565", - "0x22a0d018cbd4274caa8b9e7fb132e0a7ed787874046ca683a7d81d1c7c8b8f15", - "0x84092b122bb35e5ad85407b4b55f33707b86e0238c7970a8583f3c44308ed1d9", - "0xea346067ca67255235f9cae949f06e4b6c93846a7abc7c8c8cd786e9c4b3e4bc", - "0xa6df0716b125dc696b5d0e520cb49c1c089397c754efc146792e95bc58cc7159", - "0x7377b5d3953029fc597fb10bb6479ee34133d38f08783fbb61c7d070f34ea66f", - "0x7d79b00ffb976a10cd24476a394c8ed22f93837c51a58a3ddc7418153a5a8ea1", - "0x01e55182e80dff26cc3e06bb736b4a63745bde8ae28c604fa7fb97d99de5f416", - "0x062a2d5a207f8d540764d09648afecbf5033b13aec239f722b9033a762acf18b", - "0x48be60a3221d98b4d62f0b89d3bef74c70878dd65c6f79b34c2c36d0ddaa1da0", - "0x41e11f33543cf045c1a99419379ea31523d153bdf664549286b16207b9648c85", - "0xeef4d30b4700813414763a199e7cc6ab0faec65ef8b514faa01c6aa520c76334", - "0xea7cfe990422663417715e7859fc935ca47f47c943a1254044b6bc5934c94bc8", - "0xbbd3c834e5403b98a0ca346c915a23310f3d58880786628bc6cfbe05ba29c3c5", - "0xe216379f385bc9995ae0f37f1409a78d475c56b8aeb4ee434326724ec20124f7", - "0xdd328a1eee19d09b6fef06e252f8ad0ae328fbf900ef745f5950896803a3899d", - "0xa16fde34b0d743919feb0781eca0c525a499d279119af823cb3a8817000335db", - "0x7a28d108c59b83b12c85cd9aabc1d1d994a9a0329ae7b64a32aadcd61ebe50e3", - "0xb28bc82fceae74312eb837a805f0a8a01c0f669b99bb03fde31c4d58bedff89b", - "0x1b0d8f37d349781e846900b51a90c828aa384afe9b8ee1f88aeb8dba4b3168f2", - "0xbfd0301ff964c286c3331a30e09e0916da6f484e9c9596dbf1cae3cc902dbf9e", - "0xbb8254cb9ef6b485b8fb6caeafe45f920affc30f6b9d671e9a454530536f4fef", - "0xcad2317cf63dfa7147ded5c7e15f5f72e78f42d635e638f1ece6bc722ca3638b", - "0xb6c6e856fd45117f54775142f2b38f31114539d8943bcbcf823f6c7650c001e4", - "0x869f1baa35684c8f67a5bc99b294187852e6c85243a2f36481d0891d8b043020", - "0x14c6ccf145ee40ff56e3810058d2fba9a943ffc7c7087c48a08b2451c13dc788", - "0x263c1bcb712890f155b7e256cefa4abf92fe4380f3ffc11c627d5e4e30864d18", - "0x69f4eaf655e31ad7f7a725cd415ce7e45dd4a8396ac416950d42ed33155c3487", - "0x47e8eec2c5e33c9a54fe1f9b09e7744b614fb16531c36b862aa899424be13b05", - "0x5c985de270e62c44f0b49157882e8e83641b906ce47959e337fe8423e125a2eb", - "0x4e13b11e13202439bb5de5eea3bb75d2d7bf90f91411163ade06161a9cf424db", - 
"0x583a8fa159bb74fa175d72f4e1705e9a3b8ffe26ec5ad6e720444b99288f1213", - "0x903d2a746a98dfe2ee2632606d57a9b0fa6d8ccd895bb18c2245fd91f8a43676", - "0xa35a51330316012d81ec7249e3f2b0c9d7fcbb99dd98c62fe880d0a152587f51", - "0x33818a7beb91730c7b359b5e23f68a27b429967ea646d1ea99c314353f644218", - "0x183650af1e0b67f0e7acb59f8c72cc0e60acc13896184db2a3e4613f65b70a8b", - "0x857ff2974bef960e520937481c2047938a718cea0b709282ed4c2b0dbe2ef8fa", - "0x95a367ecb9a401e98a4f66f964fb0ece783da86536410a2082c5dbb3fc865799", - "0x56c606a736ac8268aedadd330d2681e7c7919af0fe855f6c1c3d5c837aa92338", - "0x5c97f7abf30c6d0d4c23e762c026b94a6052a444df4ed942e91975419f68a3a4", - "0x0b571de27d2022158a3128ae44d23a8136e7dd2dee74421aa4d6ed15ee1090a0", - "0xa17f6bc934a2f3c33cea594fee8c96c1290feec934316ebbbd9efab4937bf9f9", - "0x9ff57d70f27aad7281841e76435285fd27f10dad256b3f5cabde4ddc51b70eff", - "0xafa3071a847215b3ccdf51954aa7cb3dd2e6e2a39800042fc42009da705508b2", - "0x5e3bea33e4ac6f7c50a077d19571b1796e403549b1ce7b15e09905a0cc5a4acf", - "0x0dc7ba994e632ab95f3ecb7848312798810cf761d1c776181882d17fd6dda075", - "0xb4f7158679dad9f7370a2f64fbe617a40092849d17453b4f50a93ca8c6885844", - "0x094564b00f53c6f27c121fd8adfe1685b258b259e585a67b57c85efb804c57b2", - "0x9cd21a4249ba3fccffad550cdb8409dc12d8b74a7192874b6bafe2363886f318", - "0xbb22e0dad55cb315c564c038686419d40ef7f13af2143a28455bf445f6e10393", - "0x2a71d5e00821178c2cd39e7501e07da5cca6680eb7cdbe996f52dccafadb3735", - "0x9619406093b121e044a5b403bb1713ae160aeb52ad441f82dc6c63e4b323b969", - "0x3b8bd1d82c6d67ae707e19b889f1cb1f7bba912f12ae4284298f3a70c3644c79", - "0xd7a70c50d47d48785b299dbea01bf03ef18b8495de3c35cb265bc8f3295c4e15", - "0x8802ecce8dd6b6190af8ac79aafda3479c29f548d65e5798c0ca51a529b19108", - "0x4b630e1df52ec5fd650f4a4e76b3eeddda39e1e9eab996f6d3f02eefdf690990", - "0x0bfbff60fcf7f411d469f7f6f0a58ca305fd84eb529ee3ac73c00174793d723e", - "0x535f78b5f3a99a1c498e2c19dc1acb0fbbaba8972ba1d7d66936c28ab3667ebe", - "0x06ba92d8129db98fec1b75f9489a394022854f22f2e9b9450b187a6fc0d94a86", - "0xb7ae275ba10f80fb618a2cf949d5ad2e3ae24eb2eb37dcf1ec8c8b148d3ba27f", - "0xb275579bcf2584d9794dd3fc7f999902b13d33a9095e1980d506678e9c263de1", - "0x843ccd52a81e33d03ad2702b4ef68f07ca0419d4495df848bff16d4965689e48", - "0xde8b779ca7250f0eb867d5abdffd1d28c72a5a884d794383fc93ca40e5bf6276", - "0x6b789a2befccb8788941c9b006e496b7f1b03dbb8e530ba339db0247a78a2850", - "0xfccd4dca80bc52f9418f26b0528690255e320055327a34b50caf088235d2f660", - "0x18479ebfbe86c1e94cd05c70cb6cace6443bd9fdac7e01e9c9535a9e85141f2f", - "0x5350c8f3296441db954a261238c88a3a0c51ab418a234d566985f2809e211148", - "0xa5636614135361d03a381ba9f6168e2fd0bd2c1105f9b4e347c414df8759dea3", - "0xe7bb69e600992e6bd41c88a714f50f450153f1a05d0ddb4213a3fc4ba1f48c3f", - "0x17b42e81bae19591e22aa2510be06803bcb5c39946c928c977d78f346d3ca86b", - "0x30a10c07dc9646b7cbb3e1ab722a94d2c53e04c0c19efaaea7dccba1b00f2a20", - ], - compressed_lamport_pk: - "0x672ba456d0257fe01910d3a799c068550e84881c8d441f8f5f833cbd6c1a9356", - child_sk: - "7419543105316279183937430842449358701327973165530407166294956473095303972104" + seed: "0xc55257c360c07c72029aebc1b53c05ed0362ada38ead3e3e9efa3708e53495531f09a6987599d18264c1e1c92f2cf141630c7a3c4ab7c81b2f001698e7463b04", + master_sk: + "6083874454709270928345386274498605044986640685124978867557563392430687146096", + child_index: 0, + lamport_0: vec![ + "0xe345d0ad7be270737de05cf036f688f385d5f99c7fddb054837658bdd2ebd519", + "0x65050bd4db9c77c051f67dcc801bf1cdf33d81131e608505bb3e4523868eb76c", + 
"0xc4f8e8d251fbdaed41bdd9c135b9ed5f83a614f49c38fffad67775a16575645a", + "0x638ad0feace7567255120a4165a687829ca97e0205108b8b73a204fba6a66faa", + "0xb29f95f64d0fcd0f45f265f15ff7209106ab5f5ce6a566eaa5b4a6f733139936", + "0xbcfbdd744c391229f340f02c4f2d092b28fe9f1201d4253b9045838dd341a6bf", + "0x8b9cf3531bfcf0e4acbfd4d7b4ed614fa2be7f81e9f4eaef53bedb509d0b186f", + "0xb32fcc5c4e2a95fb674fa629f3e2e7d85335f6a4eafe7f0e6bb83246a7eced5f", + "0xb4fe80f7ac23065e30c3398623b2761ac443902616e67ce55649aaa685d769ce", + "0xb99354f04cfe5f393193c699b8a93e5e11e6be40ec16f04c739d9b58c1f55bf3", + "0x93963f58802099ededb7843219efc66a097fab997c1501f8c7491991c780f169", + "0x430f3b027dbe9bd6136c0f0524a0848dad67b253a11a0e4301b44074ebf82894", + "0xd635c39b4a40ad8a54d9d49fc8111bd9d11fb65c3b30d8d3eaef7d7556aac805", + "0x1f7253a6474cf0b2c05b02a7e91269137acddedcb548144821f9a90b10eccbab", + "0x6e3bdb270b00e7b6eb8b044dbfae07b51ea7806e0d24218c59a807a7fd099c18", + "0x895488ad2169d8eaae332ce5b0fe1e60ffab70e62e1cb15a2a1487544af0a6e8", + "0x32d45a99d458c90e173a3087ea3661ab62d429b285089e92806a9663ba825342", + "0xc15c52106c3177f5848a173076a20d46600ca65958a1e3c7d45a593aaa9670ed", + "0xd8180c550fbe4cd6d5b676ff75e0728729d8e28a3b521d56152594ac6959d563", + "0x58fe153fac8f4213aaf175e458435e06304548024bcb845844212c774bdffb2a", + "0x10fff610a50f4bee5c978f512efa6ab4fafacb65929606951ba5b93eeb617b5a", + "0x78ac9819799b52eba329f13dd52cf0f6148a80bf04f93341814c4b47bb4aa5ec", + "0xa5c3339caa433fc11e74d1765bec577a13b054381a44b23c2482e750696876a9", + "0x9f716640ab5cdc2a5eb016235cddca2dc41fa4ec5acd7e58af628dade99ec376", + "0x2544364320e67577c4fed8c7c7c839deed93c24076d5343c5b8faca4cc6dc2d8", + "0x62553e782541f822c589796be5d5c83bfc814819100b2be0710b246f5aa7149c", + "0x229fb761c46c04b22ba5479f2696be0f936fded68d54dd74bcd736b8ba512afb", + "0x0af23996a65b98a0ebaf19f3ec0b3ef20177d1bfd6eb958b3bd36e0bdbe04c8c", + "0x6f0954f9deab52fd4c8d2daba69f73a80dea143dd49d9705c98db3d653adf98c", + "0xfa9221dd8823919a95b35196c1faeb59713735827f3e84298c25c83ac700c480", + "0x70c428e3ff9e5e3cda92d6bb85018fb89475c19f526461cca7cda64ebb2ff544", + "0xdcaac3413e22314f0f402f8058a719b62966b3a7429f890d947be952f2e314ba", + "0xb6b383cb5ec25afa701234824491916bfe6b09d28cf88185637e2367f0cf6edc", + "0x7b0d91488fc916aba3e9cb61a5a5645b9def3b02e4884603542f679f602afb8d", + "0xe9c20abca284acfde70c59584b9852b85c52fa7c263bb981389ff8d638429cd7", + "0x838524f798daee6507652877feb9597f5c47e9bb5f9aa52a35fb6fff796813b9", + "0xbe1ca18faf9bf322474fad1b3d9b4f1bc76ae9076e38e6dd2b16e2faf487742b", + "0xbf02d70f1a8519343a16d24bade7f7222912fd57fe4f739f367dfd99d0337e8e", + "0xc979eb67c107ff7ab257d1c0f4871adf327a4f2a69e01c42828ea27407caf058", + "0xf769123d3a3f19eb7b5c3fd4f467a042944a7c5ff8834cebe427f47dbd71460c", + "0xaefc8edc23257e1168a35999fe3832bcbc25053888cc89c38667482d6748095b", + "0x8ff399f364d3a2428b1c92213e4fdc5341e7998007da46a5a2f671929b42aaab", + "0xcf2a3d9e6963b24c5001fbba1e5ae7f45dd6cf520fd24861f745552db86bab48", + "0xb380e272d7f3091e5c887fa2e7c690c67d59f4d95f8376d150e555da8c738559", + "0xc006a749b091d91204dbb64f59059d284899de5986a7f84f8877afd5e0e4c253", + "0x818d8bb9b7da2dafa2ef059f91975e7b6257f5e199d217320de0a576f020de5c", + "0x7aabf4a1297d2e550a2ee20acb44c1033569e51b6ec09d95b22a8d131e30fd32", + "0xdd01c80964a5d682418a616fb10810647c9425d150df643c8ddbbe1bfb2768b7", + "0x1e2354e1d97d1b06eb6cfe9b3e611e8d75b5c57a444523e28a8f72a767eff115", + "0x989c9a649dca0580256113e49ea0dd232bbfd312f68c272fe7c878acc5da7a2c", + "0x14ee1efe512826fff9c028f8c7c86708b841f9dbf47ce4598298b01134ebdc1a", + 
"0x6f861dba4503f85762d9741fa8b652ce441373f0ef2b7ebbd5a794e48cdab51b", + "0xda110c9492ffdb87efe790214b7c9f707655a5ec08e5af19fb2ab2acc428e7dc", + "0x5576aa898f6448d16e40473fcb24c46c609a3fc46a404559faa2d0d34d7d49ce", + "0x9bd9a35675f2857792bc45893655bfdf905ffeaee942d93ad39fbcadd4ca9e11", + "0xfa95e4c37db9303d5213890fd984034089cbc9c6d754741625da0aa59cc45ccf", + "0xfef7d2079713f17b47239b76c8681bf7f800b1bfeac7a53265147579572ddf29", + "0x39aa7c0fecf9a1ed037c685144745fda16da36f6d2004844cf0e2d608ef6ed0e", + "0x5530654d502d6ba30f2b16f49cc5818279697308778fd8d40db8e84938144fb6", + "0xb1beaa36397ba1521d7bf7df16536969d8a716e63510b1b82a715940180eb29f", + "0x21abe342789f7c15a137afa373f686330c0db8c861572935a3cd8dcf9e4e1d45", + "0x27b5a1acda55b4e0658887bd884d3203696fcae0e94f19e31bfe931342b1c257", + "0x58401a02502d7708a812c0c72725f768f5a556480517258069f2d72543cda888", + "0x4b38f291548f51bee7e4cf8cc5c8aa8f4ad3ec2461dba4ccbab70f1c1bfd7feb", + "0x9b39a53fdafaaf1d23378e0aa8ae65d38480de69821de2910873eefc9f508568", + "0x932200566a3563ee9141913d12fd1812cb008cb735724e8610890e101ec10112", + "0x6a72f70b4ec5491f04780b17c4776a335fcc5bff5073d775150e08521dc74c91", + "0x86d5c60e627a4b7d5d075b0ba33e779c45f3f46d22ed51f31360afd140851b67", + "0x5ca2a736bb642abc4104faa781c9aff13d692a400d91dc961aec073889836946", + "0xa14bca5a262ac46ceac21388a763561fc85fb9db343148d786826930f3e510cd", + "0x87be03a87a9211504aa70ec149634ee1b97f7732c96377a3c04e98643dcba915", + "0x8fe283bc19a377823377e9c326374ebb3f29527c12ea77bfb809c18eef8943b0", + "0x8f519078b39a3969f7e4caeca9839d4e0eccc883b89e4a86d0e1731bfc5e33fc", + "0x33d7c28c3d26fdfc015a8c2131920e1392ef0aea55505637b54ea63069c7858e", + "0xe57de7c189fcc9170320c7acedb38798562a48dbc9943b2a8cd3441d58431128", + "0x513dac46017050f82751a07b6c890f14ec43cadf687f7d202d2369e35b1836b4", + "0xfd967d9f805bb7e78f7b7caa7692fdd3d6b5109c41ad239a08ad0a38eeb0ac4c", + "0xf2013e4da9abcc0f03ca505ed94ec097556dbfd659088cd24ec223e02ac43329", + "0xe0dcfac50633f7417f36231df2c81fa1203d358d5f57e896e1ab4b512196556b", + "0xf022848130e73fe556490754ef0ecfcdaaf3b9ff16ae1eda7d38c95c4f159ded", + "0x2147163a3339591ec7831d2412fb2d0588c38da3cd074fa2a4d3e5d21f9f1d2d", + "0x11ee2404731962bf3238dca0d9759e06d1a5851308b4e6321090886ec5190b69", + "0xf7679ecd07143f8ac166b66790fa09aed39352c09c0b4766bbe500b1ebace5a5", + "0xc7a0e95f09076472e101813a95e6ea463c35bd5ee9cfda3e5d5dbccb35888ef0", + "0xde625d3b547eb71bea5325a0191a592fa92a72e4b718a499fdba32e245ddf37e", + "0x7e5bdccd95df216e8c59665073249072cb3c9d0aef6b341afc0ca90456942639", + "0xc27f65fd9f797ede374e06b4ddb6e8aa59c7d6f36301f18b42c48b1889552fe3", + "0x8175730a52ea571677b035f8e2482239dda1cfbff6bc5cde00603963511a81af", + "0x09e440f2612dad1259012983dc6a1e24a73581feb1bd69d8a356eea16ba5fd0e", + "0x59dcc81d594cbe735a495e38953e8133f8b3825fd84767af9e4ea06c49dbabfa", + "0x6c8480b59a1a958c434b9680edea73b1207077fb9a8a19ea5f9fbbf6f47c4124", + "0x81f5c89601893b7a5a231a7d37d6ab9aa4c57f174fcfc6b40002fa808714c3a1", + "0x41ba4d6b4da141fcc1ee0f4b47a209cfd143d34e74fc7016e9956cedeb2db329", + "0x5e0b5b404c60e9892040feacfb4a84a09c2bc4a8a5f54f3dad5dca4acdc899dc", + "0xe922eebf1f5f15000d8967d16862ed274390cde808c75137d2fb9c2c0a80e391", + "0xbf49d31a59a20484f0c08990b2345dfa954509aa1f8901566ab9da052b826745", + "0xb84e07da828ae668c95d6aa31d4087504c372dbf4b5f8a8e4ded1bcf279fd52b", + "0x89288bf52d8c4a9561421ad199204d794038c5d19ae9fee765ee2b5470e68e7e", + "0xf6f618be99b85ec9a80b728454a417c647842215e2160c6fe547dd5a69bd9302", + "0xdd9adc002f98c9a47c7b704fc0ce0a5c7861a5e2795b6014749cde8bcb8a034b", + 
"0xd119a4b2c0db41fe01119115bcc35c4b7dbfdb42ad3cf2cc3f01c83732acb561", + "0x9c66bc84d416b9193bad9349d8c665a9a06b835f82dc93ae0cccc218f808aad0", + "0xd4b50eefcd2b5df075f14716cf6f2d26dfc8ae02e3993d711f4a287313038fde", + "0xaf72bfb346c2f336b8bc100bff4ba35d006a3dad1c5952a0adb40789447f2704", + "0xc43ca166f01dc955e7b4330227635feb1b0e0076a9c5633ca5c614a620244e5b", + "0x5efca76970629521cfa053fbbbda8d3679cadc018e2e891043b0f52989cc2603", + "0x35c57de1c788947f187051ce032ad1e899d9887d865266ec6fcfda49a8578b2b", + "0x56d4be8a65b257216eab7e756ee547db5a882b4edcd12a84ed114fbd4f5be1f1", + "0x257e858f8a4c07a41e6987aabaa425747af8b56546f2a3406f60d610bcc1f269", + "0x40bd9ee36d52717ab22f1f6b0ee4fb38b594f58399e0bf680574570f1b4b8c90", + "0xcb6ac01c21fc288c12973427c5df6eb8f6aefe64b92a6420c6388acdf36bc096", + "0xa5716441312151a5f0deb52993a293884c6c8f445054ce1e395c96adeee66c6d", + "0xe15696477f90113a10e04ba8225c28ad338c3b6bdd7bdeb95c0722921115ec85", + "0x8faeaa52ca2f1d791cd6843330d16c75eaf6257e4ba236e3dda2bc1a644aee00", + "0xc847fe595713bf136637ce8b43f9de238762953fed16798878344da909cc76ae", + "0xb5740dc579594dd110078ce430b9696e6a308078022dde2d7cfe0ef7647b904e", + "0x551a06d0771fcd3c53aea15aa8bf700047138ef1aa22265bee7fb965a84c9615", + "0x9a65397a5907d604030508d41477de621ce4a0d79b772e81112d634455e7a4da", + "0x6462d4cc2262d7faf8856812248dc608ae3d197bf2ef410f00c3ae43f2040995", + "0x6782b1bd319568e30d54b324ab9ed8fdeac6515e36b609e428a60785e15fb301", + "0x8bcdcf82c7eb2a07e14db20d80d9d2efea8d40320e121923784c92bf38250a8e", + "0x46ed84fa17d226d5895e44685747ab82a97246e97d6237014611aaaba65ed268", + "0x147e87981673326c5a2bdb06f5e90eaaa9583857129451eed6dde0c117fb061f", + "0x4141d6fe070104c29879523ba6669552f3d457c0929bb878d2751f4ff059b895", + "0xd866ce4ef226d74841f950fc28cdf2235db21e0e3f07a0c8f807704464db2210", + "0xa804f9118bf92558f684f90c2bda832a4f51ef771ffb2765cde3ec6f48124f32", + "0xc436d4a65910124e00cded9a637178914a8fbc090400f3f031c03eac4d0295a5", + "0x643fdb9243656512316528de04dcc7344ca33783580ad0c3debf8c4a6e7c8bc4", + "0x7f4a345b41706b281b2de998e91ff62d908eb29fc333ee336221757753c96e23", + "0x6bdc086a5b11de950cabea33b72d98db886b291c4c2f02d3e997edc36785d249", + "0xfb10b5b47d374078c0a52bff7174bf1cd14d872c7d20b4a009e2afd3017a9a17", + "0x1e07e605312db5380afad8f3d7bd602998102fdd39565b618ac177b13a6527e6", + "0xc3161b5a7b93aabf05652088b0e5b4803a18be693f590744c42c24c7aaaeef48", + "0xa47e4f25112a7d276313f153d359bc11268b397933a5d5375d30151766bc689a", + "0xb24260e2eff88716b5bf5cb75ea171ac030f5641a37ea89b3ac45acb30aae519", + "0x2bcacbebc0a7f34406db2c088390b92ee34ae0f2922dedc51f9227b9afb46636", + "0xc78c304f6dbe882c99c5e1354ce6077824cd42ed876db6706654551c7472a564", + "0x6e2ee19d3ee440c78491f4e354a84fa593202e152d623ed899e700728744ac85", + "0x2a3f438c5dc012aa0997b66f661b8c10f4a0cd7aa5b6e5922b1d73020561b27f", + "0xd804f755d93173408988b95e9ea0e9feae10d404a090f73d9ff84df96f081cf7", + "0xe06fda941b6936b8b33f00ffa02c8b05fd78fbec953da61da2043f5644b30a50", + "0x45ee279b465d53148850a16cc7f6bd33e7627aef554a9418ed012ca8f9717f80", + "0x9c79348c1bcd6aa2135452491d73564413a247ea8cc38fa7dcc6c43f8a2d61d5", + "0x7c91e056f89f2a77d3e3642e595bcf4973c3bca68dd2b10f51ca0d8945e4255e", + "0x669f976ebe38cbd22c5b1f785e14b76809d673d2cb1458983dbda41f5adf966b", + "0x8bc71e99ffcc119fd8bd604af54c0663b0325a3203a214810fa2c588089ed5a7", + "0x36b3f1ffeae5d9855e0965eef33f4c5133d99685802ac5ce5e1bb288d308f889", + "0x0aad33df38b3f31598e04a42ec22f20bf2e2e9472d02371eb1f8a06434621180", + "0x38c5632b81f90efbc51a729dcae03626a3063aa1f0a102fd0e4326e86a08a732", + 
"0x6ea721753348ed799c98ffa330d801e6760c882f720125250889f107915e270a", + "0xe700dd57ce8a653ce4269e6b1593a673d04d3de8b79b813354ac7c59d1b99adc", + "0xe9294a24b560d62649ca898088dea35a644d0796906d41673e29e4ea8cd16021", + "0xf20bb60d13a498a0ec01166bf630246c2f3b7481919b92019e2cfccb331f2791", + "0xf639a667209acdd66301c8e8c2385e1189b755f00348d614dc92da14e6866b38", + "0x49041904ee65c412ce2cd66d35570464882f60ac4e3dea40a97dd52ffc7b37a2", + "0xdb36b16d3a1010ad172fc55976d45df7c03b05eab5432a77be41c2f739b361f8", + "0x71400cdd2ea78ac1bf568c25a908e989f6d7e2a3690bc869c7c14e09c255d911", + "0xf0d920b2d8a00b88f78e7894873a189c580747405beef5998912fc9266220d98", + "0x1a2baefbbd41aa9f1cc5b10e0a7325c9798ba87de6a1302cf668a5de17bc926a", + "0x449538a20e52fd61777c45d35ff6c2bcb9d9165c7eb02244d521317f07af6691", + "0x97006755b9050b24c1855a58c4f4d52f01db4633baff4b4ef3d9c44013c5c665", + "0xe441363a27b26d1fff3288222fa8ed540f8ca5d949ddcc5ff8afc634eec05336", + "0xed587aa8752a42657fea1e68bc9616c40c68dcbbd5cb8d781e8574043e29ef28", + "0x47d896133ba81299b8949fbadef1c00313d466827d6b13598685bcbb8776c1d2", + "0x7786bc2cb2d619d07585e2ea4875f15efa22110e166af87b29d22af37b6c047d", + "0x956b76194075fe3daf3ca508a6fad161deb05d0026a652929e37c2317239cbc6", + "0xec9577cb7b85554b2383cc4239d043d14c08d005f0549af0eca6994e203cb4e7", + "0x0722d0c68d38b23b83330b972254bbf9bfcf32104cc6416c2dad67224ac52887", + "0x532b19d54fb6d77d96452d3e562b79bfd65175526cd793f26054c5f6f965df39", + "0x4d62e065e57cbf60f975134a360da29cabdcea7fcfc664cf2014d23c733ab3b4", + "0x09be0ea6b363fd746b303e482cb4e15ef25f8ae57b7143e64cbd5c4a1d069ebe", + "0x69dcddc3e05147860d8d0e90d602ac454b609a82ae7bb960ee2ecd1627d77777", + "0xa5e2ae69d902971000b1855b8066a4227a5be7234ac9513b3c769af79d997df4", + "0xc287d4bc953dcff359d707caf2ccba8cc8312156eca8aafa261fb72412a0ea28", + "0xb27584fd151fb30ed338f9cba28cf570f7ca39ebb03eb2e23140423af940bd96", + "0x7e02928194441a5047af89a6b6555fea218f1df78bcdb5f274911b48d847f5f8", + "0x9ba611add61ea6ba0d6d494c0c4edd03df9e6c03cafe10738cee8b7f45ce9476", + "0x62647ec3109ac3db3f3d9ea78516859f0677cdde3ba2f27f00d7fda3a447dd01", + "0xfa93ff6c25bfd9e17d520addf5ed2a60f1930278ff23866216584853f1287ac1", + "0x3b391c2aa79c2a42888102cd99f1d2760b74f772c207a39a8515b6d18e66888a", + "0xcc9ae3c14cbfb40bf01a09bcde913a3ed208e13e4b4edf54549eba2c0c948517", + "0xc2b8bce78dd4e876da04c54a7053ca8b2bedc8c639cee82ee257c754c0bea2b2", + "0xdb186f42871f438dba4d43755c59b81a6788cb3b544c0e1a3e463f6c2b6f7548", + "0xb7f8ba137c7783137c0729de14855e20c2ac4416c33f5cac3b235d05acbab634", + "0x282987e1f47e254e86d62bf681b0803df61340fdc9a8cf625ef2274f67fc6b5a", + "0x04aa195b1aa736bf8875777e0aebf88147346d347613b5ab77bef8d1b502c08c", + "0x3f732c559aee2b1e1117cf1dec4216a070259e4fa573a7dcadfa6aab74aec704", + "0x72699d1351a59aa73fcede3856838953ee90c6aa5ef5f1f7e21c703fc0089083", + "0x6d9ce1b8587e16a02218d5d5bed8e8d7da4ac40e1a8b46eeb412df35755c372c", + "0x4f9c19b411c9a74b8616db1357dc0a7eaf213cb8cd2455a39eb7ae4515e7ff34", + "0x9163dafa55b2b673fa7770b419a8ede4c7122e07919381225c240d1e90d90470", + "0x268ff4507b42e623e423494d3bb0bc5c0917ee24996fb6d0ebedec9ce8cd9d5c", + "0xff6e6169d233171ddc834e572024586eeb5b1bda9cb81e5ad1866dbc53dc75fe", + "0xb379a9c8279205e8753b6a5c865fbbf70eb998f9005cd7cbde1511f81aed5256", + "0x3a6b145e35a592e037c0992c9d259ef3212e17dca81045e446db2f3686380558", + "0x60fb781d7b3137481c601871c1c3631992f4e01d415841b7f5414743dcb4cfd7", + "0x90541b20b0c2ea49bca847e2db9b7bba5ce15b74e1d29194a12780e73686f3dd", + "0xe2b0507c13ab66b4b769ad1a1a86834e385b315da2f716f7a7a8ff35a9e8f98c", + 
"0xeefe54bc9fa94b921b20e7590979c28a97d8191d1074c7c68a656953e2836a72", + "0x8676e7f59d6f2ebb0edda746fc1589ef55e07feab00d7008a0f2f6f129b7bb3a", + "0x78a3d93181b40152bd5a8d84d0df7f2adde5db7529325c13bc24a5b388aed3c4", + "0xcc0e2d0cba7aaa19c874dbf0393d847086a980628f7459e9204fda39fad375c0", + "0x6e46a52cd7745f84048998df1a966736d2ac09a95a1c553016fef6b9ec156575", + "0x204ac2831d2376d4f9c1f5c106760851da968dbfc488dc8a715d1c764c238263", + "0xbdb8cc7b7e5042a947fca6c000c10b9b584e965c3590f92f6af3fe4fb23e1358", + "0x4a55e4b8a138e8508e7b11726f617dcf4155714d4600e7d593fd965657fcbd89", + "0xdfe064bb37f28d97b16d58b575844964205e7606dce914a661f2afa89157c45b", + "0x560e374fc0edda5848eef7ff06471545fcbdd8aefb2ecddd35dfbb4cb03b7ddf", + "0x10a66c82e146da5ec6f48b614080741bc51322a60d208a87090ad7c7bf6b71c6", + "0x62534c7dc682cbf356e6081fc397c0a17221b88508eaeff798d5977f85630d4f", + "0x0138bba8de2331861275356f6302b0e7424bbc74d88d8c534479e17a3494a15b", + "0x580c7768bf151175714b4a6f2685dc5bcfeb088706ee7ed5236604888b84d3e4", + "0xd290adb1a5dfc69da431c1c0c13da3be788363238d7b46bc20185edb45ab9139", + "0x1689879db6c78eb4d3038ed81be1bc106f8cfa70a7c6245bd4be642bfa02ebd7", + "0x6064c384002c8b1594e738954ed4088a0430316738def62822d08b2285514918", + "0x01fd23493f4f1cc3c5ff4e96a9ee386b2a144b50a428a6b5db654072bddadfe7", + "0xd5d05bb7f23ab0fa2b82fb1fb14ac29c2477d81a85423d0a45a4b7d5bfd81619", + "0xd72b9a73ae7b24db03b84e01106cea734d4b9d9850b0b7e9d65d6001d859c772", + "0x156317cb64578db93fee2123749aff58c81eae82b189b0d6f466f91de02b59df", + "0x5fba299f3b2c099edbac18d785be61852225890fc004bf6be0787d62926a79b3", + "0x004154f28f685bdbf0f0d6571e7a962a4c29b6c3ebedaaaf66097dfe8ae5f756", + "0x4b45816f9834c3b289affce7a3dc80056c2b7ffd3e3c250d6dff7f923e7af695", + "0x6ca53bc37816fff82346946d83bef87860626bbee7fd6ee9a4aeb904d893a11f", + "0xf48b2f43184358d66d5b5f7dd2b14a741c7441cc7a33ba3ebcc94a7b0192d496", + "0x3cb98f4baa429250311f93b46e745174f65f901fab4eb8075d380908aaaef650", + "0x343dfc26b4473b3a20e706a8e87e5202a4e6b96b53ed448afb9180c3f766e5f8", + "0x1ace0e8a735073bcbaea001af75b681298ef3b84f1dbab46ea52cee95ab0e7f9", + "0xd239b110dd71460cdbc41ddc99494a7531186c09da2a697d6351c116e667733b", + "0x22d6955236bd275969b8a6a30c23932670a6067f68e236d2869b6a8b4b493b83", + "0x53c1c01f8d061ac89187e5815ef924751412e6a6aa4dc8e3abafb1807506b4e0", + "0x2f56dd20c44d7370b713e7d7a1bfb1a800cac33f8a6157f278e17a943806a1f7", + "0xc99773d8a5b3e60115896a65ac1d6c15863317d403ef58b90cb89846f4715a7f", + "0x9f4b6b77c254094621cd336da06fbc6cbb7b8b1d2afa8e537ceca1053c561ef5", + "0x87944d0b210ae0a6c201cba04e293f606c42ebaed8b4a5d1c33f56863ae7e1b5", + "0xa7d116d962d03ca31a455f9cda90f33638fb36d3e3506605aa19ead554487a37", + "0x4042e32e224889efd724899c9edb57a703e63a404129ec99858048fbc12f2ce0", + "0x36759f7a0faeea1cd4cb91e404e4bf09908de6e53739603d5f0db52b664158a3", + "0xa4d50d005fb7b9fea8f86f1c92439cc9b8446efef7333ca03a8f6a35b2d49c38", + "0x80cb7c3e20f619006542edbe71837cdadc12161890a69eea8f41be2ee14c08a3", + "0xbb3c44e1df45f2bb93fb80e7f82cee886c153ab484c0095b1c18df03523629b4", + "0x04cb749e70fac3ac60dea779fceb0730b2ec5b915b0f8cf28a6246cf6da5db29", + "0x4f5189b8f650687e65a962ef3372645432b0c1727563777433ade7fa26f8a728", + "0x322eddddf0898513697599b68987be5f88c0258841affec48eb17cf3f61248e8", + "0x6416be41cda27711d9ec22b3c0ed4364ff6975a24a774179c52ef7e6de9718d6", + "0x0622d31b8c4ac7f2e30448bdadfebd5baddc865e0759057a6bf7d2a2c8b527e2", + "0x40f096513588cc19c08a69e4a48ab6a43739df4450b86d3ec2fb3c6a743b5485", + "0x09fcf7d49290785c9ea2d54c3d63f84f6ea0a2e9acfcdbb0cc3a281ce438250e", + 
"0x2000a519bf3da827f580982d449b5c70fcc0d4fa232addabe47bb8b1c471e62e", + "0xf4f80008518e200c40b043f34fb87a6f61b82f8c737bd784292911af3740245e", + "0x939eaab59f3d2ad49e50a0220080882319db7633274a978ced03489870945a65", + "0xadcad043d8c753fb10689280b7670f313253f5d719039e250a673d94441ee17c", + "0x58b7b75f090166b8954c61057074707d7e38d55ce39d9b2251bbc3d72be458f8", + "0xf61031890c94c5f87229ec608f2a9aa0a3f455ba8094b78395ae312cbfa04087", + "0x356a55def50139f94945e4ea432e7a9defa5db7975462ebb6ca99601c614ea1d", + "0x65963bb743d5db080005c4db59e29c4a4e86f92ab1dd7a59f69ea7eaf8e9aa79", + ], + lamport_1: vec![ + "0x9c0bfb14de8d2779f88fc8d5b016f8668be9e231e745640096d35dd5f53b0ae2", + "0x756586b0f3227ab0df6f4b7362786916bd89f353d0739fffa534368d8d793816", + "0x710108dddc39e579dcf0819f9ad107b3c56d1713530dd94325db1d853a675a37", + "0x8862b5f428ce5da50c89afb50aa779bb2c4dfe60e6f6a070b3a0208a4a970fe5", + "0x54a9cd342fa3a4bf685c01d1ce84f3068b0d5b6a58ee22dda8fbac4908bb9560", + "0x0fa3800efeaddd28247e114a1cf0f86b9014ccae9c3ee5f8488168b1103c1b44", + "0xbb393428b7ebfe2eda218730f93925d2e80c020d41a29f4746dcbb9138f7233a", + "0x7b42710942ef38ef2ff8fe44848335f26189c88c22a49fda84a51512ac68cd5d", + "0x90e99786a3e8b04db95ccd44d01e75558d75f3ddd12a1e9a2c2ce76258bf4813", + "0x3f6f71e40251728aa760763d25deeae54dc3a9b53807c737deee219120a2230a", + "0xe56081a7933c6eaf4ef2c5a04e21ab8a3897785dd83a34719d1b62d82cfd00c2", + "0x76cc54fa15f53e326575a9a2ac0b8ed2869403b6b6488ce4f3934f17db0f6bee", + "0x1cd9cd1d882ea3830e95162b5de4beb5ddff34fdbf7aec64e83b82a6d11b417c", + "0xb8ca8ae36d717c448aa27405037e44d9ee28bb8c6cc538a5d22e4535c8befd84", + "0x5c4492108c25f873a23d5fd7957b3229edc22858e8894febe7428c0831601982", + "0x907bcd75e7465e9791dc34e684742a2c0dc7007736313a95070a7e6b961c9c46", + "0xe7134b1511559e6b2440672073fa303ec3915398e75086149eb004f55e893214", + "0x2ddc2415e4753bfc383d48733e8b2a3f082883595edc5515514ebb872119af09", + "0xf2ad0f76b08ffa1eee62228ba76f4982fab4fbede5d4752c282c3541900bcd5b", + "0x0a84a6b15abd1cbc2da7092bf7bac418b8002b7000236dfba7c8335f27e0f1d4", + "0x97404e02b9ff5478c928e1e211850c08cc553ebac5d4754d13efd92588b1f20d", + "0xfa6ca3bcff1f45b557cdec34cb465ab06ade397e9d9470a658901e1f0f124659", + "0x5bd972d55f5472e5b08988ee4bccc7240a8019a5ba338405528cc8a38b29bc21", + "0x52952e4f96c803bb76749800891e3bfe55f7372facd5b5a587a39ac10b161bcc", + "0xf96731ae09abcad016fd81dc4218bbb5b2cb5fe2e177a715113f381814007314", + "0xe7d79e07cf9f2b52623491519a21a0a3d045401a5e7e10dd8873a85076616326", + "0xe4892f3777a4614ee6770b22098eaa0a3f32c5c44b54ecedacd69789d676dffe", + "0x20c932574779e2cc57780933d1dc6ce51a5ef920ce5bf681f7647ac751106367", + "0x057252c573908e227cc07797117701623a4835f4b047dcaa9678105299e48e70", + "0x20bad780930fa2a036fe1dea4ccbf46ac5b3c489818cdb0f97ae49d6e2f11fbf", + "0xc0d7dd26ffecdb098585a1694e45a54029bb1e31c7c5209289058efebb4cc91b", + "0x9a8744beb1935c0abe4b11812fc02748ef7c8cb650db3024dde3c5463e9d8714", + "0x8ce6eea4585bbeb657b326daa4f01f6aef34954338b3ca42074aedd1110ba495", + "0x1c85b43f5488b370721290d2faea19d9918d094c99963d6863acdfeeca564363", + "0xe88a244347e448349e32d0525b40b18533ea227a9d3e9b78a9ff14ce0a586061", + "0x352ca61efc5b8ff9ee78e738e749142dd1606154801a1449bbb278fa6bcc3dbe", + "0xa066926f9209220b24ea586fb20eb8199a05a247c82d7af60b380f6237429be7", + "0x3052337ccc990bfbae26d2f9fe5d7a4eb8edfb83a03203dca406fba9f4509b6e", + "0x343ce573a93c272688a068d758df53c0161aa7f9b55dec8beced363a38b33069", + "0x0f16b5593f133b58d706fe1793113a10750e8111eadee65301df7a1e84f782d3", + "0x808ae8539357e85b648020f1e9d255bc4114bee731a6220d7c5bcb5b85224e03", + 
"0x3b2bd97e31909251752ac57eda6015bb05b85f2838d475095cfd146677430625", + "0xe4f857c93b2d8b250050c7381a6c7c660bd29066195806c8ef11a2e6a6640236", + "0x23d91589b5070f443ddcefa0838c596518d54928119251ecf3ec0946a8128f52", + "0xb72736dfad52503c7f5f0c59827fb6ef4ef75909ff9526268abc0f296ee37296", + "0x80a8c66436d86b8afe87dde7e53a53ef87e057a5d4995963e76d159286de61b6", + "0xbec92c09ee5e0c84d5a8ba6ca329683ff550ace34631ea607a3a21f99cd36d67", + "0x83c97c9807b9ba6d9d914ae49dabdb4c55e12e35013f9b179e6bc92d5d62222b", + "0x8d9c79f6af3920672dc4cf97a297c186e75083d099aeb5c1051207bad0c98964", + "0x2aaa5944a2bd852b0b1be3166e88f357db097b001c1a71ba92040b473b30a607", + "0x46693d27ec4b764fbb516017c037c441f4558aebfe972cdcd03da67c98404e19", + "0x903b25d9e12208438f203c9ae2615b87f41633d5ffda9cf3f124c1c3922ba08f", + "0x3ec23dc8bc1b49f5c7160d78008f3f235252086a0a0fa3a7a5a3a53ad29ec410", + "0xa1fe74ceaf3cccd992001583a0783d7d7b7a245ea374f369133585b576b9c6d8", + "0xb2d6b0fe4932a2e06b99531232398f39a45b0f64c3d4ebeaaebc8f8e50a80607", + "0xe19893353f9214eebf08e5d83c6d44c24bffe0eceee4dc2e840d42eab0642536", + "0x5b798e4bc099fa2e2b4b5b90335c51befc9bbab31b4dd02451b0abd09c06ee79", + "0xbab2cdec1553a408cac8e61d9e6e19fb8ccfb48efe6d02bd49467a26eeeca920", + "0x1c1a544c28c38e5c423fe701506693511b3bc5f2af9771b9b2243cd8d41bebfc", + "0x704d6549d99be8cdefeec9a58957f75a2be4af7bc3dc4655fa606e7f3e03b030", + "0x051330f43fe39b08ed7d82d68c49b36a8bfa31357b546bfb32068712df89d190", + "0xe69174c7b03896461cab2dfaab33d549e3aac15e6b0f6f6f466fb31dae709b9b", + "0xe5f668603e0ddbbcde585ac41c54c3c4a681fffb7a5deb205344de294758e6ac", + "0xca70d5e4c3a81c1f21f246a3f52c41eaef9a683f38eb7c512eac8b385f46cbcd", + "0x3173a6b882b21cd147f0fc60ef8f24bbc42104caed4f9b154f2d2eafc3a56907", + "0xc71469c192bf5cc36242f6365727f57a19f924618b8a908ef885d8f459833cc3", + "0x59c596fc388afd8508bd0f5a1e767f3dda9ed30f6646d15bc59f0b07c4de646f", + "0xb200faf29368581f551bd351d357b6fa8cbf90bdc73b37335e51cad36b4cba83", + "0x275cede69b67a9ee0fff1a762345261cb20fa8191470159cc65c7885cfb8313c", + "0x0ce4ef84916efbe1ba9a0589bed098793b1ea529758ea089fd79151cc9dc7494", + "0x0f08483bb720e766d60a3cbd902ce7c9d835d3f7fdf6dbe1f37bcf2f0d4764a2", + "0xb30a73e5db2464e6da47d10667c82926fa91fceb337d89a52db5169008bc6726", + "0x6b9c50fed1cc404bf2dd6fffbfd18e30a4caa1500bfeb080aa93f78d10331aaf", + "0xf17c84286df03ce175966f560600dd562e0f59f18f1d1276b4d8aca545d57856", + "0x11455f2ef96a6b2be69854431ee219806008eb80ea38c81e45b2e58b3f975a20", + "0x9a61e03e2157a5c403dfcde690f7b7d704dd56ea1716cf14cf7111075a8d6491", + "0x30312c910ce6b39e00dbaa669f0fb7823a51f20e83eaeb5afa63fb57668cc2f4", + "0x17c18d261d94fba82886853a4f262b9c8b915ed3263b0052ece5826fd7e7d906", + "0x2d8f6ea0f5b9d0e4bc1478161f5ed2ad3d8495938b414dcaec9548adbe572671", + "0x19954625f13d9bab758074bf6dee47484260d29ee118347c1701aaa74abd9848", + "0x842ef2ad456e6f53d75e91e8744b96398df80350cf7af90b145fea51fbbcf067", + "0x34a8b0a76ac20308aa5175710fb3e75c275b1ff25dba17c04e3a3e3c48ca222c", + "0x58efcbe75f32577afe5e9ff827624368b1559c32fcca0cf4fd704af8ce019c63", + "0x411b4d242ef8f14d92bd8b0b01cb4fa3ca6f29c6f9073cfdd3ce614fa717463b", + "0xf76dbda66ede5e789314a88cff87ecb4bd9ca418c75417d4d920e0d21a523257", + "0xd801821a0f87b4520c1b003fe4936b6852c410ee00b46fb0f81621c9ac6bf6b4", + "0x97ad11d6a29c8cf3c548c094c92f077014de3629d1e9053a25dbfaf7eb55f72d", + "0xa87012090cd19886d49521d564ab2ad0f18fd489599050c42213bb960c9ee8ff", + "0x8868d8a26e758d50913f2bf228da0444a206e52853bb42dd8f90f09abe9c859a", + "0xc257fb0cc9970e02830571bf062a14540556abad2a1a158f17a18f14b8bcbe95", + 
"0xfe611ce27238541b14dc174b652dd06719dfbcda846a027f9d1a9e8e9df2c065", + "0xc9b25ea410f420cc2d4fc6057801d180c6cab959bce56bf6120f555966e6de6d", + "0x95437f0524ec3c04d4132c83be7f1a603e6f4743a85ede25aa97a1a4e3f3f8fc", + "0x82a12910104065f35e983699c4b9187aed0ab0ec6146f91728901efecc7e2e20", + "0x6622dd11e09252004fb5aaa39e283333c0686065f228c48a5b55ee2060dbd139", + "0x89a2879f25733dab254e4fa6fddb4f04b8ddf018bf9ad5c162aea5c858e6faaa", + "0x8a71b62075a6011fd9b65d956108fa79cc9ebb8f194d64d3105a164e01cf43a6", + "0x103f4fe9ce211b6452181371f0dc4a30a557064b684645a4495136f4ebd0936a", + "0x97914adc5d7ce80147c2f44a6b29d0b495d38dedd8cc299064abcc62ed1ddabc", + "0x825c481da6c836a8696d7fda4b0563d204a9e7d9e4c47b46ded26db3e2d7d734", + "0xf8c0637ba4c0a383229f1d730db733bc11d6a4e33214216c23f69ec965dcaaad", + "0xaed3bdaf0cb12d37764d243ee0e8acdefc399be2cabbf1e51dc43454efd79cbd", + "0xe8427f56cc5cec8554e2f5f586b57adccbea97d5fc3ef7b8bbe97c2097cf848c", + "0xba4ad0abd5c14d526357fd0b6f8676ef6126aeb4a6d80cabe1f1281b9d28246c", + "0x4cff20b72e2ab5af3fafbf9222146949527c25f485ec032f22d94567ff91b22f", + "0x0d32925d89dd8fed989912afcbe830a4b5f8f7ae1a3e08ff1d3a575a77071d99", + "0xe51a1cbeae0be5d2fdbc7941aea904d3eade273f7477f60d5dd6a12807246030", + "0xfb8615046c969ef0fa5e6dc9628c8a9880e86a5dc2f6fc87aff216ea83fcf161", + "0x64dd705e105c88861470d112c64ca3d038f67660a02d3050ea36c34a9ebf47f9", + "0xb6ad148095c97528180f60fa7e8609bf5ce92bd562682092d79228c2e6f0750c", + "0x5bae0cd81f3bd0384ca3143a72068e6010b946462a73299e746ca639c026781c", + "0xc39a0fc7764fcfc0402b12fb0bbe78fe3633cbfb33c7f849279585a878a26d7c", + "0x2b752fda1c0c53d685cc91144f78d371db6b766725872b62cc99e1234cca8c1a", + "0x40ee6b9635d87c95a528757729212a261843ecb06d975de91352d43ca3c7f196", + "0x75e2005d3726cf8a4bb97ea5287849a361e3f8fdfadc3c1372feed1208c89f6b", + "0x0976f8ab556153964b58158678a5297da4d6ad92e284da46052a791ee667aee4", + "0xdbeef07841e41e0672771fb550a5b9233ae8e9256e23fa0d34d5ae5efe067ec8", + "0xa890f412ab6061c0c5ee661e80d4edc5c36b22fb79ac172ddd5ff26a7dbe9751", + "0xb666ae07f9276f6d0a33f9efeb3c5cfcba314fbc06e947563db92a40d7a341e8", + "0x83a082cf97ee78fbd7f31a01ae72e40c2e980a6dab756161544c27da86043528", + "0xfa726a919c6f8840c456dc77b0fec5adbed729e0efbb9317b75f77ed479c0f44", + "0xa8606800c54faeab2cbc9d85ff556c49dd7e1a0476027e0f7ce2c1dc2ba7ccbf", + "0x2796277836ab4c17a584c9f6c7778d10912cb19e541fb75453796841e1f6cd1c", + "0xf648b8b3c7be06f1f8d9cda13fd6d60f913e5048a8e0b283b110ca427eeb715f", + "0xa21d00b8fdcd77295d4064e00fbc30bed579d8255e9cf3a9016911d832390717", + "0xe741afcd98cbb3bb140737ed77bb968ac60d5c00022d722f9f04f56e97235dc9", + "0xbeecc9638fac39708ec16910e5b02c91f83f6321f6eb658cf8a96353cfb49806", + "0x912eee6cabeb0fed8d6e6ca0ba61977fd8e09ea0780ff8fbec995e2a85e08b52", + "0xc665bc0bb121a1229bc56ecc07a7e234fd24c523ea14700aa09e569b5f53ad33", + "0x39501621c2bdff2f62ab8d8e3fe47fe1701a98c665697c5b750ee1892f11846e", + "0x03d32e16c3a6c913daefb139f131e1e95a742b7be8e20ee39b785b4772a50e44", + "0x4f504eb46a82d440f1c952a06f143994bc66eb9e3ed865080cd9dfc6d652b69c", + "0xad753dc8710a46a70e19189d8fc7f4c773e4d9ccc7a70c354b574fe377328741", + "0xf7f5464a2d723b81502adb9133a0a4f0589b4134ca595a82e660987c6b011610", + "0x216b60b1c3e3bb4213ab5d43e04619d13e1ecedbdd65a1752bda326223e3ca3e", + "0x763664aa96d27b6e2ac7974e3ca9c9d2a702911bc5d550d246631965cf2bd4a2", + "0x292b5c8c8431b040c04d631f313d4e6b67b5fd3d4b8ac9f2edb09d13ec61f088", + "0x80db43c2b9e56eb540592f15f5900222faf3f75ce62e78189b5aa98c54568a5e", + "0x1b5fdf8969bcd4d65e86a2cefb3a673e18d587843f4f50db4e3ee77a0ba2ef1c", + 
"0x11e237953fff3e95e6572da50a92768467ffdfd0640d3384aa1c486357e7c24a", + "0x1fabd4faa8dba44808cc87d0bc389654a98496745578f3d17d134adc7f7b10f3", + "0x5eca4aa96f20a56197772ae6b600762154ca9d2702cab12664ea47cbff1a440c", + "0x0b4234f5bb02abcf3b5ce6c44ea85f55ec7db98fa5a7b90abef6dd0df034743c", + "0x316761e295bf350313c4c92efea591b522f1df4211ce94b22e601f30aefa51ef", + "0xe93a55ddb4d7dfe02598e8f909ff34b3de40a1c0ac8c7fba48cb604ea60631fb", + "0xe6e6c877b996857637f8a71d0cd9a6d47fdeb03752c8965766f010073332b087", + "0xa4f95c8874e611eddd2c4502e4e1196f0f1be90bfc37db35f8588e7d81d34aeb", + "0x9351710a5633714bb8b2d226e15ba4caa6f50f56c5508e5fa1239d5cc6a7e1aa", + "0x8d0aef52ec7266f37adb572913a6213b8448caaf0384008373dec525ae6cdff1", + "0x718e24c3970c85bcb14d2763201812c43abac0a7f16fc5787a7a7b2f37288586", + "0x3600ce44cebc3ee46b39734532128eaf715c0f3596b554f8478b961b0d6e389a", + "0x50dd1db7b0a5f6bd2d16252f43254d0f5d009e59f61ebc817c4bbf388519a46b", + "0x67861ed00f5fef446e1f4e671950ac2ddae1f3b564f1a6fe945e91678724ef03", + "0x0e332c26e169648bc20b4f430fbf8c26c6edf1a235f978d09d4a74c7b8754aad", + "0x6c9901015adf56e564dfb51d41a82bde43fb67273b6911c9ef7fa817555c9557", + "0x53c83391e5e0a024f68d5ade39b7a769f10664e12e4942c236398dd5dbce47a1", + "0x78619564f0b2399a9fcb229d938bf1e298d62b03b7a37fe6486034185d7f7d27", + "0x4625f15381a8723452ec80f3dd0293c213ae35de737c508f42427e1735398c3a", + "0x69542425ddb39d3d3981e76b41173eb1a09500f11164658a3536bf3e292f8b6a", + "0x82ac4f5bb40aece7d6706f1bdf4dfba5c835c09afba6446ef408d8ec6c09300f", + "0x740f9180671091b4c5b3ca59b9515bd0fc751f48e488a9f7f4b6848602490e21", + "0x9a04b08b4115986d8848e80960ad67490923154617cb82b3d88656ec1176c24c", + "0xf9ffe528eccffad519819d9eef70cef317af33899bcaee16f1e720caf9a98744", + "0x46da5e1a14b582b237f75556a0fd108c4ea0d55c0edd8f5d06c59a42e57410df", + "0x098f3429c8ccda60c3b5b9755e5632dd6a3f5297ee819bec8de2d8d37893968a", + "0x1a5b91af6025c11911ac072a98b8a44ed81f1f3c76ae752bd28004915db6f554", + "0x8bed50c7cae549ed4f8e05e02aa09b2a614c0af8eec719e4c6f7aee975ec3ec7", + "0xd86130f624b5dcc116f2dfbb5219b1afde4b7780780decd0b42694e15c1f8d8b", + "0x4167aa9bc0075f624d25d40eb29139dd2c452ebf17739fab859e14ac6765337a", + "0xa258ce5db20e91fb2ea30d607ac2f588bdc1924b21bbe39dc881e19889a7f5c6", + "0xe5ef8b5ab3cc8894452d16dc875b69a55fd925808ac7cafef1cd19485d0bb50a", + "0x120df2b3975d85b6dfca56bb98a82025ade5ac1d33e4319d2e0105b8de9ebf58", + "0xc964291dd2e0807a468396ebba3d59cfe385d949f6d6215976fc9a0a11de209a", + "0xf23f14cb709074b79abe166f159bc52b50de687464df6a5ebf112aa953c95ad5", + "0x622c092c9bd7e30f880043762e26d8e9c73ab7c0d0806f3c5e472a4152b35a93", + "0x8a5f090662731e7422bf651187fb89812419ab6808f2c62da213d6944fccfe9f", + "0xfbea3c0d92e061fd2399606f42647d65cc54191fa46d57b325103a75f5c22ba6", + "0x2babfbcc08d69b52c3747ddc8dcad4ea5511edabf24496f3ff96a1194d6f680e", + "0x4d3d019c28c779496b616d85aee201a3d79d9eecf35f728d00bcb12245ace703", + "0xe76fcee1f08325110436f8d4a95476251326b4827399f9b2ef7e12b7fb9c4ba1", + "0x4884d9c0bb4a9454ea37926591fc3eed2a28356e0506106a18f093035638da93", + "0x74c3f303d93d4cc4f0c1eb1b4378d34139220eb836628b82b649d1deb519b1d3", + "0xacb806670b278d3f0c84ba9c7a68c7df3b89e3451731a55d7351468c7c864c1c", + "0x8660fb8cd97e585ea7a41bccb22dd46e07eee8bbf34d90f0f0ca854b93b1ebee", + "0x2fc9c89cdca71a1c0224d469d0c364c96bbd99c1067a7ebe8ef412c645357a76", + "0x8ec6d5ab6ad7135d66091b8bf269be44c20af1d828694cd8650b5479156fd700", + "0x50ab4776e8cabe3d864fb7a1637de83f8fbb45d6e49645555ffe9526b27ebd66", + "0xbf39f5e17082983da4f409f91c7d9059acd02ccbefa69694aca475bb8d40b224", + 
"0x3135b3b981c850cc3fe9754ec6af117459d355ad6b0915beb61e84ea735c31bf", + "0xa7971dab52ce4bf45813223b0695f8e87f64b614c9c5499faac6f842e5c41be9", + "0x9e480f5617323ab104b4087ac4ef849a5da03427712fb302ac085507c77d8f37", + "0x57a6d474654d5e8d408159be39ad0e7026e6a4c6a6543e23a63d30610dc8dfc1", + "0x09eb3e01a5915a4e26d90b4c58bf0cf1e560fdc8ba53faed9d946ad3e9bc78fa", + "0x29c6d25da80a772310226b1b89d845c7916e4a4bc94d75aa330ec3eaa14b1e28", + "0x1a1ccfee11edeb989ca02e3cb89f062612a22a69ec816a625835d79370173987", + "0x1cb63dc541cf7f71c1c4e8cabd2619c3503c0ea1362dec75eccdf1e9efdbfcfc", + "0xac9dff32a69e75b396a2c250e206b36c34c63b955c9e5732e65eaf7ccca03c62", + "0x3e1b4f0c3ebd3d38cec389720147746774fc01ff6bdd065f0baf2906b16766a8", + "0x5cc8bed25574463026205e90aad828521f8e3d440970d7e810d1b46849681db5", + "0x255185d264509bd3a768bb0d50b568e66eb1fec96d573e33aaacc716d7c8fb93", + "0xe81b86ba631973918a859ff5995d7840b12511184c2865401f2693a71b9fa07e", + "0x61e67e42616598da8d36e865b282127c761380d3a56d26b8d35fbbc7641433c5", + "0x60c62ffef83fe603a34ca20b549522394e650dad5510ae68b6e074f0cd209a56", + "0x78577f2caf4a54f6065593535d76216f5f4075af7e7a98b79571d33b1822920c", + "0xfd4cb354f2869c8650200de0fe06f3d39e4dbebf19b0c1c2677da916ea84f44d", + "0x453769cef6ff9ba2d5c917982a1ad3e2f7e947d9ea228857556af0005665e0b0", + "0xe567f93f8f88bf1a6b33214f17f5d60c5dbbb531b4ab21b8c0b799b6416891e0", + "0x7e65a39a17f902a30ceb2469fe21cba8d4e0da9740fcefd5c647c81ff1ae95fa", + "0x03e4a7eea0cd6fc02b987138ef88e8795b5f839636ca07f6665bbae9e5878931", + "0xc3558e2b437cf0347cabc63c95fa2710d3f43c65d380feb998511903f9f4dcf0", + "0xe3a615f80882fb5dfbd08c1d7a8b0a4d3b651d5e8221f99b879cb01d97037a9c", + "0xb56db4a5fea85cbffaee41f05304689ea321c40d4c108b1146fa69118431d9b2", + "0xab28e1f077f18117945910c235bc9c6f9b6d2b45e9ef03009053006c637e3e26", + "0xefcabc1d5659fd6e48430dbfcc9fb4e08e8a9b895f7bf9b3d6c7661bfc44ada2", + "0xc7547496f212873e7c3631dafaca62a6e95ac39272acf25a7394bac6ea1ae357", + "0xc482013cb01bd69e0ea9f447b611b06623352e321469f4adc739e3ee189298eb", + "0x5942f42e91e391bb44bb2c4d40da1906164dbb6d1c184f00fa62899baa0dba2c", + "0xb4bcb46c80ad4cd603aff2c1baf8f2c896a628a46cc5786f0e58dae846694677", + "0xd0a7305b995fa8c317c330118fee4bfef9f65f70b54558c0988945b08e90ff08", + "0x687f801b7f32fdfa7d50274cc7b126efedbdae8de154d36395d33967216f3086", + "0xeb19ec10ac6c15ffa619fa46792971ee22a9328fa53bd69a10ed6e9617dd1bbf", + "0xa2bb3f0367f62abdb3a9fa6da34b20697cf214a4ff14fd42826da140ee025213", + "0x070a76511f32c882374400af59b22d88974a06fbc10d786dd07ca7527ebd8b90", + "0x8f195689537b446e946b376ec1e9eb5af5b4542ab47be550a5700fa5d81440d5", + "0x10cc09778699fc8ac109e7e6773f83391eeba2a6db5226fbe953dd8d99126ca5", + "0x8cc839cb7dc84fd3b8c0c7ca637e86a2f72a8715cc16c7afb597d12da717530b", + "0xa32504e6cc6fd0ee441440f213f082fcf76f72d36b5e2a0f3b6bdd50cdd825a2", + "0x8f45151db8878e51eec12c450b69fa92176af21a4543bb78c0d4c27286e74469", + "0x23f5c465bd35bcd4353216dc9505df68324a27990df9825a242e1288e40a13bb", + "0x35f409ce748af33c20a6ae693b8a48ba4623de9686f9834e22be4410e637d24f", + "0xb962e5845c1db624532562597a99e2acc5e434b97d8db0725bdeddd71a98e737", + "0x0f8364f99f43dd52b4cfa9e426c48f7b6ab18dc40a896e96a09eceebb3363afe", + "0xa842746868da7644fccdbb07ae5e08c71a6287ab307c4f9717eadb414c9c99f4", + "0xa59064c6b7fe7d2407792d99ed1218d2dc2f240185fbd8f767997438241b92e9", + "0xb6ea0d58e8d48e05b9ff4d75b2ebe0bd9752c0e2691882f754be66cdec7628d3", + "0xf16b78c9d14c52b2b5156690b6ce37a5e09661f49674ad22604c7d3755e564d1", + "0xbfa8ef74e8a37cd64b8b4a4260c4fc162140603f9c2494b9cf4c1e13de522ed9", + 
"0xf4b89f1776ebf30640dc5ec99e43de22136b6ef936a85193ef940931108e408a", + "0xefb9a4555d495a584dbcc2a50938f6b9827eb014ffae2d2d0aae356a57894de8", + "0x0627a466d42a26aca72cf531d4722e0e5fc5d491f4527786be4e1b641e693ac2", + "0x7d10d21542de3d8f074dbfd1a6e11b3df32c36272891aae54053029d39ebae10", + "0x0f21118ee9763f46cc175a21de876da233b2b3b62c6f06fa2df73f6deccf37f3", + "0x143213b96f8519c15164742e2350cc66e814c9570634e871a8c1ddae4d31b6b5", + "0x8d2877120abae3854e00ae8cf5c8c95b3ede10590ab79ce2be7127239507e18d", + "0xaccd0005d59472ac04192c059ed9c10aea42c4dabec9e581f6cb10b261746573", + "0x67bc8dd5422f39e741b9995e6e60686e75d6620aa0d745b84191f5dba9b5bb18", + "0x11b8e95f6a654d4373cefbbac29a90fdd8ae098043d1969b9fa7885318376b34", + "0x431a0b8a6f08760c942eeff5791e7088fd210f877825ce4dcabe365e03e4a65c", + "0x704007f11bae513f428c9b0d23593fd2809d0dbc4c331009856135dafec23ce4", + "0xc06dee39a33a05e30c522061c1d9272381bde3f9e42fa9bd7d5a5c8ef11ec6ec", + "0x66b4157baaae85db0948ad72882287a80b286df2c40080b8da4d5d3db0a61bd2", + "0xef1983b1906239b490baaaa8e4527f78a57a0a767d731f062dd09efb59ae8e3d", + "0xf26d0d5c520cce6688ca5d51dee285af26f150794f2ea9f1d73f6df213d78338", + "0x8b28838382e6892f59c42a7709d6d38396495d3af5a8d5b0a60f172a6a8940bd", + "0x261a605fa5f2a9bdc7cffac530edcf976e7ea7af4e443b625fe01ed39dad44b6", + ], + compressed_lamport_pk: + "0xdd635d27d1d52b9a49df9e5c0c622360a4dd17cba7db4e89bce3cb048fb721a5", + child_sk: + "20397789859736650942317412262472558107875392172444076792671091975210932703118", } } } diff --git a/crypto/eth2_key_derivation/tests/eip2333_vectors.rs b/crypto/eth2_key_derivation/tests/eip2333_vectors.rs index 6995bd087..e4406ab1f 100644 --- a/crypto/eth2_key_derivation/tests/eip2333_vectors.rs +++ b/crypto/eth2_key_derivation/tests/eip2333_vectors.rs @@ -65,9 +65,9 @@ fn assert_vector_passes(raw: RawTestVector) { fn eip2333_test_case_0() { assert_vector_passes(RawTestVector { seed: "0xc55257c360c07c72029aebc1b53c05ed0362ada38ead3e3e9efa3708e53495531f09a6987599d18264c1e1c92f2cf141630c7a3c4ab7c81b2f001698e7463b04", - master_sk: "12513733877922233913083619867448865075222526338446857121953625441395088009793", + master_sk: "6083874454709270928345386274498605044986640685124978867557563392430687146096", child_index: 0, - child_sk: "7419543105316279183937430842449358701327973165530407166294956473095303972104" + child_sk: "20397789859736650942317412262472558107875392172444076792671091975210932703118", }) } @@ -75,9 +75,9 @@ fn eip2333_test_case_0() { fn eip2333_test_case_1() { assert_vector_passes(RawTestVector { seed: "0x3141592653589793238462643383279502884197169399375105820974944592", - master_sk: "46029459550803682895343812821003080589696405386150182061394330539196052371668", + master_sk: "29757020647961307431480504535336562678282505419141012933316116377660817309383", child_index: 3141592653, - child_sk: "43469287647733616183478983885105537266268532274998688773496918571876759327260", + child_sk: "25457201688850691947727629385191704516744796114925897962676248250929345014287", }) } @@ -85,9 +85,9 @@ fn eip2333_test_case_1() { fn eip2333_test_case_2() { assert_vector_passes(RawTestVector { seed: "0x0099FF991111002299DD7744EE3355BBDD8844115566CC55663355668888CC00", - master_sk: "45379166311535261329029945990467475187325618028073620882733843918126031931161", + master_sk: "27580842291869792442942448775674722299803720648445448686099262467207037398656", child_index: 4294967295, - child_sk: "46475244006136701976831062271444482037125148379128114617927607151318277762946", + child_sk: 
"29358610794459428860402234341874281240803786294062035874021252734817515685787", }) } @@ -95,8 +95,8 @@ fn eip2333_test_case_2() { fn eip2333_test_case_3() { assert_vector_passes(RawTestVector { seed: "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3", - master_sk: "31740500954810567003972734830331791822878290325762596213711963944729383643688", + master_sk: "19022158461524446591288038168518313374041767046816487870552872741050760015818", child_index: 42, - child_sk: "51041472511529980987749393477251359993058329222191894694692317000136653813011", + child_sk: "31372231650479070279774297061823572166496564838472787488249775572789064611981", }) } diff --git a/crypto/eth2_keystore/Cargo.toml b/crypto/eth2_keystore/Cargo.toml index 3a3eb4a80..63508b5e5 100644 --- a/crypto/eth2_keystore/Cargo.toml +++ b/crypto/eth2_keystore/Cargo.toml @@ -7,20 +7,20 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -rand = "0.7.2" -aes-ctr = "0.4.0" -hmac = "0.8.0" -pbkdf2 = { version = "0.4.0", default-features = false } -scrypt = { version = "0.3.0", default-features = false } -sha2 = "0.9.0" -uuid = { version = "0.8", features = ["serde", "v4"] } -zeroize = { version = "1.0.0", features = ["zeroize_derive"] } -serde = "1.0.110" -serde_repr = "0.1" +rand = "0.7.3" +aes-ctr = "0.5.0" +hmac = "0.9.0" +pbkdf2 = { version = "0.5.0", default-features = false } +scrypt = { version = "0.4.1", default-features = false } +sha2 = "0.9.1" +uuid = { version = "0.8.1", features = ["serde", "v4"] } +zeroize = { version = "1.1.1", features = ["zeroize_derive"] } +serde = "1.0.116" +serde_repr = "0.1.6" hex = "0.4.2" bls = { path = "../bls" } -eth2_ssz = { path = "../../consensus/ssz" } -serde_json = "1.0.41" +eth2_ssz = "0.1.2" +serde_json = "1.0.58" eth2_key_derivation = { path = "../eth2_key_derivation" } [dev-dependencies] tempfile = "3.1.0" diff --git a/crypto/eth2_keystore/src/keystore.rs b/crypto/eth2_keystore/src/keystore.rs index 6e3128b00..c1997680d 100644 --- a/crypto/eth2_keystore/src/keystore.rs +++ b/crypto/eth2_keystore/src/keystore.rs @@ -81,6 +81,7 @@ pub struct KeystoreBuilder<'a> { cipher: Cipher, uuid: Uuid, path: String, + description: String, } impl<'a> KeystoreBuilder<'a> { @@ -105,10 +106,17 @@ impl<'a> KeystoreBuilder<'a> { cipher: Cipher::Aes128Ctr(Aes128Ctr { iv }), uuid: Uuid::new_v4(), path, + description: "".to_string(), }) } } + /// Build the keystore with a specific description instead of an empty string. + pub fn description(mut self, description: String) -> Self { + self.description = description; + self + } + /// Build the keystore using the supplied `kdf` instead of `crate::default_kdf`. pub fn kdf(mut self, kdf: Kdf) -> Self { self.kdf = kdf; @@ -124,6 +132,7 @@ impl<'a> KeystoreBuilder<'a> { self.cipher, self.uuid, self.path, + self.description, ) } } @@ -147,6 +156,7 @@ impl Keystore { cipher: Cipher, uuid: Uuid, path: String, + description: String, ) -> Result<Self, Error> { let secret: ZeroizeHash = keypair.sk.serialize(); @@ -175,7 +185,7 @@ impl Keystore { path: Some(path), pubkey: keypair.pk.to_hex_string()[2..].to_string(), version: Version::four(), - description: None, + description: Some(description), name: None, }, }) @@ -228,6 +238,18 @@ impl Keystore { &self.json.pubkey } + /// Returns the description for the keystore, if the field is present. + pub fn description(&self) -> Option<&str> { + self.json.description.as_deref() + } + + /// Sets the description for the keystore. 
+    ///
+    /// Note: this does not save the keystore to disk.
+    pub fn set_description(&mut self, description: String) {
+        self.json.description = Some(description)
+    }
+
     /// Returns the pubkey for the keystore, parsed as a `PublicKey` if it parses.
     pub fn public_key(&self) -> Option<PublicKey> {
         serde_json::from_str(&format!("\"0x{}\"", &self.json.pubkey)).ok()
diff --git a/crypto/eth2_wallet/Cargo.toml b/crypto/eth2_wallet/Cargo.toml
index 47e6e02d9..e34ee1730 100644
--- a/crypto/eth2_wallet/Cargo.toml
+++ b/crypto/eth2_wallet/Cargo.toml
@@ -7,11 +7,11 @@ edition = "2018"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-serde = "1.0.110"
-serde_json = "1.0.41"
-serde_repr = "0.1"
-uuid = { version = "0.8", features = ["serde", "v4"] }
-rand = "0.7.2"
+serde = "1.0.116"
+serde_json = "1.0.58"
+serde_repr = "0.1.6"
+uuid = { version = "0.8.1", features = ["serde", "v4"] }
+rand = "0.7.3"
 eth2_keystore = { path = "../eth2_keystore" }
 eth2_key_derivation = { path = "../eth2_key_derivation" }
 tiny-bip39 = { git = "https://github.com/sigp/tiny-bip39.git", rev = "1137c32da91bd5e75db4305a84ddd15255423f7f" }
diff --git a/crypto/eth2_wallet/src/json_wallet/mod.rs b/crypto/eth2_wallet/src/json_wallet/mod.rs
index 6c430e50d..834716fba 100644
--- a/crypto/eth2_wallet/src/json_wallet/mod.rs
+++ b/crypto/eth2_wallet/src/json_wallet/mod.rs
@@ -13,11 +13,6 @@ pub use uuid::Uuid;
 pub struct JsonWallet {
     pub crypto: Crypto,
     pub name: String,
-    // TODO: confirm if this field is optional or not.
-    //
-    // Reference:
-    //
-    // https://github.com/sigp/lighthouse/pull/1117#discussion_r422892396
     pub nextaccount: u32,
     pub uuid: Uuid,
     pub version: Version,
diff --git a/crypto/eth2_wallet/src/wallet.rs b/crypto/eth2_wallet/src/wallet.rs
index 47b2d329d..39ab816e1 100644
--- a/crypto/eth2_wallet/src/wallet.rs
+++ b/crypto/eth2_wallet/src/wallet.rs
@@ -66,7 +66,6 @@ impl<'a> WalletBuilder<'a> {
         password: &'a [u8],
         name: String,
     ) -> Result<Self, Error> {
-        // TODO: `bip39` does not use zeroize. Perhaps we should make a PR upstream?
         let seed = Bip39Seed::new(mnemonic, "");
 
         Self::from_seed_bytes(seed.as_bytes(), password, name)
@@ -215,6 +214,23 @@ impl Wallet {
         self.json.nextaccount
     }
 
+    /// Sets the value of the JSON wallet `nextaccount` field.
+    ///
+    /// This will be the index of the next validator generated with `Self::next_validator`.
+    ///
+    /// ## Errors
+    ///
+    /// Returns `Err(())`, without mutating `self`, if `nextaccount` is less than
+    /// `self.nextaccount()`. This protects against duplicate validator generation.
+    pub fn set_nextaccount(&mut self, nextaccount: u32) -> Result<(), ()> {
+        if nextaccount >= self.nextaccount() {
+            self.json.nextaccount = nextaccount;
+            Ok(())
+        } else {
+            Err(())
+        }
+    }
+
     /// Returns the value of the JSON wallet `name` field.
pub fn name(&self) -> &str { &self.json.name diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 70a17e237..57364aa18 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "0.2.13" +version = "0.3.0" authors = ["Paul Hauner <paul@paulhauner.com>"] edition = "2018" @@ -10,28 +10,29 @@ portable = ["bls/supranational-portable"] [dependencies] bls = { path = "../crypto/bls" } -clap = "2.33.0" +clap = "2.33.3" hex = "0.4.2" -log = "0.4.8" -serde = "1.0.110" -serde_yaml = "0.8.11" -simple_logger = "1.6.0" +log = "0.4.11" +serde = "1.0.116" +serde_yaml = "0.8.13" +simple_logger = "1.10.0" types = { path = "../consensus/types" } state_processing = { path = "../consensus/state_processing" } eth2_ssz = "0.1.2" -regex = "1.3.7" +regex = "1.3.9" futures = { version = "0.3.5", features = ["compat"] } environment = { path = "../lighthouse/environment" } web3 = "0.11.0" eth2_testnet_config = { path = "../common/eth2_testnet_config" } -dirs = "2.0.2" +dirs = "3.0.1" genesis = { path = "../beacon_node/genesis" } deposit_contract = { path = "../common/deposit_contract" } -tree_hash = "0.1.0" +tree_hash = "0.1.1" tokio = { version = "0.2.22", features = ["full"] } clap_utils = { path = "../common/clap_utils" } eth2_libp2p = { path = "../beacon_node/eth2_libp2p" } validator_dir = { path = "../common/validator_dir", features = ["insecure_keys"] } -rand = "0.7.2" +rand = "0.7.3" eth2_keystore = { path = "../crypto/eth2_keystore" } lighthouse_version = { path = "../common/lighthouse_version" } +directory = { path = "../common/directory" } diff --git a/lcli/src/eth1_genesis.rs b/lcli/src/eth1_genesis.rs index 2c6f7d8cf..9fd0757d8 100644 --- a/lcli/src/eth1_genesis.rs +++ b/lcli/src/eth1_genesis.rs @@ -20,7 +20,7 @@ pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches<'_>) -> Res .and_then(|dir| dir.parse::<PathBuf>().map_err(|_| ())) .unwrap_or_else(|_| { dirs::home_dir() - .map(|home| home.join(".lighthouse").join("testnet")) + .map(|home| home.join(directory::DEFAULT_ROOT_DIR).join("testnet")) .expect("should locate home directory") }); diff --git a/lcli/src/insecure_validators.rs b/lcli/src/insecure_validators.rs index c04f854a5..2a604cffe 100644 --- a/lcli/src/insecure_validators.rs +++ b/lcli/src/insecure_validators.rs @@ -21,7 +21,8 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { for i in 0..validator_count { println!("Validator {}/{}", i + 1, validator_count); - ValidatorBuilder::new(validators_dir.clone(), secrets_dir.clone()) + ValidatorBuilder::new(validators_dir.clone()) + .password_dir(secrets_dir.clone()) .store_withdrawal_keystore(false) .insecure_voting_keypair(i) .map_err(|e| format!("Unable to generate keys: {:?}", e))? 
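The `insecure_validators` hunk above reflects the new `ValidatorBuilder` API in `common/validator_dir`: the secrets directory is no longer a second argument to `new` and is instead supplied through the optional `password_dir` builder method. A minimal sketch of the new call shape, assuming the `insecure_keys` feature, the `Builder as ValidatorBuilder` import alias, and a final `build()` step that this diff does not show (the helper name is hypothetical):

    use std::path::PathBuf;
    use validator_dir::Builder as ValidatorBuilder;

    /// Builds one deterministic, insecurely-keyed validator in `validators_dir`,
    /// writing its voting-keystore password into `secrets_dir`.
    fn build_insecure_validator(
        validators_dir: PathBuf,
        secrets_dir: PathBuf,
        index: usize,
    ) -> Result<(), String> {
        ValidatorBuilder::new(validators_dir)
            // The password dir is now opt-in rather than a constructor argument.
            .password_dir(secrets_dir)
            .store_withdrawal_keystore(false)
            .insecure_voting_keypair(index)
            .map_err(|e| format!("Unable to generate keys: {:?}", e))?
            .build()
            .map_err(|e| format!("Unable to build validator: {:?}", e))?;
        Ok(())
    }

Likewise, the new `Wallet::set_nextaccount` added in `crypto/eth2_wallet` above can only move the account index forward. A sketch of the intended usage under the same caveats (hypothetical helper, wallet construction elided):

    use eth2_wallet::Wallet;

    /// Fast-forwards `nextaccount`, e.g. after recovering validators that were
    /// already generated from this wallet elsewhere. `set_nextaccount` returns
    /// `Err(())` rather than rewinding, which would risk duplicate validators.
    fn skip_to_account(wallet: &mut Wallet, index: u32) -> Result<(), String> {
        wallet
            .set_nextaccount(index)
            .map_err(|_| format!("refusing to rewind below {}", wallet.nextaccount()))
    }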
diff --git a/lcli/src/interop_genesis.rs b/lcli/src/interop_genesis.rs index 9c8609b5c..28cd2625b 100644 --- a/lcli/src/interop_genesis.rs +++ b/lcli/src/interop_genesis.rs @@ -31,7 +31,7 @@ pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches) -> Result< .and_then(|dir| dir.parse::<PathBuf>().map_err(|_| ())) .unwrap_or_else(|_| { dirs::home_dir() - .map(|home| home.join(".lighthouse").join("testnet")) + .map(|home| home.join(directory::DEFAULT_ROOT_DIR).join("testnet")) .expect("should locate home directory") }); diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index 918426e74..fc60e8c98 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -10,7 +10,7 @@ pub fn run<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> { let testnet_dir_path = parse_path_with_default_in_home_dir( matches, "testnet-dir", - PathBuf::from(".lighthouse/testnet"), + PathBuf::from(directory::DEFAULT_ROOT_DIR).join("testnet"), )?; let deposit_contract_address: Address = parse_required(matches, "deposit-contract-address")?; let deposit_contract_deploy_block = parse_required(matches, "deposit-contract-deploy-block")?; diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 5673bde53..4421d50eb 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "0.2.13" +version = "0.3.0" authors = ["Sigma Prime <contact@sigmaprime.io>"] edition = "2018" @@ -16,13 +16,13 @@ milagro = ["bls/milagro"] beacon_node = { "path" = "../beacon_node" } tokio = "0.2.22" slog = { version = "2.5.2", features = ["max_level_trace"] } -sloggers = "1.0.0" +sloggers = "1.0.1" types = { "path" = "../consensus/types" } bls = { path = "../crypto/bls" } -clap = "2.33.0" +clap = "2.33.3" env_logger = "0.7.1" logging = { path = "../common/logging" } -slog-term = "2.5.0" +slog-term = "2.6.0" slog-async = "2.5.0" environment = { path = "./environment" } boot_node = { path = "../boot_node" } @@ -31,9 +31,11 @@ validator_client = { "path" = "../validator_client" } account_manager = { "path" = "../account_manager" } clap_utils = { path = "../common/clap_utils" } eth2_testnet_config = { path = "../common/eth2_testnet_config" } +directory = { path = "../common/directory" } lighthouse_version = { path = "../common/lighthouse_version" } +account_utils = { path = "../common/account_utils" } [dev-dependencies] tempfile = "3.1.0" validator_dir = { path = "../common/validator_dir" } -account_utils = { path = "../common/account_utils" } +slashing_protection = { path = "../validator_client/slashing_protection" } diff --git a/lighthouse/environment/Cargo.toml b/lighthouse/environment/Cargo.toml index 54a1f1f18..7bb3faa6a 100644 --- a/lighthouse/environment/Cargo.toml +++ b/lighthouse/environment/Cargo.toml @@ -7,18 +7,16 @@ edition = "2018" [dependencies] tokio = { version = "0.2.22", features = ["macros"] } slog = { version = "2.5.2", features = ["max_level_trace"] } -sloggers = "1.0.0" +sloggers = "1.0.1" types = { "path" = "../../consensus/types" } eth2_config = { "path" = "../../common/eth2_config" } +task_executor = { "path" = "../../common/task_executor" } eth2_testnet_config = { path = "../../common/eth2_testnet_config" } logging = { path = "../../common/logging" } -slog-term = "2.5.0" +slog-term = "2.6.0" slog-async = "2.5.0" -ctrlc = { version = "3.1.4", features = ["termination"] } +ctrlc = { version = "3.1.6", features = ["termination"] } futures = "0.3.5" parking_lot = "0.11.0" slog-json = "2.3.0" exit-future = "0.2.0" -lazy_static = 
"1.4.0" -lighthouse_metrics = { path = "../../common/lighthouse_metrics" } -discv5 = { version = "0.1.0-alpha.12", features = ["libp2p"] } diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index c175e6cab..a58930842 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -15,7 +15,6 @@ use futures::channel::{ }; use futures::{future, StreamExt}; -pub use executor::TaskExecutor; use slog::{info, o, Drain, Level, Logger}; use sloggers::{null::NullLoggerBuilder, Build}; use std::cell::RefCell; @@ -23,10 +22,9 @@ use std::ffi::OsStr; use std::fs::{rename as FsRename, OpenOptions}; use std::path::PathBuf; use std::time::{SystemTime, UNIX_EPOCH}; +use task_executor::TaskExecutor; use tokio::runtime::{Builder as RuntimeBuilder, Runtime}; use types::{EthSpec, InteropEthSpec, MainnetEthSpec, MinimalEthSpec}; -mod executor; -mod metrics; pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; const LOG_CHANNEL_SIZE: usize = 2048; @@ -311,12 +309,7 @@ impl<E: EthSpec> RuntimeContext<E> { /// The generated service will have the `service_name` in all it's logs. pub fn service_context(&self, service_name: String) -> Self { Self { - executor: TaskExecutor { - handle: self.executor.handle.clone(), - signal_tx: self.executor.signal_tx.clone(), - exit: self.executor.exit.clone(), - log: self.executor.log.new(o!("service" => service_name)), - }, + executor: self.executor.clone_with_name(service_name), eth_spec_instance: self.eth_spec_instance.clone(), eth2_config: self.eth2_config.clone(), } @@ -361,12 +354,12 @@ impl<E: EthSpec> Environment<E> { /// Returns a `Context` where no "service" has been added to the logger output. pub fn core_context(&mut self) -> RuntimeContext<E> { RuntimeContext { - executor: TaskExecutor { - exit: self.exit.clone(), - signal_tx: self.signal_tx.clone(), - handle: self.runtime().handle().clone(), - log: self.log.clone(), - }, + executor: TaskExecutor::new( + self.runtime().handle().clone(), + self.exit.clone(), + self.log.clone(), + self.signal_tx.clone(), + ), eth_spec_instance: self.eth_spec_instance.clone(), eth2_config: self.eth2_config.clone(), } @@ -375,12 +368,12 @@ impl<E: EthSpec> Environment<E> { /// Returns a `Context` where the `service_name` is added to the logger output. pub fn service_context(&mut self, service_name: String) -> RuntimeContext<E> { RuntimeContext { - executor: TaskExecutor { - exit: self.exit.clone(), - signal_tx: self.signal_tx.clone(), - handle: self.runtime().handle().clone(), - log: self.log.new(o!("service" => service_name)), - }, + executor: TaskExecutor::new( + self.runtime().handle().clone(), + self.exit.clone(), + self.log.new(o!("service" => service_name)), + self.signal_tx.clone(), + ), eth_spec_instance: self.eth_spec_instance.clone(), eth2_config: self.eth2_config.clone(), } diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 2df5c3539..714209b9c 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -10,7 +10,6 @@ use std::process::exit; use types::EthSpec; use validator_client::ProductionValidatorClient; -pub const DEFAULT_DATA_DIR: &str = ".lighthouse"; pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; fn bls_library_name() -> &'static str { @@ -91,7 +90,10 @@ fn main() { .short("d") .value_name("DIR") .global(true) - .help("Data directory for lighthouse keys and databases.") + .help( + "Root data directory for lighthouse keys and databases. 
\
+                Defaults to $HOME/.lighthouse/{default-testnet}, \
+                currently $HOME/.lighthouse/medalla")
                 .takes_value(true),
         )
         .arg(
@@ -112,7 +114,7 @@ fn main() {
                 .long("testnet")
                 .value_name("testnet")
                 .help("Name of network lighthouse will connect to")
-                .possible_values(&["medalla", "altona", "spadina"])
+                .possible_values(&["medalla", "altona", "spadina", "zinken"])
                 .conflicts_with("testnet-dir")
                 .takes_value(true)
                 .global(true)
@@ -255,61 +257,63 @@ fn run<E: EthSpec>(
         "name" => testnet_name
     );
 
-    let beacon_node = if let Some(sub_matches) = matches.subcommand_matches("beacon_node") {
-        let runtime_context = environment.core_context();
+    match matches.subcommand() {
+        ("beacon_node", Some(matches)) => {
+            let context = environment.core_context();
+            let log = context.log().clone();
+            let executor = context.executor.clone();
+            let config = beacon_node::get_config::<E>(
+                matches,
+                &context.eth2_config.spec_constants,
+                &context.eth2_config().spec,
+                context.log().clone(),
+            )?;
+            environment.runtime().spawn(async move {
+                if let Err(e) = ProductionBeaconNode::new(context.clone(), config).await {
+                    crit!(log, "Failed to start beacon node"; "reason" => e);
+                    // Ignore the error since it always occurs during normal operation when
+                    // shutting down.
+                    let _ = executor
+                        .shutdown_sender()
+                        .try_send("Failed to start beacon node");
+                }
+            })
+        }
+        ("validator_client", Some(matches)) => {
+            let context = environment.core_context();
+            let log = context.log().clone();
+            let executor = context.executor.clone();
+            let config = validator_client::Config::from_cli(&matches, context.log())
+                .map_err(|e| format!("Unable to initialize validator config: {}", e))?;
+            environment.runtime().spawn(async move {
+                let run = async {
+                    ProductionValidatorClient::new(context, config)
+                        .await?
+                        .start_service()?;
 
-    let beacon = environment
-        .runtime()
-        .block_on(ProductionBeaconNode::new_from_cli(
-            runtime_context,
-            sub_matches,
-        ))
-        .map_err(|e| format!("Failed to start beacon node: {}", e))?;
-
-    Some(beacon)
-    } else {
-        None
+                    Ok::<(), String>(())
+                };
+                if let Err(e) = run.await {
+                    crit!(log, "Failed to start validator client"; "reason" => e);
+                    // Ignore the error since it always occurs during normal operation when
+                    // shutting down.
+                    let _ = executor
+                        .shutdown_sender()
+                        .try_send("Failed to start validator client");
+                }
+            })
+        }
+        _ => {
+            crit!(log, "No subcommand supplied. See --help.");
+            return Err("No subcommand supplied.".into());
+        }
     };
 
-    let validator_client = if let Some(sub_matches) = matches.subcommand_matches("validator_client")
-    {
-        let runtime_context = environment.core_context();
-
-        let mut validator = environment
-            .runtime()
-            .block_on(ProductionValidatorClient::new_from_cli(
-                runtime_context,
-                sub_matches,
-            ))
-            .map_err(|e| format!("Failed to init validator client: {}", e))?;
-
-        environment
-            .core_context()
-            .executor
-            .runtime_handle()
-            .enter(|| {
-                validator
-                    .start_service()
-                    .map_err(|e| format!("Failed to start validator client service: {}", e))
-            })?;
-
-        Some(validator)
-    } else {
-        None
-    };
-
-    if beacon_node.is_none() && validator_client.is_none() {
-        crit!(log, "No subcommand supplied. See --help .");
-        return Err("No subcommand supplied.".into());
-    }
-
     // Block this thread until we get a ctrl-c or a task sends a shutdown signal.
     environment.block_until_shutdown_requested()?;
 
     info!(log, "Shutting down..");
 
     environment.fire_signal();
 
-    drop(beacon_node);
-    drop(validator_client);
-
     // Shutdown the environment once all tasks have completed.
environment.shutdown_on_idle(); diff --git a/lighthouse/tests/account_manager.rs b/lighthouse/tests/account_manager.rs index f5c473034..4ce7bb8ac 100644 --- a/lighthouse/tests/account_manager.rs +++ b/lighthouse/tests/account_manager.rs @@ -11,13 +11,14 @@ use account_manager::{ list::CMD as LIST_CMD, CMD as WALLET_CMD, }, - BASE_DIR_FLAG, CMD as ACCOUNT_CMD, *, + CMD as ACCOUNT_CMD, WALLETS_DIR_FLAG, *, }; use account_utils::{ eth2_keystore::KeystoreBuilder, validator_definitions::{SigningDefinition, ValidatorDefinition, ValidatorDefinitions}, ZeroizeString, }; +use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; use std::env; use std::fs::{self, File}; use std::io::{BufRead, BufReader, Write}; @@ -25,7 +26,7 @@ use std::path::{Path, PathBuf}; use std::process::{Command, Output, Stdio}; use std::str::from_utf8; use tempfile::{tempdir, TempDir}; -use types::Keypair; +use types::{Keypair, PublicKey}; use validator_dir::ValidatorDir; // TODO: create tests for the `lighthouse account validator deposit` command. This involves getting @@ -69,11 +70,28 @@ fn dir_child_count<P: AsRef<Path>>(dir: P) -> usize { fs::read_dir(dir).expect("should read dir").count() } +/// Returns the number of 0x-prefixed children in a directory +/// i.e. validators in the validators dir. +fn dir_validator_count<P: AsRef<Path>>(dir: P) -> usize { + fs::read_dir(dir) + .unwrap() + .filter(|c| { + c.as_ref() + .unwrap() + .path() + .file_name() + .unwrap() + .to_string_lossy() + .starts_with("0x") + }) + .count() +} + /// Uses `lighthouse account wallet list` to list all wallets. fn list_wallets<P: AsRef<Path>>(base_dir: P) -> Vec<String> { let output = output_result( wallet_cmd() - .arg(format!("--{}", BASE_DIR_FLAG)) + .arg(format!("--{}", WALLETS_DIR_FLAG)) .arg(base_dir.as_ref().as_os_str()) .arg(LIST_CMD), ) @@ -97,7 +115,7 @@ fn create_wallet<P: AsRef<Path>>( ) -> Result<Output, String> { output_result( wallet_cmd() - .arg(format!("--{}", BASE_DIR_FLAG)) + .arg(format!("--{}", WALLETS_DIR_FLAG)) .arg(base_dir.as_ref().as_os_str()) .arg(CREATE_CMD) .arg(format!("--{}", NAME_FLAG)) @@ -233,15 +251,15 @@ impl TestValidator { store_withdrawal_key: bool, ) -> Result<Vec<String>, String> { let mut cmd = validator_cmd(); - cmd.arg(format!("--{}", BASE_DIR_FLAG)) - .arg(self.wallet.base_dir().into_os_string()) + cmd.arg(format!("--{}", VALIDATOR_DIR_FLAG)) + .arg(self.validator_dir.clone().into_os_string()) .arg(CREATE_CMD) + .arg(format!("--{}", WALLETS_DIR_FLAG)) + .arg(self.wallet.base_dir().into_os_string()) .arg(format!("--{}", WALLET_NAME_FLAG)) .arg(&self.wallet.name) .arg(format!("--{}", WALLET_PASSWORD_FLAG)) .arg(self.wallet.password_path().into_os_string()) - .arg(format!("--{}", VALIDATOR_DIR_FLAG)) - .arg(self.validator_dir.clone().into_os_string()) .arg(format!("--{}", SECRETS_DIR_FLAG)) .arg(self.secrets_dir.clone().into_os_string()) .arg(format!("--{}", DEPOSIT_GWEI_FLAG)) @@ -328,19 +346,30 @@ fn validator_create() { let wallet = TestWallet::new(base_dir.path(), "wally"); wallet.create_expect_success(); - assert_eq!(dir_child_count(validator_dir.path()), 0); + assert_eq!(dir_validator_count(validator_dir.path()), 0); let validator = TestValidator::new(validator_dir.path(), secrets_dir.path(), wallet); // Create a validator _without_ storing the withdraw key. 
- validator.create_expect_success(COUNT_FLAG, 1, false); + let created_validators = validator.create_expect_success(COUNT_FLAG, 1, false); - assert_eq!(dir_child_count(validator_dir.path()), 1); + // Validator should be registered with slashing protection. + check_slashing_protection( + &validator_dir, + created_validators + .iter() + .map(|v| v.voting_keypair(&secrets_dir).unwrap().pk), + ); + drop(created_validators); + + // Number of dir entries should be #validators + 1 for the slashing protection DB + assert_eq!(dir_validator_count(validator_dir.path()), 1); + assert_eq!(dir_child_count(validator_dir.path()), 2); // Create a validator storing the withdraw key. validator.create_expect_success(COUNT_FLAG, 1, true); - assert_eq!(dir_child_count(validator_dir.path()), 2); + assert_eq!(dir_validator_count(validator_dir.path()), 2); // Use the at-most flag with less validators then are in the directory. assert_eq!( @@ -348,7 +377,7 @@ fn validator_create() { 0 ); - assert_eq!(dir_child_count(validator_dir.path()), 2); + assert_eq!(dir_validator_count(validator_dir.path()), 2); // Use the at-most flag with the same number of validators that are in the directory. assert_eq!( @@ -356,7 +385,7 @@ fn validator_create() { 0 ); - assert_eq!(dir_child_count(validator_dir.path()), 2); + assert_eq!(dir_validator_count(validator_dir.path()), 2); // Use the at-most flag with two more number of validators than are in the directory. assert_eq!( @@ -364,7 +393,7 @@ fn validator_create() { 2 ); - assert_eq!(dir_child_count(validator_dir.path()), 4); + assert_eq!(dir_validator_count(validator_dir.path()), 4); // Create multiple validators with the count flag. assert_eq!( @@ -372,14 +401,7 @@ fn validator_create() { 2 ); - assert_eq!(dir_child_count(validator_dir.path()), 6); -} - -/// Returns the `lighthouse account validator import` command. -fn validator_import_cmd() -> Command { - let mut cmd = validator_cmd(); - cmd.arg(IMPORT_CMD); - cmd + assert_eq!(dir_validator_count(validator_dir.path()), 6); } #[test] @@ -407,12 +429,13 @@ fn validator_import_launchpad() { // Create a not-keystore file in the src dir. File::create(src_dir.path().join(NOT_KEYSTORE_NAME)).unwrap(); - let mut child = validator_import_cmd() + let mut child = validator_cmd() + .arg(format!("--{}", VALIDATOR_DIR_FLAG)) + .arg(dst_dir.path().as_os_str()) + .arg(IMPORT_CMD) .arg(format!("--{}", STDIN_INPUTS_FLAG)) // Using tty does not work well with tests. .arg(format!("--{}", import::DIR_FLAG)) .arg(src_dir.path().as_os_str()) - .arg(format!("--{}", VALIDATOR_DIR_FLAG)) - .arg(dst_dir.path().as_os_str()) .stderr(Stdio::piped()) .stdin(Stdio::piped()) .spawn() @@ -451,10 +474,14 @@ fn validator_import_launchpad() { "not-keystore should not be present in dst dir" ); + // Validator should be registered with slashing protection. + check_slashing_protection(&dst_dir, std::iter::once(keystore.public_key().unwrap())); + let defs = ValidatorDefinitions::open(&dst_dir).unwrap(); let expected_def = ValidatorDefinition { enabled: true, + description: "".into(), voting_public_key: keystore.public_key().unwrap(), signing_definition: SigningDefinition::LocalKeystore { voting_keystore_path, @@ -468,3 +495,12 @@ fn validator_import_launchpad() { "validator defs file should be accurate" ); } + +/// Check that all of the given pubkeys have been registered with slashing protection. 
+fn check_slashing_protection(validator_dir: &TempDir, pubkeys: impl Iterator<Item = PublicKey>) { + let slashing_db_path = validator_dir.path().join(SLASHING_PROTECTION_FILENAME); + let slashing_db = SlashingDatabase::open(&slashing_db_path).unwrap(); + for validator_pk in pubkeys { + slashing_db.get_validator_id(&validator_pk).unwrap(); + } +} diff --git a/scripts/local_testnet/validator_client.sh b/scripts/local_testnet/validator_client.sh index aab44045a..b68993637 100755 --- a/scripts/local_testnet/validator_client.sh +++ b/scripts/local_testnet/validator_client.sh @@ -15,4 +15,4 @@ exec lighthouse \ --datadir $VALIDATORS_DIR \ --secrets-dir $SECRETS_DIR \ --testnet-dir $TESTNET_DIR \ - --auto-register + --init-slashing-protection diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index 9d7dfd81c..0c701b9cd 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -13,16 +13,16 @@ fake_crypto = ["bls/fake_crypto"] [dependencies] bls = { path = "../../crypto/bls", default-features = false } compare_fields = { path = "../../common/compare_fields" } -ethereum-types = "0.9.1" +ethereum-types = "0.9.2" hex = "0.4.2" -rayon = "1.3.0" -serde = "1.0.110" -serde_derive = "1.0.110" -serde_repr = "0.1.5" -serde_yaml = "0.8.11" +rayon = "1.4.1" +serde = "1.0.116" +serde_derive = "1.0.116" +serde_repr = "0.1.6" +serde_yaml = "0.8.13" eth2_ssz = "0.1.2" eth2_ssz_derive = "0.1.0" -tree_hash = "0.1.0" +tree_hash = "0.1.1" tree_hash_derive = "0.2.0" cached_tree_hash = { path = "../../consensus/cached_tree_hash" } state_processing = { path = "../../consensus/state_processing" } diff --git a/testing/eth1_test_rig/Cargo.toml b/testing/eth1_test_rig/Cargo.toml index cd0d1e858..d6a203b66 100644 --- a/testing/eth1_test_rig/Cargo.toml +++ b/testing/eth1_test_rig/Cargo.toml @@ -9,5 +9,5 @@ tokio = { version = "0.2.22", features = ["time"] } web3 = "0.11.0" futures = { version = "0.3.5", features = ["compat"] } types = { path = "../../consensus/types"} -serde_json = "1.0.52" +serde_json = "1.0.58" deposit_contract = { path = "../../common/deposit_contract"} diff --git a/testing/node_test_rig/Cargo.toml b/testing/node_test_rig/Cargo.toml index a48f24f3f..cfbd92620 100644 --- a/testing/node_test_rig/Cargo.toml +++ b/testing/node_test_rig/Cargo.toml @@ -10,11 +10,11 @@ beacon_node = { path = "../../beacon_node" } types = { path = "../../consensus/types" } eth2_config = { path = "../../common/eth2_config" } tempdir = "0.3.7" -reqwest = { version = "0.10.4", features = ["native-tls-vendored"] } +reqwest = { version = "0.10.8", features = ["native-tls-vendored"] } url = "2.1.1" -serde = "1.0.110" +serde = "1.0.116" futures = "0.3.5" genesis = { path = "../../beacon_node/genesis" } -remote_beacon_node = { path = "../../common/remote_beacon_node" } +eth2 = { path = "../../common/eth2" } validator_client = { path = "../../validator_client" } validator_dir = { path = "../../common/validator_dir", features = ["insecure_keys"] } diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index 9459a07b5..2c2a0e81f 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -4,7 +4,12 @@ use beacon_node::ProductionBeaconNode; use environment::RuntimeContext; +use eth2::{ + reqwest::{ClientBuilder, Url}, + BeaconNodeHttpClient, +}; use std::path::PathBuf; +use std::time::Duration; use std::time::{SystemTime, UNIX_EPOCH}; use tempdir::TempDir; use types::EthSpec; @@ -13,9 +18,12 @@ use 
validator_dir::insecure_keys::build_deterministic_validator_dirs; pub use beacon_node::{ClientConfig, ClientGenesis, ProductionClient}; pub use environment; -pub use remote_beacon_node::RemoteBeaconNode; +pub use eth2; pub use validator_client::Config as ValidatorConfig; +/// The global timeout for HTTP requests to the beacon node. +const HTTP_TIMEOUT: Duration = Duration::from_secs(4); + /// Provides a beacon node that is running in the current process on a given tokio executor (it /// is _local_ to this process). /// @@ -52,16 +60,23 @@ impl<E: EthSpec> LocalBeaconNode<E> { impl<E: EthSpec> LocalBeaconNode<E> { /// Returns a `RemoteBeaconNode` that can connect to `self`. Useful for testing the node as if /// it were external this process. - pub fn remote_node(&self) -> Result<RemoteBeaconNode<E>, String> { - let socket_addr = self + pub fn remote_node(&self) -> Result<BeaconNodeHttpClient, String> { + let listen_addr = self .client - .http_listen_addr() + .http_api_listen_addr() .ok_or_else(|| "A remote beacon node must have a http server".to_string())?; - Ok(RemoteBeaconNode::new(format!( - "http://{}:{}", - socket_addr.ip(), - socket_addr.port() - ))?) + + let beacon_node_url: Url = format!("http://{}:{}", listen_addr.ip(), listen_addr.port()) + .parse() + .map_err(|e| format!("Unable to parse beacon node URL: {:?}", e))?; + let beacon_node_http_client = ClientBuilder::new() + .timeout(HTTP_TIMEOUT) + .build() + .map_err(|e| format!("Unable to build HTTP client: {:?}", e))?; + Ok(BeaconNodeHttpClient::from_components( + beacon_node_url, + beacon_node_http_client, + )) } } @@ -71,8 +86,8 @@ pub fn testing_client_config() -> ClientConfig { // Setting ports to `0` means that the OS will choose some available port. client_config.network.libp2p_port = 0; client_config.network.discovery_port = 0; - client_config.rest_api.enabled = true; - client_config.rest_api.port = 0; + client_config.http_api.enabled = true; + client_config.http_api.listen_port = 0; client_config.websocket_server.enabled = true; client_config.websocket_server.port = 0; @@ -91,12 +106,20 @@ pub fn testing_client_config() -> ClientConfig { client_config } +pub fn testing_validator_config() -> ValidatorConfig { + ValidatorConfig { + init_slashing_protection: true, + disable_auto_discover: false, + ..ValidatorConfig::default() + } +} + /// Contains the directories for a `LocalValidatorClient`. /// /// This struct is separate to `LocalValidatorClient` to allow for pre-computation of validator /// keypairs since the task is quite resource intensive. 
pub struct ValidatorFiles { - pub datadir: TempDir, + pub validator_dir: TempDir, pub secrets_dir: TempDir, } @@ -110,7 +133,7 @@ impl ValidatorFiles { .map_err(|e| format!("Unable to create VC secrets dir: {:?}", e))?; Ok(Self { - datadir, + validator_dir: datadir, secrets_dir, }) } @@ -120,7 +143,7 @@ impl ValidatorFiles { let this = Self::new()?; build_deterministic_validator_dirs( - this.datadir.path().into(), + this.validator_dir.path().into(), this.secrets_dir.path().into(), keypair_indices, ) @@ -170,7 +193,7 @@ impl<E: EthSpec> LocalValidatorClient<E> { mut config: ValidatorConfig, files: ValidatorFiles, ) -> Result<Self, String> { - config.data_dir = files.datadir.path().into(); + config.validator_dir = files.validator_dir.path().into(); config.secrets_dir = files.secrets_dir.path().into(); ProductionValidatorClient::new(context, config) diff --git a/testing/simulator/Cargo.toml b/testing/simulator/Cargo.toml index 773d56bcb..22174ba7f 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -16,5 +16,5 @@ futures = "0.3.5" tokio = "0.2.22" eth1_test_rig = { path = "../eth1_test_rig" } env_logger = "0.7.1" -clap = "2.33.0" -rayon = "1.3.0" +clap = "2.33.3" +rayon = "1.4.1" diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index 43ceaa14f..e755c9005 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -1,4 +1,5 @@ use crate::local_network::LocalNetwork; +use node_test_rig::eth2::types::StateId; use std::time::Duration; use types::{Epoch, EthSpec, Slot, Unsigned}; @@ -65,11 +66,9 @@ pub async fn verify_all_finalized_at<E: EthSpec>( for remote_node in network.remote_nodes()? { epochs.push( remote_node - .http - .beacon() - .get_head() + .get_beacon_states_finality_checkpoints(StateId::Head) .await - .map(|head| head.finalized_slot.epoch(E::slots_per_epoch())) + .map(|body| body.unwrap().data.finalized.epoch) .map_err(|e| format!("Get head via http failed: {:?}", e))?, ); } @@ -95,17 +94,10 @@ async fn verify_validator_count<E: EthSpec>( let validator_counts = { let mut validator_counts = Vec::new(); for remote_node in network.remote_nodes()? { - let beacon = remote_node.http.beacon(); - - let head = beacon - .get_head() + let vc = remote_node + .get_debug_beacon_states::<E>(StateId::Head) .await - .map_err(|e| format!("Get head via http failed: {:?}", e))?; - - let vc = beacon - .get_state_by_root(head.state_root) - .await - .map(|(state, _root)| state) + .map(|body| body.unwrap().data) .map_err(|e| format!("Get state root via http failed: {:?}", e))? .validators .len(); diff --git a/testing/simulator/src/cli.rs b/testing/simulator/src/cli.rs index de78aaa05..81ee5abb5 100644 --- a/testing/simulator/src/cli.rs +++ b/testing/simulator/src/cli.rs @@ -34,8 +34,8 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .short("s") .long("speed_up_factor") .takes_value(true) - .default_value("4") - .help("Speed up factor")) + .default_value("3") + .help("Speed up factor. 
Please use a divisor of 12.")) .arg(Arg::with_name("continue_after_checks") .short("c") .long("continue_after_checks") @@ -62,7 +62,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .short("s") .long("speed_up_factor") .takes_value(true) - .default_value("4") + .default_value("3") .help("Speed up factor")) .arg(Arg::with_name("continue_after_checks") .short("c") diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 75f2256a1..acf4fe845 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -4,8 +4,8 @@ use eth1::http::Eth1NetworkId; use eth1_test_rig::GanacheEth1Instance; use futures::prelude::*; use node_test_rig::{ - environment::EnvironmentBuilder, testing_client_config, ClientGenesis, ValidatorConfig, - ValidatorFiles, + environment::EnvironmentBuilder, testing_client_config, testing_validator_config, + ClientGenesis, ValidatorFiles, }; use rayon::prelude::*; use std::net::{IpAddr, Ipv4Addr}; @@ -128,14 +128,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { */ for (i, files) in validator_files.into_iter().enumerate() { network - .add_validator_client( - ValidatorConfig { - disable_auto_discover: false, - ..ValidatorConfig::default() - }, - i, - files, - ) + .add_validator_client(testing_validator_config(), i, files) .await?; } diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index 37ce3ab56..ed17f69ee 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -1,6 +1,7 @@ use node_test_rig::{ - environment::RuntimeContext, ClientConfig, LocalBeaconNode, LocalValidatorClient, - RemoteBeaconNode, ValidatorConfig, ValidatorFiles, + environment::RuntimeContext, + eth2::{types::StateId, BeaconNodeHttpClient}, + ClientConfig, LocalBeaconNode, LocalValidatorClient, ValidatorConfig, ValidatorFiles, }; use parking_lot::RwLock; use std::ops::Deref; @@ -123,11 +124,11 @@ impl<E: EthSpec> LocalNetwork<E> { .ok_or_else(|| format!("No beacon node for index {}", beacon_node))?; beacon_node .client - .http_listen_addr() + .http_api_listen_addr() .expect("Must have http started") }; - validator_config.http_server = + validator_config.beacon_node = format!("http://{}:{}", socket_addr.ip(), socket_addr.port()); let validator_client = LocalValidatorClient::production_with_insecure_keypairs( context, @@ -140,7 +141,7 @@ impl<E: EthSpec> LocalNetwork<E> { } /// For all beacon nodes in `Self`, return a HTTP client to access each nodes HTTP API. 
- pub fn remote_nodes(&self) -> Result<Vec<RemoteBeaconNode<E>>, String> { + pub fn remote_nodes(&self) -> Result<Vec<BeaconNodeHttpClient>, String> { let beacon_nodes = self.beacon_nodes.read(); beacon_nodes @@ -154,11 +155,9 @@ impl<E: EthSpec> LocalNetwork<E> { let nodes = self.remote_nodes().expect("Failed to get remote nodes"); let bootnode = nodes.first().expect("Should contain bootnode"); bootnode - .http - .beacon() - .get_head() + .get_beacon_states_finality_checkpoints(StateId::Head) .await .map_err(|e| format!("Cannot get head: {:?}", e)) - .map(|head| head.finalized_slot.epoch(E::slots_per_epoch())) + .map(|body| body.unwrap().data.finalized.epoch) } } diff --git a/testing/simulator/src/no_eth1_sim.rs b/testing/simulator/src/no_eth1_sim.rs index 621b8ef3b..afcd96986 100644 --- a/testing/simulator/src/no_eth1_sim.rs +++ b/testing/simulator/src/no_eth1_sim.rs @@ -2,8 +2,8 @@ use crate::{checks, LocalNetwork}; use clap::ArgMatches; use futures::prelude::*; use node_test_rig::{ - environment::EnvironmentBuilder, testing_client_config, ClientGenesis, ValidatorConfig, - ValidatorFiles, + environment::EnvironmentBuilder, testing_client_config, testing_validator_config, + ClientGenesis, ValidatorFiles, }; use rayon::prelude::*; use std::net::{IpAddr, Ipv4Addr}; @@ -99,14 +99,7 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let add_validators_fut = async { for (i, files) in validator_files.into_iter().enumerate() { network - .add_validator_client( - ValidatorConfig { - disable_auto_discover: false, - ..ValidatorConfig::default() - }, - i, - files, - ) + .add_validator_client(testing_validator_config(), i, files) .await?; } diff --git a/testing/simulator/src/sync_sim.rs b/testing/simulator/src/sync_sim.rs index 7583a6eab..47272f626 100644 --- a/testing/simulator/src/sync_sim.rs +++ b/testing/simulator/src/sync_sim.rs @@ -350,11 +350,9 @@ pub async fn check_still_syncing<E: EthSpec>(network: &LocalNetwork<E>) -> Resul for remote_node in network.remote_nodes()? 
{ status.push( remote_node - .http - .node() - .syncing_status() + .get_node_syncing() .await - .map(|status| status.is_syncing) + .map(|body| body.data.is_syncing) .map_err(|e| format!("Get syncing status via http failed: {:?}", e))?, ) } diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 18c5cf4bf..873faca16 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "validator_client" -version = "0.2.13" +version = "0.3.0" authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com>", "Luke Anderson <luke@lukeanderson.com.au>"] edition = "2018" @@ -10,40 +10,52 @@ path = "src/lib.rs" [dev-dependencies] tokio = { version = "0.2.22", features = ["time", "rt-threaded", "macros"] } +tempfile = "3.1.0" +deposit_contract = { path = "../common/deposit_contract" } [dependencies] eth2_ssz = "0.1.2" eth2_config = { path = "../common/eth2_config" } -tree_hash = "0.1.0" -clap = "2.33.0" +tree_hash = "0.1.1" +clap = "2.33.3" eth2_interop_keypairs = { path = "../common/eth2_interop_keypairs" } slashing_protection = { path = "./slashing_protection" } slot_clock = { path = "../common/slot_clock" } -rest_types = { path = "../common/rest_types" } types = { path = "../consensus/types" } -serde = "1.0.110" -serde_derive = "1.0.110" -serde_json = "1.0.52" +serde = "1.0.116" +serde_derive = "1.0.116" +bincode = "1.3.1" +serde_json = "1.0.58" serde_yaml = "0.8.13" slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } slog-async = "2.5.0" -slog-term = "2.5.0" +slog-term = "2.6.0" tokio = { version = "0.2.22", features = ["time"] } futures = { version = "0.3.5", features = ["compat"] } -dirs = "2.0.2" +dirs = "3.0.1" +directory = {path = "../common/directory"} logging = { path = "../common/logging" } environment = { path = "../lighthouse/environment" } parking_lot = "0.11.0" exit-future = "0.2.0" -libc = "0.2.69" +libc = "0.2.79" eth2_ssz_derive = "0.1.0" hex = "0.4.2" deposit_contract = { path = "../common/deposit_contract" } bls = { path = "../crypto/bls" } -remote_beacon_node = { path = "../common/remote_beacon_node" } +eth2 = { path = "../common/eth2" } tempdir = "0.3.7" -rayon = "1.3.0" +rayon = "1.4.1" validator_dir = { path = "../common/validator_dir" } clap_utils = { path = "../common/clap_utils" } eth2_keystore = { path = "../crypto/eth2_keystore" } account_utils = { path = "../common/account_utils" } +lighthouse_version = { path = "../common/lighthouse_version" } +warp_utils = { path = "../common/warp_utils" } +warp = "0.2.5" +hyper = "0.13.8" +serde_utils = { path = "../consensus/serde_utils" } +libsecp256k1 = "0.3.5" +ring = "0.16.12" +rand = "0.7.3" +scrypt = { version = "0.3.0", default-features = false } diff --git a/validator_client/slashing_protection/.gitignore b/validator_client/slashing_protection/.gitignore new file mode 100644 index 000000000..10366122b --- /dev/null +++ b/validator_client/slashing_protection/.gitignore @@ -0,0 +1,2 @@ +interchange-tests +generated-tests diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index 8966cb278..a1abb8556 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -7,11 +7,15 @@ edition = "2018" [dependencies] tempfile = "3.1.0" types = { path = "../../consensus/types" } -tree_hash = { path = "../../consensus/tree_hash" } -rusqlite = { version = "0.23.1", features = ["bundled"] } -r2d2 = "0.8.8" 
-r2d2_sqlite = "0.16.0" +tree_hash = "0.1.1" +rusqlite = { version = "0.24.0", features = ["bundled"] } +r2d2 = "0.8.9" +r2d2_sqlite = "0.17.0" parking_lot = "0.11.0" +serde = "1.0.116" +serde_derive = "1.0.116" +serde_json = "1.0.58" +serde_utils = { path = "../../consensus/serde_utils" } [dev-dependencies] -rayon = "1.3.0" +rayon = "1.4.1" diff --git a/validator_client/slashing_protection/Makefile b/validator_client/slashing_protection/Makefile new file mode 100644 index 000000000..5abb6c0a4 --- /dev/null +++ b/validator_client/slashing_protection/Makefile @@ -0,0 +1,28 @@ +TESTS_TAG := ac393b815b356c95569c028c215232b512df583d +GENERATE_DIR := generated-tests +OUTPUT_DIR := interchange-tests +TARBALL := $(OUTPUT_DIR)-$(TESTS_TAG).tar.gz +ARCHIVE_URL := https://github.com/eth2-clients/slashing-protection-interchange-tests/tarball/$(TESTS_TAG) + +$(OUTPUT_DIR): $(TARBALL) + rm -rf $@ + mkdir $@ + tar --strip-components=1 -xzf $^ -C $@ + +$(TARBALL): + wget $(ARCHIVE_URL) -O $@ + +clean-test-files: + rm -rf $(OUTPUT_DIR) + +clean-archives: + rm -f $(TARBALL) + +generate: + rm -rf $(GENERATE_DIR) + cargo run --release --bin test_generator -- $(GENERATE_DIR) + +clean: clean-test-files clean-archives + +.PHONY: clean clean-archives clean-test-files generate + diff --git a/validator_client/slashing_protection/build.rs b/validator_client/slashing_protection/build.rs new file mode 100644 index 000000000..03abb88b4 --- /dev/null +++ b/validator_client/slashing_protection/build.rs @@ -0,0 +1,7 @@ +fn main() { + let exit_status = std::process::Command::new("make") + .current_dir(std::env::var("CARGO_MANIFEST_DIR").unwrap()) + .status() + .unwrap(); + assert!(exit_status.success()); +} diff --git a/validator_client/slashing_protection/src/bin/test_generator.rs b/validator_client/slashing_protection/src/bin/test_generator.rs new file mode 100644 index 000000000..3522adf3f --- /dev/null +++ b/validator_client/slashing_protection/src/bin/test_generator.rs @@ -0,0 +1,128 @@ +use slashing_protection::interchange::{ + CompleteInterchangeData, Interchange, InterchangeFormat, InterchangeMetadata, + SignedAttestation, SignedBlock, +}; +use slashing_protection::interchange_test::TestCase; +use slashing_protection::test_utils::{pubkey, DEFAULT_GENESIS_VALIDATORS_ROOT}; +use slashing_protection::SUPPORTED_INTERCHANGE_FORMAT_VERSION; +use std::fs::{self, File}; +use std::path::Path; +use types::{Epoch, Hash256, Slot}; + +fn metadata(genesis_validators_root: Hash256) -> InterchangeMetadata { + InterchangeMetadata { + interchange_format: InterchangeFormat::Complete, + interchange_format_version: SUPPORTED_INTERCHANGE_FORMAT_VERSION, + genesis_validators_root, + } +} + +#[allow(clippy::type_complexity)] +fn interchange(data: Vec<(usize, Vec<u64>, Vec<(u64, u64)>)>) -> Interchange { + let data = data + .into_iter() + .map(|(pk, blocks, attestations)| CompleteInterchangeData { + pubkey: pubkey(pk), + signed_blocks: blocks + .into_iter() + .map(|slot| SignedBlock { + slot: Slot::new(slot), + signing_root: None, + }) + .collect(), + signed_attestations: attestations + .into_iter() + .map(|(source, target)| SignedAttestation { + source_epoch: Epoch::new(source), + target_epoch: Epoch::new(target), + signing_root: None, + }) + .collect(), + }) + .collect(); + Interchange { + metadata: metadata(DEFAULT_GENESIS_VALIDATORS_ROOT), + data, + } +} + +fn main() { + let single_validator_blocks = + vec![(0, 32, false), (0, 33, true), (0, 31, false), (0, 1, false)]; + let single_validator_attestations = vec![ + (0, 3, 4, false), + 
(0, 14, 19, false), + (0, 15, 20, false), + (0, 16, 20, false), + (0, 15, 21, true), + ]; + + let tests = vec![ + TestCase::new( + "single_validator_import_only", + interchange(vec![(0, vec![22], vec![(0, 2)])]), + ), + TestCase::new( + "single_validator_single_block", + interchange(vec![(0, vec![32], vec![])]), + ) + .with_blocks(single_validator_blocks.clone()), + TestCase::new( + "single_validator_single_attestation", + interchange(vec![(0, vec![], vec![(15, 20)])]), + ) + .with_attestations(single_validator_attestations.clone()), + TestCase::new( + "single_validator_single_block_and_attestation", + interchange(vec![(0, vec![32], vec![(15, 20)])]), + ) + .with_blocks(single_validator_blocks) + .with_attestations(single_validator_attestations), + TestCase::new( + "single_validator_genesis_attestation", + interchange(vec![(0, vec![], vec![(0, 0)])]), + ) + .with_attestations(vec![(0, 0, 0, false)]), + TestCase::new( + "single_validator_multiple_blocks_and_attestations", + interchange(vec![( + 0, + vec![2, 3, 10, 1200], + vec![(10, 11), (12, 13), (20, 24)], + )]), + ) + .with_blocks(vec![ + (0, 1, false), + (0, 2, false), + (0, 3, false), + (0, 10, false), + (0, 1200, false), + (0, 4, true), + (0, 256, true), + (0, 1201, true), + ]) + .with_attestations(vec![ + (0, 9, 10, false), + (0, 12, 13, false), + (0, 11, 14, false), + (0, 21, 22, false), + (0, 10, 24, false), + (0, 11, 12, true), + (0, 20, 25, true), + ]), + TestCase::new("wrong_genesis_validators_root", interchange(vec![])) + .gvr(Hash256::from_low_u64_be(1)) + .should_fail(), + ]; + // TODO: multi-validator test + + let args = std::env::args().collect::<Vec<_>>(); + let output_dir = Path::new(&args[1]); + fs::create_dir_all(output_dir).unwrap(); + + for test in tests { + test.run(); + let f = File::create(output_dir.join(format!("{}.json", test.name))).unwrap(); + serde_json::to_writer(f, &test).unwrap(); + } +} diff --git a/validator_client/slashing_protection/src/interchange.rs b/validator_client/slashing_protection/src/interchange.rs new file mode 100644 index 000000000..71f678c59 --- /dev/null +++ b/validator_client/slashing_protection/src/interchange.rs @@ -0,0 +1,84 @@ +use serde_derive::{Deserialize, Serialize}; +use std::collections::HashSet; +use std::iter::FromIterator; +use types::{Epoch, Hash256, PublicKey, Slot}; + +#[derive(Debug, Clone, Copy, PartialEq, Deserialize, Serialize)] +#[serde(rename_all = "lowercase")] +pub enum InterchangeFormat { + Complete, +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct InterchangeMetadata { + pub interchange_format: InterchangeFormat, + #[serde(with = "serde_utils::quoted_u64::require_quotes")] + pub interchange_format_version: u64, + pub genesis_validators_root: Hash256, +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct CompleteInterchangeData { + pub pubkey: PublicKey, + pub signed_blocks: Vec<SignedBlock>, + pub signed_attestations: Vec<SignedAttestation>, +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct SignedBlock { + #[serde(with = "serde_utils::quoted_u64::require_quotes")] + pub slot: Slot, + #[serde(skip_serializing_if = "Option::is_none")] + pub signing_root: Option<Hash256>, +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct SignedAttestation { + #[serde(with = "serde_utils::quoted_u64::require_quotes")] + pub 
source_epoch: Epoch, + #[serde(with = "serde_utils::quoted_u64::require_quotes")] + pub target_epoch: Epoch, + #[serde(skip_serializing_if = "Option::is_none")] + pub signing_root: Option<Hash256>, +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub struct Interchange { + pub metadata: InterchangeMetadata, + pub data: Vec<CompleteInterchangeData>, +} + +impl Interchange { + pub fn from_json_str(json: &str) -> Result<Self, serde_json::Error> { + serde_json::from_str(json) + } + + pub fn from_json_reader(reader: impl std::io::Read) -> Result<Self, serde_json::Error> { + serde_json::from_reader(reader) + } + + pub fn write_to(&self, writer: impl std::io::Write) -> Result<(), serde_json::Error> { + serde_json::to_writer(writer, self) + } + + /// Do these two `Interchange`s contain the same data (ignoring ordering)? + pub fn equiv(&self, other: &Self) -> bool { + let self_set = HashSet::<_>::from_iter(self.data.iter()); + let other_set = HashSet::<_>::from_iter(other.data.iter()); + self.metadata == other.metadata && self_set == other_set + } + + /// The number of entries in `data`. + pub fn len(&self) -> usize { + self.data.len() + } + + /// Is the `data` part of the interchange completely empty? + pub fn is_empty(&self) -> bool { + self.len() == 0 + } +} diff --git a/validator_client/slashing_protection/src/interchange_test.rs b/validator_client/slashing_protection/src/interchange_test.rs new file mode 100644 index 000000000..cbb8c54a9 --- /dev/null +++ b/validator_client/slashing_protection/src/interchange_test.rs @@ -0,0 +1,151 @@ +use crate::{ + interchange::Interchange, + test_utils::{pubkey, DEFAULT_GENESIS_VALIDATORS_ROOT}, + SlashingDatabase, +}; +use serde_derive::{Deserialize, Serialize}; +use tempfile::tempdir; +use types::{Epoch, Hash256, PublicKey, Slot}; + +#[derive(Debug, Clone, Deserialize, Serialize)] +pub struct TestCase { + pub name: String, + pub should_succeed: bool, + pub genesis_validators_root: Hash256, + pub interchange: Interchange, + pub blocks: Vec<TestBlock>, + pub attestations: Vec<TestAttestation>, +} + +#[derive(Debug, Clone, Deserialize, Serialize)] +pub struct TestBlock { + pub pubkey: PublicKey, + pub slot: Slot, + pub should_succeed: bool, +} + +#[derive(Debug, Clone, Deserialize, Serialize)] +pub struct TestAttestation { + pub pubkey: PublicKey, + pub source_epoch: Epoch, + pub target_epoch: Epoch, + pub should_succeed: bool, +} + +impl TestCase { + pub fn new(name: &str, interchange: Interchange) -> Self { + TestCase { + name: name.into(), + should_succeed: true, + genesis_validators_root: DEFAULT_GENESIS_VALIDATORS_ROOT, + interchange, + blocks: vec![], + attestations: vec![], + } + } + + pub fn gvr(mut self, genesis_validators_root: Hash256) -> Self { + self.genesis_validators_root = genesis_validators_root; + self + } + + pub fn should_fail(mut self) -> Self { + self.should_succeed = false; + self + } + + pub fn with_blocks(mut self, blocks: impl IntoIterator<Item = (usize, u64, bool)>) -> Self { + self.blocks.extend( + blocks + .into_iter() + .map(|(pk, slot, should_succeed)| TestBlock { + pubkey: pubkey(pk), + slot: Slot::new(slot), + should_succeed, + }), + ); + self + } + + pub fn with_attestations( + mut self, + attestations: impl IntoIterator<Item = (usize, u64, u64, bool)>, + ) -> Self { + self.attestations.extend(attestations.into_iter().map( + |(pk, source, target, should_succeed)| TestAttestation { + pubkey: pubkey(pk), + source_epoch: Epoch::new(source), + target_epoch: Epoch::new(target), + should_succeed, + }, + )); + self 
+ } + + pub fn run(&self) { + let dir = tempdir().unwrap(); + let slashing_db_file = dir.path().join("slashing_protection.sqlite"); + let slashing_db = SlashingDatabase::create(&slashing_db_file).unwrap(); + + match slashing_db.import_interchange_info(&self.interchange, self.genesis_validators_root) { + Ok(()) if !self.should_succeed => { + panic!( + "test `{}` succeeded on import when it should have failed", + self.name + ); + } + Err(e) if self.should_succeed => { + panic!( + "test `{}` failed on import when it should have succeeded, error: {:?}", + self.name, e + ); + } + _ => (), + } + + for (i, block) in self.blocks.iter().enumerate() { + match slashing_db.check_and_insert_block_signing_root( + &block.pubkey, + block.slot, + Hash256::random(), + ) { + Ok(safe) if !block.should_succeed => { + panic!( + "block {} from `{}` succeeded when it should have failed: {:?}", + i, self.name, safe + ); + } + Err(e) if block.should_succeed => { + panic!( + "block {} from `{}` failed when it should have succeeded: {:?}", + i, self.name, e + ); + } + _ => (), + } + } + + for (i, att) in self.attestations.iter().enumerate() { + match slashing_db.check_and_insert_attestation_signing_root( + &att.pubkey, + att.source_epoch, + att.target_epoch, + Hash256::random(), + ) { + Ok(safe) if !att.should_succeed => { + panic!( + "attestation {} from `{}` succeeded when it should have failed: {:?}", + i, self.name, safe + ); + } + Err(e) if att.should_succeed => { + panic!( + "attestation {} from `{}` failed when it should have succeeded: {:?}", + i, self.name, e + ); + } + _ => (), + } + } + } +} diff --git a/validator_client/slashing_protection/src/lib.rs b/validator_client/slashing_protection/src/lib.rs index 384523495..a576743aa 100644 --- a/validator_client/slashing_protection/src/lib.rs +++ b/validator_client/slashing_protection/src/lib.rs @@ -1,19 +1,25 @@ mod attestation_tests; mod block_tests; +pub mod interchange; +pub mod interchange_test; mod parallel_tests; +mod registration_tests; mod signed_attestation; mod signed_block; mod slashing_database; -mod test_utils; +pub mod test_utils; pub use crate::signed_attestation::{InvalidAttestation, SignedAttestation}; pub use crate::signed_block::{InvalidBlock, SignedBlock}; -pub use crate::slashing_database::SlashingDatabase; +pub use crate::slashing_database::{SlashingDatabase, SUPPORTED_INTERCHANGE_FORMAT_VERSION}; use rusqlite::Error as SQLError; use std::io::{Error as IOError, ErrorKind}; use std::string::ToString; use types::{Hash256, PublicKey}; +/// The filename within the `validators` directory that contains the slashing protection DB. +pub const SLASHING_PROTECTION_FILENAME: &str = "slashing_protection.sqlite"; + /// The attestation or block is not safe to sign. /// /// This could be because it's slashable, or because an error occurred. 
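[Reviewer's note — not part of the patch.] The pieces added above (the public `interchange` module, the `SlashingDatabase` re-exports, and `SLASHING_PROTECTION_FILENAME`) compose into a small import/export workflow. A minimal sketch, assuming only the items shown in this diff; the `migrate` helper name is hypothetical:

use slashing_protection::interchange::Interchange;
use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME};
use std::path::Path;
use types::Hash256;

// Hypothetical helper: import an interchange file into a fresh DB, then
// check that exporting round-trips the same data.
fn migrate(import_json: &str, validators_dir: &Path) -> Result<(), String> {
    // The genesis validators root must match the file's metadata, or
    // `import_interchange_info` returns `GenesisValidatorsMismatch`.
    let gvr = Hash256::zero();
    let interchange =
        Interchange::from_json_str(import_json).map_err(|e| format!("{:?}", e))?;
    let db = SlashingDatabase::create(&validators_dir.join(SLASHING_PROTECTION_FILENAME))
        .map_err(|e| format!("{:?}", e))?;
    // The import is atomic: a failure part-way through leaves no partial records.
    db.import_interchange_info(&interchange, gvr)
        .map_err(|e| format!("{:?}", e))?;
    let exported = db
        .export_interchange_info(gvr)
        .map_err(|e| format!("{:?}", e))?;
    // `equiv` compares the data ignoring ordering, as in the round-trip tests
    // later in this diff.
    assert!(exported.equiv(&interchange));
    Ok(())
}
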
diff --git a/validator_client/slashing_protection/src/registration_tests.rs b/validator_client/slashing_protection/src/registration_tests.rs new file mode 100644 index 000000000..40a3d6ee7 --- /dev/null +++ b/validator_client/slashing_protection/src/registration_tests.rs @@ -0,0 +1,32 @@ +#![cfg(test)] + +use crate::test_utils::*; +use crate::*; +use tempfile::tempdir; + +#[test] +fn double_register_validators() { + let dir = tempdir().unwrap(); + let slashing_db_file = dir.path().join("slashing_protection.sqlite"); + let slashing_db = SlashingDatabase::create(&slashing_db_file).unwrap(); + + let num_validators = 100u32; + let pubkeys = (0..num_validators as usize).map(pubkey).collect::<Vec<_>>(); + + let get_validator_ids = || { + pubkeys + .iter() + .map(|pk| slashing_db.get_validator_id(pk).unwrap()) + .collect::<Vec<_>>() + }; + + assert_eq!(slashing_db.num_validator_rows().unwrap(), 0); + + slashing_db.register_validators(pubkeys.iter()).unwrap(); + assert_eq!(slashing_db.num_validator_rows().unwrap(), num_validators); + let validator_ids = get_validator_ids(); + + slashing_db.register_validators(pubkeys.iter()).unwrap(); + assert_eq!(slashing_db.num_validator_rows().unwrap(), num_validators); + assert_eq!(validator_ids, get_validator_ids()); +} diff --git a/validator_client/slashing_protection/src/signed_attestation.rs b/validator_client/slashing_protection/src/signed_attestation.rs index 3ab586e4e..1c8020614 100644 --- a/validator_client/slashing_protection/src/signed_attestation.rs +++ b/validator_client/slashing_protection/src/signed_attestation.rs @@ -20,6 +20,18 @@ pub enum InvalidAttestation { PrevSurroundsNew { prev: SignedAttestation }, /// The attestation is invalid because its source epoch is greater than its target epoch. SourceExceedsTarget, + /// The attestation is invalid because its source epoch is less than the lower bound on source + /// epochs for this validator. + SourceLessThanLowerBound { + source_epoch: Epoch, + bound_epoch: Epoch, + }, + /// The attestation is invalid because its target epoch is less than or equal to the lower + /// bound on target epochs for this validator. 
+ TargetLessThanOrEqLowerBound { + target_epoch: Epoch, + bound_epoch: Epoch, + }, } impl SignedAttestation { diff --git a/validator_client/slashing_protection/src/signed_block.rs b/validator_client/slashing_protection/src/signed_block.rs index f299871a6..b31628f43 100644 --- a/validator_client/slashing_protection/src/signed_block.rs +++ b/validator_client/slashing_protection/src/signed_block.rs @@ -12,6 +12,7 @@ pub struct SignedBlock { #[derive(PartialEq, Debug)] pub enum InvalidBlock { DoubleBlockProposal(SignedBlock), + SlotViolatesLowerBound { block_slot: Slot, bound_slot: Slot }, } impl SignedBlock { diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs index cd2413efd..1bfdcf60d 100644 --- a/validator_client/slashing_protection/src/slashing_database.rs +++ b/validator_client/slashing_protection/src/slashing_database.rs @@ -1,12 +1,16 @@ +use crate::interchange::{ + CompleteInterchangeData, Interchange, InterchangeFormat, InterchangeMetadata, + SignedAttestation as InterchangeAttestation, SignedBlock as InterchangeBlock, +}; use crate::signed_attestation::InvalidAttestation; use crate::signed_block::InvalidBlock; -use crate::{NotSafe, Safe, SignedAttestation, SignedBlock}; +use crate::{hash256_from_row, NotSafe, Safe, SignedAttestation, SignedBlock}; use r2d2_sqlite::SqliteConnectionManager; use rusqlite::{params, OptionalExtension, Transaction, TransactionBehavior}; use std::fs::{File, OpenOptions}; use std::path::Path; use std::time::Duration; -use types::{AttestationData, BeaconBlockHeader, Hash256, PublicKey, SignedRoot}; +use types::{AttestationData, BeaconBlockHeader, Epoch, Hash256, PublicKey, SignedRoot, Slot}; type Pool = r2d2::Pool<SqliteConnectionManager>; @@ -20,6 +24,9 @@ pub const CONNECTION_TIMEOUT: Duration = Duration::from_secs(5); #[cfg(test)] pub const CONNECTION_TIMEOUT: Duration = Duration::from_millis(100); +/// Supported version of the interchange format. +pub const SUPPORTED_INTERCHANGE_FORMAT_VERSION: u64 = 4; + #[derive(Debug, Clone)] pub struct SlashingDatabase { conn_pool: Pool, @@ -52,7 +59,7 @@ impl SlashingDatabase { conn.execute( "CREATE TABLE validators ( id INTEGER PRIMARY KEY, - public_key BLOB NOT NULL + public_key BLOB NOT NULL UNIQUE )", params![], )?; @@ -144,30 +151,71 @@ impl SlashingDatabase { ) -> Result<(), NotSafe> { let mut conn = self.conn_pool.get()?; let txn = conn.transaction()?; - { - let mut stmt = txn.prepare("INSERT INTO validators (public_key) VALUES (?1)")?; + self.register_validators_in_txn(public_keys, &txn)?; + txn.commit()?; + Ok(()) + } - for pubkey in public_keys { + /// Register multiple validators inside the given transaction. + /// + /// The caller must commit the transaction for the changes to be persisted. + pub fn register_validators_in_txn<'a>( + &self, + public_keys: impl Iterator<Item = &'a PublicKey>, + txn: &Transaction, + ) -> Result<(), NotSafe> { + let mut stmt = txn.prepare("INSERT INTO validators (public_key) VALUES (?1)")?; + for pubkey in public_keys { + if self.get_validator_id_opt(&txn, pubkey)?.is_none() { stmt.execute(&[pubkey.to_hex_string()])?; } } - txn.commit()?; - Ok(()) } + /// Check that all of the given validators are registered. 
+ pub fn check_validator_registrations<'a>( + &self, + mut public_keys: impl Iterator<Item = &'a PublicKey>, + ) -> Result<(), NotSafe> { + let mut conn = self.conn_pool.get()?; + let txn = conn.transaction()?; + public_keys + .try_for_each(|public_key| self.get_validator_id_in_txn(&txn, public_key).map(|_| ())) + } + /// Get the database-internal ID for a validator. /// /// This is NOT the same as a validator index, and depends on the ordering that validators /// are registered with the slashing protection database (and may vary between machines). - fn get_validator_id(txn: &Transaction, public_key: &PublicKey) -> Result<i64, NotSafe> { - txn.query_row( - "SELECT id FROM validators WHERE public_key = ?1", - params![&public_key.to_hex_string()], - |row| row.get(0), - ) - .optional()? - .ok_or_else(|| NotSafe::UnregisteredValidator(public_key.clone())) + pub fn get_validator_id(&self, public_key: &PublicKey) -> Result<i64, NotSafe> { + let mut conn = self.conn_pool.get()?; + let txn = conn.transaction()?; + self.get_validator_id_in_txn(&txn, public_key) + } + + fn get_validator_id_in_txn( + &self, + txn: &Transaction, + public_key: &PublicKey, + ) -> Result<i64, NotSafe> { + self.get_validator_id_opt(txn, public_key)? + .ok_or_else(|| NotSafe::UnregisteredValidator(public_key.clone())) + } + + /// Optional version of `get_validator_id`. + fn get_validator_id_opt( + &self, + txn: &Transaction, + public_key: &PublicKey, + ) -> Result<Option<i64>, NotSafe> { + Ok(txn + .query_row( + "SELECT id FROM validators WHERE public_key = ?1", + params![&public_key.to_hex_string()], + |row| row.get(0), + ) + .optional()?) } /// Check a block proposal from `validator_pubkey` for slash safety. @@ -175,10 +223,10 @@ impl SlashingDatabase { &self, txn: &Transaction, validator_pubkey: &PublicKey, - block_header: &BeaconBlockHeader, - domain: Hash256, + slot: Slot, + signing_root: Hash256, ) -> Result<Safe, NotSafe> { - let validator_id = Self::get_validator_id(txn, validator_pubkey)?; + let validator_id = self.get_validator_id_in_txn(txn, validator_pubkey)?; let existing_block = txn .prepare( @@ -186,25 +234,37 @@ impl SlashingDatabase { FROM signed_blocks WHERE validator_id = ?1 AND slot = ?2", )? - .query_row( - params![validator_id, block_header.slot], - SignedBlock::from_row, - ) + .query_row(params![validator_id, slot], SignedBlock::from_row) .optional()?; if let Some(existing_block) = existing_block { - if existing_block.signing_root == block_header.signing_root(domain) { + if existing_block.signing_root == signing_root { // Same slot and same hash -> we're re-broadcasting a previously signed block - Ok(Safe::SameData) + return Ok(Safe::SameData); } else { // Same epoch but not the same hash -> it's a DoubleBlockProposal - Err(NotSafe::InvalidBlock(InvalidBlock::DoubleBlockProposal( + return Err(NotSafe::InvalidBlock(InvalidBlock::DoubleBlockProposal( existing_block, - ))) + ))); } - } else { - Ok(Safe::Valid) } + + let min_slot = txn + .prepare("SELECT MIN(slot) FROM signed_blocks WHERE validator_id = ?1")? + .query_row(params![validator_id], |row| row.get(0))?; + + if let Some(min_slot) = min_slot { + if slot <= min_slot { + return Err(NotSafe::InvalidBlock( + InvalidBlock::SlotViolatesLowerBound { + block_slot: slot, + bound_slot: min_slot, + }, + )); + } + } + + Ok(Safe::Valid) } /// Check an attestation from `validator_pubkey` for slash safety. 
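[Reviewer's note — not part of the patch.] The hunk above adds the block lower-bound rule: once the exact-slot case has been handled (same signing root means `SameData`, a different one means `DoubleBlockProposal`), any proposal at a slot less than or equal to MIN(slot) for that validator is rejected. This is what makes pruning and interchange imports safe: history below the minimum may be missing, so a signature there cannot be proven non-slashable. A standalone restatement of the rule, with hypothetical names:

// Illustrative only; `existing_slots` stands in for the validator's
// `signed_blocks` rows.
fn violates_block_lower_bound(existing_slots: &[u64], candidate_slot: u64) -> bool {
    // Mirrors `SELECT MIN(slot) FROM signed_blocks WHERE validator_id = ?1`:
    // reject anything at or below the minimum recorded slot.
    existing_slots
        .iter()
        .min()
        .map_or(false, |&min_slot| candidate_slot <= min_slot)
}

fn main() {
    assert!(violates_block_lower_bound(&[10, 20], 10)); // at the bound: rejected
    assert!(violates_block_lower_bound(&[10, 20], 5)); // below the bound: rejected
    assert!(!violates_block_lower_bound(&[10, 20], 21)); // above the bound: allowed
    assert!(!violates_block_lower_bound(&[], 5)); // empty history: allowed
}
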
@@ -212,12 +272,10 @@ impl SlashingDatabase { &self, txn: &Transaction, validator_pubkey: &PublicKey, - attestation: &AttestationData, - domain: Hash256, + att_source_epoch: Epoch, + att_target_epoch: Epoch, + att_signing_root: Hash256, ) -> Result<Safe, NotSafe> { - let att_source_epoch = attestation.source.epoch; - let att_target_epoch = attestation.target.epoch; - // Although it's not required to avoid slashing, we disallow attestations // which are obviously invalid by virtue of their source epoch exceeding their target. if att_source_epoch > att_target_epoch { @@ -226,10 +284,10 @@ impl SlashingDatabase { )); } - let validator_id = Self::get_validator_id(txn, validator_pubkey)?; + let validator_id = self.get_validator_id_in_txn(txn, validator_pubkey)?; - // 1. Check for a double vote. Namely, an existing attestation with the same target epoch, - // and a different signing root. + // Check for a double vote. Namely, an existing attestation with the same target epoch, + // and a different signing root. let same_target_att = txn .prepare( "SELECT source_epoch, target_epoch, signing_root @@ -245,7 +303,7 @@ impl SlashingDatabase { if let Some(existing_attestation) = same_target_att { // If the new attestation is identical to the existing attestation, then we already // know that it is safe, and can return immediately. - if existing_attestation.signing_root == attestation.signing_root(domain) { + if existing_attestation.signing_root == att_signing_root { return Ok(Safe::SameData); // Otherwise if the hashes are different, this is a double vote. } else { @@ -255,7 +313,7 @@ impl SlashingDatabase { } } - // 2. Check that no previous vote is surrounding `attestation`. + // Check that no previous vote is surrounding `attestation`. // If there is a surrounding attestation, we only return the most recent one. let surrounding_attestation = txn .prepare( @@ -277,7 +335,7 @@ impl SlashingDatabase { )); } - // 3. Check that no previous vote is surrounded by `attestation`. + // Check that no previous vote is surrounded by `attestation`. // If there is a surrounded attestation, we only return the most recent one. let surrounded_attestation = txn .prepare( @@ -299,6 +357,39 @@ impl SlashingDatabase { )); } + // Check lower bounds: ensure that source is greater than or equal to min source, + // and target is greater than min target. This allows pruning, and compatibility + // with the interchange format. + let min_source = txn + .prepare("SELECT MIN(source_epoch) FROM signed_attestations WHERE validator_id = ?1")? + .query_row(params![validator_id], |row| row.get(0))?; + + if let Some(min_source) = min_source { + if att_source_epoch < min_source { + return Err(NotSafe::InvalidAttestation( + InvalidAttestation::SourceLessThanLowerBound { + source_epoch: att_source_epoch, + bound_epoch: min_source, + }, + )); + } + } + + let min_target = txn + .prepare("SELECT MIN(target_epoch) FROM signed_attestations WHERE validator_id = ?1")? 
+ .query_row(params![validator_id], |row| row.get(0))?; + + if let Some(min_target) = min_target { + if att_target_epoch <= min_target { + return Err(NotSafe::InvalidAttestation( + InvalidAttestation::TargetLessThanOrEqLowerBound { + target_epoch: att_target_epoch, + bound_epoch: min_target, + }, + )); + } + } + // Everything has been checked, return Valid Ok(Safe::Valid) } @@ -311,19 +402,15 @@ impl SlashingDatabase { &self, txn: &Transaction, validator_pubkey: &PublicKey, - block_header: &BeaconBlockHeader, - domain: Hash256, + slot: Slot, + signing_root: Hash256, ) -> Result<(), NotSafe> { - let validator_id = Self::get_validator_id(txn, validator_pubkey)?; + let validator_id = self.get_validator_id_in_txn(txn, validator_pubkey)?; txn.execute( "INSERT INTO signed_blocks (validator_id, slot, signing_root) VALUES (?1, ?2, ?3)", - params![ - validator_id, - block_header.slot, - block_header.signing_root(domain).as_bytes() - ], + params![validator_id, slot, signing_root.as_bytes()], )?; Ok(()) } @@ -336,19 +423,20 @@ impl SlashingDatabase { &self, txn: &Transaction, validator_pubkey: &PublicKey, - attestation: &AttestationData, - domain: Hash256, + att_source_epoch: Epoch, + att_target_epoch: Epoch, + att_signing_root: Hash256, ) -> Result<(), NotSafe> { - let validator_id = Self::get_validator_id(txn, validator_pubkey)?; + let validator_id = self.get_validator_id_in_txn(txn, validator_pubkey)?; txn.execute( "INSERT INTO signed_attestations (validator_id, source_epoch, target_epoch, signing_root) VALUES (?1, ?2, ?3, ?4)", params![ validator_id, - attestation.source.epoch, - attestation.target.epoch, - attestation.signing_root(domain).as_bytes() + att_source_epoch, + att_target_epoch, + att_signing_root.as_bytes() ], )?; Ok(()) @@ -365,17 +453,46 @@ impl SlashingDatabase { validator_pubkey: &PublicKey, block_header: &BeaconBlockHeader, domain: Hash256, + ) -> Result<Safe, NotSafe> { + self.check_and_insert_block_signing_root( + validator_pubkey, + block_header.slot, + block_header.signing_root(domain), + ) + } + + /// As for `check_and_insert_block_proposal` but without requiring the whole `BeaconBlockHeader`. + pub fn check_and_insert_block_signing_root( + &self, + validator_pubkey: &PublicKey, + slot: Slot, + signing_root: Hash256, ) -> Result<Safe, NotSafe> { let mut conn = self.conn_pool.get()?; let txn = conn.transaction_with_behavior(TransactionBehavior::Exclusive)?; + let safe = self.check_and_insert_block_signing_root_txn( + validator_pubkey, + slot, + signing_root, + &txn, + )?; + txn.commit()?; + Ok(safe) + } - let safe = self.check_block_proposal(&txn, validator_pubkey, block_header, domain)?; + /// Transactional variant of `check_and_insert_block_signing_root`. 
+ pub fn check_and_insert_block_signing_root_txn( + &self, + validator_pubkey: &PublicKey, + slot: Slot, + signing_root: Hash256, + txn: &Transaction, + ) -> Result<Safe, NotSafe> { + let safe = self.check_block_proposal(&txn, validator_pubkey, slot, signing_root)?; if safe != Safe::SameData { - self.insert_block_proposal(&txn, validator_pubkey, block_header, domain)?; + self.insert_block_proposal(&txn, validator_pubkey, slot, signing_root)?; } - - txn.commit()?; Ok(safe) } @@ -390,19 +507,238 @@ impl SlashingDatabase { validator_pubkey: &PublicKey, attestation: &AttestationData, domain: Hash256, + ) -> Result<Safe, NotSafe> { + let attestation_signing_root = attestation.signing_root(domain); + self.check_and_insert_attestation_signing_root( + validator_pubkey, + attestation.source.epoch, + attestation.target.epoch, + attestation_signing_root, + ) + } + + /// As for `check_and_insert_attestation` but without requiring the whole `AttestationData`. + pub fn check_and_insert_attestation_signing_root( + &self, + validator_pubkey: &PublicKey, + att_source_epoch: Epoch, + att_target_epoch: Epoch, + att_signing_root: Hash256, ) -> Result<Safe, NotSafe> { let mut conn = self.conn_pool.get()?; let txn = conn.transaction_with_behavior(TransactionBehavior::Exclusive)?; - - let safe = self.check_attestation(&txn, validator_pubkey, attestation, domain)?; - - if safe != Safe::SameData { - self.insert_attestation(&txn, validator_pubkey, attestation, domain)?; - } - + let safe = self.check_and_insert_attestation_signing_root_txn( + validator_pubkey, + att_source_epoch, + att_target_epoch, + att_signing_root, + &txn, + )?; txn.commit()?; Ok(safe) } + + /// Transactional variant of `check_and_insert_attestation_signing_root`. + fn check_and_insert_attestation_signing_root_txn( + &self, + validator_pubkey: &PublicKey, + att_source_epoch: Epoch, + att_target_epoch: Epoch, + att_signing_root: Hash256, + txn: &Transaction, + ) -> Result<Safe, NotSafe> { + let safe = self.check_attestation( + &txn, + validator_pubkey, + att_source_epoch, + att_target_epoch, + att_signing_root, + )?; + + if safe != Safe::SameData { + self.insert_attestation( + &txn, + validator_pubkey, + att_source_epoch, + att_target_epoch, + att_signing_root, + )?; + } + Ok(safe) + } + + /// Import slashing protection from another client in the interchange format. + pub fn import_interchange_info( + &self, + interchange: &Interchange, + genesis_validators_root: Hash256, + ) -> Result<(), InterchangeError> { + let version = interchange.metadata.interchange_format_version; + if version != SUPPORTED_INTERCHANGE_FORMAT_VERSION { + return Err(InterchangeError::UnsupportedVersion(version)); + } + + if genesis_validators_root != interchange.metadata.genesis_validators_root { + return Err(InterchangeError::GenesisValidatorsMismatch { + client: genesis_validators_root, + interchange_file: interchange.metadata.genesis_validators_root, + }); + } + + // Import atomically, to prevent registering validators with partial information. + let mut conn = self.conn_pool.get()?; + let txn = conn.transaction()?; + + for record in &interchange.data { + self.register_validators_in_txn(std::iter::once(&record.pubkey), &txn)?; + + // Insert all signed blocks. + for block in &record.signed_blocks { + self.check_and_insert_block_signing_root_txn( + &record.pubkey, + block.slot, + block.signing_root.unwrap_or_else(Hash256::zero), + &txn, + )?; + } + + // Insert all signed attestations. 
+ for attestation in &record.signed_attestations { + self.check_and_insert_attestation_signing_root_txn( + &record.pubkey, + attestation.source_epoch, + attestation.target_epoch, + attestation.signing_root.unwrap_or_else(Hash256::zero), + &txn, + )?; + } + } + txn.commit()?; + + Ok(()) + } + + pub fn export_interchange_info( + &self, + genesis_validators_root: Hash256, + ) -> Result<Interchange, InterchangeError> { + use std::collections::BTreeMap; + + let mut conn = self.conn_pool.get()?; + let txn = conn.transaction()?; + + // Map from internal validator pubkey to blocks and attestation for that pubkey. + let mut data: BTreeMap<String, (Vec<InterchangeBlock>, Vec<InterchangeAttestation>)> = + BTreeMap::new(); + + txn.prepare( + "SELECT public_key, slot, signing_root + FROM signed_blocks, validators + WHERE signed_blocks.validator_id = validators.id", + )? + .query_and_then(params![], |row| { + let validator_pubkey: String = row.get(0)?; + let slot = row.get(1)?; + let signing_root = Some(hash256_from_row(2, &row)?); + let signed_block = InterchangeBlock { slot, signing_root }; + data.entry(validator_pubkey) + .or_insert_with(|| (vec![], vec![])) + .0 + .push(signed_block); + Ok(()) + })? + .collect::<Result<_, InterchangeError>>()?; + + txn.prepare( + "SELECT public_key, source_epoch, target_epoch, signing_root + FROM signed_attestations, validators + WHERE signed_attestations.validator_id = validators.id", + )? + .query_and_then(params![], |row| { + let validator_pubkey: String = row.get(0)?; + let source_epoch = row.get(1)?; + let target_epoch = row.get(2)?; + let signing_root = Some(hash256_from_row(3, &row)?); + let signed_attestation = InterchangeAttestation { + source_epoch, + target_epoch, + signing_root, + }; + data.entry(validator_pubkey) + .or_insert_with(|| (vec![], vec![])) + .1 + .push(signed_attestation); + Ok(()) + })? + .collect::<Result<_, InterchangeError>>()?; + + let metadata = InterchangeMetadata { + interchange_format: InterchangeFormat::Complete, + interchange_format_version: SUPPORTED_INTERCHANGE_FORMAT_VERSION, + genesis_validators_root, + }; + + let data = data + .into_iter() + .map(|(pubkey, (signed_blocks, signed_attestations))| { + Ok(CompleteInterchangeData { + pubkey: pubkey.parse().map_err(InterchangeError::InvalidPubkey)?, + signed_blocks, + signed_attestations, + }) + }) + .collect::<Result<_, InterchangeError>>()?; + + Ok(Interchange { metadata, data }) + } + + pub fn num_validator_rows(&self) -> Result<u32, NotSafe> { + let mut conn = self.conn_pool.get()?; + let txn = conn.transaction()?; + let count = txn + .prepare("SELECT COALESCE(COUNT(*), 0) FROM validators")? 
+ .query_row(params![], |row| row.get(0))?; + Ok(count) + } +} + +#[derive(Debug)] +pub enum InterchangeError { + UnsupportedVersion(u64), + GenesisValidatorsMismatch { + interchange_file: Hash256, + client: Hash256, + }, + MinimalAttestationSourceAndTargetInconsistent, + SQLError(String), + SQLPoolError(r2d2::Error), + SerdeJsonError(serde_json::Error), + InvalidPubkey(String), + NotSafe(NotSafe), +} + +impl From<NotSafe> for InterchangeError { + fn from(error: NotSafe) -> Self { + InterchangeError::NotSafe(error) + } +} + +impl From<rusqlite::Error> for InterchangeError { + fn from(error: rusqlite::Error) -> Self { + Self::SQLError(error.to_string()) + } +} + +impl From<r2d2::Error> for InterchangeError { + fn from(error: r2d2::Error) -> Self { + InterchangeError::SQLPoolError(error) + } +} + +impl From<serde_json::Error> for InterchangeError { + fn from(error: serde_json::Error) -> Self { + InterchangeError::SerdeJsonError(error) + } } #[cfg(test)] diff --git a/validator_client/slashing_protection/src/test_utils.rs b/validator_client/slashing_protection/src/test_utils.rs index e95665298..c9320c10d 100644 --- a/validator_client/slashing_protection/src/test_utils.rs +++ b/validator_client/slashing_protection/src/test_utils.rs @@ -1,13 +1,12 @@ -#![cfg(test)] - use crate::*; -use tempfile::tempdir; +use tempfile::{tempdir, TempDir}; use types::{ test_utils::generate_deterministic_keypair, AttestationData, BeaconBlockHeader, Hash256, }; pub const DEFAULT_VALIDATOR_INDEX: usize = 0; pub const DEFAULT_DOMAIN: Hash256 = Hash256::zero(); +pub const DEFAULT_GENESIS_VALIDATORS_ROOT: Hash256 = Hash256::zero(); pub fn pubkey(index: usize) -> PublicKey { generate_deterministic_keypair(index).pk @@ -73,6 +72,16 @@ impl<T> Default for StreamTest<T> { } } +impl<T> StreamTest<T> { + /// The number of test cases that are expected to pass processing successfully. 
+ fn num_expected_successes(&self) -> usize { + self.cases + .iter() + .filter(|case| case.expected.is_ok()) + .count() + } +} + impl StreamTest<AttestationData> { pub fn run(&self) { let dir = tempdir().unwrap(); @@ -91,6 +100,8 @@ impl StreamTest<AttestationData> { i ); } + + roundtrip_database(&dir, &slashing_db, self.num_expected_successes() == 0); } } @@ -112,5 +123,24 @@ impl StreamTest<BeaconBlockHeader> { i ); } + + roundtrip_database(&dir, &slashing_db, self.num_expected_successes() == 0); } } + +fn roundtrip_database(dir: &TempDir, db: &SlashingDatabase, is_empty: bool) { + let exported = db + .export_interchange_info(DEFAULT_GENESIS_VALIDATORS_ROOT) + .unwrap(); + let new_db = + SlashingDatabase::create(&dir.path().join("roundtrip_slashing_protection.sqlite")).unwrap(); + new_db + .import_interchange_info(&exported, DEFAULT_GENESIS_VALIDATORS_ROOT) + .unwrap(); + let reexported = new_db + .export_interchange_info(DEFAULT_GENESIS_VALIDATORS_ROOT) + .unwrap(); + + assert_eq!(exported, reexported); + assert_eq!(is_empty, exported.is_empty()); +} diff --git a/validator_client/slashing_protection/tests/interop.rs b/validator_client/slashing_protection/tests/interop.rs new file mode 100644 index 000000000..c0ea6b8c6 --- /dev/null +++ b/validator_client/slashing_protection/tests/interop.rs @@ -0,0 +1,23 @@ +use slashing_protection::interchange_test::TestCase; +use std::fs::File; +use std::path::PathBuf; + +fn test_root_dir() -> PathBuf { + PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("interchange-tests") + .join("tests") +} + +#[test] +fn generated() { + for entry in test_root_dir() + .join("generated") + .read_dir() + .unwrap() + .map(Result::unwrap) + { + let file = File::open(entry.path()).unwrap(); + let test_case: TestCase = serde_json::from_reader(&file).unwrap(); + test_case.run(); + } +} diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index fa7987777..d675ebda2 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -3,22 +3,26 @@ use crate::{ validator_store::ValidatorStore, }; use environment::RuntimeContext; +use eth2::BeaconNodeHttpClient; use futures::StreamExt; -use remote_beacon_node::{PublishStatus, RemoteBeaconNode}; -use slog::{crit, debug, error, info, trace}; +use slog::{crit, error, info, trace}; use slot_clock::SlotClock; use std::collections::HashMap; use std::ops::Deref; use std::sync::Arc; use tokio::time::{delay_until, interval_at, Duration, Instant}; -use types::{Attestation, ChainSpec, CommitteeIndex, EthSpec, Slot, SubnetId}; +use tree_hash::TreeHash; +use types::{ + AggregateSignature, Attestation, AttestationData, BitList, ChainSpec, CommitteeIndex, EthSpec, + Slot, +}; /// Builds an `AttestationService`. 
pub struct AttestationServiceBuilder<T, E: EthSpec> { duties_service: Option<DutiesService<T, E>>, validator_store: Option<ValidatorStore<T, E>>, slot_clock: Option<T>, - beacon_node: Option<RemoteBeaconNode<E>>, + beacon_node: Option<BeaconNodeHttpClient>, context: Option<RuntimeContext<E>>, } @@ -48,7 +52,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationServiceBuilder<T, E> { self } - pub fn beacon_node(mut self, beacon_node: RemoteBeaconNode<E>) -> Self { + pub fn beacon_node(mut self, beacon_node: BeaconNodeHttpClient) -> Self { self.beacon_node = Some(beacon_node); self } @@ -86,7 +90,7 @@ pub struct Inner<T, E: EthSpec> { duties_service: DutiesService<T, E>, validator_store: ValidatorStore<T, E>, slot_clock: T, - beacon_node: RemoteBeaconNode<E>, + beacon_node: BeaconNodeHttpClient, context: RuntimeContext<E>, } @@ -262,7 +266,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { // Step 2. // // If an attestation was produced, make an aggregate. - if let Some(attestation) = attestation_opt { + if let Some(attestation_data) = attestation_opt { // First, wait until the `aggregation_production_instant` (2/3rds // of the way though the slot). As verified in the // `delay_triggers_when_in_the_past` test, this code will still run @@ -272,7 +276,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { // Then download, sign and publish a `SignedAggregateAndProof` for each // validator that is elected to aggregate for this `slot` and // `committee_index`. - self.produce_and_publish_aggregates(attestation, &validator_duties) + self.produce_and_publish_aggregates(attestation_data, &validator_duties) .await .map_err(move |e| { crit!( @@ -305,7 +309,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { slot: Slot, committee_index: CommitteeIndex, validator_duties: &[DutyAndProof], - ) -> Result<Option<Attestation<E>>, String> { + ) -> Result<Option<AttestationData>, String> { let log = self.context.log(); if validator_duties.is_empty() { @@ -318,124 +322,88 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { .ok_or_else(|| "Unable to determine current slot from clock".to_string())? .epoch(E::slots_per_epoch()); - let attestation = self + let attestation_data = self .beacon_node - .http - .validator() - .produce_attestation(slot, committee_index) + .get_validator_attestation_data(slot, committee_index) .await - .map_err(|e| format!("Failed to produce attestation: {:?}", e))?; + .map_err(|e| format!("Failed to produce attestation data: {:?}", e))? + .data; - // For each validator in `validator_duties`, clone the `attestation` and add - // their signature. - // - // If any validator is unable to sign, they are simply skipped. - let signed_attestations = validator_duties - .iter() - .filter_map(|duty| { - // Ensure that all required fields are present in the validator duty. - let ( - duty_slot, - duty_committee_index, + for duty in validator_duties { + // Ensure that all required fields are present in the validator duty. + let ( + duty_slot, + duty_committee_index, + validator_committee_position, + _, + _, + committee_length, + ) = if let Some(tuple) = duty.attestation_duties() { + tuple + } else { + crit!( + log, + "Missing validator duties when signing"; + "duties" => format!("{:?}", duty) + ); + continue; + }; + + // Ensure that the attestation matches the duties. 
+ if duty_slot != attestation_data.slot || duty_committee_index != attestation_data.index + { + crit!( + log, + "Inconsistent validator duties during signing"; + "validator" => format!("{:?}", duty.validator_pubkey()), + "duty_slot" => duty_slot, + "attestation_slot" => attestation_data.slot, + "duty_index" => duty_committee_index, + "attestation_index" => attestation_data.index, + ); + continue; + } + + let mut attestation = Attestation { + aggregation_bits: BitList::with_capacity(committee_length as usize).unwrap(), + data: attestation_data.clone(), + signature: AggregateSignature::infinity(), + }; + + self.validator_store + .sign_attestation( + duty.validator_pubkey(), validator_committee_position, - _, - committee_count_at_slot, - ) = if let Some(tuple) = duty.attestation_duties() { - tuple - } else { - crit!( - log, - "Missing validator duties when signing"; - "duties" => format!("{:?}", duty) - ); - return None; - }; - - // Ensure that the attestation matches the duties. - if duty_slot != attestation.data.slot - || duty_committee_index != attestation.data.index - { - crit!( - log, - "Inconsistent validator duties during signing"; - "validator" => format!("{:?}", duty.validator_pubkey()), - "duty_slot" => duty_slot, - "attestation_slot" => attestation.data.slot, - "duty_index" => duty_committee_index, - "attestation_index" => attestation.data.index, - ); - return None; - } - - let mut attestation = attestation.clone(); - let subnet_id = SubnetId::compute_subnet_for_attestation_data::<E>( - &attestation.data, - committee_count_at_slot, - &self.context.eth2_config().spec, + &mut attestation, + current_epoch, ) - .map_err(|e| { - error!( - log, - "Failed to compute subnet id to publish attestation: {:?}", e - ) - }) - .ok()?; - self.validator_store - .sign_attestation( - duty.validator_pubkey(), - validator_committee_position, - &mut attestation, - current_epoch, - ) - .map(|_| (attestation, subnet_id)) - }) - .collect::<Vec<_>>(); + .ok_or_else(|| "Failed to sign attestation".to_string())?; - // If there are any signed attestations, publish them to the BN. Otherwise, - // just return early. - if let Some(attestation) = signed_attestations.first().cloned() { - let num_attestations = signed_attestations.len(); - let beacon_block_root = attestation.0.data.beacon_block_root; - - self.beacon_node - .http - .validator() - .publish_attestations(signed_attestations) + match self + .beacon_node + .post_beacon_pool_attestations(&attestation) .await - .map_err(|e| format!("Failed to publish attestation: {:?}", e)) - .map(move |publish_status| match publish_status { - PublishStatus::Valid => info!( - log, - "Successfully published attestations"; - "count" => num_attestations, - "head_block" => format!("{:?}", beacon_block_root), - "committee_index" => committee_index, - "slot" => slot.as_u64(), - "type" => "unaggregated", - ), - PublishStatus::Invalid(msg) => crit!( - log, - "Published attestation was invalid"; - "message" => msg, - "committee_index" => committee_index, - "slot" => slot.as_u64(), - "type" => "unaggregated", - ), - PublishStatus::Unknown => { - crit!(log, "Unknown condition when publishing unagg. 
attestation") - } - }) - .map(|()| Some(attestation.0)) - } else { - debug!( - log, - "No attestations to publish"; - "committee_index" => committee_index, - "slot" => slot.as_u64(), - ); - - Ok(None) + { + Ok(()) => info!( + log, + "Successfully published attestation"; + "head_block" => format!("{:?}", attestation.data.beacon_block_root), + "committee_index" => attestation.data.index, + "slot" => attestation.data.slot.as_u64(), + "type" => "unaggregated", + ), + Err(e) => error!( + log, + "Unable to publish attestation"; + "error" => e.to_string(), + "committee_index" => attestation.data.index, + "slot" => slot.as_u64(), + "type" => "unaggregated", + ), + } } + + Ok(Some(attestation_data)) } /// Performs the second step of the attesting process: downloading an aggregated `Attestation`, @@ -453,103 +421,89 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { /// returned to the BN. async fn produce_and_publish_aggregates( &self, - attestation: Attestation<E>, + attestation_data: AttestationData, validator_duties: &[DutyAndProof], ) -> Result<(), String> { let log = self.context.log(); let aggregated_attestation = self .beacon_node - .http - .validator() - .produce_aggregate_attestation(&attestation.data) + .get_validator_aggregate_attestation( + attestation_data.slot, + attestation_data.tree_hash_root(), + ) .await - .map_err(|e| format!("Failed to produce an aggregate attestation: {:?}", e))?; + .map_err(|e| format!("Failed to produce an aggregate attestation: {:?}", e))? + .ok_or_else(|| format!("No aggregate available for {:?}", attestation_data))? + .data; - // For each validator, clone the `aggregated_attestation` and convert it into - // a `SignedAggregateAndProof` - let signed_aggregate_and_proofs = validator_duties - .iter() - .filter_map(|duty_and_proof| { - // Do not produce a signed aggregator for validators that are not + for duty_and_proof in validator_duties { + let selection_proof = if let Some(proof) = duty_and_proof.selection_proof.as_ref() { + proof + } else { + // Do not produce a signed aggregate for validators that are not // subscribed aggregators. - let selection_proof = duty_and_proof.selection_proof.as_ref()?.clone(); - - let (duty_slot, duty_committee_index, _, validator_index, _) = - duty_and_proof.attestation_duties().or_else(|| { - crit!(log, "Missing duties when signing aggregate"); - None - })?; - - let pubkey = &duty_and_proof.duty.validator_pubkey; - let slot = attestation.data.slot; - let committee_index = attestation.data.index; - - if duty_slot != slot || duty_committee_index != committee_index { - crit!(log, "Inconsistent validator duties during signing"); - return None; - } - - if let Some(signed_aggregate_and_proof) = - self.validator_store.produce_signed_aggregate_and_proof( - pubkey, - validator_index, - aggregated_attestation.clone(), - selection_proof, - ) - { - Some(signed_aggregate_and_proof) + continue; + }; + let (duty_slot, duty_committee_index, _, validator_index, _, _) = + if let Some(tuple) = duty_and_proof.attestation_duties() { + tuple } else { - crit!(log, "Failed to sign attestation"); - None - } - }) - .collect::<Vec<_>>(); + crit!(log, "Missing duties when signing aggregate"); + continue; + }; - // If there any signed aggregates and proofs were produced, publish them to the - // BN. 
- if let Some(first) = signed_aggregate_and_proofs.first().cloned() { - let attestation = first.message.aggregate; + let pubkey = &duty_and_proof.duty.validator_pubkey; + let slot = attestation_data.slot; + let committee_index = attestation_data.index; - let publish_status = self + if duty_slot != slot || duty_committee_index != committee_index { + crit!(log, "Inconsistent validator duties during signing"); + continue; + } + + let signed_aggregate_and_proof = if let Some(aggregate) = + self.validator_store.produce_signed_aggregate_and_proof( + pubkey, + validator_index, + aggregated_attestation.clone(), + selection_proof.clone(), + ) { + aggregate + } else { + crit!(log, "Failed to sign attestation"); + continue; + }; + + let attestation = &signed_aggregate_and_proof.message.aggregate; + + match self .beacon_node - .http - .validator() - .publish_aggregate_and_proof(signed_aggregate_and_proofs) + .post_validator_aggregate_and_proof(&signed_aggregate_and_proof) .await - .map_err(|e| format!("Failed to publish aggregate and proofs: {:?}", e))?; - match publish_status { - PublishStatus::Valid => info!( + { + Ok(()) => info!( log, - "Successfully published attestations"; + "Successfully published attestation"; + "aggregator" => signed_aggregate_and_proof.message.aggregator_index, "signatures" => attestation.aggregation_bits.num_set_bits(), "head_block" => format!("{:?}", attestation.data.beacon_block_root), "committee_index" => attestation.data.index, "slot" => attestation.data.slot.as_u64(), "type" => "aggregated", ), - PublishStatus::Invalid(msg) => crit!( + Err(e) => crit!( log, - "Published attestation was invalid"; - "message" => msg, + "Failed to publish attestation"; + "error" => e.to_string(), "committee_index" => attestation.data.index, "slot" => attestation.data.slot.as_u64(), "type" => "aggregated", ), - PublishStatus::Unknown => { - crit!(log, "Unknown condition when publishing agg. attestation") - } - }; - Ok(()) - } else { - debug!( - log, - "No signed aggregates to publish"; - "committee_index" => attestation.data.index, - "slot" => attestation.data.slot.as_u64(), - ); - Ok(()) + } } + + Ok(()) } } diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 60d1f4d55..bf52cacfc 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -1,19 +1,19 @@ use crate::validator_store::ValidatorStore; use environment::RuntimeContext; +use eth2::{types::Graffiti, BeaconNodeHttpClient}; use futures::channel::mpsc::Receiver; use futures::{StreamExt, TryFutureExt}; -use remote_beacon_node::{PublishStatus, RemoteBeaconNode}; use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use std::ops::Deref; use std::sync::Arc; -use types::{EthSpec, Graffiti, PublicKey, Slot}; +use types::{EthSpec, PublicKey, Slot}; /// Builds a `BlockService`. 
pub struct BlockServiceBuilder<T, E: EthSpec> { validator_store: Option<ValidatorStore<T, E>>, slot_clock: Option<Arc<T>>, - beacon_node: Option<RemoteBeaconNode<E>>, + beacon_node: Option<BeaconNodeHttpClient>, context: Option<RuntimeContext<E>>, graffiti: Option<Graffiti>, } @@ -39,7 +39,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> { self } - pub fn beacon_node(mut self, beacon_node: RemoteBeaconNode<E>) -> Self { + pub fn beacon_node(mut self, beacon_node: BeaconNodeHttpClient) -> Self { self.beacon_node = Some(beacon_node); self } @@ -79,7 +79,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockServiceBuilder<T, E> { pub struct Inner<T, E: EthSpec> { validator_store: ValidatorStore<T, E>, slot_clock: Arc<T>, - beacon_node: RemoteBeaconNode<E>, + beacon_node: BeaconNodeHttpClient, context: RuntimeContext<E>, graffiti: Option<Graffiti>, } @@ -221,41 +221,28 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { let block = self .beacon_node - .http - .validator() - .produce_block(slot, randao_reveal, self.graffiti) + .get_validator_blocks(slot, randao_reveal.into(), self.graffiti.as_ref()) .await - .map_err(|e| format!("Error from beacon node when producing block: {:?}", e))?; + .map_err(|e| format!("Error from beacon node when producing block: {:?}", e))? + .data; let signed_block = self .validator_store .sign_block(&validator_pubkey, block, current_slot) .ok_or_else(|| "Unable to sign block".to_string())?; - let publish_status = self - .beacon_node - .http - .validator() - .publish_block(signed_block.clone()) + self.beacon_node + .post_beacon_blocks(&signed_block) .await .map_err(|e| format!("Error from beacon node when publishing block: {:?}", e))?; - match publish_status { - PublishStatus::Valid => info!( - log, - "Successfully published block"; - "deposits" => signed_block.message.body.deposits.len(), - "attestations" => signed_block.message.body.attestations.len(), - "slot" => signed_block.slot().as_u64(), - ), - PublishStatus::Invalid(msg) => crit!( - log, - "Published block was invalid"; - "message" => msg, - "slot" => signed_block.slot().as_u64(), - ), - PublishStatus::Unknown => crit!(log, "Unknown condition when publishing block"), - } + info!( + log, + "Successfully published block"; + "deposits" => signed_block.message.body.deposits.len(), + "attestations" => signed_block.message.body.attestations.len(), + "slot" => signed_block.slot().as_u64(), + ); Ok(()) } diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index ed320c24c..c78979880 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -1,4 +1,4 @@ -use crate::config::DEFAULT_HTTP_SERVER; +use crate::config::DEFAULT_BEACON_NODE; use clap::{App, Arg}; pub fn cli_app<'a, 'b>() -> App<'a, 'b> { @@ -8,13 +8,35 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { "When connected to a beacon node, performs the duties of a staked \ validator (e.g., proposing blocks and attestations).", ) + .arg( + Arg::with_name("beacon-node") + .long("beacon-node") + .value_name("NETWORK_ADDRESS") + .help("Address to a beacon node HTTP API") + .default_value(&DEFAULT_BEACON_NODE) + .takes_value(true), + ) + // This argument is deprecated, use `--beacon-node` instead. .arg( Arg::with_name("server") .long("server") .value_name("NETWORK_ADDRESS") - .help("Address to connect to BeaconNode.") - .default_value(&DEFAULT_HTTP_SERVER) - .takes_value(true), + .help("Deprecated. 
Use --beacon-node.") + .takes_value(true) + .conflicts_with("beacon-node"), + ) + .arg( + Arg::with_name("validators-dir") + .long("validators-dir") + .value_name("VALIDATORS_DIR") + .help( + "The directory which contains the validator keystores, deposit data for \ + each validator along with the common slashing protection database \ + and the validator_definitions.yml" + ) + .takes_value(true) + .conflicts_with("datadir") + .requires("secrets-dir") ) .arg( Arg::with_name("secrets-dir") @@ -24,26 +46,35 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { "The directory which contains the password to unlock the validator \ voting keypairs. Each password should be contained in a file where the \ name is the 0x-prefixed hex representation of the validators voting public \ - key. Defaults to ~/.lighthouse/secrets.", + key. Defaults to ~/.lighthouse/{testnet}/secrets.", ) - .takes_value(true), + .takes_value(true) + .conflicts_with("datadir") + .requires("validators-dir"), ) - .arg(Arg::with_name("auto-register").long("auto-register").help( - "If present, the validator client will register any new signing keys with \ - the slashing protection database so that they may be used. WARNING: \ - enabling the same signing key on multiple validator clients WILL lead to \ - that validator getting slashed. Only use this flag the first time you run \ - the validator client, or if you're certain there are no other \ - nodes using the same key. Automatically enabled unless `--strict` is specified", - )) .arg( - Arg::with_name("strict-lockfiles") - .long("strict-lockfiles") + Arg::with_name("delete-lockfiles") + .long("delete-lockfiles") .help( - "If present, do not load validators that are guarded by a lockfile. Note: for \ - Eth2 mainnet, this flag will likely be removed and its behaviour will become default." + "If present, ignore and delete any keystore lockfiles encountered during start up. \ + This is useful if the validator client did not exit gracefully on the last run. \ + WARNING: lockfiles help prevent users from accidentally running the same validator \ + using two different validator clients, an action that likely leads to slashing. \ + Ensure you are certain that there are no other validator client instances running \ + that might also be using the same keystores." ) ) + .arg( + Arg::with_name("init-slashing-protection") + .long("init-slashing-protection") + .help( + "If present, do not require the slashing protection database to exist before \ + running. You SHOULD NOT use this flag unless you're certain that a new \ + slashing protection database is required. Usually, your database \ + will have been initialized when you imported your validator keys. If you \ + misplace your database and then run with this flag you risk being slashed." + ) + ) .arg( Arg::with_name("disable-auto-discover") .long("disable-auto-discover") @@ -68,4 +99,40 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .value_name("GRAFFITI") .takes_value(true) ) + /* REST API related arguments */ + .arg( + Arg::with_name("http") + .long("http") + .help("Enable the RESTful HTTP API server. Disabled by default.") + .takes_value(false), + ) + /* + * Note: there is purposefully no `--http-address` flag provided. + * + * The HTTP server is **not** encrypted (i.e., not HTTPS) and therefore it is unsafe to + * publish on a public network. + * + * We restrict the user to `127.0.0.1` and they must provide some other transport-layer + * encryption (e.g., SSH tunnels). 
+ */ + .arg( + Arg::with_name("http-port") + .long("http-port") + .value_name("PORT") + .help("Set the listen TCP port for the RESTful HTTP API server. This server does **not** \ + provide encryption and is completely unsuitable to expose to a public network. \ + We do not provide a --http-address flag and restrict the user to listening on \ + 127.0.0.1. For access via the Internet, apply a transport-layer security like \ + a HTTPS reverse-proxy or SSH tunnelling.") + .default_value("5062") + .takes_value(true), + ) + .arg( + Arg::with_name("http-allow-origin") + .long("http-allow-origin") + .value_name("ORIGIN") + .help("Set the value of the Access-Control-Allow-Origin response HTTP header. Use * to allow any origin (not recommended in production)") + .default_value("") + .takes_value(true), + ) } diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 482c4ed70..f26eaf9e0 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -1,54 +1,66 @@ +use crate::http_api; use clap::ArgMatches; -use clap_utils::{parse_optional, parse_path_with_default_in_home_dir}; +use clap_utils::{parse_optional, parse_required}; +use directory::{ + get_testnet_name, DEFAULT_HARDCODED_TESTNET, DEFAULT_ROOT_DIR, DEFAULT_SECRET_DIR, + DEFAULT_VALIDATOR_DIR, +}; +use eth2::types::Graffiti; use serde_derive::{Deserialize, Serialize}; +use slog::{warn, Logger}; +use std::fs; use std::path::PathBuf; -use types::{Graffiti, GRAFFITI_BYTES_LEN}; +use types::GRAFFITI_BYTES_LEN; -pub const DEFAULT_HTTP_SERVER: &str = "http://localhost:5052/"; -pub const DEFAULT_DATA_DIR: &str = ".lighthouse/validators"; -pub const DEFAULT_SECRETS_DIR: &str = ".lighthouse/secrets"; -/// Path to the slashing protection database within the datadir. -pub const SLASHING_PROTECTION_FILENAME: &str = "slashing_protection.sqlite"; +pub const DEFAULT_BEACON_NODE: &str = "http://localhost:5052/"; /// Stores the core configuration for this validator instance. #[derive(Clone, Serialize, Deserialize)] pub struct Config { /// The data directory, which stores all validator databases - pub data_dir: PathBuf, + pub validator_dir: PathBuf, /// The directory containing the passwords to unlock validator keystores. pub secrets_dir: PathBuf, /// The http endpoint of the beacon node API. /// /// Should be similar to `http://localhost:8080` - pub http_server: String, + pub beacon_node: String, /// If true, the validator client will still poll for duties and produce blocks even if the /// beacon node is not synced at startup. pub allow_unsynced_beacon_node: bool, - /// If true, refuse to unlock a keypair that is guarded by a lockfile. - pub strict_lockfiles: bool, + /// If true, delete any validator keystore lockfiles that would prevent starting. + pub delete_lockfiles: bool, /// If true, don't scan the validators dir for new keystores. pub disable_auto_discover: bool, + /// If true, re-register existing validators in definitions.yml for slashing protection. + pub init_slashing_protection: bool, /// Graffiti to be inserted everytime we create a block. pub graffiti: Option<Graffiti>, + /// Configuration for the HTTP REST API. + pub http_api: http_api::Config, } impl Default for Config { /// Build a new configuration from defaults. 
fn default() -> Self { - let data_dir = dirs::home_dir() - .map(|home| home.join(DEFAULT_DATA_DIR)) - .unwrap_or_else(|| PathBuf::from(".")); - let secrets_dir = dirs::home_dir() - .map(|home| home.join(DEFAULT_SECRETS_DIR)) - .unwrap_or_else(|| PathBuf::from(".")); + // WARNING: these directory defaults should always be overridden with parameters + // from the CLI for specific networks. + let base_dir = dirs::home_dir() + .unwrap_or_else(|| PathBuf::from(".")) + .join(DEFAULT_ROOT_DIR) + .join(DEFAULT_HARDCODED_TESTNET); + let validator_dir = base_dir.join(DEFAULT_VALIDATOR_DIR); + let secrets_dir = base_dir.join(DEFAULT_SECRET_DIR); Self { - data_dir, + validator_dir, secrets_dir, - http_server: DEFAULT_HTTP_SERVER.to_string(), + beacon_node: DEFAULT_BEACON_NODE.to_string(), allow_unsynced_beacon_node: false, - strict_lockfiles: false, + delete_lockfiles: false, disable_auto_discover: false, + init_slashing_protection: false, graffiti: None, + http_api: <_>::default(), } } } @@ -56,33 +68,61 @@ impl Default for Config { impl Config { /// Returns a `Default` implementation of `Self` with some parameters modified by the supplied /// `cli_args`. - pub fn from_cli(cli_args: &ArgMatches) -> Result<Config, String> { + pub fn from_cli(cli_args: &ArgMatches, log: &Logger) -> Result<Config, String> { let mut config = Config::default(); - config.data_dir = parse_path_with_default_in_home_dir( - cli_args, - "datadir", - PathBuf::from(".lighthouse").join("validators"), - )?; + let default_root_dir = dirs::home_dir() + .map(|home| home.join(DEFAULT_ROOT_DIR)) + .unwrap_or_else(|| PathBuf::from(".")); - if !config.data_dir.exists() { - return Err(format!( - "The directory for validator data (--datadir) does not exist: {:?}", - config.data_dir - )); + let (mut validator_dir, mut secrets_dir) = (None, None); + if cli_args.value_of("datadir").is_some() { + let base_dir: PathBuf = parse_required(cli_args, "datadir")?; + validator_dir = Some(base_dir.join(DEFAULT_VALIDATOR_DIR)); + secrets_dir = Some(base_dir.join(DEFAULT_SECRET_DIR)); + } + if cli_args.value_of("validators-dir").is_some() + && cli_args.value_of("secrets-dir").is_some() + { + validator_dir = Some(parse_required(cli_args, "validators-dir")?); + secrets_dir = Some(parse_required(cli_args, "secrets-dir")?); } + config.validator_dir = validator_dir.unwrap_or_else(|| { + default_root_dir + .join(get_testnet_name(cli_args)) + .join(DEFAULT_VALIDATOR_DIR) + }); + + config.secrets_dir = secrets_dir.unwrap_or_else(|| { + default_root_dir + .join(get_testnet_name(cli_args)) + .join(DEFAULT_SECRET_DIR) + }); + + if !config.validator_dir.exists() { + fs::create_dir_all(&config.validator_dir) + .map_err(|e| format!("Failed to create {:?}: {:?}", config.validator_dir, e))?; + } + + if let Some(beacon_node) = parse_optional(cli_args, "beacon-node")? { + config.beacon_node = beacon_node; + } + + // To be deprecated. if let Some(server) = parse_optional(cli_args, "server")? { - config.http_server = server; + warn!( + log, + "The --server flag is deprecated"; + "msg" => "please use --beacon-node instead" + ); + config.beacon_node = server; } config.allow_unsynced_beacon_node = cli_args.is_present("allow-unsynced"); - config.strict_lockfiles = cli_args.is_present("strict-lockfiles"); + config.delete_lockfiles = cli_args.is_present("delete-lockfiles"); config.disable_auto_discover = cli_args.is_present("disable-auto-discover"); - - if let Some(secrets_dir) = parse_optional(cli_args, "secrets-dir")?
{ - config.secrets_dir = secrets_dir; - } + config.init_slashing_protection = cli_args.is_present("init-slashing-protection"); if let Some(input_graffiti) = cli_args.value_of("graffiti") { let graffiti_bytes = input_graffiti.as_bytes(); @@ -92,18 +132,40 @@ impl Config { GRAFFITI_BYTES_LEN )); } else { - // Default graffiti to all 0 bytes. - let mut graffiti = Graffiti::default(); + let mut graffiti = [0; 32]; // Copy the provided bytes over. // // Panic-free because `graffiti_bytes.len()` <= `GRAFFITI_BYTES_LEN`. graffiti[..graffiti_bytes.len()].copy_from_slice(&graffiti_bytes); - config.graffiti = Some(graffiti); + config.graffiti = Some(graffiti.into()); } } + /* + * Http API server + */ + + if cli_args.is_present("http") { + config.http_api.enabled = true; + } + + if let Some(port) = cli_args.value_of("http-port") { + config.http_api.listen_port = port + .parse::<u16>() + .map_err(|_| "http-port is not a valid u16.")?; + } + + if let Some(allow_origin) = cli_args.value_of("http-allow-origin") { + // Pre-validate the config value to give feedback to the user on node startup, instead of + // as late as when the first API response is produced. + hyper::header::HeaderValue::from_str(allow_origin) + .map_err(|_| "Invalid allow-origin value")?; + + config.http_api.allow_origin = Some(allow_origin.to_string()); + } + Ok(config) } } diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index 7375d5502..7f6d33fe8 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -1,16 +1,15 @@ use crate::{ - block_service::BlockServiceNotification, is_synced::is_synced, validator_store::ValidatorStore, + block_service::BlockServiceNotification, is_synced::is_synced, validator_duty::ValidatorDuty, + validator_store::ValidatorStore, }; use environment::RuntimeContext; +use eth2::BeaconNodeHttpClient; use futures::channel::mpsc::Sender; use futures::{SinkExt, StreamExt}; use parking_lot::RwLock; -use remote_beacon_node::{PublishStatus, RemoteBeaconNode}; -use rest_types::{ValidatorDuty, ValidatorDutyBytes, ValidatorSubscription}; use slog::{debug, error, trace, warn}; use slot_clock::SlotClock; use std::collections::HashMap; -use std::convert::TryInto; use std::ops::Deref; use std::sync::Arc; use tokio::time::{interval_at, Duration, Instant}; @@ -44,14 +43,14 @@ impl DutyAndProof { pub fn compute_selection_proof<T: SlotClock + 'static, E: EthSpec>( &mut self, validator_store: &ValidatorStore<T, E>, + spec: &ChainSpec, ) -> Result<(), String> { - let (modulo, slot) = if let (Some(modulo), Some(slot)) = - (self.duty.aggregator_modulo, self.duty.attestation_slot) + let (committee_length, slot) = if let (Some(count), Some(slot)) = + (self.duty.committee_length, self.duty.attestation_slot) { - (modulo, slot) + (count as usize, slot) } else { - // If there is no modulo or for the aggregator we assume they are not activated and - // therefore not an aggregator. + // If there are no attester duties we assume the validator is inactive. 
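Editor's aside: the change above replaces the server-supplied `aggregator_modulo` with a client-side check based on `committee_length`. For context, here is a minimal, self-contained sketch of the selection rule that `is_aggregator(committee_length, spec)` is assumed to implement; the constant value (16) and the first-8-bytes convention come from the eth2 spec, not from this diff.

```rust
// Sketch of the eth2 aggregator-selection rule (assumptions noted above).
const TARGET_AGGREGATORS_PER_COMMITTEE: u64 = 16; // spec value, assumed here

fn is_aggregator(committee_length: usize, slot_signature_hash: &[u8; 32]) -> bool {
    // One aggregator is targeted per TARGET_AGGREGATORS_PER_COMMITTEE members.
    let modulo = std::cmp::max(
        1,
        committee_length as u64 / TARGET_AGGREGATORS_PER_COMMITTEE,
    );
    // Interpret the first 8 bytes of the hashed slot signature as a
    // little-endian u64 and select when it divides evenly.
    let mut bytes = [0u8; 8];
    bytes.copy_from_slice(&slot_signature_hash[..8]);
    u64::from_le_bytes(bytes) % modulo == 0
}

fn main() {
    // With a committee of 128, roughly 1 in 8 hashes selects an aggregator;
    // an all-zero hash trivially satisfies the modulo check.
    assert!(is_aggregator(128, &[0u8; 32]));
}
```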
self.selection_proof = None;
+            return Ok(());
+        };
@@ -61,7 +60,7 @@ impl DutyAndProof {
             .ok_or_else(|| "Failed to produce selection proof".to_string())?;
 
         self.selection_proof = selection_proof
-            .is_aggregator_from_modulo(modulo)
-            .map_err(|e| format!("Invalid modulo: {:?}", e))
+            .is_aggregator(committee_length, spec)
+            .map_err(|e| format!("Unable to determine aggregator status: {:?}", e))
             .map(|is_aggregator| {
                 if is_aggregator {
@@ -87,19 +86,20 @@ impl DutyAndProof {
     /// It's important to note that this doesn't actually check `self.selection_proof`; instead,
     /// it checks to see if the inputs to computing the selection proof are equal.
     fn selection_proof_eq(&self, other: &Self) -> bool {
-        self.duty.aggregator_modulo == other.duty.aggregator_modulo
+        self.duty.committee_count_at_slot == other.duty.committee_count_at_slot
             && self.duty.attestation_slot == other.duty.attestation_slot
     }
 
     /// Returns the information required for an attesting validator, if they are scheduled to
     /// attest.
-    pub fn attestation_duties(&self) -> Option<(Slot, CommitteeIndex, usize, u64, u64)> {
+    pub fn attestation_duties(&self) -> Option<(Slot, CommitteeIndex, usize, u64, u64, u64)> {
         Some((
             self.duty.attestation_slot?,
             self.duty.attestation_committee_index?,
             self.duty.attestation_committee_position?,
             self.duty.validator_index?,
             self.duty.committee_count_at_slot?,
+            self.duty.committee_length?,
         ))
     }
 
@@ -108,26 +108,12 @@ impl DutyAndProof {
     }
 }
 
-impl TryInto<DutyAndProof> for ValidatorDutyBytes {
-    type Error = String;
-
-    fn try_into(self) -> Result<DutyAndProof, Self::Error> {
-        let duty = ValidatorDuty {
-            validator_pubkey: (&self.validator_pubkey)
-                .try_into()
-                .map_err(|e| format!("Invalid pubkey bytes from server: {:?}", e))?,
-            validator_index: self.validator_index,
-            attestation_slot: self.attestation_slot,
-            attestation_committee_index: self.attestation_committee_index,
-            attestation_committee_position: self.attestation_committee_position,
-            committee_count_at_slot: self.committee_count_at_slot,
-            block_proposal_slots: self.block_proposal_slots,
-            aggregator_modulo: self.aggregator_modulo,
-        };
-        Ok(DutyAndProof {
-            duty,
+impl Into<DutyAndProof> for ValidatorDuty {
+    fn into(self) -> DutyAndProof {
+        DutyAndProof {
+            duty: self,
             selection_proof: None,
-        })
+        }
     }
 }
 
@@ -260,6 +246,7 @@ impl DutiesStore {
         mut duties: DutyAndProof,
         slots_per_epoch: u64,
         validator_store: &ValidatorStore<T, E>,
+        spec: &ChainSpec,
     ) -> Result<InsertOutcome, String> {
         let mut store = self.store.write();
 
@@ -282,7 +269,7 @@
             }
         } else {
             // Compute the selection proof.
-            duties.compute_selection_proof(validator_store)?;
+            duties.compute_selection_proof(validator_store, spec)?;
 
             // Determine if a re-subscription is required.
             let should_resubscribe = !duties.subscription_eq(known_duties);
@@ -294,7 +281,7 @@
             }
         } else {
             // Compute the selection proof.
-            duties.compute_selection_proof(validator_store)?;
+            duties.compute_selection_proof(validator_store, spec)?;
 
             validator_map.insert(epoch, duties);
 
@@ -302,7 +289,7 @@
             }
         } else {
             // Compute the selection proof.
- duties.compute_selection_proof(validator_store)?; + duties.compute_selection_proof(validator_store, spec)?; let validator_pubkey = duties.duty.validator_pubkey.clone(); @@ -328,7 +315,7 @@ impl DutiesStore { pub struct DutiesServiceBuilder<T, E: EthSpec> { validator_store: Option<ValidatorStore<T, E>>, slot_clock: Option<T>, - beacon_node: Option<RemoteBeaconNode<E>>, + beacon_node: Option<BeaconNodeHttpClient>, context: Option<RuntimeContext<E>>, allow_unsynced_beacon_node: bool, } @@ -354,7 +341,7 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesServiceBuilder<T, E> { self } - pub fn beacon_node(mut self, beacon_node: RemoteBeaconNode<E>) -> Self { + pub fn beacon_node(mut self, beacon_node: BeaconNodeHttpClient) -> Self { self.beacon_node = Some(beacon_node); self } @@ -397,7 +384,7 @@ pub struct Inner<T, E: EthSpec> { store: Arc<DutiesStore>, validator_store: ValidatorStore<T, E>, pub(crate) slot_clock: T, - pub(crate) beacon_node: RemoteBeaconNode<E>, + pub(crate) beacon_node: BeaconNodeHttpClient, context: RuntimeContext<E>, /// If true, the duties service will poll for duties from the beacon node even if it is not /// synced. @@ -462,7 +449,7 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> { pub fn start_update_service( self, mut block_service_tx: Sender<BlockServiceNotification>, - spec: &ChainSpec, + spec: Arc<ChainSpec>, ) -> Result<(), String> { let duration_to_next_slot = self .slot_clock @@ -481,17 +468,22 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> { // Run an immediate update before starting the updater service. let duties_service = self.clone(); let mut block_service_tx_clone = block_service_tx.clone(); + let inner_spec = spec.clone(); self.inner .context .executor .runtime_handle() - .spawn(async move { duties_service.do_update(&mut block_service_tx_clone).await }); + .spawn(async move { + duties_service + .do_update(&mut block_service_tx_clone, &inner_spec) + .await + }); let executor = self.inner.context.executor.clone(); let interval_fut = async move { while interval.next().await.is_some() { - self.clone().do_update(&mut block_service_tx).await; + self.clone().do_update(&mut block_service_tx, &spec).await; } }; @@ -501,7 +493,11 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> { } /// Attempt to download the duties of all managed validators for this epoch and the next. - async fn do_update(self, block_service_tx: &mut Sender<BlockServiceNotification>) { + async fn do_update( + self, + block_service_tx: &mut Sender<BlockServiceNotification>, + spec: &ChainSpec, + ) { let log = self.context.log(); if !is_synced(&self.beacon_node, &self.slot_clock, None).await @@ -534,7 +530,11 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> { // Update duties for the current epoch, but keep running if there's an error: // block production or the next epoch update could still succeed. - if let Err(e) = self.clone().update_epoch(current_epoch).await { + if let Err(e) = self + .clone() + .update_epoch(current_epoch, current_epoch, spec) + .await + { error!( log, "Failed to get current epoch duties"; @@ -558,7 +558,11 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> { }; // Update duties for the next epoch. 
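Editor's aside: the update path above requests duties for both the current epoch and its successor. A minimal sketch of that arithmetic, assuming mainnet's 32 slots per epoch (an assumption, not taken from this diff):

```rust
// Sketch of the two epochs the duties service polls each slot.
const SLOTS_PER_EPOCH: u64 = 32; // mainnet value, assumed for illustration

fn epochs_to_poll(current_slot: u64) -> (u64, u64) {
    let current_epoch = current_slot / SLOTS_PER_EPOCH;
    (current_epoch, current_epoch + 1)
}

fn main() {
    // Slot 95 sits in epoch 2, so duties are requested for epochs 2 and 3.
    assert_eq!(epochs_to_poll(95), (2, 3));
}
```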
- if let Err(e) = self.clone().update_epoch(current_epoch + 1).await { + if let Err(e) = self + .clone() + .update_epoch(current_epoch, current_epoch + 1, spec) + .await + { error!( log, "Failed to get next epoch duties"; @@ -567,18 +571,15 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> { } } - /// Attempt to download the duties of all managed validators for the given `epoch`. - async fn update_epoch(self, epoch: Epoch) -> Result<(), String> { - let pubkeys = self.validator_store.voting_pubkeys(); - let all_duties = self - .beacon_node - .http - .validator() - .get_duties(epoch, pubkeys.as_slice()) - .await - .map_err(move |e| format!("Failed to get duties for epoch {}: {:?}", epoch, e))?; - - let log = self.context.log().clone(); + /// Attempt to download the duties of all managed validators for the given `request_epoch`. The + /// `current_epoch` should be a local reading of the slot clock. + async fn update_epoch( + self, + current_epoch: Epoch, + request_epoch: Epoch, + spec: &ChainSpec, + ) -> Result<(), String> { + let log = self.context.log(); let mut new_validator = 0; let mut new_epoch = 0; @@ -587,74 +588,76 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> { let mut replaced = 0; let mut invalid = 0; - // For each of the duties, attempt to insert them into our local store and build a - // list of new or changed selections proofs for any aggregating validators. - let validator_subscriptions = all_duties - .into_iter() - .filter_map(|remote_duties| { - // Convert the remote duties into our local representation. - let duties: DutyAndProof = remote_duties - .clone() - .try_into() - .map_err(|e| { - error!( - log, - "Unable to convert remote duties"; - "error" => e - ) - }) - .ok()?; - - let validator_pubkey = duties.duty.validator_pubkey.clone(); - - // Attempt to update our local store. - let outcome = self - .store - .insert(epoch, duties, E::slots_per_epoch(), &self.validator_store) - .map_err(|e| { - error!( - log, - "Unable to store duties"; - "error" => e - ) - }) - .ok()?; - - match &outcome { - InsertOutcome::NewValidator => { - debug!( - log, - "First duty assignment for validator"; - "proposal_slots" => format!("{:?}", &remote_duties.block_proposal_slots), - "attestation_slot" => format!("{:?}", &remote_duties.attestation_slot), - "validator" => format!("{:?}", &remote_duties.validator_pubkey) - ); - new_validator += 1; - } - InsertOutcome::NewProposalSlots => new_proposal_slots += 1, - InsertOutcome::NewEpoch => new_epoch += 1, - InsertOutcome::Identical => identical += 1, - InsertOutcome::Replaced { .. } => replaced += 1, - InsertOutcome::Invalid => invalid += 1, - }; - - // The selection proof is computed on `store.insert`, so it's necessary to check - // with the store that the validator is an aggregator. 
- let is_aggregator = self.store.is_aggregator(&validator_pubkey, epoch)?; - - if outcome.is_subscription_candidate() { - Some(ValidatorSubscription { - validator_index: remote_duties.validator_index?, - attestation_committee_index: remote_duties.attestation_committee_index?, - slot: remote_duties.attestation_slot?, - committee_count_at_slot: remote_duties.committee_count_at_slot?, - is_aggregator, - }) - } else { - None + let mut validator_subscriptions = vec![]; + for pubkey in self.validator_store.voting_pubkeys() { + let remote_duties = match ValidatorDuty::download( + &self.beacon_node, + current_epoch, + request_epoch, + pubkey, + ) + .await + { + Ok(duties) => duties, + Err(e) => { + error!( + log, + "Failed to download validator duties"; + "error" => e + ); + continue; } - }) - .collect::<Vec<_>>(); + }; + + // Convert the remote duties into our local representation. + let duties: DutyAndProof = remote_duties.clone().into(); + + let validator_pubkey = duties.duty.validator_pubkey.clone(); + + // Attempt to update our local store. + match self.store.insert( + request_epoch, + duties, + E::slots_per_epoch(), + &self.validator_store, + spec, + ) { + Ok(outcome) => { + match &outcome { + InsertOutcome::NewValidator => { + debug!( + log, + "First duty assignment for validator"; + "proposal_slots" => format!("{:?}", &remote_duties.block_proposal_slots), + "attestation_slot" => format!("{:?}", &remote_duties.attestation_slot), + "validator" => format!("{:?}", &remote_duties.validator_pubkey) + ); + new_validator += 1; + } + InsertOutcome::NewProposalSlots => new_proposal_slots += 1, + InsertOutcome::NewEpoch => new_epoch += 1, + InsertOutcome::Identical => identical += 1, + InsertOutcome::Replaced { .. } => replaced += 1, + InsertOutcome::Invalid => invalid += 1, + } + + if let Some(is_aggregator) = + self.store.is_aggregator(&validator_pubkey, request_epoch) + { + if outcome.is_subscription_candidate() { + if let Some(subscription) = remote_duties.subscription(is_aggregator) { + validator_subscriptions.push(subscription) + } + } + } + } + Err(e) => error!( + log, + "Unable to store duties"; + "error" => e + ), + } + } if invalid > 0 { error!( @@ -673,7 +676,7 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> { "new_proposal_slots" => new_proposal_slots, "new_validator" => new_validator, "replaced" => replaced, - "epoch" => format!("{}", epoch) + "epoch" => format!("{}", request_epoch) ); if replaced > 0 { @@ -690,34 +693,19 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> { if count == 0 { debug!(log, "No new subscriptions required"); - - Ok(()) } else { self.beacon_node - .http - .validator() - .subscribe(validator_subscriptions) + .post_validator_beacon_committee_subscriptions(&validator_subscriptions) .await - .map_err(|e| format!("Failed to subscribe validators: {:?}", e)) - .map(move |status| { - match status { - PublishStatus::Valid => debug!( - log, - "Successfully subscribed validators"; - "count" => count - ), - PublishStatus::Unknown => error!( - log, - "Unknown response from subscription"; - ), - PublishStatus::Invalid(e) => error!( - log, - "Failed to subscribe validator"; - "error" => e - ), - }; - }) + .map_err(|e| format!("Failed to subscribe validators: {:?}", e))?; + debug!( + log, + "Successfully subscribed validators"; + "count" => count + ); } + + Ok(()) } } diff --git a/validator_client/src/fork_service.rs b/validator_client/src/fork_service.rs index b8db7b72e..58665ee01 100644 --- a/validator_client/src/fork_service.rs +++ 
b/validator_client/src/fork_service.rs @@ -1,32 +1,33 @@ use environment::RuntimeContext; +use eth2::{types::StateId, BeaconNodeHttpClient}; use futures::StreamExt; use parking_lot::RwLock; -use remote_beacon_node::RemoteBeaconNode; +use slog::Logger; use slog::{debug, trace}; use slot_clock::SlotClock; use std::ops::Deref; use std::sync::Arc; use tokio::time::{interval_at, Duration, Instant}; -use types::{ChainSpec, EthSpec, Fork}; +use types::{EthSpec, Fork}; /// Delay this period of time after the slot starts. This allows the node to process the new slot. const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(80); /// Builds a `ForkService`. -pub struct ForkServiceBuilder<T, E: EthSpec> { +pub struct ForkServiceBuilder<T> { fork: Option<Fork>, slot_clock: Option<T>, - beacon_node: Option<RemoteBeaconNode<E>>, - context: Option<RuntimeContext<E>>, + beacon_node: Option<BeaconNodeHttpClient>, + log: Option<Logger>, } -impl<T: SlotClock + 'static, E: EthSpec> ForkServiceBuilder<T, E> { +impl<T: SlotClock + 'static> ForkServiceBuilder<T> { pub fn new() -> Self { Self { fork: None, slot_clock: None, beacon_node: None, - context: None, + log: None, } } @@ -35,17 +36,17 @@ impl<T: SlotClock + 'static, E: EthSpec> ForkServiceBuilder<T, E> { self } - pub fn beacon_node(mut self, beacon_node: RemoteBeaconNode<E>) -> Self { + pub fn beacon_node(mut self, beacon_node: BeaconNodeHttpClient) -> Self { self.beacon_node = Some(beacon_node); self } - pub fn runtime_context(mut self, context: RuntimeContext<E>) -> Self { - self.context = Some(context); + pub fn log(mut self, log: Logger) -> Self { + self.log = Some(log); self } - pub fn build(self) -> Result<ForkService<T, E>, String> { + pub fn build(self) -> Result<ForkService<T>, String> { Ok(ForkService { inner: Arc::new(Inner { fork: RwLock::new(self.fork), @@ -55,28 +56,48 @@ impl<T: SlotClock + 'static, E: EthSpec> ForkServiceBuilder<T, E> { beacon_node: self .beacon_node .ok_or_else(|| "Cannot build ForkService without beacon_node")?, - context: self - .context - .ok_or_else(|| "Cannot build ForkService without runtime_context")?, + log: self + .log + .ok_or_else(|| "Cannot build ForkService without logger")? + .clone(), }), }) } } +#[cfg(test)] +#[allow(dead_code)] +impl ForkServiceBuilder<slot_clock::TestingSlotClock> { + pub fn testing_only(log: Logger) -> Self { + Self { + fork: Some(types::Fork::default()), + slot_clock: Some(slot_clock::TestingSlotClock::new( + types::Slot::new(0), + std::time::Duration::from_secs(42), + std::time::Duration::from_secs(42), + )), + beacon_node: Some(eth2::BeaconNodeHttpClient::new( + eth2::Url::parse("http://127.0.0.1").unwrap(), + )), + log: Some(log), + } + } +} + /// Helper to minimise `Arc` usage. -pub struct Inner<T, E: EthSpec> { +pub struct Inner<T> { fork: RwLock<Option<Fork>>, - beacon_node: RemoteBeaconNode<E>, - context: RuntimeContext<E>, + beacon_node: BeaconNodeHttpClient, + log: Logger, slot_clock: T, } /// Attempts to download the `Fork` struct from the beacon node at the start of each epoch. 
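Editor's aside: for context, a minimal sketch of the epoch-aligned polling loop this service runs, using the same `tokio::time::{interval_at, Duration, Instant}` imports as the diff. The durations are placeholders, not real slot timings; the 80 ms offset mirrors `TIME_DELAY_FROM_SLOT` above.

```rust
// Sketch of an epoch-aligned polling cadence in the style of
// `start_update_service`; durations are stand-ins for real timings.
use tokio::time::{interval_at, Duration, Instant};

#[tokio::main]
async fn main() {
    let delay_from_slot = Duration::from_millis(80); // mirrors TIME_DELAY_FROM_SLOT
    let epoch_duration = Duration::from_millis(500); // stand-in for a real epoch
    let mut interval = interval_at(Instant::now() + delay_from_slot, epoch_duration);

    for _ in 0..3 {
        interval.tick().await;
        // In the real service this is where the `Fork` would be downloaded.
        println!("poll beacon node for the current fork");
    }
}
```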
-pub struct ForkService<T, E: EthSpec> { - inner: Arc<Inner<T, E>>, +pub struct ForkService<T> { + inner: Arc<Inner<T>>, } -impl<T, E: EthSpec> Clone for ForkService<T, E> { +impl<T> Clone for ForkService<T> { fn clone(&self) -> Self { Self { inner: self.inner.clone(), @@ -84,22 +105,27 @@ impl<T, E: EthSpec> Clone for ForkService<T, E> { } } -impl<T, E: EthSpec> Deref for ForkService<T, E> { - type Target = Inner<T, E>; +impl<T> Deref for ForkService<T> { + type Target = Inner<T>; fn deref(&self) -> &Self::Target { self.inner.deref() } } -impl<T: SlotClock + 'static, E: EthSpec> ForkService<T, E> { +impl<T: SlotClock + 'static> ForkService<T> { /// Returns the last fork downloaded from the beacon node, if any. pub fn fork(&self) -> Option<Fork> { *self.fork.read() } /// Starts the service that periodically polls for the `Fork`. - pub fn start_update_service(self, spec: &ChainSpec) -> Result<(), String> { + pub fn start_update_service<E: EthSpec>( + self, + context: &RuntimeContext<E>, + ) -> Result<(), String> { + let spec = &context.eth2_config.spec; + let duration_to_next_epoch = self .slot_clock .duration_to_next_epoch(E::slots_per_epoch()) @@ -115,13 +141,12 @@ impl<T: SlotClock + 'static, E: EthSpec> ForkService<T, E> { }; // Run an immediate update before starting the updater service. - self.inner - .context + context .executor .runtime_handle() .spawn(self.clone().do_update()); - let executor = self.inner.context.executor.clone(); + let executor = context.executor.clone(); let interval_fut = async move { while interval.next().await.is_some() { @@ -136,28 +161,32 @@ impl<T: SlotClock + 'static, E: EthSpec> ForkService<T, E> { /// Attempts to download the `Fork` from the server. async fn do_update(self) -> Result<(), ()> { - let log = self.context.log(); - let fork = self .inner .beacon_node - .http - .beacon() - .get_fork() + .get_beacon_states_fork(StateId::Head) .await .map_err(|e| { trace!( - log, + self.log, "Fork update failed"; "error" => format!("Error retrieving fork: {:?}", e) ) - })?; + })? + .ok_or_else(|| { + trace!( + self.log, + "Fork update failed"; + "error" => "The beacon head fork is unknown" + ) + })? + .data; if self.fork.read().as_ref() != Some(&fork) { *(self.fork.write()) = Some(fork); } - debug!(log, "Fork update success"); + debug!(self.log, "Fork update success"); // Returning an error will stop the interval. This is not desired, a single failure // should not stop all future attempts. diff --git a/validator_client/src/http_api/api_secret.rs b/validator_client/src/http_api/api_secret.rs new file mode 100644 index 000000000..a3aa5f0b9 --- /dev/null +++ b/validator_client/src/http_api/api_secret.rs @@ -0,0 +1,184 @@ +use eth2::lighthouse_vc::{PK_LEN, SECRET_PREFIX as PK_PREFIX}; +use rand::thread_rng; +use ring::digest::{digest, SHA256}; +use secp256k1::{Message, PublicKey, SecretKey}; +use std::fs; +use std::path::Path; +use warp::Filter; + +/// The name of the file which stores the secret key. +/// +/// It is purposefully opaque to prevent users confusing it with the "secret" that they need to +/// share with API consumers (which is actually the public key). +pub const SK_FILENAME: &str = ".secp-sk"; + +/// Length of the raw secret key, in bytes. +pub const SK_LEN: usize = 32; + +/// The name of the file which stores the public key. +/// +/// For users, this public key is a "secret" that can be shared with API consumers to provide them +/// access to the API. 
We avoid calling it a "public" key to users, since they should not post this
+/// value in a public forum.
+pub const PK_FILENAME: &str = "api-token.txt";
+
+/// Contains a `secp256k1` keypair that is saved-to/loaded-from disk on instantiation. The keypair
+/// is used for authorization/authentication for requests/responses on the HTTP API.
+///
+/// Provides convenience functions to ultimately provide:
+///
+///  - A signature across outgoing HTTP responses, applied to the `Signature` header.
+///  - Verification of proof-of-knowledge of the public key in `self` for incoming HTTP requests,
+///    via the `Authorization` header.
+///
+/// The aforementioned scheme was first defined here:
+///
+/// https://github.com/sigp/lighthouse/issues/1269#issuecomment-649879855
+pub struct ApiSecret {
+    pk: PublicKey,
+    sk: SecretKey,
+}
+
+impl ApiSecret {
+    /// If both the secret and public keys are already on-disk, parse them and ensure they're both
+    /// from the same keypair.
+    ///
+    /// The provided `dir` is a directory containing two files, `SK_FILENAME` and `PK_FILENAME`.
+    ///
+    /// If either the secret or public key files are missing on disk, create a new keypair and
+    /// write it to disk (over-writing any existing files).
+    pub fn create_or_open<P: AsRef<Path>>(dir: P) -> Result<Self, String> {
+        let sk_path = dir.as_ref().join(SK_FILENAME);
+        let pk_path = dir.as_ref().join(PK_FILENAME);
+
+        if !(sk_path.exists() && pk_path.exists()) {
+            let sk = SecretKey::random(&mut thread_rng());
+            let pk = PublicKey::from_secret_key(&sk);
+
+            fs::write(
+                &sk_path,
+                serde_utils::hex::encode(&sk.serialize()).as_bytes(),
+            )
+            .map_err(|e| e.to_string())?;
+            fs::write(
+                &pk_path,
+                format!(
+                    "{}{}",
+                    PK_PREFIX,
+                    serde_utils::hex::encode(&pk.serialize_compressed()[..])
+                )
+                .as_bytes(),
+            )
+            .map_err(|e| e.to_string())?;
+        }
+
+        let sk = fs::read(&sk_path)
+            .map_err(|e| format!("cannot read {}: {}", SK_FILENAME, e))
+            .and_then(|bytes| {
+                serde_utils::hex::decode(&String::from_utf8_lossy(&bytes))
+                    .map_err(|_| format!("{} should be 0x-prefixed hex", SK_FILENAME))
+            })
+            .and_then(|bytes| {
+                if bytes.len() == SK_LEN {
+                    let mut array = [0; SK_LEN];
+                    array.copy_from_slice(&bytes);
+                    SecretKey::parse(&array).map_err(|e| format!("invalid {}: {}", SK_FILENAME, e))
+                } else {
+                    Err(format!(
+                        "{} expected {} bytes not {}",
+                        SK_FILENAME,
+                        SK_LEN,
+                        bytes.len()
+                    ))
+                }
+            })?;
+
+        let pk = fs::read(&pk_path)
+            .map_err(|e| format!("cannot read {}: {}", PK_FILENAME, e))
+            .and_then(|bytes| {
+                let hex =
+                    String::from_utf8(bytes).map_err(|_| format!("{} is not utf8", PK_FILENAME))?;
+                if hex.starts_with(PK_PREFIX) {
+                    serde_utils::hex::decode(&hex[PK_PREFIX.len()..])
+                        .map_err(|_| format!("{} should be 0x-prefixed hex", PK_FILENAME))
+                } else {
+                    Err(format!("unable to parse {}", PK_FILENAME))
+                }
+            })
+            .and_then(|bytes| {
+                if bytes.len() == PK_LEN {
+                    let mut array = [0; PK_LEN];
+                    array.copy_from_slice(&bytes);
+                    PublicKey::parse_compressed(&array)
+                        .map_err(|e| format!("invalid {}: {}", PK_FILENAME, e))
+                } else {
+                    Err(format!(
+                        "{} expected {} bytes not {}",
+                        PK_FILENAME,
+                        PK_LEN,
+                        bytes.len()
+                    ))
+                }
+            })?;
+
+        // Ensure that the keys loaded from disk are indeed a pair.
+        if PublicKey::from_secret_key(&sk) != pk {
+            fs::remove_file(&sk_path)
+                .map_err(|e| format!("unable to remove {}: {}", SK_FILENAME, e))?;
+            fs::remove_file(&pk_path)
+                .map_err(|e| format!("unable to remove {}: {}", PK_FILENAME, e))?;
+            return Err(format!(
+                "{:?} does not match {:?} and the files have been deleted.
Please try again.",
+                sk_path, pk_path
+            ));
+        }
+
+        Ok(Self { sk, pk })
+    }
+
+    /// Returns the public key of `self` as a 0x-prefixed hex string.
+    fn pubkey_string(&self) -> String {
+        serde_utils::hex::encode(&self.pk.serialize_compressed()[..])
+    }
+
+    /// Returns the API token.
+    pub fn api_token(&self) -> String {
+        format!("{}{}", PK_PREFIX, self.pubkey_string())
+    }
+
+    /// Returns the value of the `Authorization` header which is used for verifying incoming HTTP
+    /// requests.
+    fn auth_header_value(&self) -> String {
+        format!("Basic {}", self.api_token())
+    }
+
+    /// Returns a `warp` filter which rejects requests that have a missing or incorrect
+    /// `Authorization` header.
+    pub fn authorization_header_filter(&self) -> warp::filters::BoxedFilter<()> {
+        let expected = self.auth_header_value();
+        warp::any()
+            .map(move || expected.clone())
+            .and(warp::filters::header::header("Authorization"))
+            .and_then(move |expected: String, header: String| async move {
+                if header == expected {
+                    Ok(())
+                } else {
+                    Err(warp_utils::reject::invalid_auth(header))
+                }
+            })
+            .untuple_one()
+            .boxed()
+    }
+
+    /// Returns a closure which produces a signature over some bytes using the secret key in
+    /// `self`. The input is hashed with SHA-256 and the DER-encoded signature is returned as a
+    /// 0x-prefixed hex string.
+    pub fn signer(&self) -> impl Fn(&[u8]) -> String + Clone {
+        let sk = self.sk.clone();
+        move |input: &[u8]| -> String {
+            let message =
+                Message::parse_slice(digest(&SHA256, input).as_ref()).expect("sha256 is 32 bytes");
+            let (signature, _) = secp256k1::sign(&message, &sk);
+            serde_utils::hex::encode(signature.serialize_der().as_ref())
+        }
+    }
+}
diff --git a/validator_client/src/http_api/create_validator.rs b/validator_client/src/http_api/create_validator.rs
new file mode 100644
index 000000000..b84f9d614
--- /dev/null
+++ b/validator_client/src/http_api/create_validator.rs
@@ -0,0 +1,151 @@
+use crate::ValidatorStore;
+use account_utils::{
+    eth2_wallet::{bip39::Mnemonic, WalletBuilder},
+    random_mnemonic, random_password, ZeroizeString,
+};
+use eth2::lighthouse_vc::types::{self as api_types};
+use slot_clock::SlotClock;
+use std::path::Path;
+use types::ChainSpec;
+use types::EthSpec;
+use validator_dir::Builder as ValidatorDirBuilder;
+
+/// Create some validator EIP-2335 keystores and store them on disk. Then, enroll the validators in
+/// this validator client.
+///
+/// Returns the list of created validators and the mnemonic used to derive them via EIP-2334.
+///
+/// ## Detail
+///
+/// If `mnemonic_opt` is not supplied it will be randomly generated and returned in the response.
+///
+/// If `key_derivation_path_offset` is supplied, then the EIP-2334 validator index will start at
+/// this point.
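Editor's aside: the derivation flow described above can be exercised on its own. The sketch below regenerates validator keystores from a mnemonic using the same `account_utils`/`eth2_wallet` calls that appear in this diff (these are Lighthouse-internal crates, so it compiles only inside this workspace); the offset of 2 is an arbitrary illustration of `key_derivation_path_offset`.

```rust
// Sketch: derive a validator keystore from a mnemonic via EIP-2334,
// mirroring the calls used in create_validator.rs and the tests below.
use account_utils::{eth2_wallet::WalletBuilder, random_mnemonic, random_password};

fn main() -> Result<(), String> {
    let mnemonic = random_mnemonic();
    let wallet_password = random_password();

    let mut wallet =
        WalletBuilder::from_mnemonic(&mnemonic, wallet_password.as_bytes(), String::new())
            .and_then(|builder| builder.build())
            .map_err(|e| format!("unable to create wallet: {:?}", e))?;

    // EIP-2334: skip the first two validator indices and derive from index 2.
    wallet
        .set_nextaccount(2)
        .map_err(|()| "unable to set wallet nextaccount".to_string())?;

    let keystores = wallet
        .next_validator(
            wallet_password.as_bytes(),
            random_password().as_bytes(),    // voting keystore password
            random_password().as_bytes(),    // withdrawal keystore password
        )
        .map_err(|e| format!("unable to create validator keys: {:?}", e))?;

    println!("derived voting pubkey: 0x{}", keystores.voting.pubkey());
    Ok(())
}
```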
+pub fn create_validators<P: AsRef<Path>, T: 'static + SlotClock, E: EthSpec>( + mnemonic_opt: Option<Mnemonic>, + key_derivation_path_offset: Option<u32>, + validator_requests: &[api_types::ValidatorRequest], + validator_dir: P, + validator_store: &ValidatorStore<T, E>, + spec: &ChainSpec, +) -> Result<(Vec<api_types::CreatedValidator>, Mnemonic), warp::Rejection> { + let mnemonic = mnemonic_opt.unwrap_or_else(random_mnemonic); + + let wallet_password = random_password(); + let mut wallet = + WalletBuilder::from_mnemonic(&mnemonic, wallet_password.as_bytes(), String::new()) + .and_then(|builder| builder.build()) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "unable to create EIP-2386 wallet: {:?}", + e + )) + })?; + + if let Some(nextaccount) = key_derivation_path_offset { + wallet.set_nextaccount(nextaccount).map_err(|()| { + warp_utils::reject::custom_server_error("unable to set wallet nextaccount".to_string()) + })?; + } + + let mut validators = Vec::with_capacity(validator_requests.len()); + + for request in validator_requests { + let voting_password = random_password(); + let withdrawal_password = random_password(); + let voting_password_string = ZeroizeString::from( + String::from_utf8(voting_password.as_bytes().to_vec()).map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "locally generated password is not utf8: {:?}", + e + )) + })?, + ); + + let mut keystores = wallet + .next_validator( + wallet_password.as_bytes(), + voting_password.as_bytes(), + withdrawal_password.as_bytes(), + ) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "unable to create validator keys: {:?}", + e + )) + })?; + + keystores + .voting + .set_description(request.description.clone()); + keystores + .withdrawal + .set_description(request.description.clone()); + + let voting_pubkey = format!("0x{}", keystores.voting.pubkey()) + .parse() + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "created invalid public key: {:?}", + e + )) + })?; + + let validator_dir = ValidatorDirBuilder::new(validator_dir.as_ref().into()) + .voting_keystore(keystores.voting, voting_password.as_bytes()) + .withdrawal_keystore(keystores.withdrawal, withdrawal_password.as_bytes()) + .create_eth1_tx_data(request.deposit_gwei, &spec) + .store_withdrawal_keystore(false) + .build() + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to build validator directory: {:?}", + e + )) + })?; + + let eth1_deposit_data = validator_dir + .eth1_deposit_data() + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to read local deposit data: {:?}", + e + )) + })? 
+            .ok_or_else(|| {
+                warp_utils::reject::custom_server_error(
+                    "failed to create local deposit data".to_string(),
+                )
+            })?;
+
+        if eth1_deposit_data.deposit_data.amount != request.deposit_gwei {
+            return Err(warp_utils::reject::custom_server_error(format!(
+                "invalid deposit_gwei {}, expected {}",
+                eth1_deposit_data.deposit_data.amount, request.deposit_gwei
+            )));
+        }
+
+        tokio::runtime::Handle::current()
+            .block_on(validator_store.add_validator_keystore(
+                validator_dir.voting_keystore_path(),
+                voting_password_string,
+                request.enable,
+            ))
+            .map_err(|e| {
+                warp_utils::reject::custom_server_error(format!(
+                    "failed to initialize validator: {:?}",
+                    e
+                ))
+            })?;
+
+        validators.push(api_types::CreatedValidator {
+            enabled: request.enable,
+            description: request.description.clone(),
+            voting_pubkey,
+            eth1_deposit_tx_data: serde_utils::hex::encode(&eth1_deposit_data.rlp),
+            deposit_gwei: request.deposit_gwei,
+        });
+    }
+
+    Ok((validators, mnemonic))
+}
diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs
new file mode 100644
index 000000000..7e0d387d2
--- /dev/null
+++ b/validator_client/src/http_api/mod.rs
@@ -0,0 +1,488 @@
+mod api_secret;
+mod create_validator;
+mod tests;
+
+use crate::ValidatorStore;
+use account_utils::mnemonic_from_phrase;
+use create_validator::create_validators;
+use eth2::lighthouse_vc::types::{self as api_types, PublicKey, PublicKeyBytes};
+use lighthouse_version::version_with_platform;
+use serde::{Deserialize, Serialize};
+use slog::{crit, info, Logger};
+use slot_clock::SlotClock;
+use std::future::Future;
+use std::marker::PhantomData;
+use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
+use std::path::PathBuf;
+use std::sync::Arc;
+use types::{ChainSpec, EthSpec, YamlConfig};
+use validator_dir::Builder as ValidatorDirBuilder;
+use warp::{
+    http::{
+        header::{HeaderValue, CONTENT_TYPE},
+        response::Response,
+        StatusCode,
+    },
+    Filter,
+};
+
+pub use api_secret::ApiSecret;
+
+#[derive(Debug)]
+pub enum Error {
+    Warp(warp::Error),
+    Other(String),
+}
+
+impl From<warp::Error> for Error {
+    fn from(e: warp::Error) -> Self {
+        Error::Warp(e)
+    }
+}
+
+impl From<String> for Error {
+    fn from(e: String) -> Self {
+        Error::Other(e)
+    }
+}
+
+/// A wrapper around all the items required to spawn the HTTP server.
+///
+/// The server will gracefully handle the case where any fields are `None`.
+pub struct Context<T: Clone, E: EthSpec> {
+    pub api_secret: ApiSecret,
+    pub validator_store: Option<ValidatorStore<T, E>>,
+    pub validator_dir: Option<PathBuf>,
+    pub spec: ChainSpec,
+    pub config: Config,
+    pub log: Logger,
+    pub _phantom: PhantomData<E>,
+}
+
+/// Configuration for the HTTP server.
+#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)]
+pub struct Config {
+    pub enabled: bool,
+    pub listen_addr: Ipv4Addr,
+    pub listen_port: u16,
+    pub allow_origin: Option<String>,
+}
+
+impl Default for Config {
+    fn default() -> Self {
+        Self {
+            enabled: false,
+            listen_addr: Ipv4Addr::new(127, 0, 0, 1),
+            listen_port: 5062,
+            allow_origin: None,
+        }
+    }
+}
+
+/// Creates a server that will serve requests using information from `ctx`.
+///
+/// The server will shut down gracefully when the `shutdown` future resolves.
+///
+/// ## Returns
+///
+/// This function will bind the server to the provided address and then return a tuple of:
+///
+/// - `SocketAddr`: the address that the HTTP server will listen on.
+/// - `Future`: the actual server future that will need to be awaited.
+///
+/// ## Errors
+///
+/// Returns an error if the server is unable to bind or there is another error during
+/// configuration.
+pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
+    ctx: Arc<Context<T, E>>,
+    shutdown: impl Future<Output = ()> + Send + Sync + 'static,
+) -> Result<(SocketAddr, impl Future<Output = ()>), Error> {
+    let config = &ctx.config;
+    let log = ctx.log.clone();
+    let allow_origin = config.allow_origin.clone();
+
+    // Sanity check.
+    if !config.enabled {
+        crit!(log, "Cannot start disabled HTTP API server");
+        return Err(Error::Other(
+            "A disabled HTTP API server should not be started".to_string(),
+        ));
+    }
+
+    let authorization_header_filter = ctx.api_secret.authorization_header_filter();
+    let api_token = ctx.api_secret.api_token();
+    let signer = ctx.api_secret.signer();
+    let signer = warp::any().map(move || signer.clone());
+
+    let inner_validator_store = ctx.validator_store.clone();
+    let validator_store_filter = warp::any()
+        .map(move || inner_validator_store.clone())
+        .and_then(|validator_store: Option<_>| async move {
+            validator_store.ok_or_else(|| {
+                warp_utils::reject::custom_not_found(
+                    "validator store is not initialized.".to_string(),
+                )
+            })
+        });
+
+    let inner_validator_dir = ctx.validator_dir.clone();
+    let validator_dir_filter = warp::any()
+        .map(move || inner_validator_dir.clone())
+        .and_then(|validator_dir: Option<_>| async move {
+            validator_dir.ok_or_else(|| {
+                warp_utils::reject::custom_not_found(
+                    "validator_dir directory is not initialized.".to_string(),
+                )
+            })
+        });
+
+    let inner_spec = Arc::new(ctx.spec.clone());
+    let spec_filter = warp::any().map(move || inner_spec.clone());
+
+    // GET lighthouse/version
+    let get_node_version = warp::path("lighthouse")
+        .and(warp::path("version"))
+        .and(warp::path::end())
+        .and(signer.clone())
+        .and_then(|signer| {
+            blocking_signed_json_task(signer, move || {
+                Ok(api_types::GenericResponse::from(api_types::VersionData {
+                    version: version_with_platform(),
+                }))
+            })
+        });
+
+    // GET lighthouse/health
+    let get_lighthouse_health = warp::path("lighthouse")
+        .and(warp::path("health"))
+        .and(warp::path::end())
+        .and(signer.clone())
+        .and_then(|signer| {
+            blocking_signed_json_task(signer, move || {
+                eth2::lighthouse::Health::observe()
+                    .map(api_types::GenericResponse::from)
+                    .map_err(warp_utils::reject::custom_bad_request)
+            })
+        });
+
+    // GET lighthouse/spec
+    let get_lighthouse_spec = warp::path("lighthouse")
+        .and(warp::path("spec"))
+        .and(warp::path::end())
+        .and(spec_filter.clone())
+        .and(signer.clone())
+        .and_then(|spec: Arc<_>, signer| {
+            blocking_signed_json_task(signer, move || {
+                Ok(api_types::GenericResponse::from(
+                    YamlConfig::from_spec::<E>(&spec),
+                ))
+            })
+        });
+
+    // GET lighthouse/validators
+    let get_lighthouse_validators = warp::path("lighthouse")
+        .and(warp::path("validators"))
+        .and(warp::path::end())
+        .and(validator_store_filter.clone())
+        .and(signer.clone())
+        .and_then(|validator_store: ValidatorStore<T, E>, signer| {
+            blocking_signed_json_task(signer, move || {
+                let validators = validator_store
+                    .initialized_validators()
+                    .read()
+                    .validator_definitions()
+                    .iter()
+                    .map(|def| api_types::ValidatorData {
+                        enabled: def.enabled,
+                        description: def.description.clone(),
+                        voting_pubkey: PublicKeyBytes::from(&def.voting_public_key),
+                    })
+                    .collect::<Vec<_>>();
+
+                Ok(api_types::GenericResponse::from(validators))
+            })
+        });
+
+    // GET lighthouse/validators/{validator_pubkey}
+    let get_lighthouse_validators_pubkey =
warp::path("lighthouse") + .and(warp::path("validators")) + .and(warp::path::param::<PublicKey>()) + .and(warp::path::end()) + .and(validator_store_filter.clone()) + .and(signer.clone()) + .and_then( + |validator_pubkey: PublicKey, validator_store: ValidatorStore<T, E>, signer| { + blocking_signed_json_task(signer, move || { + let validator = validator_store + .initialized_validators() + .read() + .validator_definitions() + .iter() + .find(|def| def.voting_public_key == validator_pubkey) + .map(|def| api_types::ValidatorData { + enabled: def.enabled, + description: def.description.clone(), + voting_pubkey: PublicKeyBytes::from(&def.voting_public_key), + }) + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "no validator for {:?}", + validator_pubkey + )) + })?; + + Ok(api_types::GenericResponse::from(validator)) + }) + }, + ); + + // POST lighthouse/validators/ + let post_validators = warp::path("lighthouse") + .and(warp::path("validators")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(validator_dir_filter.clone()) + .and(validator_store_filter.clone()) + .and(spec_filter.clone()) + .and(signer.clone()) + .and_then( + |body: Vec<api_types::ValidatorRequest>, + validator_dir: PathBuf, + validator_store: ValidatorStore<T, E>, + spec: Arc<ChainSpec>, + signer| { + blocking_signed_json_task(signer, move || { + let (validators, mnemonic) = create_validators( + None, + None, + &body, + &validator_dir, + &validator_store, + &spec, + )?; + let response = api_types::PostValidatorsResponseData { + mnemonic: mnemonic.into_phrase().into(), + validators, + }; + Ok(api_types::GenericResponse::from(response)) + }) + }, + ); + + // POST lighthouse/validators/mnemonic + let post_validators_mnemonic = warp::path("lighthouse") + .and(warp::path("validators")) + .and(warp::path("mnemonic")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(validator_dir_filter.clone()) + .and(validator_store_filter.clone()) + .and(spec_filter) + .and(signer.clone()) + .and_then( + |body: api_types::CreateValidatorsMnemonicRequest, + validator_dir: PathBuf, + validator_store: ValidatorStore<T, E>, + spec: Arc<ChainSpec>, + signer| { + blocking_signed_json_task(signer, move || { + let mnemonic = mnemonic_from_phrase(body.mnemonic.as_str()).map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid mnemonic: {:?}", e)) + })?; + let (validators, _mnemonic) = create_validators( + Some(mnemonic), + Some(body.key_derivation_path_offset), + &body.validators, + &validator_dir, + &validator_store, + &spec, + )?; + Ok(api_types::GenericResponse::from(validators)) + }) + }, + ); + + // POST lighthouse/validators/keystore + let post_validators_keystore = warp::path("lighthouse") + .and(warp::path("validators")) + .and(warp::path("keystore")) + .and(warp::path::end()) + .and(warp::body::json()) + .and(validator_dir_filter) + .and(validator_store_filter.clone()) + .and(signer.clone()) + .and_then( + |body: api_types::KeystoreValidatorsPostRequest, + validator_dir: PathBuf, + validator_store: ValidatorStore<T, E>, + signer| { + blocking_signed_json_task(signer, move || { + // Check to ensure the password is correct. 
+ let keypair = body + .keystore + .decrypt_keypair(body.password.as_ref()) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "invalid keystore: {:?}", + e + )) + })?; + + let validator_dir = ValidatorDirBuilder::new(validator_dir.clone()) + .voting_keystore(body.keystore.clone(), body.password.as_ref()) + .store_withdrawal_keystore(false) + .build() + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to build validator directory: {:?}", + e + )) + })?; + + let voting_password = body.password.clone(); + + let validator_def = tokio::runtime::Handle::current() + .block_on(validator_store.add_validator_keystore( + validator_dir.voting_keystore_path(), + voting_password, + body.enable, + )) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to initialize validator: {:?}", + e + )) + })?; + + Ok(api_types::GenericResponse::from(api_types::ValidatorData { + enabled: body.enable, + description: validator_def.description, + voting_pubkey: keypair.pk.into(), + })) + }) + }, + ); + + // PATCH lighthouse/validators/{validator_pubkey} + let patch_validators = warp::path("lighthouse") + .and(warp::path("validators")) + .and(warp::path::param::<PublicKey>()) + .and(warp::path::end()) + .and(warp::body::json()) + .and(validator_store_filter) + .and(signer) + .and_then( + |validator_pubkey: PublicKey, + body: api_types::ValidatorPatchRequest, + validator_store: ValidatorStore<T, E>, + signer| { + blocking_signed_json_task(signer, move || { + let initialized_validators_rw_lock = validator_store.initialized_validators(); + let mut initialized_validators = initialized_validators_rw_lock.write(); + + match initialized_validators.is_enabled(&validator_pubkey) { + None => Err(warp_utils::reject::custom_not_found(format!( + "no validator for {:?}", + validator_pubkey + ))), + Some(enabled) if enabled == body.enabled => Ok(()), + Some(_) => { + tokio::runtime::Handle::current() + .block_on( + initialized_validators + .set_validator_status(&validator_pubkey, body.enabled), + ) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "unable to set validator status: {:?}", + e + )) + })?; + + Ok(()) + } + } + }) + }, + ); + + let routes = warp::any() + .and(authorization_header_filter) + .and( + warp::get().and( + get_node_version + .or(get_lighthouse_health) + .or(get_lighthouse_spec) + .or(get_lighthouse_validators) + .or(get_lighthouse_validators_pubkey), + ), + ) + .or(warp::post().and( + post_validators + .or(post_validators_keystore) + .or(post_validators_mnemonic), + )) + .or(warp::patch().and(patch_validators)) + // Maps errors into HTTP responses. + .recover(warp_utils::reject::handle_rejection) + // Add a `Server` header. + .map(|reply| warp::reply::with_header(reply, "Server", &version_with_platform())) + // Maybe add some CORS headers. + .map(move |reply| warp_utils::reply::maybe_cors(reply, allow_origin.as_ref())); + + let (listening_socket, server) = warp::serve(routes).try_bind_with_graceful_shutdown( + SocketAddrV4::new(config.listen_addr, config.listen_port), + async { + shutdown.await; + }, + )?; + + info!( + log, + "HTTP API started"; + "listen_address" => listening_socket.to_string(), + "api_token" => api_token, + ); + + Ok((listening_socket, server)) +} + +/// Executes `func` in blocking tokio task (i.e., where long-running tasks are permitted). +/// JSON-encodes the return value of `func`, using the `signer` function to produce a signature of +/// those bytes. 
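Editor's aside: the signing scheme described above implies a matching client-side check of the `Signature` header. A sketch of that verification is below; it reuses the `ring` and `secp256k1` imports from this diff, but `Signature::parse_der` and `secp256k1::verify` are assumptions about that crate's API rather than calls taken from the diff.

```rust
// Sketch: verify the `Signature` response header against the API token's
// public key. Assumes the libsecp256k1-style crate used in this diff.
use ring::digest::{digest, SHA256};
use secp256k1::{Message, PublicKey, Signature};

fn verify_response(
    body: &[u8],
    der_signature: &[u8],
    server_pk: &PublicKey,
) -> Result<(), String> {
    // The server signs the SHA-256 digest of the response body.
    let message = Message::parse_slice(digest(&SHA256, body).as_ref())
        .map_err(|e| format!("bad digest: {:?}", e))?;
    let signature =
        Signature::parse_der(der_signature).map_err(|e| format!("bad signature: {:?}", e))?;
    if secp256k1::verify(&message, &signature, server_pk) {
        Ok(())
    } else {
        Err("signature does not match response body".to_string())
    }
}
```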
+pub async fn blocking_signed_json_task<S, F, T>( + signer: S, + func: F, +) -> Result<impl warp::Reply, warp::Rejection> +where + S: Fn(&[u8]) -> String, + F: Fn() -> Result<T, warp::Rejection>, + T: Serialize, +{ + warp_utils::task::blocking_task(func) + .await + .map(|func_output| { + let mut response = match serde_json::to_vec(&func_output) { + Ok(body) => { + let mut res = Response::new(body); + res.headers_mut() + .insert(CONTENT_TYPE, HeaderValue::from_static("application/json")); + res + } + Err(_) => Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .body(vec![]) + .expect("can produce simple response from static values"), + }; + + let body: &Vec<u8> = response.body(); + let signature = signer(body); + let header_value = + HeaderValue::from_str(&signature).expect("hash can be encoded as header"); + + response.headers_mut().append("Signature", header_value); + + response + }) +} diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs new file mode 100644 index 000000000..eef3aa8ae --- /dev/null +++ b/validator_client/src/http_api/tests.rs @@ -0,0 +1,530 @@ +#![cfg(test)] +#![cfg(not(debug_assertions))] + +use crate::{ + http_api::{ApiSecret, Config as HttpConfig, Context}, + Config, ForkServiceBuilder, InitializedValidators, ValidatorDefinitions, ValidatorStore, +}; +use account_utils::{ + eth2_wallet::WalletBuilder, mnemonic_from_phrase, random_mnemonic, random_password, + ZeroizeString, +}; +use deposit_contract::decode_eth1_tx_data; +use environment::null_logger; +use eth2::{ + lighthouse_vc::{http_client::ValidatorClientHttpClient, types::*}, + Url, +}; +use eth2_keystore::KeystoreBuilder; +use parking_lot::RwLock; +use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; +use slot_clock::TestingSlotClock; +use std::marker::PhantomData; +use std::net::Ipv4Addr; +use std::sync::Arc; +use tempfile::{tempdir, TempDir}; +use tokio::sync::oneshot; + +const PASSWORD_BYTES: &[u8] = &[42, 13, 37]; + +type E = MainnetEthSpec; + +struct ApiTester { + client: ValidatorClientHttpClient, + initialized_validators: Arc<RwLock<InitializedValidators>>, + url: Url, + _server_shutdown: oneshot::Sender<()>, + _validator_dir: TempDir, +} + +impl ApiTester { + pub async fn new() -> Self { + let log = null_logger().unwrap(); + + let validator_dir = tempdir().unwrap(); + let secrets_dir = tempdir().unwrap(); + + let validator_defs = ValidatorDefinitions::open_or_create(validator_dir.path()).unwrap(); + + let initialized_validators = InitializedValidators::from_definitions( + validator_defs, + validator_dir.path().into(), + false, + log.clone(), + ) + .await + .unwrap(); + + let api_secret = ApiSecret::create_or_open(validator_dir.path()).unwrap(); + let api_pubkey = api_secret.api_token(); + + let mut config = Config::default(); + config.validator_dir = validator_dir.path().into(); + config.secrets_dir = secrets_dir.path().into(); + + let fork_service = ForkServiceBuilder::testing_only(log.clone()) + .build() + .unwrap(); + + let slashing_db_path = config.validator_dir.join(SLASHING_PROTECTION_FILENAME); + let slashing_protection = SlashingDatabase::open_or_create(&slashing_db_path).unwrap(); + + let validator_store: ValidatorStore<TestingSlotClock, E> = ValidatorStore::new( + initialized_validators, + slashing_protection, + Hash256::repeat_byte(42), + E::default_spec(), + fork_service.clone(), + log.clone(), + ); + + let initialized_validators = validator_store.initialized_validators(); + + let context: 
Arc<Context<TestingSlotClock, E>> = Arc::new(Context { + api_secret, + validator_dir: Some(validator_dir.path().into()), + validator_store: Some(validator_store), + spec: E::default_spec(), + config: HttpConfig { + enabled: true, + listen_addr: Ipv4Addr::new(127, 0, 0, 1), + listen_port: 0, + allow_origin: None, + }, + log, + _phantom: PhantomData, + }); + let ctx = context.clone(); + let (shutdown_tx, shutdown_rx) = oneshot::channel(); + let server_shutdown = async { + // It's not really interesting why this triggered, just that it happened. + let _ = shutdown_rx.await; + }; + let (listening_socket, server) = super::serve(ctx, server_shutdown).unwrap(); + + tokio::spawn(async { server.await }); + + let url = Url::parse(&format!( + "http://{}:{}", + listening_socket.ip(), + listening_socket.port() + )) + .unwrap(); + + let client = ValidatorClientHttpClient::new(url.clone(), api_pubkey).unwrap(); + + Self { + initialized_validators, + _validator_dir: validator_dir, + client, + url, + _server_shutdown: shutdown_tx, + } + } + + pub fn invalidate_api_token(mut self) -> Self { + let tmp = tempdir().unwrap(); + let api_secret = ApiSecret::create_or_open(tmp.path()).unwrap(); + let invalid_pubkey = api_secret.api_token(); + + self.client = ValidatorClientHttpClient::new(self.url.clone(), invalid_pubkey).unwrap(); + self + } + + pub async fn test_get_lighthouse_version_invalid(self) -> Self { + self.client.get_lighthouse_version().await.unwrap_err(); + self + } + + pub async fn test_get_lighthouse_spec(self) -> Self { + let result = self.client.get_lighthouse_spec().await.unwrap().data; + + let expected = YamlConfig::from_spec::<E>(&E::default_spec()); + + assert_eq!(result, expected); + + self + } + + pub async fn test_get_lighthouse_version(self) -> Self { + let result = self.client.get_lighthouse_version().await.unwrap().data; + + let expected = VersionData { + version: lighthouse_version::version_with_platform(), + }; + + assert_eq!(result, expected); + + self + } + + #[cfg(target_os = "linux")] + pub async fn test_get_lighthouse_health(self) -> Self { + self.client.get_lighthouse_health().await.unwrap(); + + self + } + + #[cfg(not(target_os = "linux"))] + pub async fn test_get_lighthouse_health(self) -> Self { + self.client.get_lighthouse_health().await.unwrap_err(); + + self + } + pub fn vals_total(&self) -> usize { + self.initialized_validators.read().num_total() + } + + pub fn vals_enabled(&self) -> usize { + self.initialized_validators.read().num_enabled() + } + + pub fn assert_enabled_validators_count(self, count: usize) -> Self { + assert_eq!(self.vals_enabled(), count); + self + } + + pub fn assert_validators_count(self, count: usize) -> Self { + assert_eq!(self.vals_total(), count); + self + } + + pub async fn create_hd_validators(self, s: HdValidatorScenario) -> Self { + let initial_vals = self.vals_total(); + let initial_enabled_vals = self.vals_enabled(); + + let validators = (0..s.count) + .map(|i| ValidatorRequest { + enable: !s.disabled.contains(&i), + description: format!("boi #{}", i), + deposit_gwei: E::default_spec().max_effective_balance, + }) + .collect::<Vec<_>>(); + + let (response, mnemonic) = if s.specify_mnemonic { + let mnemonic = ZeroizeString::from(random_mnemonic().phrase().to_string()); + let request = CreateValidatorsMnemonicRequest { + mnemonic: mnemonic.clone(), + key_derivation_path_offset: s.key_derivation_path_offset, + validators: validators.clone(), + }; + let response = self + .client + .post_lighthouse_validators_mnemonic(&request) + .await + .unwrap() 
+ .data; + + (response, mnemonic) + } else { + assert_eq!( + s.key_derivation_path_offset, 0, + "cannot use a derivation offset without specifying a mnemonic" + ); + let response = self + .client + .post_lighthouse_validators(validators.clone()) + .await + .unwrap() + .data; + (response.validators.clone(), response.mnemonic.clone()) + }; + + assert_eq!(response.len(), s.count); + assert_eq!(self.vals_total(), initial_vals + s.count); + assert_eq!( + self.vals_enabled(), + initial_enabled_vals + s.count - s.disabled.len() + ); + + let server_vals = self.client.get_lighthouse_validators().await.unwrap().data; + + assert_eq!(server_vals.len(), self.vals_total()); + + // Ensure the server lists all of these newly created validators. + for validator in &response { + assert!(server_vals + .iter() + .any(|server_val| server_val.voting_pubkey == validator.voting_pubkey)); + } + + /* + * Verify that we can regenerate all the keys from the mnemonic. + */ + + let mnemonic = mnemonic_from_phrase(mnemonic.as_str()).unwrap(); + let mut wallet = WalletBuilder::from_mnemonic(&mnemonic, PASSWORD_BYTES, "".to_string()) + .unwrap() + .build() + .unwrap(); + + wallet + .set_nextaccount(s.key_derivation_path_offset) + .unwrap(); + + for i in 0..s.count { + let keypairs = wallet + .next_validator(PASSWORD_BYTES, PASSWORD_BYTES, PASSWORD_BYTES) + .unwrap(); + let voting_keypair = keypairs.voting.decrypt_keypair(PASSWORD_BYTES).unwrap(); + + assert_eq!( + response[i].voting_pubkey, + voting_keypair.pk.clone().into(), + "the locally generated voting pk should match the server response" + ); + + let withdrawal_keypair = keypairs.withdrawal.decrypt_keypair(PASSWORD_BYTES).unwrap(); + + let deposit_bytes = + serde_utils::hex::decode(&response[i].eth1_deposit_tx_data).unwrap(); + + let (deposit_data, _) = + decode_eth1_tx_data(&deposit_bytes, E::default_spec().max_effective_balance) + .unwrap(); + + assert_eq!( + deposit_data.pubkey, + voting_keypair.pk.clone().into(), + "the locally generated voting pk should match the deposit data" + ); + + assert_eq!( + deposit_data.withdrawal_credentials, + Hash256::from_slice(&bls::get_withdrawal_credentials( + &withdrawal_keypair.pk, + E::default_spec().bls_withdrawal_prefix_byte + )), + "the locally generated withdrawal creds should match the deposit data" + ); + + assert_eq!( + deposit_data.signature, + deposit_data.create_signature(&voting_keypair.sk, &E::default_spec()), + "the locally-generated deposit sig should create the same deposit sig" + ); + } + + self + } + + pub async fn create_keystore_validators(self, s: KeystoreValidatorScenario) -> Self { + let initial_vals = self.vals_total(); + let initial_enabled_vals = self.vals_enabled(); + + let password = random_password(); + let keypair = Keypair::random(); + let keystore = KeystoreBuilder::new(&keypair, password.as_bytes(), String::new()) + .unwrap() + .build() + .unwrap(); + + if !s.correct_password { + let request = KeystoreValidatorsPostRequest { + enable: s.enabled, + password: String::from_utf8(random_password().as_ref().to_vec()) + .unwrap() + .into(), + keystore, + }; + + self.client + .post_lighthouse_validators_keystore(&request) + .await + .unwrap_err(); + + return self; + } + + let request = KeystoreValidatorsPostRequest { + enable: s.enabled, + password: String::from_utf8(password.as_ref().to_vec()) + .unwrap() + .into(), + keystore, + }; + + let response = self + .client + .post_lighthouse_validators_keystore(&request) + .await + .unwrap() + .data; + + let num_enabled = s.enabled as usize; + + 
assert_eq!(self.vals_total(), initial_vals + 1); + assert_eq!(self.vals_enabled(), initial_enabled_vals + num_enabled); + + let server_vals = self.client.get_lighthouse_validators().await.unwrap().data; + + assert_eq!(server_vals.len(), self.vals_total()); + + assert_eq!(response.voting_pubkey, keypair.pk.into()); + assert_eq!(response.enabled, s.enabled); + + self + } + + pub async fn set_validator_enabled(self, index: usize, enabled: bool) -> Self { + let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index]; + + self.client + .patch_lighthouse_validators(&validator.voting_pubkey, enabled) + .await + .unwrap(); + + assert_eq!( + self.initialized_validators + .read() + .is_enabled(&validator.voting_pubkey.decompress().unwrap()) + .unwrap(), + enabled + ); + + assert!(self + .client + .get_lighthouse_validators() + .await + .unwrap() + .data + .into_iter() + .find(|v| v.voting_pubkey == validator.voting_pubkey) + .map(|v| v.enabled == enabled) + .unwrap()); + + // Check the server via an individual request. + assert_eq!( + self.client + .get_lighthouse_validators_pubkey(&validator.voting_pubkey) + .await + .unwrap() + .unwrap() + .data + .enabled, + enabled + ); + + self + } +} + +struct HdValidatorScenario { + count: usize, + specify_mnemonic: bool, + key_derivation_path_offset: u32, + disabled: Vec<usize>, +} + +struct KeystoreValidatorScenario { + enabled: bool, + correct_password: bool, +} + +#[tokio::test(core_threads = 2)] +async fn invalid_pubkey() { + ApiTester::new() + .await + .invalidate_api_token() + .test_get_lighthouse_version_invalid() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn simple_getters() { + ApiTester::new() + .await + .test_get_lighthouse_version() + .await + .test_get_lighthouse_health() + .await + .test_get_lighthouse_spec() + .await; +} + +#[tokio::test(core_threads = 2)] +async fn hd_validator_creation() { + ApiTester::new() + .await + .assert_enabled_validators_count(0) + .assert_validators_count(0) + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: true, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .create_hd_validators(HdValidatorScenario { + count: 1, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![0], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(3) + .create_hd_validators(HdValidatorScenario { + count: 0, + specify_mnemonic: true, + key_derivation_path_offset: 4, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(3); +} + +#[tokio::test(core_threads = 2)] +async fn validator_enabling() { + ApiTester::new() + .await + .create_hd_validators(HdValidatorScenario { + count: 2, + specify_mnemonic: false, + key_derivation_path_offset: 0, + disabled: vec![], + }) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2) + .set_validator_enabled(0, false) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(2) + .set_validator_enabled(0, true) + .await + .assert_enabled_validators_count(2) + .assert_validators_count(2); +} + +#[tokio::test(core_threads = 2)] +async fn keystore_validator_creation() { + ApiTester::new() + .await + .assert_enabled_validators_count(0) + .assert_validators_count(0) + .create_keystore_validators(KeystoreValidatorScenario { + correct_password: true, + enabled: true, + }) + .await + .assert_enabled_validators_count(1) + 
.assert_validators_count(1) + .create_keystore_validators(KeystoreValidatorScenario { + correct_password: false, + enabled: true, + }) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(1) + .create_keystore_validators(KeystoreValidatorScenario { + correct_password: true, + enabled: false, + }) + .await + .assert_enabled_validators_count(1) + .assert_validators_count(2); +} diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 436dcb4ba..09fd2ae9d 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -11,15 +11,19 @@ use account_utils::{ validator_definitions::{ self, SigningDefinition, ValidatorDefinition, ValidatorDefinitions, CONFIG_FILENAME, }, + ZeroizeString, }; use eth2_keystore::Keystore; -use slog::{error, info, warn, Logger}; -use std::collections::HashMap; +use slog::{debug, error, info, warn, Logger}; +use std::collections::{HashMap, HashSet}; use std::fs::{self, File, OpenOptions}; use std::io; use std::path::PathBuf; use types::{Keypair, PublicKey}; +use crate::key_cache; +use crate::key_cache::KeyCache; + // Use TTY instead of stdin to capture passwords from users. const USE_STDIN: bool = false; @@ -37,9 +41,11 @@ pub enum Error { }, /// There was a filesystem error when opening the keystore. UnableToOpenVotingKeystore(io::Error), + UnableToOpenKeyCache(key_cache::Error), /// The keystore path is not as expected. It should be a file, not `..` or something obscure /// like that. BadVotingKeystorePath(PathBuf), + BadKeyCachePath(PathBuf), /// The keystore could not be parsed, it is likely bad JSON. UnableToParseVotingKeystore(eth2_keystore::Error), /// The keystore could not be decrypted. The password might be wrong. @@ -50,10 +56,14 @@ pub enum Error { UnableToSaveDefinitions(validator_definitions::Error), /// It is not legal to try and initialize a disabled validator definition. UnableToInitializeDisabledValidator, - /// It is not legal to try and initialize a disabled validator definition. - PasswordUnknown(PathBuf), /// There was an error reading from stdin. UnableToReadPasswordFromUser(String), + /// There was an error running a tokio async task. + TokioJoin(tokio::task::JoinError), + /// There was a filesystem error when deleting a lockfile. + UnableToDeleteLockfile(io::Error), + /// Cannot initialize the same validator twice. + DuplicatePublicKey, } /// A method used by a validator to sign messages. @@ -75,6 +85,59 @@ pub struct InitializedValidator { signing_method: SigningMethod, } +fn open_keystore(path: &PathBuf) -> Result<Keystore, Error> { + let keystore_file = File::open(path).map_err(Error::UnableToOpenVotingKeystore)?; + Keystore::from_json_reader(keystore_file).map_err(Error::UnableToParseVotingKeystore) +} + +fn get_lockfile_path(file_path: &PathBuf) -> Option<PathBuf> { + file_path + .file_name() + .and_then(|os_str| os_str.to_str()) + .map(|filename| { + file_path + .clone() + .with_file_name(format!("{}.lock", filename)) + }) +} + +fn create_lock_file( + file_path: &PathBuf, + delete_lockfiles: bool, + log: &Logger, +) -> Result<(), Error> { + if file_path.exists() { + if delete_lockfiles { + warn!( + log, + "Deleting validator lockfile"; + "file" => format!("{:?}", file_path) + ); + + fs::remove_file(file_path).map_err(Error::UnableToDeleteLockfile)?; + } else { + return Err(Error::LockfileExists(file_path.clone())); + } + } + // Create a new lockfile. 
+ OpenOptions::new() + .write(true) + .create_new(true) + .open(file_path) + .map_err(Error::UnableToCreateLockfile)?; + Ok(()) +} + +fn remove_lock(lock_path: &PathBuf) { + if lock_path.exists() { + if let Err(e) = fs::remove_file(&lock_path) { + eprintln!("Failed to remove {:?}: {:?}", lock_path, e) + } + } else { + eprintln!("Lockfile missing: {:?}", lock_path) + } +} + impl InitializedValidator { /// Instantiate `self` from a `ValidatorDefinition`. /// @@ -84,10 +147,12 @@ impl InitializedValidator { /// ## Errors /// /// If the validator is unable to be initialized for whatever reason. - pub fn from_definition( + async fn from_definition( def: ValidatorDefinition, - strict_lockfiles: bool, + delete_lockfiles: bool, log: &Logger, + key_cache: &mut KeyCache, + key_stores: &mut HashMap<PathBuf, Keystore>, ) -> Result<Self, Error> { if !def.enabled { return Err(Error::UnableToInitializeDisabledValidator); @@ -101,30 +166,55 @@ impl InitializedValidator { voting_keystore_password_path, voting_keystore_password, } => { - let keystore_file = - File::open(&voting_keystore_path).map_err(Error::UnableToOpenVotingKeystore)?; - let voting_keystore = Keystore::from_json_reader(keystore_file) - .map_err(Error::UnableToParseVotingKeystore)?; + use std::collections::hash_map::Entry::*; + let voting_keystore = match key_stores.entry(voting_keystore_path.clone()) { + Vacant(entry) => entry.insert(open_keystore(&voting_keystore_path)?), + Occupied(entry) => entry.into_mut(), + }; - let voting_keypair = match (voting_keystore_password_path, voting_keystore_password) - { - // If the password is supplied, use it and ignore the path (if supplied). - (_, Some(password)) => voting_keystore - .decrypt_keypair(password.as_ref()) - .map_err(Error::UnableToDecryptKeystore)?, - // If only the path is supplied, use the path. - (Some(path), None) => { - let password = read_password(path) - .map_err(Error::UnableToReadVotingKeystorePassword)?; - - voting_keystore - .decrypt_keypair(password.as_bytes()) - .map_err(Error::UnableToDecryptKeystore)? - } - // If there is no password available, maybe prompt for a password. - (None, None) => { - unlock_keystore_via_stdin_password(&voting_keystore, &voting_keystore_path)? - } + let voting_keypair = if let Some(keypair) = key_cache.get(voting_keystore.uuid()) { + keypair + } else { + let keystore = voting_keystore.clone(); + let keystore_path = voting_keystore_path.clone(); + // Decoding a local keystore can take several seconds, so it's best + // to keep it off the core executor. This also has the fortunate effect of + // interrupting the potentially long-running task during shutdown. + let (password, keypair) = tokio::task::spawn_blocking(move || { + Ok( + match (voting_keystore_password_path, voting_keystore_password) { + // If the password is supplied, use it and ignore the path + // (if supplied). + (_, Some(password)) => ( + password.as_ref().to_vec().into(), + keystore + .decrypt_keypair(password.as_ref()) + .map_err(Error::UnableToDecryptKeystore)?, + ), + // If only the path is supplied, use the path. + (Some(path), None) => { + let password = read_password(path) + .map_err(Error::UnableToReadVotingKeystorePassword)?; + let keypair = keystore + .decrypt_keypair(password.as_bytes()) + .map_err(Error::UnableToDecryptKeystore)?; + (password, keypair) + } + // If there is no password available, maybe prompt for a password. 
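The `create_lock_file`/`remove_lock` helpers above rely on `create_new(true)`, which makes `open` fail if the file already exists, so creating the lockfile doubles as an atomic acquire. A minimal RAII sketch of the same idea (the `Lockfile` type is hypothetical; the real code manages the paths explicitly and releases in `Drop` via `remove_lock`):

use std::fs::{self, OpenOptions};
use std::io;
use std::path::PathBuf;

struct Lockfile(PathBuf);

impl Lockfile {
    fn acquire(path: PathBuf) -> io::Result<Self> {
        // `create_new` errors with AlreadyExists if another process holds
        // the lock, so open-and-create is the acquire step.
        OpenOptions::new().write(true).create_new(true).open(&path)?;
        Ok(Lockfile(path))
    }
}

impl Drop for Lockfile {
    fn drop(&mut self) {
        // Best-effort release; a missing file just means the lock is gone.
        let _ = fs::remove_file(&self.0);
    }
}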
+ (None, None) => { + let (password, keypair) = unlock_keystore_via_stdin_password( + &keystore, + &keystore_path, + )?; + (password.as_ref().to_vec().into(), keypair) + } + }, + ) + }) + .await + .map_err(Error::TokioJoin)??; + key_cache.add(keypair.clone(), voting_keystore.uuid(), password); + keypair }; if voting_keypair.pk != def.voting_public_key { @@ -135,46 +225,16 @@ impl InitializedValidator { } // Append a `.lock` suffix to the voting keystore. - let voting_keystore_lockfile_path = voting_keystore_path - .file_name() - .ok_or_else(|| Error::BadVotingKeystorePath(voting_keystore_path.clone())) - .and_then(|os_str| { - os_str.to_str().ok_or_else(|| { - Error::BadVotingKeystorePath(voting_keystore_path.clone()) - }) - }) - .map(|filename| { - voting_keystore_path - .clone() - .with_file_name(format!("{}.lock", filename)) - })?; + let voting_keystore_lockfile_path = get_lockfile_path(&voting_keystore_path) + .ok_or_else(|| Error::BadVotingKeystorePath(voting_keystore_path.clone()))?; - if voting_keystore_lockfile_path.exists() { - if strict_lockfiles { - return Err(Error::LockfileExists(voting_keystore_lockfile_path)); - } else { - // If **not** respecting lockfiles, just raise a warning if the voting - // keypair cannot be unlocked. - warn!( - log, - "Ignoring validator lockfile"; - "file" => format!("{:?}", voting_keystore_lockfile_path) - ); - } - } else { - // Create a new lockfile. - OpenOptions::new() - .write(true) - .create_new(true) - .open(&voting_keystore_lockfile_path) - .map_err(Error::UnableToCreateLockfile)?; - } + create_lock_file(&voting_keystore_lockfile_path, delete_lockfiles, &log)?; Ok(Self { signing_method: SigningMethod::LocalKeystore { voting_keystore_path, voting_keystore_lockfile_path, - voting_keystore, + voting_keystore: voting_keystore.clone(), voting_keypair, }, }) @@ -205,16 +265,7 @@ impl Drop for InitializedValidator { voting_keystore_lockfile_path, .. } => { - if voting_keystore_lockfile_path.exists() { - if let Err(e) = fs::remove_file(&voting_keystore_lockfile_path) { - eprintln!( - "Failed to remove {:?}: {:?}", - voting_keystore_lockfile_path, e - ) - } - } else { - eprintln!("Lockfile missing: {:?}", voting_keystore_lockfile_path) - } + remove_lock(voting_keystore_lockfile_path); } } } @@ -224,7 +275,7 @@ impl Drop for InitializedValidator { fn unlock_keystore_via_stdin_password( keystore: &Keystore, keystore_path: &PathBuf, -) -> Result<Keypair, Error> { +) -> Result<(ZeroizeString, Keypair), Error> { eprintln!(""); eprintln!( "The {} file does not contain either of the following fields for {:?}:", @@ -250,7 +301,7 @@ fn unlock_keystore_via_stdin_password( eprintln!(""); match keystore.decrypt_keypair(password.as_ref()) { - Ok(keystore) => break Ok(keystore), + Ok(keystore) => break Ok((password, keystore)), Err(eth2_keystore::Error::InvalidPassword) => { eprintln!("Invalid password, try again (or press Ctrl+c to exit):"); } @@ -264,9 +315,8 @@ fn unlock_keystore_via_stdin_password( /// /// Forms the fundamental list of validators that are managed by this validator client instance. pub struct InitializedValidators { - /// If `true`, no validator will be opened if a lockfile exists. If `false`, a warning will be - /// raised for an existing lockfile, but it will ultimately be ignored. - strict_lockfiles: bool, + /// If `true`, delete any validator keystore lockfiles that would prevent starting. + delete_lockfiles: bool, /// A list of validator definitions which can be stored on-disk. 
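The `spawn_blocking` block above moves the deliberately slow keystore KDF off the async executor so it cannot stall other tasks, and lets the awaiting caller be cancelled during shutdown. A stripped-down sketch of the pattern, with a toy transformation standing in for `Keystore::decrypt_keypair`:

use tokio::task;

// Run a CPU-heavy step on the blocking thread pool and await its result.
async fn decrypt_off_executor(ciphertext: Vec<u8>) -> Result<Vec<u8>, task::JoinError> {
    task::spawn_blocking(move || {
        // Stand-in for the expensive scrypt/PBKDF2 work inside
        // `decrypt_keypair`.
        ciphertext.into_iter().map(|b| b ^ 0xff).collect()
    })
    .await
}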
definitions: ValidatorDefinitions, /// The directory that the `self.definitions` will be saved into. @@ -279,20 +329,20 @@ pub struct InitializedValidators { impl InitializedValidators { /// Instantiates `Self`, initializing all validators in `definitions`. - pub fn from_definitions( + pub async fn from_definitions( definitions: ValidatorDefinitions, validators_dir: PathBuf, - strict_lockfiles: bool, + delete_lockfiles: bool, log: Logger, ) -> Result<Self, Error> { let mut this = Self { - strict_lockfiles, + delete_lockfiles, validators_dir, definitions, validators: HashMap::default(), log, }; - this.update_validators()?; + this.update_validators().await?; Ok(this) } @@ -319,6 +369,42 @@ impl InitializedValidators { .map(|v| v.voting_keypair()) } + /// Add a validator definition to `self`, overwriting the on-disk representation of `self`. + pub async fn add_definition(&mut self, def: ValidatorDefinition) -> Result<(), Error> { + if self + .definitions + .as_slice() + .iter() + .any(|existing| existing.voting_public_key == def.voting_public_key) + { + return Err(Error::DuplicatePublicKey); + } + + self.definitions.push(def); + + self.update_validators().await?; + + self.definitions + .save(&self.validators_dir) + .map_err(Error::UnableToSaveDefinitions)?; + + Ok(()) + } + + /// Returns a slice of all defined validators (regardless of their enabled state). + pub fn validator_definitions(&self) -> &[ValidatorDefinition] { + self.definitions.as_slice() + } + + /// Indicates if the `voting_public_key` exists in self and if it is enabled. + pub fn is_enabled(&self, voting_public_key: &PublicKey) -> Option<bool> { + self.definitions + .as_slice() + .iter() + .find(|def| def.voting_public_key == *voting_public_key) + .map(|def| def.enabled) + } + /// Sets the `InitializedValidator` and `ValidatorDefinition` `enabled` values. /// /// ## Notes @@ -328,7 +414,8 @@ impl InitializedValidators { /// validator will be removed from `self.validators`. /// /// Saves the `ValidatorDefinitions` to file, even if no definitions were changed. - pub fn set_validator_status( + #[allow(dead_code)] // Will be used once VC API is enabled. + pub async fn set_validator_status( &mut self, voting_public_key: &PublicKey, enabled: bool, @@ -342,7 +429,7 @@ impl InitializedValidators { def.enabled = enabled; } - self.update_validators()?; + self.update_validators().await?; self.definitions .save(&self.validators_dir) @@ -351,6 +438,84 @@ impl InitializedValidators { Ok(()) } + /// Tries to decrypt the key cache. + /// + /// Returns `Ok(true)` if decryption was successful, `Ok(false)` if it couldn't get decrypted + /// and an error if a needed password couldn't get extracted. + /// + async fn decrypt_key_cache( + &self, + mut cache: KeyCache, + key_stores: &mut HashMap<PathBuf, Keystore>, + ) -> Result<KeyCache, Error> { + //read relevant key_stores + let mut definitions_map = HashMap::new(); + for def in self.definitions.as_slice() { + match &def.signing_definition { + SigningDefinition::LocalKeystore { + voting_keystore_path, + .. 
+ } => { + use std::collections::hash_map::Entry::*; + let key_store = match key_stores.entry(voting_keystore_path.clone()) { + Vacant(entry) => entry.insert(open_keystore(voting_keystore_path)?), + Occupied(entry) => entry.into_mut(), + }; + definitions_map.insert(*key_store.uuid(), def); + } + } + } + + //check if all paths are in the definitions_map + for uuid in cache.uuids() { + if !definitions_map.contains_key(uuid) { + warn!( + self.log, + "Unknown uuid in cache"; + "uuid" => format!("{}", uuid) + ); + return Ok(KeyCache::new()); + } + } + + //collect passwords + let mut passwords = Vec::new(); + let mut public_keys = Vec::new(); + for uuid in cache.uuids() { + let def = definitions_map.get(uuid).expect("Existence checked before"); + let pw = match &def.signing_definition { + SigningDefinition::LocalKeystore { + voting_keystore_password_path, + voting_keystore_password, + voting_keystore_path, + } => { + if let Some(p) = voting_keystore_password { + p.as_ref().to_vec().into() + } else if let Some(path) = voting_keystore_password_path { + read_password(path).map_err(Error::UnableToReadVotingKeystorePassword)? + } else { + let keystore = open_keystore(voting_keystore_path)?; + unlock_keystore_via_stdin_password(&keystore, &voting_keystore_path)? + .0 + .as_ref() + .to_vec() + .into() + } + } + }; + passwords.push(pw); + public_keys.push(def.voting_public_key.clone()); + } + + //decrypt + tokio::task::spawn_blocking(move || match cache.decrypt(passwords, public_keys) { + Ok(_) | Err(key_cache::Error::AlreadyDecrypted) => cache, + _ => KeyCache::new(), + }) + .await + .map_err(Error::TokioJoin) + } + /// Scans `self.definitions` and attempts to initialize and validators which are not already /// initialized. /// @@ -362,20 +527,49 @@ impl InitializedValidators { /// A validator is considered "already known" and skipped if the public key is already known. /// I.e., if there are two different definitions with the same public key then the second will /// be ignored. - fn update_validators(&mut self) -> Result<(), Error> { + async fn update_validators(&mut self) -> Result<(), Error> { + //use key cache if available + let mut key_stores = HashMap::new(); + + // Create a lock file for the cache + let key_cache_path = KeyCache::cache_file_path(&self.validators_dir); + let cache_lockfile_path = get_lockfile_path(&key_cache_path) + .ok_or_else(|| Error::BadKeyCachePath(key_cache_path))?; + create_lock_file(&cache_lockfile_path, self.delete_lockfiles, &self.log)?; + + let mut key_cache = self + .decrypt_key_cache( + KeyCache::open_or_create(&self.validators_dir) + .map_err(Error::UnableToOpenKeyCache)?, + &mut key_stores, + ) + .await?; + + let mut disabled_uuids = HashSet::new(); for def in self.definitions.as_slice() { if def.enabled { match &def.signing_definition { - SigningDefinition::LocalKeystore { .. } => { + SigningDefinition::LocalKeystore { + voting_keystore_path, + .. 
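Both `from_definition` and `decrypt_key_cache` above use the `HashMap` entry API so each keystore file is parsed at most once, even when several definitions reference the same path. A minimal sketch of that memoisation, with `parse` as a hypothetical stand-in for `open_keystore`:

use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::path::{Path, PathBuf};

// Stand-in for `open_keystore`; the real code returns a `Keystore`.
fn parse(path: &Path) -> String {
    format!("keystore at {}", path.display())
}

// Return the cached parse result, parsing only on the first request.
fn get_or_parse<'a>(cache: &'a mut HashMap<PathBuf, String>, path: &Path) -> &'a mut String {
    match cache.entry(path.to_path_buf()) {
        Entry::Vacant(e) => e.insert(parse(path)),
        Entry::Occupied(e) => e.into_mut(),
    }
}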
+ } => { if self.validators.contains_key(&def.voting_public_key) { continue; } + if let Some(key_store) = key_stores.get(voting_keystore_path) { + disabled_uuids.remove(key_store.uuid()); + } + match InitializedValidator::from_definition( def.clone(), - self.strict_lockfiles, + self.delete_lockfiles, &self.log, - ) { + &mut key_cache, + &mut key_stores, + ) + .await + { Ok(init) => { self.validators .insert(init.voting_public_key().clone(), init); @@ -401,6 +595,17 @@ impl InitializedValidators { } } else { self.validators.remove(&def.voting_public_key); + match &def.signing_definition { + SigningDefinition::LocalKeystore { + voting_keystore_path, + .. + } => { + if let Some(key_store) = key_stores.get(voting_keystore_path) { + disabled_uuids.insert(*key_store.uuid()); + } + } + } + info!( self.log, "Disabled validator"; @@ -408,6 +613,31 @@ impl InitializedValidators { ); } } + for uuid in disabled_uuids { + key_cache.remove(&uuid); + } + + let validators_dir = self.validators_dir.clone(); + let log = self.log.clone(); + if key_cache.is_modified() { + tokio::task::spawn_blocking(move || { + match key_cache.save(validators_dir) { + Err(e) => warn!( + log, + "Error during saving of key_cache"; + "err" => format!("{:?}", e) + ), + Ok(true) => info!(log, "Modified key_cache saved successfully"), + _ => {} + }; + remove_lock(&cache_lockfile_path); + }) + .await + .map_err(Error::TokioJoin)?; + } else { + debug!(log, "Key cache not modified"); + remove_lock(&cache_lockfile_path); + } Ok(()) } } diff --git a/validator_client/src/is_synced.rs b/validator_client/src/is_synced.rs index e1017ac77..f967d629c 100644 --- a/validator_client/src/is_synced.rs +++ b/validator_client/src/is_synced.rs @@ -1,8 +1,6 @@ -use remote_beacon_node::RemoteBeaconNode; -use rest_types::SyncingResponse; -use slog::{debug, error, Logger}; +use eth2::BeaconNodeHttpClient; +use slog::{debug, error, warn, Logger}; use slot_clock::SlotClock; -use types::EthSpec; /// A distance in slots. const SYNC_TOLERANCE: u64 = 4; @@ -17,19 +15,19 @@ const SYNC_TOLERANCE: u64 = 4; /// /// The second condition means the even if the beacon node thinks that it's syncing, we'll still /// try to use it if it's close enough to the head. -pub async fn is_synced<T: SlotClock, E: EthSpec>( - beacon_node: &RemoteBeaconNode<E>, +pub async fn is_synced<T: SlotClock>( + beacon_node: &BeaconNodeHttpClient, slot_clock: &T, log_opt: Option<&Logger>, ) -> bool { - let resp = match beacon_node.http.node().syncing_status().await { + let resp = match beacon_node.get_node_syncing().await { Ok(resp) => resp, Err(e) => { if let Some(log) = log_opt { error!( log, "Unable connect to beacon node"; - "error" => format!("{:?}", e) + "error" => e.to_string() ) } @@ -37,44 +35,38 @@ pub async fn is_synced<T: SlotClock, E: EthSpec>( } }; - match &resp { - SyncingResponse { - is_syncing: false, .. 
- } => true, - SyncingResponse { - is_syncing: true, - sync_status, - } => { - if let Some(log) = log_opt { - debug!( + let is_synced = !resp.data.is_syncing || (resp.data.sync_distance.as_u64() < SYNC_TOLERANCE); + + if let Some(log) = log_opt { + if !is_synced { + debug!( + log, + "Beacon node sync status"; + "status" => format!("{:?}", resp), + ); + + warn!( + log, + "Beacon node is syncing"; + "msg" => "not receiving new duties", + "sync_distance" => resp.data.sync_distance.as_u64(), + "head_slot" => resp.data.head_slot.as_u64(), + ); + } + + if let Some(local_slot) = slot_clock.now() { + let remote_slot = resp.data.head_slot + resp.data.sync_distance; + if remote_slot + 1 < local_slot || local_slot + 1 < remote_slot { + error!( log, - "Beacon node sync status"; - "status" => format!("{:?}", resp), + "Time discrepancy with beacon node"; + "msg" => "check the system time on this host and the beacon node", + "beacon_node_slot" => remote_slot, + "local_slot" => local_slot, ); } - - let now = if let Some(slot) = slot_clock.now() { - slot - } else { - // There's no good reason why we shouldn't be able to read the slot clock, so we'll - // indicate we're not synced if that's the case. - return false; - }; - - if sync_status.current_slot + SYNC_TOLERANCE >= now { - true - } else { - if let Some(log) = log_opt { - error!( - log, - "Beacon node is syncing"; - "msg" => "not receiving new duties", - "target_slot" => sync_status.highest_slot.as_u64(), - "current_slot" => sync_status.current_slot.as_u64(), - ); - } - false - } } } + + is_synced } diff --git a/validator_client/src/key_cache.rs b/validator_client/src/key_cache.rs new file mode 100644 index 000000000..6da06aaa1 --- /dev/null +++ b/validator_client/src/key_cache.rs @@ -0,0 +1,347 @@ +use account_utils::create_with_600_perms; +use bls::{Keypair, PublicKey}; +use eth2_keystore::json_keystore::{ + Aes128Ctr, ChecksumModule, Cipher, CipherModule, Crypto, EmptyMap, EmptyString, KdfModule, + Sha256Checksum, +}; +use eth2_keystore::{ + decrypt, default_kdf, encrypt, keypair_from_secret, Error as KeystoreError, PlainText, Uuid, + ZeroizeHash, IV_SIZE, SALT_SIZE, +}; +use rand::prelude::*; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::fs::OpenOptions; +use std::path::{Path, PathBuf}; +use std::{fs, io}; + +/// The file name for the serialized `KeyCache` struct. 
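The `is_synced` rewrite above reduces to two small predicates: tolerate a syncing beacon node that is within `SYNC_TOLERANCE` slots of the head, and flag a likely clock discrepancy when local and remote slots disagree by more than one. Extracted here as plain functions for clarity (a sketch, not the actual API):

const SYNC_TOLERANCE: u64 = 4;

fn is_synced(is_syncing: bool, sync_distance: u64) -> bool {
    !is_syncing || sync_distance < SYNC_TOLERANCE
}

fn slots_disagree(local_slot: u64, remote_slot: u64) -> bool {
    // More than one slot of difference in either direction suggests the
    // system clocks on this host and the beacon node disagree.
    remote_slot + 1 < local_slot || local_slot + 1 < remote_slot
}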
+pub const CACHE_FILENAME: &str = "validator_key_cache.json"; + +#[derive(Debug, Copy, Clone, PartialEq)] +pub enum State { + NotDecrypted, + DecryptedAndSaved, + DecryptedWithUnsavedUpdates, +} + +fn not_decrypted() -> State { + State::NotDecrypted +} + +#[derive(Clone, Serialize, Deserialize)] +pub struct KeyCache { + crypto: Crypto, + uuids: Vec<Uuid>, + #[serde(skip)] + pairs: HashMap<Uuid, Keypair>, //maps public keystore uuids to their corresponding Keypair + #[serde(skip)] + passwords: Vec<PlainText>, + #[serde(skip)] + #[serde(default = "not_decrypted")] + state: State, +} + +type SerializedKeyMap = HashMap<Uuid, ZeroizeHash>; + +impl KeyCache { + pub fn new() -> Self { + KeyCache { + uuids: Vec::new(), + crypto: Self::init_crypto(), + pairs: HashMap::new(), + passwords: Vec::new(), + state: State::DecryptedWithUnsavedUpdates, + } + } + + pub fn init_crypto() -> Crypto { + let salt = rand::thread_rng().gen::<[u8; SALT_SIZE]>(); + let iv = rand::thread_rng().gen::<[u8; IV_SIZE]>().to_vec().into(); + + let kdf = default_kdf(salt.to_vec()); + let cipher = Cipher::Aes128Ctr(Aes128Ctr { iv }); + + Crypto { + kdf: KdfModule { + function: kdf.function(), + params: kdf, + message: EmptyString, + }, + checksum: ChecksumModule { + function: Sha256Checksum::function(), + params: EmptyMap, + message: Vec::new().into(), + }, + cipher: CipherModule { + function: cipher.function(), + params: cipher, + message: Vec::new().into(), + }, + } + } + + pub fn cache_file_path<P: AsRef<Path>>(validators_dir: P) -> PathBuf { + validators_dir.as_ref().join(CACHE_FILENAME) + } + + /// Open an existing file or create a new, empty one if it does not exist. + pub fn open_or_create<P: AsRef<Path>>(validators_dir: P) -> Result<Self, Error> { + let cache_path = Self::cache_file_path(validators_dir.as_ref()); + if !cache_path.exists() { + Ok(Self::new()) + } else { + Self::open(validators_dir) + } + } + + /// Open an existing file, returning an error if the file does not exist. + pub fn open<P: AsRef<Path>>(validators_dir: P) -> Result<Self, Error> { + let cache_path = validators_dir.as_ref().join(CACHE_FILENAME); + let file = OpenOptions::new() + .read(true) + .create_new(false) + .open(&cache_path) + .map_err(Error::UnableToOpenFile)?; + serde_json::from_reader(file).map_err(Error::UnableToParseFile) + } + + fn encrypt(&mut self) -> Result<(), Error> { + self.crypto = Self::init_crypto(); + let secret_map: SerializedKeyMap = self + .pairs + .iter() + .map(|(k, v)| (*k, v.sk.serialize())) + .collect(); + + let raw = PlainText::from( + bincode::serialize(&secret_map).map_err(Error::UnableToSerializeKeyMap)?, + ); + let (cipher_text, checksum) = encrypt( + raw.as_ref(), + Self::password(&self.passwords).as_ref(), + &self.crypto.kdf.params, + &self.crypto.cipher.params, + ) + .map_err(Error::UnableToEncrypt)?; + + self.crypto.cipher.message = cipher_text.into(); + self.crypto.checksum.message = checksum.to_vec().into(); + Ok(()) + } + + /// Stores `Self` encrypted in json format. + /// + /// Will create a new file if it does not exist or over-write any existing file. 
+ /// Returns false iff there are no unsaved changes + pub fn save<P: AsRef<Path>>(&mut self, validators_dir: P) -> Result<bool, Error> { + if self.is_modified() { + self.encrypt()?; + + let cache_path = validators_dir.as_ref().join(CACHE_FILENAME); + let bytes = serde_json::to_vec(self).map_err(Error::UnableToEncodeFile)?; + + let res = if cache_path.exists() { + fs::write(cache_path, &bytes).map_err(Error::UnableToWriteFile) + } else { + create_with_600_perms(&cache_path, &bytes).map_err(Error::UnableToWriteFile) + }; + if res.is_ok() { + self.state = State::DecryptedAndSaved; + } + res.map(|_| true) + } else { + Ok(false) + } + } + + pub fn is_modified(&self) -> bool { + self.state == State::DecryptedWithUnsavedUpdates + } + + pub fn uuids(&self) -> &Vec<Uuid> { + &self.uuids + } + + fn password(passwords: &[PlainText]) -> PlainText { + PlainText::from(passwords.iter().fold(Vec::new(), |mut v, p| { + v.extend(p.as_ref()); + v + })) + } + + pub fn decrypt( + &mut self, + passwords: Vec<PlainText>, + public_keys: Vec<PublicKey>, + ) -> Result<&HashMap<Uuid, Keypair>, Error> { + match self.state { + State::NotDecrypted => { + let password = Self::password(&passwords); + let text = + decrypt(password.as_ref(), &self.crypto).map_err(Error::UnableToDecrypt)?; + let key_map: SerializedKeyMap = + bincode::deserialize(text.as_bytes()).map_err(Error::UnableToParseKeyMap)?; + self.passwords = passwords; + self.pairs = HashMap::new(); + if public_keys.len() != self.uuids.len() { + return Err(Error::PublicKeyMismatch); + } + for (uuid, public_key) in self.uuids.iter().zip(public_keys.iter()) { + if let Some(secret) = key_map.get(uuid) { + let key_pair = keypair_from_secret(secret.as_ref()) + .map_err(Error::UnableToParseKeyPair)?; + if &key_pair.pk != public_key { + return Err(Error::PublicKeyMismatch); + } + self.pairs.insert(*uuid, key_pair); + } else { + return Err(Error::MissingUuidKey); + } + } + self.state = State::DecryptedAndSaved; + Ok(&self.pairs) + } + _ => Err(Error::AlreadyDecrypted), + } + } + + pub fn remove(&mut self, uuid: &Uuid) { + //do nothing in not decrypted state + if let State::NotDecrypted = self.state { + return; + } + self.pairs.remove(uuid); + if let Some(pos) = self.uuids.iter().position(|uuid2| uuid2 == uuid) { + self.uuids.remove(pos); + self.passwords.remove(pos); + } + self.state = State::DecryptedWithUnsavedUpdates; + } + + pub fn add(&mut self, keypair: Keypair, uuid: &Uuid, password: PlainText) { + //do nothing in not decrypted state + if let State::NotDecrypted = self.state { + return; + } + self.pairs.insert(*uuid, keypair); + self.uuids.push(*uuid); + self.passwords.push(password); + self.state = State::DecryptedWithUnsavedUpdates; + } + + pub fn get(&self, uuid: &Uuid) -> Option<Keypair> { + self.pairs.get(uuid).cloned() + } +} + +#[derive(Debug)] +pub enum Error { + /// The cache file could not be opened. + UnableToOpenFile(io::Error), + /// The cache file could not be parsed as JSON. + UnableToParseFile(serde_json::Error), + /// The cache file could not be serialized as YAML. + UnableToEncodeFile(serde_json::Error), + /// The cache file could not be written to the filesystem. 
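The `password` helper above derives the cache password by concatenating every per-keystore password in uuid order, so the cache only opens when all constituent passwords are present and unchanged; note that `encrypt` also re-runs `init_crypto`, so every save uses a fresh salt and IV. The fold is simple enough to restate standalone:

// Concatenate the per-keystore passwords, in uuid order, into the single
// secret that encrypts the whole key cache.
fn cache_password(passwords: &[Vec<u8>]) -> Vec<u8> {
    passwords.iter().fold(Vec::new(), |mut acc, p| {
        acc.extend_from_slice(p);
        acc
    })
}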
+ UnableToWriteFile(io::Error), + /// Couldn't decrypt the cache file + UnableToDecrypt(KeystoreError), + UnableToEncrypt(KeystoreError), + /// Couldn't decode the decrypted hashmap + UnableToParseKeyMap(bincode::Error), + UnableToParseKeyPair(KeystoreError), + UnableToSerializeKeyMap(bincode::Error), + PublicKeyMismatch, + MissingUuidKey, + /// Cache file is already decrypted + AlreadyDecrypted, +} + +#[cfg(test)] +mod tests { + use super::*; + use eth2_keystore::json_keystore::{HexBytes, Kdf}; + + #[derive(Debug, Clone, Serialize, Deserialize)] + pub struct KeyCacheTest { + pub params: Kdf, + //pub checksum: ChecksumModule, + //pub cipher: CipherModule, + uuids: Vec<Uuid>, + } + + #[tokio::test] + async fn test_serialization() { + let mut key_cache = KeyCache::new(); + let key_pair = Keypair::random(); + let uuid = Uuid::from_u128(1); + let password = PlainText::from(vec![1, 2, 3, 4, 5, 6]); + key_cache.add(key_pair, &uuid, password); + + key_cache.crypto.cipher.message = HexBytes::from(vec![7, 8, 9]); + key_cache.crypto.checksum.message = HexBytes::from(vec![10, 11, 12]); + + let binary = serde_json::to_vec(&key_cache).unwrap(); + let clone: KeyCache = serde_json::from_slice(binary.as_ref()).unwrap(); + + assert_eq!(clone.crypto, key_cache.crypto); + assert_eq!(clone.uuids, key_cache.uuids); + } + + #[tokio::test] + async fn test_encryption() { + let mut key_cache = KeyCache::new(); + let keypairs = vec![Keypair::random(), Keypair::random()]; + let uuids = vec![Uuid::from_u128(1), Uuid::from_u128(2)]; + let passwords = vec![ + PlainText::from(vec![1, 2, 3, 4, 5, 6]), + PlainText::from(vec![7, 8, 9, 10, 11, 12]), + ]; + + for ((keypair, uuid), password) in keypairs.iter().zip(uuids.iter()).zip(passwords.iter()) { + key_cache.add(keypair.clone(), uuid, password.clone()); + } + + key_cache.encrypt().unwrap(); + key_cache.state = State::DecryptedAndSaved; + + assert_eq!(&key_cache.uuids, &uuids); + + let mut new_clone = KeyCache { + crypto: key_cache.crypto.clone(), + uuids: key_cache.uuids.clone(), + pairs: Default::default(), + passwords: vec![], + state: State::NotDecrypted, + }; + + new_clone + .decrypt(passwords, keypairs.iter().map(|p| p.pk.clone()).collect()) + .unwrap(); + + let passwords_to_plain = |cache: &KeyCache| -> Vec<Vec<u8>> { + cache + .passwords + .iter() + .map(|x| x.as_bytes().to_vec()) + .collect() + }; + + assert_eq!(key_cache.crypto, new_clone.crypto); + assert_eq!( + passwords_to_plain(&key_cache), + passwords_to_plain(&new_clone) + ); + assert_eq!(key_cache.uuids, new_clone.uuids); + assert_eq!(key_cache.state, new_clone.state); + assert_eq!(key_cache.pairs.len(), new_clone.pairs.len()); + for (key, value) in key_cache.pairs { + assert!(new_clone.pairs.contains_key(&key)); + assert_eq!( + format!("{:?}", value), + format!("{:?}", new_clone.pairs[&key]) + ); + } + } +} diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 220d82a66..5591a6cbf 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -6,9 +6,13 @@ mod duties_service; mod fork_service; mod initialized_validators; mod is_synced; +mod key_cache; mod notifier; +mod validator_duty; mod validator_store; +pub mod http_api; + pub use cli::cli_app; pub use config::Config; @@ -18,17 +22,22 @@ use block_service::{BlockService, BlockServiceBuilder}; use clap::ArgMatches; use duties_service::{DutiesService, DutiesServiceBuilder}; use environment::RuntimeContext; +use eth2::{reqwest::ClientBuilder, BeaconNodeHttpClient, StatusCode, Url}; use fork_service::{ForkService, 
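The round-trip tests above hinge on two serde details of `KeyCache`: secret fields are `#[serde(skip)]`ed so only ciphertext ever reaches disk, and the state field defaults to `NotDecrypted` on load. A minimal sketch of the same shape, using a hypothetical `OnDisk` struct:

use serde::{Deserialize, Serialize};

fn locked() -> bool {
    true
}

#[derive(Serialize, Deserialize)]
struct OnDisk {
    // Only the ciphertext is serialized.
    ciphertext: Vec<u8>,
    // Secrets never reach disk.
    #[serde(skip)]
    secrets: Vec<u8>,
    // A freshly-deserialized value starts in the locked state.
    #[serde(skip)]
    #[serde(default = "locked")]
    locked: bool,
}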
ForkServiceBuilder}; use futures::channel::mpsc; +use http_api::ApiSecret; use initialized_validators::InitializedValidators; use notifier::spawn_notifier; -use remote_beacon_node::RemoteBeaconNode; +use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; use slog::{error, info, Logger}; use slot_clock::SlotClock; use slot_clock::SystemTimeSlotClock; +use std::marker::PhantomData; +use std::net::SocketAddr; +use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; use tokio::time::{delay_for, Duration}; -use types::EthSpec; +use types::{EthSpec, Hash256, YamlConfig}; use validator_store::ValidatorStore; /// The interval between attempts to contact the beacon node during startup. @@ -40,9 +49,11 @@ const HTTP_TIMEOUT: Duration = Duration::from_secs(12); pub struct ProductionValidatorClient<T: EthSpec> { context: RuntimeContext<T>, duties_service: DutiesService<SystemTimeSlotClock, T>, - fork_service: ForkService<SystemTimeSlotClock, T>, + fork_service: ForkService<SystemTimeSlotClock>, block_service: BlockService<SystemTimeSlotClock, T>, attestation_service: AttestationService<SystemTimeSlotClock, T>, + validator_store: ValidatorStore<SystemTimeSlotClock, T>, + http_api_listen_addr: Option<SocketAddr>, config: Config, } @@ -53,32 +64,32 @@ impl<T: EthSpec> ProductionValidatorClient<T> { context: RuntimeContext<T>, cli_args: &ArgMatches<'_>, ) -> Result<Self, String> { - let config = Config::from_cli(&cli_args) + let config = Config::from_cli(&cli_args, context.log()) .map_err(|e| format!("Unable to initialize config: {}", e))?; Self::new(context, config).await } /// Instantiates the validator client, _without_ starting the timers to trigger block /// and attestation production. - pub async fn new(mut context: RuntimeContext<T>, config: Config) -> Result<Self, String> { + pub async fn new(context: RuntimeContext<T>, config: Config) -> Result<Self, String> { let log = context.log().clone(); info!( log, "Starting validator client"; - "beacon_node" => &config.http_server, - "datadir" => format!("{:?}", config.data_dir), + "beacon_node" => &config.beacon_node, + "validator_dir" => format!("{:?}", config.validator_dir), ); - let mut validator_defs = ValidatorDefinitions::open_or_create(&config.data_dir) + let mut validator_defs = ValidatorDefinitions::open_or_create(&config.validator_dir) .map_err(|e| format!("Unable to open or create validator definitions: {:?}", e))?; if !config.disable_auto_discover { let new_validators = validator_defs - .discover_local_keystores(&config.data_dir, &config.secrets_dir, &log) + .discover_local_keystores(&config.validator_dir, &config.secrets_dir, &log) .map_err(|e| format!("Unable to discover local validator keystores: {:?}", e))?; validator_defs - .save(&config.data_dir) + .save(&config.validator_dir) .map_err(|e| format!("Unable to update validator definitions: {:?}", e))?; info!( log, @@ -89,12 +100,51 @@ impl<T: EthSpec> ProductionValidatorClient<T> { let validators = InitializedValidators::from_definitions( validator_defs, - config.data_dir.clone(), - config.strict_lockfiles, + config.validator_dir.clone(), + config.delete_lockfiles, log.clone(), ) + .await .map_err(|e| format!("Unable to initialize validators: {:?}", e))?; + // Initialize slashing protection. 
+ let slashing_db_path = config.validator_dir.join(SLASHING_PROTECTION_FILENAME); + let slashing_protection = if config.init_slashing_protection { + SlashingDatabase::open_or_create(&slashing_db_path).map_err(|e| { + format!( + "Failed to open or create slashing protection database: {:?}", + e + ) + }) + } else { + SlashingDatabase::open(&slashing_db_path).map_err(|e| { + format!( + "Failed to open slashing protection database: {:?}.\n\ + Ensure that `slashing_protection.sqlite` is in {:?} folder", + e, config.validator_dir + ) + }) + }?; + + // Check validator registration with slashing protection, or auto-register all validators. + if config.init_slashing_protection { + slashing_protection + .register_validators(validators.iter_voting_pubkeys()) + .map_err(|e| format!("Error while registering slashing protection: {:?}", e))?; + } else { + slashing_protection + .check_validator_registrations(validators.iter_voting_pubkeys()) + .map_err(|e| { + format!( + "One or more validators not found in slashing protection database.\n\ + Ensure you haven't misplaced your slashing protection database, or \ + carefully consider running with --init-slashing-protection (see --help). \ + Error: {:?}", + e + ) + })?; + } + info!( log, "Initialized validators"; @@ -102,77 +152,35 @@ impl<T: EthSpec> ProductionValidatorClient<T> { "enabled" => validators.num_enabled(), ); + let beacon_node_url: Url = config + .beacon_node + .parse() + .map_err(|e| format!("Unable to parse beacon node URL: {:?}", e))?; + let beacon_node_http_client = ClientBuilder::new() + .timeout(HTTP_TIMEOUT) + .build() + .map_err(|e| format!("Unable to build HTTP client: {:?}", e))?; let beacon_node = - RemoteBeaconNode::new_with_timeout(config.http_server.clone(), HTTP_TIMEOUT) - .map_err(|e| format!("Unable to init beacon node http client: {}", e))?; + BeaconNodeHttpClient::from_components(beacon_node_url, beacon_node_http_client); - // TODO: check if all logs in wait_for_node are produed while awaiting - let beacon_node = wait_for_node(beacon_node, &log).await?; - let eth2_config = beacon_node - .http - .spec() - .get_eth2_config() - .await - .map_err(|e| format!("Unable to read eth2 config from beacon node: {:?}", e))?; - let genesis_time = beacon_node - .http - .beacon() - .get_genesis_time() - .await - .map_err(|e| format!("Unable to read genesis time from beacon node: {:?}", e))?; - let now = SystemTime::now() - .duration_since(UNIX_EPOCH) - .map_err(|e| format!("Unable to read system time: {:?}", e))?; - let genesis = Duration::from_secs(genesis_time); + // Perform some potentially long-running initialization tasks. + let (yaml_config, genesis_time, genesis_validators_root) = tokio::select! { + tuple = init_from_beacon_node(&beacon_node, &context) => tuple?, + () = context.executor.exit() => return Err("Shutting down".to_string()) + }; + let beacon_node_spec = yaml_config.apply_to_chain_spec::<T>(&T::default_spec()) + .ok_or_else(|| + "The minimal/mainnet spec type of the beacon node does not match the validator client. \ + See the --testnet command.".to_string() + )?; - // If the time now is less than (prior to) genesis, then delay until the - // genesis instant. - // - // If the validator client starts before genesis, it will get errors from - // the slot clock. 
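The `tokio::select!` above races the potentially long-running beacon-node initialization against the executor's exit signal, so Ctrl-C is honoured even during startup. A self-contained sketch of the pattern, using current tokio naming (the `delay_for` seen in this diff later became `sleep`); `init` and `shutdown_signal` are hypothetical stand-ins for `init_from_beacon_node` and `context.executor.exit()`:

use std::time::Duration;
use tokio::time::sleep;

async fn init() -> u64 {
    sleep(Duration::from_millis(100)).await;
    42
}

async fn shutdown_signal() {
    sleep(Duration::from_secs(3600)).await;
}

// Whichever future completes first decides the outcome.
async fn start() -> Result<u64, String> {
    tokio::select! {
        value = init() => Ok(value),
        () = shutdown_signal() => Err("Shutting down".to_string()),
    }
}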
- if now < genesis { - info!( - log, - "Starting node prior to genesis"; - "seconds_to_wait" => (genesis - now).as_secs() - ); - - delay_for(genesis - now).await - } else { - info!( - log, - "Genesis has already occurred"; - "seconds_ago" => (now - genesis).as_secs() + if context.eth2_config.spec != beacon_node_spec { + return Err( + "The beacon node is using a different Eth2 specification to this validator client. \ + See the --testnet command." + .to_string(), ); } - let genesis_validators_root = beacon_node - .http - .beacon() - .get_genesis_validators_root() - .await - .map_err(|e| { - format!( - "Unable to read genesis validators root from beacon node: {:?}", - e - ) - })?; - - // Do not permit a connection to a beacon node using different spec constants. - if context.eth2_config.spec_constants != eth2_config.spec_constants { - return Err(format!( - "Beacon node is using an incompatible spec. Got {}, expected {}", - eth2_config.spec_constants, context.eth2_config.spec_constants - )); - } - - // Note: here we just assume the spec variables of the remote node. This is very useful - // for testnets, but perhaps a security issue when it comes to mainnet. - // - // A damaging attack would be for a beacon node to convince the validator client of a - // different `SLOTS_PER_EPOCH` variable. This could result in slashable messages being - // produced. We are safe from this because `SLOTS_PER_EPOCH` is a type-level constant - // for Lighthouse. - context.eth2_config = eth2_config; let slot_clock = SystemTimeSlotClock::new( context.eth2_config.spec.genesis_slot, @@ -183,17 +191,17 @@ impl<T: EthSpec> ProductionValidatorClient<T> { let fork_service = ForkServiceBuilder::new() .slot_clock(slot_clock.clone()) .beacon_node(beacon_node.clone()) - .runtime_context(context.service_context("fork".into())) + .log(log.clone()) .build()?; let validator_store: ValidatorStore<SystemTimeSlotClock, T> = ValidatorStore::new( validators, - &config, + slashing_protection, genesis_validators_root, context.eth2_config.spec.clone(), fork_service.clone(), log.clone(), - )?; + ); info!( log, @@ -201,8 +209,6 @@ impl<T: EthSpec> ProductionValidatorClient<T> { "voting_validators" => validator_store.num_voting_validators() ); - validator_store.register_all_validators_for_slashing_protection()?; - let duties_service = DutiesServiceBuilder::new() .slot_clock(slot_clock.clone()) .validator_store(validator_store.clone()) @@ -222,7 +228,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> { let attestation_service = AttestationServiceBuilder::new() .duties_service(duties_service.clone()) .slot_clock(slot_clock) - .validator_store(validator_store) + .validator_store(validator_store.clone()) .beacon_node(beacon_node) .runtime_context(context.service_context("attestation".into())) .build()?; @@ -233,7 +239,9 @@ impl<T: EthSpec> ProductionValidatorClient<T> { fork_service, block_service, attestation_service, + validator_store, config, + http_api_listen_addr: None, }) } @@ -243,15 +251,19 @@ impl<T: EthSpec> ProductionValidatorClient<T> { // whole epoch! 
let channel_capacity = T::slots_per_epoch() as usize; let (block_service_tx, block_service_rx) = mpsc::channel(channel_capacity); + let log = self.context.log(); self.duties_service .clone() - .start_update_service(block_service_tx, &self.context.eth2_config.spec) + .start_update_service( + block_service_tx, + Arc::new(self.context.eth2_config.spec.clone()), + ) .map_err(|e| format!("Unable to start duties service: {}", e))?; self.fork_service .clone() - .start_update_service(&self.context.eth2_config.spec) + .start_update_service(&self.context) .map_err(|e| format!("Unable to start fork service: {}", e))?; self.block_service @@ -266,26 +278,119 @@ impl<T: EthSpec> ProductionValidatorClient<T> { spawn_notifier(self).map_err(|e| format!("Failed to start notifier: {}", e))?; + let api_secret = ApiSecret::create_or_open(&self.config.validator_dir)?; + + self.http_api_listen_addr = if self.config.http_api.enabled { + let ctx: Arc<http_api::Context<SystemTimeSlotClock, T>> = Arc::new(http_api::Context { + api_secret, + validator_store: Some(self.validator_store.clone()), + validator_dir: Some(self.config.validator_dir.clone()), + spec: self.context.eth2_config.spec.clone(), + config: self.config.http_api.clone(), + log: log.clone(), + _phantom: PhantomData, + }); + + let exit = self.context.executor.exit(); + + let (listen_addr, server) = http_api::serve(ctx, exit) + .map_err(|e| format!("Unable to start HTTP API server: {:?}", e))?; + + self.context + .clone() + .executor + .spawn_without_exit(async move { server.await }, "http-api"); + + Some(listen_addr) + } else { + info!(log, "HTTP API server is disabled"); + None + }; + Ok(()) } } +async fn init_from_beacon_node<E: EthSpec>( + beacon_node: &BeaconNodeHttpClient, + context: &RuntimeContext<E>, +) -> Result<(YamlConfig, u64, Hash256), String> { + // Wait for the beacon node to come online. + wait_for_node(beacon_node, context.log()).await?; + + let yaml_config = beacon_node + .get_config_spec() + .await + .map_err(|e| format!("Unable to read spec from beacon node: {:?}", e))? + .data; + + let genesis = loop { + match beacon_node.get_beacon_genesis().await { + Ok(genesis) => break genesis.data, + Err(e) => { + // A 404 error on the genesis endpoint indicates that genesis has not yet occurred. + if e.status() == Some(StatusCode::NOT_FOUND) { + info!( + context.log(), + "Waiting for genesis"; + ); + } else { + error!( + context.log(), + "Error polling beacon node"; + "error" => format!("{:?}", e) + ); + } + } + } + + delay_for(RETRY_DELAY).await; + }; + + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|e| format!("Unable to read system time: {:?}", e))?; + let genesis_time = Duration::from_secs(genesis.genesis_time); + + // If the time now is less than (prior to) genesis, then delay until the + // genesis instant. + // + // If the validator client starts before genesis, it will get errors from + // the slot clock. + if now < genesis_time { + info!( + context.log(), + "Starting node prior to genesis"; + "seconds_to_wait" => (genesis_time - now).as_secs() + ); + + delay_for(genesis_time - now).await; + } else { + info!( + context.log(), + "Genesis has already occurred"; + "seconds_ago" => (now - genesis_time).as_secs() + ); + } + + Ok(( + yaml_config, + genesis.genesis_time, + genesis.genesis_validators_root, + )) +} + /// Request the version from the node, looping back and trying again on failure. Exit once the node /// has been contacted. 
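In `init_from_beacon_node` above, a 404 from the genesis endpoint is not a failure: it means genesis simply has not occurred yet, so the client keeps polling. A sketch of that loop, with a hypothetical `fetch_genesis` standing in for `get_beacon_genesis`:

use std::time::Duration;
use tokio::time::sleep;

// Stand-in for an HTTP 404 from the genesis endpoint.
struct NotYet;

// Hypothetical stand-in for `BeaconNodeHttpClient::get_beacon_genesis`.
async fn fetch_genesis(attempt: &mut u32) -> Result<u64, NotYet> {
    *attempt += 1;
    if *attempt < 3 {
        Err(NotYet)
    } else {
        Ok(1_606_824_023)
    }
}

async fn wait_for_genesis() -> u64 {
    let mut attempt = 0;
    loop {
        match fetch_genesis(&mut attempt).await {
            Ok(genesis_time) => break genesis_time,
            // Not-found means "genesis hasn't happened": retry after a delay.
            Err(NotYet) => sleep(Duration::from_secs(1)).await,
        }
    }
}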
-async fn wait_for_node<E: EthSpec>( - beacon_node: RemoteBeaconNode<E>, - log: &Logger, -) -> Result<RemoteBeaconNode<E>, String> { +async fn wait_for_node(beacon_node: &BeaconNodeHttpClient, log: &Logger) -> Result<(), String> { // Try to get the version string from the node, looping until success is returned. loop { let log = log.clone(); let result = beacon_node - .clone() - .http - .node() - .get_version() + .get_node_version() .await - .map_err(|e| format!("{:?}", e)); + .map_err(|e| format!("{:?}", e)) + .map(|body| body.data.version); match result { Ok(version) => { @@ -295,7 +400,7 @@ async fn wait_for_node<E: EthSpec>( "version" => version, ); - return Ok(beacon_node); + return Ok(()); } Err(e) => { error!( diff --git a/validator_client/src/notifier.rs b/validator_client/src/notifier.rs index d9ee7faec..c997979b9 100644 --- a/validator_client/src/notifier.rs +++ b/validator_client/src/notifier.rs @@ -45,7 +45,12 @@ pub fn spawn_notifier<T: EthSpec>(client: &ProductionValidatorClient<T>) -> Resu let attesting_validators = duties_service.attester_count(epoch); if total_validators == 0 { - error!(log, "No validators present") + info!( + log, + "No validators present"; + "msg" => "see `lighthouse account validator create --help` \ + or the HTTP API documentation" + ) } else if total_validators == attesting_validators { info!( log, diff --git a/validator_client/src/validator_duty.rs b/validator_client/src/validator_duty.rs new file mode 100644 index 000000000..e5f56c385 --- /dev/null +++ b/validator_client/src/validator_duty.rs @@ -0,0 +1,131 @@ +use eth2::{ + types::{BeaconCommitteeSubscription, StateId, ValidatorId}, + BeaconNodeHttpClient, +}; +use serde::{Deserialize, Serialize}; +use types::{CommitteeIndex, Epoch, PublicKey, PublicKeyBytes, Slot}; + +/// This struct is being used as a shim since we deprecated the `rest_api` in favour of `http_api`. +/// +/// Tracking issue: https://github.com/sigp/lighthouse/issues/1643 +// NOTE: if you add or remove fields, please adjust `eq_ignoring_proposal_slots` +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +pub struct ValidatorDuty { + /// The validator's BLS public key, uniquely identifying them. + pub validator_pubkey: PublicKey, + /// The validator's index in `state.validators` + pub validator_index: Option<u64>, + /// The slot at which the validator must attest. + pub attestation_slot: Option<Slot>, + /// The index of the committee within `slot` of which the validator is a member. + pub attestation_committee_index: Option<CommitteeIndex>, + /// The position of the validator in the committee. + pub attestation_committee_position: Option<usize>, + /// The committee count at `attestation_slot`. + pub committee_count_at_slot: Option<u64>, + /// The number of validators in the committee. + pub committee_length: Option<u64>, + /// The slots in which a validator must propose a block (can be empty). + /// + /// Should be set to `None` when duties are not yet known (before the current epoch). + pub block_proposal_slots: Option<Vec<Slot>>, +} + +impl ValidatorDuty { + /// Instantiate `Self` as if there are no known duties for `validator_pubkey`. + fn no_duties(validator_pubkey: PublicKey) -> Self { + ValidatorDuty { + validator_pubkey, + validator_index: None, + attestation_slot: None, + attestation_committee_index: None, + attestation_committee_position: None, + committee_count_at_slot: None, + committee_length: None, + block_proposal_slots: None, + } + } + + /// Instantiate `Self` by performing requests on the `beacon_node`. 
+ /// + /// Will only request proposer duties if `current_epoch == request_epoch`. + pub async fn download( + beacon_node: &BeaconNodeHttpClient, + current_epoch: Epoch, + request_epoch: Epoch, + pubkey: PublicKey, + ) -> Result<ValidatorDuty, String> { + let pubkey_bytes = PublicKeyBytes::from(&pubkey); + + let validator_index = if let Some(index) = beacon_node + .get_beacon_states_validator_id( + StateId::Head, + &ValidatorId::PublicKey(pubkey_bytes.clone()), + ) + .await + .map_err(|e| format!("Failed to get validator index: {}", e))? + .map(|body| body.data.index) + { + index + } else { + return Ok(Self::no_duties(pubkey)); + }; + + if let Some(attester) = beacon_node + .get_validator_duties_attester(request_epoch, Some(&[validator_index])) + .await + .map_err(|e| format!("Failed to get attester duties: {}", e))? + .data + .first() + { + let block_proposal_slots = if current_epoch == request_epoch { + beacon_node + .get_validator_duties_proposer(current_epoch) + .await + .map_err(|e| format!("Failed to get proposer indices: {}", e))? + .data + .into_iter() + .filter(|data| data.pubkey == pubkey_bytes) + .map(|data| data.slot) + .collect() + } else { + vec![] + }; + + Ok(ValidatorDuty { + validator_pubkey: pubkey, + validator_index: Some(attester.validator_index), + attestation_slot: Some(attester.slot), + attestation_committee_index: Some(attester.committee_index), + attestation_committee_position: Some(attester.validator_committee_index as usize), + committee_count_at_slot: Some(attester.committees_at_slot), + committee_length: Some(attester.committee_length), + block_proposal_slots: Some(block_proposal_slots), + }) + } else { + Ok(Self::no_duties(pubkey)) + } + } + + /// Return `true` if these validator duties are equal, ignoring their `block_proposal_slots`. + pub fn eq_ignoring_proposal_slots(&self, other: &Self) -> bool { + self.validator_pubkey == other.validator_pubkey + && self.validator_index == other.validator_index + && self.attestation_slot == other.attestation_slot + && self.attestation_committee_index == other.attestation_committee_index + && self.attestation_committee_position == other.attestation_committee_position + && self.committee_count_at_slot == other.committee_count_at_slot + && self.committee_length == other.committee_length + } + + /// Generate a subscription for `self`, if `self` has appropriate attestation duties. 
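The `subscription` method, whose body follows, leans on `?` applied to `Option`: if any required duty field is still `None`, the whole call short-circuits to `None`, so a subscription is only ever built from complete duties. A tiny sketch with a hypothetical `Duty` struct:

struct Duty {
    validator_index: Option<u64>,
    attestation_slot: Option<u64>,
}

// `?` on an `Option` returns `None` from the enclosing function as soon as
// any field is missing.
fn subscription(duty: &Duty) -> Option<(u64, u64)> {
    Some((duty.validator_index?, duty.attestation_slot?))
}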
+ pub fn subscription(&self, is_aggregator: bool) -> Option<BeaconCommitteeSubscription> { + Some(BeaconCommitteeSubscription { + validator_index: self.validator_index?, + committee_index: self.attestation_committee_index?, + committees_at_slot: self.committee_count_at_slot?, + slot: self.attestation_slot?, + is_aggregator, + }) + } +} diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index f7d0442d3..66b75874a 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -1,13 +1,11 @@ -use crate::{ - config::{Config, SLASHING_PROTECTION_FILENAME}, - fork_service::ForkService, - initialized_validators::InitializedValidators, -}; +use crate::{fork_service::ForkService, initialized_validators::InitializedValidators}; +use account_utils::{validator_definitions::ValidatorDefinition, ZeroizeString}; use parking_lot::RwLock; use slashing_protection::{NotSafe, Safe, SlashingDatabase}; use slog::{crit, error, warn, Logger}; use slot_clock::SlotClock; use std::marker::PhantomData; +use std::path::Path; use std::sync::Arc; use tempdir::TempDir; use types::{ @@ -49,29 +47,20 @@ pub struct ValidatorStore<T, E: EthSpec> { spec: Arc<ChainSpec>, log: Logger, temp_dir: Option<Arc<TempDir>>, - fork_service: ForkService<T, E>, + fork_service: ForkService<T>, _phantom: PhantomData<E>, } impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { pub fn new( validators: InitializedValidators, - config: &Config, + slashing_protection: SlashingDatabase, genesis_validators_root: Hash256, spec: ChainSpec, - fork_service: ForkService<T, E>, + fork_service: ForkService<T>, log: Logger, - ) -> Result<Self, String> { - let slashing_db_path = config.data_dir.join(SLASHING_PROTECTION_FILENAME); - let slashing_protection = - SlashingDatabase::open_or_create(&slashing_db_path).map_err(|e| { - format!( - "Failed to open or create slashing protection database: {:?}", - e - ) - })?; - - Ok(Self { + ) -> Self { + Self { validators: Arc::new(RwLock::new(validators)), slashing_protection, genesis_validators_root, @@ -80,17 +69,44 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { temp_dir: None, fork_service, _phantom: PhantomData, - }) + } } - /// Register all known validators with the slashing protection database. + pub fn initialized_validators(&self) -> Arc<RwLock<InitializedValidators>> { + self.validators.clone() + } + + /// Insert a new validator to `self`, where the validator is represented by an EIP-2335 + /// keystore on the filesystem. /// - /// Registration is required to protect against a lost or missing slashing database, - /// such as when relocating validator keys to a new machine. - pub fn register_all_validators_for_slashing_protection(&self) -> Result<(), String> { + /// This function includes: + /// + /// - Add the validator definition to the YAML file, saving it to the filesystem. + /// - Enable validator with the slashing protection database. + /// - If `enable == true`, start performing duties for the validator. 
+ pub async fn add_validator_keystore<P: AsRef<Path>>( + &self, + voting_keystore_path: P, + password: ZeroizeString, + enable: bool, + ) -> Result<ValidatorDefinition, String> { + let mut validator_def = + ValidatorDefinition::new_keystore_with_password(voting_keystore_path, Some(password)) + .map_err(|e| format!("failed to create validator definitions: {:?}", e))?; + self.slashing_protection - .register_validators(self.validators.read().iter_voting_pubkeys()) - .map_err(|e| format!("Error while registering validators: {:?}", e)) + .register_validator(&validator_def.voting_public_key) + .map_err(|e| format!("failed to register validator: {:?}", e))?; + + validator_def.enabled = enable; + + self.validators + .write() + .add_definition(validator_def.clone()) + .await + .map_err(|e| format!("Unable to add definition: {:?}", e))?; + + Ok(validator_def) } pub fn voting_pubkeys(&self) -> Vec<PublicKey> { @@ -116,7 +132,6 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { } pub fn randao_reveal(&self, validator_pubkey: &PublicKey, epoch: Epoch) -> Option<Signature> { - // TODO: check this against the slot clock to make sure it's not an early reveal? self.validators .read() .voting_keypair(validator_pubkey) @@ -189,7 +204,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { warn!( self.log, "Not signing block for unregistered validator"; - "msg" => "Carefully consider running with --auto-register (see --help)", + "msg" => "Carefully consider running with --init-slashing-protection (see --help)", "public_key" => format!("{:?}", pk) ); None @@ -268,7 +283,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> { warn!( self.log, "Not signing attestation for unregistered validator"; - "msg" => "Carefully consider running with --auto-register (see --help)", + "msg" => "Carefully consider running with --init-slashing-protection (see --help)", "public_key" => format!("{:?}", pk) ); None
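The "unregistered validator" warnings above are the enforcement side of the registration policy set up in `lib.rs` earlier in this diff: with `--init-slashing-protection`, every known validator is registered; otherwise the client only verifies existing registrations and refuses to sign for unknown keys. A toy sketch of the two modes (`Db` is a hypothetical stand-in for `SlashingDatabase`):

use std::collections::HashSet;

struct Db(HashSet<String>);

impl Db {
    // `--init-slashing-protection`: (re-)register everything we know about.
    fn register_validators(&mut self, keys: impl IntoIterator<Item = String>) {
        self.0.extend(keys);
    }

    // Default mode: refuse to proceed if any validator is unknown, which
    // guards against signing with a lost or misplaced protection database.
    fn check_validator_registrations<'a>(
        &self,
        keys: impl IntoIterator<Item = &'a String>,
    ) -> Result<(), String> {
        for key in keys {
            if !self.0.contains(key) {
                return Err(format!("unregistered validator: {}", key));
            }
        }
        Ok(())
    }
}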