From 8630ddfec4305789135b64ce4d78bd3e5a1c12ff Mon Sep 17 00:00:00 2001
From: Mac L
Date: Mon, 3 Apr 2023 05:35:11 +0000
Subject: [PATCH] Add `beacon.watch` (#3362)

> This is currently a WIP and all features are subject to alteration or removal at any time.

## Overview

The successor to #2873. Contains the backbone of `beacon.watch` including syncing code, the initial API, and several core database tables. See `watch/README.md` for more information, requirements and usage.
---
 Cargo.lock | 699 ++++++---
 Cargo.toml | 2 +
 beacon_node/http_api/Cargo.toml | 8 +-
 beacon_node/http_api/src/lib.rs | 1 +
 .../{tests/common.rs => src/test_utils.rs} | 10 +-
 beacon_node/http_api/tests/fork_tests.rs | 2 +-
 .../http_api/tests/interactive_tests.rs | 2 +-
 beacon_node/http_api/tests/main.rs | 1 -
 beacon_node/http_api/tests/tests.rs | 6 +-
 common/eth2/src/lib.rs | 2 +-
 common/eth2/src/lighthouse.rs | 71 +-
 watch/.gitignore | 1 +
 watch/Cargo.toml | 45 +
 watch/README.md | 460 ++++++
 watch/config.yaml.default | 49 +
 watch/diesel.toml | 5 +
 watch/migrations/.gitkeep | 0
 .../down.sql | 6 +
 .../up.sql | 36 +
 .../down.sql | 1 +
 .../2022-01-01-000000_canonical_slots/up.sql | 6 +
 .../2022-01-01-000001_beacon_blocks/down.sql | 1 +
 .../2022-01-01-000001_beacon_blocks/up.sql | 7 +
 .../2022-01-01-000002_validators/down.sql | 1 +
 .../2022-01-01-000002_validators/up.sql | 7 +
 .../2022-01-01-000003_proposer_info/down.sql | 1 +
 .../2022-01-01-000003_proposer_info/up.sql | 5 +
 .../2022-01-01-000004_active_config/down.sql | 1 +
 .../2022-01-01-000004_active_config/up.sql | 5 +
 .../2022-01-01-000010_blockprint/down.sql | 1 +
 .../2022-01-01-000010_blockprint/up.sql | 4 +
 .../2022-01-01-000011_block_rewards/down.sql | 1 +
 .../2022-01-01-000011_block_rewards/up.sql | 6 +
 .../2022-01-01-000012_block_packing/down.sql | 1 +
 .../2022-01-01-000012_block_packing/up.sql | 6 +
 .../down.sql | 1 +
 .../up.sql | 8 +
 .../2022-01-01-000020_capella/down.sql | 2 +
 .../2022-01-01-000020_capella/up.sql | 3 +
 watch/postgres_docker_compose/compose.yml | 16 +
 watch/src/block_packing/database.rs | 140 ++
 watch/src/block_packing/mod.rs | 38 +
 watch/src/block_packing/server.rs | 31 +
 watch/src/block_packing/updater.rs | 211 +++
 watch/src/block_rewards/database.rs | 137 ++
 watch/src/block_rewards/mod.rs | 38 +
 watch/src/block_rewards/server.rs | 31 +
 watch/src/block_rewards/updater.rs | 157 +++
 watch/src/blockprint/config.rs | 40 +
 watch/src/blockprint/database.rs | 224 +++
 watch/src/blockprint/mod.rs | 149 ++
 watch/src/blockprint/server.rs | 31 +
 watch/src/blockprint/updater.rs | 172 +++
 watch/src/cli.rs | 55 +
 watch/src/client.rs | 178 +++
 watch/src/config.rs | 50 +
 watch/src/database/compat.rs | 49 +
 watch/src/database/config.rs | 74 +
 watch/src/database/error.rs | 55 +
 watch/src/database/mod.rs | 782 ++++++++
 watch/src/database/models.rs | 67 +
 watch/src/database/schema.rs | 102 ++
 watch/src/database/utils.rs | 29 +
 watch/src/database/watch_types.rs | 119 ++
 watch/src/lib.rs | 12 +
 watch/src/logger.rs | 24 +
 watch/src/main.rs | 41 +
 watch/src/server/config.rs | 28 +
 watch/src/server/error.rs | 50 +
 watch/src/server/handler.rs | 266 ++++
 watch/src/server/mod.rs | 134 ++
 watch/src/suboptimal_attestations/database.rs | 224 +++
 watch/src/suboptimal_attestations/mod.rs | 56 +
 watch/src/suboptimal_attestations/server.rs | 299 ++++
 watch/src/suboptimal_attestations/updater.rs | 236 ++++
 watch/src/updater/config.rs | 65 +
 watch/src/updater/error.rs | 56 +
 watch/src/updater/handler.rs | 471 +++++++
 watch/src/updater/mod.rs | 234 +++
 watch/tests/tests.rs | 1254 
+++++++++++++++++ 80 files changed, 7663 insertions(+), 236 deletions(-) rename beacon_node/http_api/{tests/common.rs => src/test_utils.rs} (96%) create mode 100644 watch/.gitignore create mode 100644 watch/Cargo.toml create mode 100644 watch/README.md create mode 100644 watch/config.yaml.default create mode 100644 watch/diesel.toml create mode 100644 watch/migrations/.gitkeep create mode 100644 watch/migrations/00000000000000_diesel_initial_setup/down.sql create mode 100644 watch/migrations/00000000000000_diesel_initial_setup/up.sql create mode 100644 watch/migrations/2022-01-01-000000_canonical_slots/down.sql create mode 100644 watch/migrations/2022-01-01-000000_canonical_slots/up.sql create mode 100644 watch/migrations/2022-01-01-000001_beacon_blocks/down.sql create mode 100644 watch/migrations/2022-01-01-000001_beacon_blocks/up.sql create mode 100644 watch/migrations/2022-01-01-000002_validators/down.sql create mode 100644 watch/migrations/2022-01-01-000002_validators/up.sql create mode 100644 watch/migrations/2022-01-01-000003_proposer_info/down.sql create mode 100644 watch/migrations/2022-01-01-000003_proposer_info/up.sql create mode 100644 watch/migrations/2022-01-01-000004_active_config/down.sql create mode 100644 watch/migrations/2022-01-01-000004_active_config/up.sql create mode 100644 watch/migrations/2022-01-01-000010_blockprint/down.sql create mode 100644 watch/migrations/2022-01-01-000010_blockprint/up.sql create mode 100644 watch/migrations/2022-01-01-000011_block_rewards/down.sql create mode 100644 watch/migrations/2022-01-01-000011_block_rewards/up.sql create mode 100644 watch/migrations/2022-01-01-000012_block_packing/down.sql create mode 100644 watch/migrations/2022-01-01-000012_block_packing/up.sql create mode 100644 watch/migrations/2022-01-01-000013_suboptimal_attestations/down.sql create mode 100644 watch/migrations/2022-01-01-000013_suboptimal_attestations/up.sql create mode 100644 watch/migrations/2022-01-01-000020_capella/down.sql create mode 100644 watch/migrations/2022-01-01-000020_capella/up.sql create mode 100644 watch/postgres_docker_compose/compose.yml create mode 100644 watch/src/block_packing/database.rs create mode 100644 watch/src/block_packing/mod.rs create mode 100644 watch/src/block_packing/server.rs create mode 100644 watch/src/block_packing/updater.rs create mode 100644 watch/src/block_rewards/database.rs create mode 100644 watch/src/block_rewards/mod.rs create mode 100644 watch/src/block_rewards/server.rs create mode 100644 watch/src/block_rewards/updater.rs create mode 100644 watch/src/blockprint/config.rs create mode 100644 watch/src/blockprint/database.rs create mode 100644 watch/src/blockprint/mod.rs create mode 100644 watch/src/blockprint/server.rs create mode 100644 watch/src/blockprint/updater.rs create mode 100644 watch/src/cli.rs create mode 100644 watch/src/client.rs create mode 100644 watch/src/config.rs create mode 100644 watch/src/database/compat.rs create mode 100644 watch/src/database/config.rs create mode 100644 watch/src/database/error.rs create mode 100644 watch/src/database/mod.rs create mode 100644 watch/src/database/models.rs create mode 100644 watch/src/database/schema.rs create mode 100644 watch/src/database/utils.rs create mode 100644 watch/src/database/watch_types.rs create mode 100644 watch/src/lib.rs create mode 100644 watch/src/logger.rs create mode 100644 watch/src/main.rs create mode 100644 watch/src/server/config.rs create mode 100644 watch/src/server/error.rs create mode 100644 watch/src/server/handler.rs create mode 
100644 watch/src/server/mod.rs create mode 100644 watch/src/suboptimal_attestations/database.rs create mode 100644 watch/src/suboptimal_attestations/mod.rs create mode 100644 watch/src/suboptimal_attestations/server.rs create mode 100644 watch/src/suboptimal_attestations/updater.rs create mode 100644 watch/src/updater/config.rs create mode 100644 watch/src/updater/error.rs create mode 100644 watch/src/updater/handler.rs create mode 100644 watch/src/updater/mod.rs create mode 100644 watch/tests/tests.rs diff --git a/Cargo.lock b/Cargo.lock index 214f3baa3..7a67b77bf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -90,9 +90,9 @@ dependencies = [ [[package]] name = "aead" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c192eb8f11fc081b0fe4259ba5af04217d4e0faddd02417310a927911abd7c8" +checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" dependencies = [ "crypto-common", "generic-array", @@ -153,7 +153,7 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82e1366e0c69c9f927b1fa5ce2c7bf9eafc8f9268c0b9800729e8b267612447c" dependencies = [ - "aead 0.5.1", + "aead 0.5.2", "aes 0.8.2", "cipher 0.4.4", "ctr 0.9.2", @@ -226,9 +226,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.69" +version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" +checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4" [[package]] name = "arbitrary" @@ -246,9 +246,9 @@ checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" [[package]] name = "arrayref" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" +checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" @@ -296,7 +296,7 @@ checksum = "db8b7511298d5b7784b40b092d9e9dcd3a627a5707e4b5e507931ab0d44eeebf" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "synstructure", ] @@ -308,7 +308,7 @@ checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "synstructure", ] @@ -320,7 +320,7 @@ checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -331,22 +331,22 @@ checksum = "e22d1f4b888c298a027c99dc9048015fac177587de20fc30232a057dfbe24a21" [[package]] name = "async-io" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c374dda1ed3e7d8f0d9ba58715f924862c63eae6849c92d3a18e7fbde9e2794" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" dependencies = [ "async-lock", "autocfg 1.1.0", + "cfg-if", "concurrent-queue", "futures-lite", - "libc", "log", "parking", "polling", + "rustix", "slab", - "socket2", + "socket2 0.4.9", "waker-fn", - "windows-sys 0.42.0", ] [[package]] @@ -377,18 +377,18 @@ checksum = "e4655ae1a7b0cdf149156f780c5bf3f1352bc53cbd9e0a361a7ef7b22947e965" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "async-trait" -version = "0.1.66" +version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b84f9ebcc6c1f5b8cb160f6990096a5c127f423fcb6e1ccc46c370cbdfb75dfc" +checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -452,7 +452,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -805,6 +805,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "bollard-stubs" +version = "1.41.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed2f2e73fffe9455141e170fb9c1feb0ac521ec7e7dcd47a7cab72a658490fb8" +dependencies = [ + "chrono", + "serde", + "serde_with", +] + [[package]] name = "boot_node" version = "4.0.1" @@ -994,6 +1005,7 @@ dependencies = [ "js-sys", "num-integer", "num-traits", + "serde", "time 0.1.45", "wasm-bindgen", "winapi", @@ -1029,9 +1041,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ed9a53e5d4d9c573ae844bfac6872b159cb1d1585a83b29e7a64b7eef7332a" +checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" dependencies = [ "glob", "libc", @@ -1112,9 +1124,9 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.49" +version = "0.1.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db34956e100b30725f2eb215f90d4871051239535632f84fea3bc92722c66b7c" +checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" dependencies = [ "cc", ] @@ -1141,7 +1153,7 @@ name = "compare_fields_derive" version = "0.2.0" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1187,9 +1199,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "core2" @@ -1202,9 +1214,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" +checksum = "280a9f2d8b3a38871a3c8a46fb80db65e5e5ed97da80c4d08bf27fb63e35e181" dependencies = [ "libc", ] @@ -1425,9 +1437,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.0.0-rc.1" +version = "4.0.0-rc.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d4ba9852b42210c7538b75484f9daa0655e9a3ac04f693747bb0f02cf3cfe16" +checksum = "03d928d978dbec61a1167414f5ec534f24bea0d7a0d24dd9b6233d3d8223e585" dependencies = [ "cfg-if", "fiat-crypto", @@ -1439,9 +1451,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.92" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a140f260e6f3f79013b8bfc65e7ce630c9ab4388c6a89c71e07226f49487b72" +checksum = "f61f1b6389c3fe1c316bf8a4dccc90a38208354b330925bce1f74a6c4756eb93" dependencies = [ "cc", "cxxbridge-flags", @@ -1451,9 +1463,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.92" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da6383f459341ea689374bf0a42979739dc421874f112ff26f829b8040b8e613" +checksum = "12cee708e8962df2aeb38f594aae5d827c022b6460ac71a7a3e2c3c2aae5a07b" dependencies = [ "cc", 
"codespan-reporting", @@ -1461,24 +1473,24 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn", + "syn 2.0.13", ] [[package]] name = "cxxbridge-flags" -version = "1.0.92" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90201c1a650e95ccff1c8c0bb5a343213bdd317c6e600a93075bca2eff54ec97" +checksum = "7944172ae7e4068c533afbb984114a56c46e9ccddda550499caa222902c7f7bb" [[package]] name = "cxxbridge-macro" -version = "1.0.92" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b75aed41bb2e6367cae39e6326ef817a851db13c13e4f3263714ca3cfb8de56" +checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -1512,7 +1524,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn", + "syn 1.0.109", ] [[package]] @@ -1526,7 +1538,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn", + "syn 1.0.109", ] [[package]] @@ -1537,7 +1549,7 @@ checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core 0.13.4", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1548,7 +1560,7 @@ checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" dependencies = [ "darling_core 0.14.4", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1594,7 +1606,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5bbed42daaa95e780b60a50546aa345b8413a1e46f9a40a12907d3598f038db" dependencies = [ "data-encoding", - "syn", + "syn 1.0.109", ] [[package]] @@ -1692,7 +1704,7 @@ checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1702,7 +1714,7 @@ source = "git+https://github.com/michaelsproul/arbitrary?rev=f002b99989b561ddce6 dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1723,7 +1735,7 @@ dependencies = [ "darling 0.14.4", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1733,7 +1745,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f0314b72bed045f3a68671b3c86328386762c93f82d98c65c3cb5e5f573dd68" dependencies = [ "derive_builder_core", - "syn", + "syn 1.0.109", ] [[package]] @@ -1746,7 +1758,44 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn", + "syn 1.0.109", +] + +[[package]] +name = "diesel" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4391a22b19c916e50bec4d6140f29bdda3e3bb187223fe6e3ea0b6e4d1021c04" +dependencies = [ + "bitflags", + "byteorder", + "diesel_derives", + "itoa", + "pq-sys", + "r2d2", +] + +[[package]] +name = "diesel_derives" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ad74fdcf086be3d4fdd142f67937678fe60ed431c3b2f08599e7687269410c4" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "diesel_migrations" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9ae22beef5e9d6fab9225ddb073c1c6c1a7a6ded5019d5da11d1e5c5adc34e2" +dependencies = [ + "diesel", + "migrations_internals", + "migrations_macros", ] [[package]] @@ -1843,7 +1892,7 @@ dependencies = [ "rand 0.8.5", "rlp", "smallvec", - "socket2", + "socket2 0.4.9", "tokio", "tokio-stream", "tokio-util 0.6.10", @@ 
-1861,7 +1910,7 @@ checksum = "3bf95dc3f046b9da4f2d51833c0d3547d8564ef6910f5c1ed130306a75b92886" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2021,7 +2070,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2071,13 +2120,13 @@ dependencies = [ [[package]] name = "errno" -version = "0.2.8" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" +checksum = "50d6a0976c999d473fe89ad888d5a284e55366d9dc9038b1ba2aa15128c4afa0" dependencies = [ "errno-dragonfly", "libc", - "winapi", + "windows-sys 0.45.0", ] [[package]] @@ -2283,7 +2332,7 @@ dependencies = [ "eth2_ssz", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2632,9 +2681,9 @@ checksum = "ec54ac60a7f2ee9a97cad9946f9bf629a3bc6a7ae59e68983dc9318f5a54b81a" [[package]] name = "fiat-crypto" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93ace6ec7cc19c8ed33a32eaa9ea692d7faea05006b5356b9e2b668ec4bc3955" +checksum = "e825f6987101665dea6ec934c09ec6d721de7bc1bf92248e1d5810c8cd636b77" [[package]] name = "field-offset" @@ -2765,9 +2814,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "531ac96c6ff5fd7c62263c5e3c67a603af4fcaee2e1a0ae5565ba3a11e69e549" +checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" dependencies = [ "futures-channel", "futures-core", @@ -2780,9 +2829,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "164713a5a0dcc3e7b4b1ed7d3b433cabc18025386f9339346e8daf15963cf7ac" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", @@ -2790,15 +2839,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86d7a0c1aa76363dac491de0ee99faf6941128376f1cf96f07db7603b7de69dd" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-executor" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1997dd9df74cdac935c76252744c1ed5794fac083242ea4fe77ef3ed60ba0f83" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" dependencies = [ "futures-core", "futures-task", @@ -2808,9 +2857,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89d422fa3cbe3b40dca574ab087abb5bc98258ea57eea3fd6f1fa7162c778b91" +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-lite" @@ -2829,13 +2878,13 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3eb14ed937631bd8b8b8977f2c198443447a8355b6e3ca599f38c975e5a963b6" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] 
[[package]] @@ -2851,15 +2900,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec93083a4aecafb2a80a885c9de1f0ccae9dbd32c2bb54b0c3a65690e0b8d2f2" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd65540d33b37b16542a0438c12e6aeead10d4ac5d05bd3f805b8f35ab592879" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" [[package]] name = "futures-timer" @@ -2869,9 +2918,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ef6b17e481503ec85211fed8f39d1970f128935ca1f814cd32ac4a6842e84ab" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures-channel", "futures-core", @@ -2896,9 +2945,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.6" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -2996,7 +3045,7 @@ dependencies = [ "proc-macro-hack", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -3150,6 +3199,12 @@ dependencies = [ "libc", ] +[[package]] +name = "hermit-abi" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" + [[package]] name = "hex" version = "0.4.3" @@ -3352,7 +3407,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite 0.2.9", - "socket2", + "socket2 0.4.9", "tokio", "tower-service", "tracing", @@ -3387,16 +3442,16 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.53" +version = "0.1.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +checksum = "0c17cc76786e99f8d2f055c11159e7f0091c42474dcc3189fbab96072e873e6d" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "winapi", + "windows 0.46.0", ] [[package]] @@ -3469,9 +3524,9 @@ dependencies = [ [[package]] name = "if-watch" -version = "3.0.0" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba7abdbb86e485125dad06c2691e1e393bf3b08c7b743b43aa162a00fd39062e" +checksum = "a9465340214b296cd17a0009acdb890d6160010b8adf8f78a00d0d7ab270f79f" dependencies = [ "async-io", "core-foundation", @@ -3483,7 +3538,7 @@ dependencies = [ "rtnetlink", "system-configuration", "tokio", - "windows", + "windows 0.34.0", ] [[package]] @@ -3552,14 +3607,14 @@ checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "indexmap" -version = "1.9.2" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +checksum = 
"bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg 1.1.0", "hashbrown 0.12.3", @@ -3625,10 +3680,11 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.6" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfa919a82ea574332e2de6e74b4c36e74d41982b335080fa59d4ef31be20fdf3" +checksum = "09270fd4fa1111bc614ed2246c7ef56239a3063d5be0d1ec3b589c505d400aeb" dependencies = [ + "hermit-abi 0.3.1", "libc", "windows-sys 0.45.0", ] @@ -3639,7 +3695,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd302af1b90f2463a98fa5ad469fc212c8e3175a41c3068601bfa2727591c5be" dependencies = [ - "socket2", + "socket2 0.4.9", "widestring 0.5.1", "winapi", "winreg", @@ -3647,9 +3703,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" +checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" [[package]] name = "itertools" @@ -3723,11 +3779,11 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "8.2.0" +version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09f4f04699947111ec1733e71778d763555737579e44b85844cae8e1940a1828" +checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.13.1", + "base64 0.21.0", "pem", "ring", "serde", @@ -4021,7 +4077,7 @@ dependencies = [ "instant", "libp2p-identity", "log", - "multiaddr 0.17.0", + "multiaddr 0.17.1", "multihash 0.17.0", "multistream-select 0.12.1", "once_cell", @@ -4103,17 +4159,16 @@ dependencies = [ [[package]] name = "libp2p-identity" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff6c9cb71e2333d31f18e7556b9a5f1d0a2e013effc9325e36f436be65fe7bd2" +checksum = "8a8ea433ae0cea7e3315354305237b9897afe45278b2118a7a57ca744e70fd27" dependencies = [ "bs58", "ed25519-dalek", "log", - "multiaddr 0.17.0", + "multiaddr 0.17.1", "multihash 0.17.0", "prost", - "prost-build", "quick-protobuf", "rand 0.8.5", "thiserror", @@ -4134,7 +4189,7 @@ dependencies = [ "log", "rand 0.8.5", "smallvec", - "socket2", + "socket2 0.4.9", "tokio", "trust-dns-proto", "void", @@ -4262,7 +4317,7 @@ checksum = "9d527d5827582abd44a6d80c07ff8b50b4ee238a8979e05998474179e79dc400" dependencies = [ "heck", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -4277,7 +4332,7 @@ dependencies = [ "libc", "libp2p-core 0.38.0", "log", - "socket2", + "socket2 0.4.9", "tokio", ] @@ -4560,9 +4615,9 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.1.4" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" +checksum = "d59d8c75012853d2e872fb56bc8a2e53718e2cafe1a4c823143141c6d90c322f" [[package]] name = "lmdb-rkv" @@ -4787,7 +4842,7 @@ dependencies = [ "proc-macro2", "quote", "smallvec", - "syn", + "syn 1.0.109", ] [[package]] @@ -4807,6 +4862,27 @@ dependencies = [ "tracing", ] +[[package]] +name = "migrations_internals" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c493c09323068c01e54c685f7da41a9ccf9219735c3766fbfd6099806ea08fbc" +dependencies = [ + "serde", + "toml", +] 
+ +[[package]] +name = "migrations_macros" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a8ff27a350511de30cdabb77147501c36ef02e0451d957abea2f30caffb2b58" +dependencies = [ + "migrations_internals", + "proc-macro2", + "quote", +] + [[package]] name = "milagro_bls" version = "1.4.2" @@ -4821,9 +4897,9 @@ dependencies = [ [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" @@ -4926,13 +5002,14 @@ dependencies = [ [[package]] name = "multiaddr" -version = "0.17.0" +version = "0.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b53e0cc5907a5c216ba6584bf74be8ab47d6d6289f72793b2dddbf15dc3bf8c" +checksum = "2b36f567c7099511fa8612bbbb52dda2419ce0bdbacf31714e3a5ffdb766d3bd" dependencies = [ "arrayref", "byteorder", "data-encoding", + "log", "multibase", "multihash 0.17.0", "percent-encoding", @@ -4989,7 +5066,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "synstructure", ] @@ -5421,14 +5498,14 @@ dependencies = [ "bytes", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "openssl" -version = "0.10.48" +version = "0.10.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "518915b97df115dd36109bfa429a48b8f737bd05508cf9588977b599648926d2" +checksum = "4d2f106ab837a24e03672c59b1239669a0596406ff657c3c0835b6b7f0f35a33" dependencies = [ "bitflags", "cfg-if", @@ -5441,13 +5518,13 @@ dependencies = [ [[package]] name = "openssl-macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -5458,20 +5535,19 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.25.1+1.1.1t" +version = "111.25.2+1.1.1t" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ef9a9cc6ea7d9d5e7c4a913dc4b48d0e359eddf01af1dfec96ba7064b4aba10" +checksum = "320708a054ad9b3bf314688b5db87cf4d6683d64cfc835e2337924ae62bf4431" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.83" +version = "0.9.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "666416d899cf077260dac8698d60a60b435a46d57e82acb1be3d0dad87284e5b" +checksum = "3a20eace9dc2d82904039cb76dcf50fb1a0bba071cfd1629720b5d6f1ddba0fa" dependencies = [ - "autocfg 1.1.0", "cc", "libc", "openssl-src", @@ -5578,7 +5654,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -5590,7 +5666,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -5629,7 +5705,7 @@ dependencies = [ "cfg-if", "instant", "libc", - "redox_syscall", + "redox_syscall 0.2.16", "smallvec", "winapi", ] @@ -5642,7 +5718,7 @@ checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.2.16", "smallvec", "windows-sys 0.45.0", ] @@ -5721,6 +5797,24 @@ 
dependencies = [ "rustc_version 0.4.0", ] +[[package]] +name = "phf" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "928c6535de93548188ef63bb7c4036bd415cd8f36ad25af44b9789b2ee72a48c" +dependencies = [ + "phf_shared", +] + +[[package]] +name = "phf_shared" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1fb5f6f826b772a8d4c0394209441e7d37cbbb967ae9c7e0e8134365c9ee676" +dependencies = [ + "siphasher", +] + [[package]] name = "pin-project" version = "1.0.12" @@ -5738,7 +5832,7 @@ checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -5866,6 +5960,35 @@ dependencies = [ "universal-hash 0.5.0", ] +[[package]] +name = "postgres-protocol" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b7fa9f396f51dffd61546fd8573ee20592287996568e6175ceb0f8699ad75d" +dependencies = [ + "base64 0.21.0", + "byteorder", + "bytes", + "fallible-iterator", + "hmac 0.12.1", + "md-5", + "memchr", + "rand 0.8.5", + "sha2 0.10.6", + "stringprep", +] + +[[package]] +name = "postgres-types" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f028f05971fe20f512bcc679e2c10227e57809a3af86a7606304435bc8896cd6" +dependencies = [ + "bytes", + "fallible-iterator", + "postgres-protocol", +] + [[package]] name = "ppv-lite86" version = "0.2.17" @@ -5873,13 +5996,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] -name = "prettyplease" -version = "0.1.24" +name = "pq-sys" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ebcd279d20a4a0a2404a33056388e950504d891c855c7975b9a8fef75f3bf04" +checksum = "3b845d6d8ec554f972a2c5298aad68953fd64e7441e846075450b44656a016d1" +dependencies = [ + "vcpkg", +] + +[[package]] +name = "prettyplease" +version = "0.1.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" dependencies = [ "proc-macro2", - "syn", + "syn 1.0.109", ] [[package]] @@ -5928,7 +6060,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "version_check", ] @@ -5951,9 +6083,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.52" +version = "1.0.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224" +checksum = "1d0dd4be24fcdcfeaa12a432d588dc59bbad6cad3510c67e74a2b6b2fc950564" dependencies = [ "unicode-ident", ] @@ -6005,7 +6137,7 @@ checksum = "66a455fbcb954c1a7decf3c586e860fd7889cddf4b8e164be736dbac95a953cd" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6035,7 +6167,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn", + "syn 1.0.109", "tempfile", "which", ] @@ -6063,7 +6195,7 @@ dependencies = [ "itertools", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6148,7 +6280,7 @@ checksum = "608c156fd8e97febc07dc9c2e2c80bf74cfc6ef26893eae3daf8bc2bc94a4b7f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6164,9 +6296,9 @@ dependencies = [ [[package]] name = "quinn-proto" 
-version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ef4ced82a24bb281af338b9e8f94429b6eca01b4e66d899f40031f074e74c9" +checksum = "67c10f662eee9c94ddd7135043e544f3c82fa839a1e7b865911331961b53186c" dependencies = [ "bytes", "rand 0.8.5", @@ -6358,6 +6490,15 @@ dependencies = [ "bitflags", ] +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +dependencies = [ + "bitflags", +] + [[package]] name = "redox_users" version = "0.4.3" @@ -6365,15 +6506,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ "getrandom 0.2.8", - "redox_syscall", + "redox_syscall 0.2.16", "thiserror", ] [[package]] name = "regex" -version = "1.7.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" +checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d" dependencies = [ "aho-corasick", "memchr", @@ -6391,15 +6532,15 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.28" +version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "reqwest" -version = "0.11.14" +version = "0.11.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21eed90ec8570952d53b772ecf8f206aa1ec9a3d76b2521c56c42973f2d91ee9" +checksum = "27b71749df584b7f4cac2c426c127a7c785a5106cc98f7a8feb044115f0fa254" dependencies = [ "base64 0.21.0", "bytes", @@ -6499,7 +6640,7 @@ checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6568,9 +6709,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" +checksum = "d4a36c42d1873f9a77c53bde094f9664d9891bc604a45b4798fd2c389ed12e5b" [[package]] name = "rustc-hash" @@ -6613,9 +6754,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.9" +version = "0.37.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" +checksum = "d097081ed288dfe45699b72f5b5d648e5f15d64d900c7080273baa20c16a6849" dependencies = [ "bitflags", "errno", @@ -6712,9 +6853,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.3.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "001cf62ece89779fd16105b5f515ad0e5cedcd5440d3dd806bb067978e7c3608" +checksum = "0cfdffd972d76b22f3d7f81c8be34b2296afd3a25e0a547bd9abe340a4dbbe97" dependencies = [ "cfg-if", "derive_more", @@ -6724,14 +6865,14 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.3.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "303959cf613a6f6efd19ed4b4ad5bf79966a13352716299ad532cfb115f4205c" +checksum = "61fa974aea2d63dd18a4ec3a49d59af9f34178c73a4f56d2f18205628d00681e" 
dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -6906,9 +7047,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.155" +version = "1.0.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71f2b4817415c6d4210bfe1c7bfcf4801b2d904cb4d0e1a8fdb651013c9e86b8" +checksum = "3c04e8343c3daeec41f58990b9d77068df31209f2af111e059e9fe9646693065" dependencies = [ "serde_derive", ] @@ -6935,20 +7076,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.155" +version = "1.0.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d071a94a3fac4aff69d023a7f411e33f40f3483f8c5190b1953822b6b76d7630" +checksum = "4c614d17805b093df4b147b51339e7e44bf05ef59fba1e45d83500bcfb4d8585" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] name = "serde_json" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea" +checksum = "d721eca97ac802aa7777b701877c8004d950fc142651367300d21c1cc0194744" dependencies = [ "itoa", "ryu", @@ -6957,13 +7098,13 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "395627de918015623b32e7669714206363a7fc00382bf477e72c1f7533e8eafc" +checksum = "bcec881020c684085e55a25f7fd888954d56609ef363479dc5a1305eb0d40cab" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -6997,7 +7138,7 @@ dependencies = [ "darling 0.13.4", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -7157,6 +7298,12 @@ dependencies = [ "types", ] +[[package]] +name = "siphasher" +version = "0.3.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de" + [[package]] name = "slab" version = "0.4.8" @@ -7358,14 +7505,14 @@ checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" [[package]] name = "snow" -version = "0.9.2" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ccba027ba85743e09d15c03296797cad56395089b832b48b5a5217880f57733" +checksum = "774d05a3edae07ce6d68ea6984f3c05e9bba8927e3dd591e3b479e5b03213d0d" dependencies = [ "aes-gcm 0.9.4", "blake2", "chacha20poly1305", - "curve25519-dalek 4.0.0-rc.1", + "curve25519-dalek 4.0.0-rc.2", "rand_core 0.6.4", "ring", "rustc_version 0.4.0", @@ -7383,6 +7530,16 @@ dependencies = [ "winapi", ] +[[package]] +name = "socket2" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc8d618c6641ae355025c449427f9e96b98abf99a772be3cef6708d15c77147a" +dependencies = [ + "libc", + "windows-sys 0.45.0", +] + [[package]] name = "soketto" version = "0.7.1" @@ -7436,7 +7593,7 @@ source = "git+https://github.com/ralexstokes//ssz-rs?rev=adf1a0b14cef90b9536f28e dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -7509,6 +7666,16 @@ dependencies = [ "types", ] +[[package]] +name = "stringprep" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ee348cb74b87454fff4b551cbf727025810a004f88aeacae7f85b87f4e9a1c1" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "strsim" version = "0.8.0" @@ -7540,7 +7707,7 @@ dependencies = [ "proc-macro2", 
"quote", "rustversion", - "syn", + "syn 1.0.109", ] [[package]] @@ -7588,7 +7755,7 @@ dependencies = [ "proc-macro2", "quote", "smallvec", - "syn", + "syn 1.0.109", ] [[package]] @@ -7602,7 +7769,7 @@ dependencies = [ "proc-macro2", "quote", "smallvec", - "syn", + "syn 1.0.109", ] [[package]] @@ -7625,6 +7792,17 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn" +version = "2.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c9da457c5285ac1f936ebd076af6dac17a61cfe7826f2076b4d015cf47bc8ec" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + [[package]] name = "sync_wrapper" version = "0.1.2" @@ -7639,7 +7817,7 @@ checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "unicode-xid", ] @@ -7732,15 +7910,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.4.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af18f7ae1acd354b992402e9ec5864359d693cd8a79dcbef59f76891701c1e95" +checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" dependencies = [ "cfg-if", "fastrand", - "redox_syscall", + "redox_syscall 0.3.5", "rustix", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] @@ -7776,7 +7954,24 @@ name = "test_random_derive" version = "0.2.0" dependencies = [ "quote", - "syn", + "syn 1.0.109", +] + +[[package]] +name = "testcontainers" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e2b1567ca8a2b819ea7b28c92be35d9f76fb9edb214321dcc86eb96023d1f87" +dependencies = [ + "bollard-stubs", + "futures", + "hex", + "hmac 0.12.1", + "log", + "rand 0.8.5", + "serde", + "serde_json", + "sha2 0.10.6", ] [[package]] @@ -7790,22 +7985,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.39" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5ab016db510546d856297882807df8da66a16fb8c4101cb8b30054b0d5b2d9c" +checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.39" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5420d42e90af0c38c3290abcca25b9b3bdf379fc9f55c528f53a269d9c9a267e" +checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -7933,20 +8128,19 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.26.0" +version = "1.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03201d01c3c27a29c8a5cee5b55a93ddae1ccf6f08f65365c2c918f8c1b76f64" +checksum = "d0de47a4eecbe11f498978a9b29d792f0d2692d1dd003650c24c76510e3bc001" dependencies = [ "autocfg 1.1.0", "bytes", "libc", - "memchr", "mio", "num_cpus", "parking_lot 0.12.1", "pin-project-lite 0.2.9", "signal-hook-registry", - "socket2", + "socket2 0.4.9", "tokio-macros", "windows-sys 0.45.0", ] @@ -7963,13 +8157,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.8.2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" +checksum = "61a573bdc87985e9d6ddeed1b3d864e8a302c847e40d647746df2f1de209d1ce" dependencies = [ 
"proc-macro2", "quote", - "syn", + "syn 2.0.13", ] [[package]] @@ -7982,6 +8176,30 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-postgres" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e89f6234aa8fd43779746012fcf53603cdb91fdd8399aa0de868c2d56b6dde1" +dependencies = [ + "async-trait", + "byteorder", + "bytes", + "fallible-iterator", + "futures-channel", + "futures-util", + "log", + "parking_lot 0.12.1", + "percent-encoding", + "phf", + "pin-project-lite 0.2.9", + "postgres-protocol", + "postgres-types", + "socket2 0.5.1", + "tokio", + "tokio-util 0.7.7", +] + [[package]] name = "tokio-rustls" version = "0.22.0" @@ -8154,7 +8372,7 @@ checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -8222,7 +8440,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebeb235c5847e2f82cfe0f07eb971d1e5f6804b18dac2ae16349cc604380f82f" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -8246,7 +8464,7 @@ version = "0.4.0" dependencies = [ "darling 0.13.4", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -8277,7 +8495,7 @@ dependencies = [ "lazy_static", "rand 0.8.5", "smallvec", - "socket2", + "socket2 0.4.9", "thiserror", "tinyvec", "tokio", @@ -8466,9 +8684,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.11" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524b68aca1d05e03fdf03fcdce2c6c94b6daf6d16861ddaa7e4f2b6638a9052c" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" @@ -8707,12 +8925,11 @@ checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" [[package]] name = "walkdir" -version = "2.3.2" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" dependencies = [ "same-file", - "winapi", "winapi-util", ] @@ -8813,7 +9030,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-shared", ] @@ -8847,7 +9064,7 @@ checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8910,6 +9127,39 @@ dependencies = [ "web-sys", ] +[[package]] +name = "watch" +version = "0.1.0" +dependencies = [ + "axum", + "beacon_chain", + "beacon_node", + "bls", + "byteorder", + "clap", + "diesel", + "diesel_migrations", + "env_logger 0.9.3", + "eth2", + "hex", + "http_api", + "hyper", + "log", + "network", + "r2d2", + "rand 0.7.3", + "reqwest", + "serde", + "serde_json", + "serde_yaml", + "testcontainers", + "tokio", + "tokio-postgres", + "types", + "unused_port", + "url", +] + [[package]] name = "web-sys" version = "0.3.61" @@ -9114,7 +9364,7 @@ dependencies = [ "tokio", "webpki 0.21.4", "webrtc-util", - "x25519-dalek 2.0.0-pre.1", + "x25519-dalek 2.0.0-rc.2", "x509-parser 0.13.2", ] @@ -9149,7 +9399,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f08dfd7a6e3987e255c4dbe710dde5d94d0f0574f8a21afa95d171376c143106" dependencies = [ "log", - "socket2", + "socket2 0.4.9", "thiserror", "tokio", "webrtc-util", @@ -9300,6 +9550,15 @@ dependencies = [ 
"windows_x86_64_msvc 0.34.0", ] +[[package]] +name = "windows" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdacb41e6a96a052c6cb63a144f24900236121c6f63f4f8219fef5977ecb0c25" +dependencies = [ + "windows-targets", +] + [[package]] name = "windows-acl" version = "0.3.0" @@ -9479,12 +9738,13 @@ dependencies = [ [[package]] name = "x25519-dalek" -version = "2.0.0-pre.1" +version = "2.0.0-rc.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5da623d8af10a62342bcbbb230e33e58a63255a58012f8653c578e54bab48df" +checksum = "fabd6e16dd08033932fc3265ad4510cc2eab24656058a6dcb107ffe274abcc95" dependencies = [ - "curve25519-dalek 3.2.0", + "curve25519-dalek 4.0.0-rc.2", "rand_core 0.6.4", + "serde", "zeroize", ] @@ -9574,23 +9834,22 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.5.7" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" +checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.3.3" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44bf07cb3e50ea2003396695d58bf46bc9887a1f362260446fad6bc4e79bd36c" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn", - "synstructure", + "syn 2.0.13", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index ba07de044..0290f2ded 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -87,6 +87,8 @@ members = [ "validator_client", "validator_client/slashing_protection", + + "watch", ] resolver = "2" diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 0aa626be0..e251b0485 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -38,15 +38,15 @@ system_health = { path = "../../common/system_health" } directory = { path = "../../common/directory" } eth2_serde_utils = "0.1.1" operation_pool = { path = "../operation_pool" } +sensitive_url = { path = "../../common/sensitive_url" } +unused_port = {path = "../../common/unused_port"} +logging = { path = "../../common/logging" } +store = { path = "../store" } [dev-dependencies] -store = { path = "../store" } environment = { path = "../../lighthouse/environment" } -sensitive_url = { path = "../../common/sensitive_url" } -logging = { path = "../../common/logging" } serde_json = "1.0.58" proto_array = { path = "../../consensus/proto_array" } -unused_port = {path = "../../common/unused_port"} genesis = { path = "../genesis" } [[test]] diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index aa52466e2..d19187cb4 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -18,6 +18,7 @@ mod standard_block_rewards; mod state_id; mod sync_committee_rewards; mod sync_committees; +pub mod test_utils; mod ui; mod validator_inclusion; mod version; diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/src/test_utils.rs similarity index 96% rename from beacon_node/http_api/tests/common.rs rename to beacon_node/http_api/src/test_utils.rs index 3e34bafe8..6f918e1b9 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -1,3 +1,4 @@ +use crate::{Config, Context}; use beacon_chain::{ test_utils::{ BeaconChainHarness, BoxedMutator, Builder as 
HarnessBuilder, EphemeralHarnessType, @@ -6,7 +7,6 @@ use beacon_chain::{ }; use directory::DEFAULT_ROOT_DIR; use eth2::{BeaconNodeHttpClient, Timeouts}; -use http_api::{Config, Context}; use lighthouse_network::{ discv5::enr::{CombinedKey, EnrBuilder}, libp2p::{ @@ -182,7 +182,7 @@ pub async fn create_api_server_on_port( let eth1_service = eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()).unwrap(); - let context = Arc::new(Context { + let ctx = Arc::new(Context { config: Config { enabled: true, listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), @@ -193,19 +193,19 @@ pub async fn create_api_server_on_port( data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR), spec_fork_name: None, }, - chain: Some(chain.clone()), + chain: Some(chain), network_senders: Some(network_senders), network_globals: Some(network_globals), eth1_service: Some(eth1_service), log, }); - let ctx = context.clone(); + let (shutdown_tx, shutdown_rx) = oneshot::channel(); let server_shutdown = async { // It's not really interesting why this triggered, just that it happened. let _ = shutdown_rx.await; }; - let (listening_socket, server) = http_api::serve(ctx, server_shutdown).unwrap(); + let (listening_socket, server) = crate::serve(ctx, server_shutdown).unwrap(); ApiServer { server, diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 614412356..8a3ba887b 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -1,11 +1,11 @@ //! Tests for API behaviour across fork boundaries. -use crate::common::*; use beacon_chain::{ test_utils::{RelativeSyncCommittee, DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME}, StateSkipConfig, }; use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee}; use genesis::{bls_withdrawal_credentials, interop_genesis_state_with_withdrawal_credentials}; +use http_api::test_utils::*; use std::collections::HashSet; use types::{ test_utils::{generate_deterministic_keypair, generate_deterministic_keypairs}, diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 7db1b22d6..9763b8037 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -1,11 +1,11 @@ //! Generic tests that make use of the (newer) `InteractiveApiTester` -use crate::common::*; use beacon_chain::{ chain_config::ReOrgThreshold, test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, }; use eth2::types::DepositContractData; use execution_layer::{ForkchoiceState, PayloadAttributes}; +use http_api::test_utils::InteractiveTester; use parking_lot::Mutex; use slot_clock::SlotClock; use state_processing::{ diff --git a/beacon_node/http_api/tests/main.rs b/beacon_node/http_api/tests/main.rs index 88e0032ec..342b72cc7 100644 --- a/beacon_node/http_api/tests/main.rs +++ b/beacon_node/http_api/tests/main.rs @@ -1,6 +1,5 @@ #![cfg(not(debug_assertions))] // Tests are too slow in debug. 
-pub mod common; pub mod fork_tests; pub mod interactive_tests; pub mod tests; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 2f1d5fd58..0ef27febe 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1,4 +1,3 @@ -use crate::common::{create_api_server, create_api_server_on_port, ApiServer}; use beacon_chain::test_utils::RelativeSyncCommittee; use beacon_chain::{ test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, @@ -18,7 +17,10 @@ use execution_layer::test_utils::{ }; use futures::stream::{Stream, StreamExt}; use futures::FutureExt; -use http_api::{BlockId, StateId}; +use http_api::{ + test_utils::{create_api_server, create_api_server_on_port, ApiServer}, + BlockId, StateId, +}; use lighthouse_network::{Enr, EnrExt, PeerId}; use network::NetworkReceivers; use proto_array::ExecutionStatus; diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index d4f68624f..1a7cf2979 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -22,7 +22,7 @@ use lighthouse_network::PeerId; pub use reqwest; use reqwest::{IntoUrl, RequestBuilder, Response}; pub use reqwest::{StatusCode, Url}; -pub use sensitive_url::SensitiveUrl; +pub use sensitive_url::{SensitiveError, SensitiveUrl}; use serde::{de::DeserializeOwned, Serialize}; use std::convert::TryFrom; use std::fmt; diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index e50d9f4dc..bb933dbe1 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -13,7 +13,7 @@ use crate::{ BeaconState, ChainSpec, DepositTreeSnapshot, Epoch, EthSpec, FinalizedExecutionBlock, GenericResponse, ValidatorId, }, - BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, StateId, StatusCode, + BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, Slot, StateId, StatusCode, }; use proto_array::core::ProtoArray; use reqwest::IntoUrl; @@ -566,4 +566,73 @@ impl BeaconNodeHttpClient { self.post_with_response(path, &()).await } + + /// + /// Analysis endpoints. + /// + + /// `GET` lighthouse/analysis/block_rewards?start_slot,end_slot + pub async fn get_lighthouse_analysis_block_rewards( + &self, + start_slot: Slot, + end_slot: Slot, + ) -> Result, Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("analysis") + .push("block_rewards"); + + path.query_pairs_mut() + .append_pair("start_slot", &start_slot.to_string()) + .append_pair("end_slot", &end_slot.to_string()); + + self.get(path).await + } + + /// `GET` lighthouse/analysis/block_packing?start_epoch,end_epoch + pub async fn get_lighthouse_analysis_block_packing( + &self, + start_epoch: Epoch, + end_epoch: Epoch, + ) -> Result, Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
+ .push("lighthouse") + .push("analysis") + .push("block_packing_efficiency"); + + path.query_pairs_mut() + .append_pair("start_epoch", &start_epoch.to_string()) + .append_pair("end_epoch", &end_epoch.to_string()); + + self.get(path).await + } + + /// `GET` lighthouse/analysis/attestation_performance/{index}?start_epoch,end_epoch + pub async fn get_lighthouse_analysis_attestation_performance( + &self, + start_epoch: Epoch, + end_epoch: Epoch, + target: String, + ) -> Result, Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("analysis") + .push("attestation_performance") + .push(&target); + + path.query_pairs_mut() + .append_pair("start_epoch", &start_epoch.to_string()) + .append_pair("end_epoch", &end_epoch.to_string()); + + self.get(path).await + } } diff --git a/watch/.gitignore b/watch/.gitignore new file mode 100644 index 000000000..5b6b0720c --- /dev/null +++ b/watch/.gitignore @@ -0,0 +1 @@ +config.yaml diff --git a/watch/Cargo.toml b/watch/Cargo.toml new file mode 100644 index 000000000..d1793a9d0 --- /dev/null +++ b/watch/Cargo.toml @@ -0,0 +1,45 @@ +[package] +name = "watch" +version = "0.1.0" +edition = "2018" + +[lib] +name = "watch" +path = "src/lib.rs" + +[[bin]] +name = "watch" +path = "src/main.rs" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +clap = "2.33.3" +log = "0.4.14" +env_logger = "0.9.0" +types = { path = "../consensus/types" } +eth2 = { path = "../common/eth2" } +beacon_node = { path = "../beacon_node"} +tokio = { version = "1.14.0", features = ["time"] } +axum = "0.5.15" +hyper = "0.14.20" +serde = "1.0.116" +serde_json = "1.0.58" +reqwest = { version = "0.11.0", features = ["json","stream"] } +url = "2.2.2" +rand = "0.7.3" +diesel = { version = "2.0.2", features = ["postgres", "r2d2"] } +diesel_migrations = { version = "2.0.0", features = ["postgres"] } +byteorder = "1.4.3" +bls = { path = "../crypto/bls" } +hex = "0.4.2" +r2d2 = "0.8.9" +serde_yaml = "0.8.24" + +[dev-dependencies] +tokio-postgres = "0.7.5" +http_api = { path = "../beacon_node/http_api" } +beacon_chain = { path = "../beacon_node/beacon_chain" } +network = { path = "../beacon_node/network" } +testcontainers = "0.14.0" +unused_port = { path = "../common/unused_port" } diff --git a/watch/README.md b/watch/README.md new file mode 100644 index 000000000..18bf39394 --- /dev/null +++ b/watch/README.md @@ -0,0 +1,460 @@ +## beacon.watch + +>beacon.watch is pre-MVP and still under active development and subject to change. + +beacon.watch is an Ethereum Beacon Chain monitoring platform whose goal is to provide fast access to +data which is: +1. Not already stored natively in the Beacon Chain +2. Too specialized for Block Explorers +3. Too sensitive for public Block Explorers + + +### Requirements +- `git` +- `rust` : https://rustup.rs/ +- `libpg` : https://www.postgresql.org/download/ +- `diesel_cli` : +``` +cargo install diesel_cli --no-default-features --features postgres +``` +- `docker` : https://docs.docker.com/engine/install/ +- `docker-compose` : https://docs.docker.com/compose/install/ + +### Setup +1. Setup the database: +``` +cd postgres_docker_compose +docker-compose up +``` + +1. Ensure the tests pass: +``` +cargo test --release +``` + +1. Drop the database (if it already exists) and run the required migrations: +``` +diesel database reset --database-url postgres://postgres:postgres@localhost/dev +``` + +1. 
Ensure a synced Lighthouse beacon node with historical states is available
+at `localhost:5052`.
+The smaller the value of `--slots-per-restore-point`, the faster beacon.watch
+will be able to sync to the beacon node.
+
+1. Run the updater daemon:
+```
+cargo run --release -- run-updater
+```
+
+1. Start the HTTP API server:
+```
+cargo run --release -- serve
+```
+
+1. Ensure connectivity:
+```
+curl "http://localhost:5059/v1/slots/highest"
+```
+
+> Functionality on macOS has not been tested. Windows is not supported.
+
+
+### Configuration
+beacon.watch can be configured via a config file.
+Available options can be seen in `config.yaml.default`.
+
+You can specify a config file at runtime:
+```
+cargo run -- run-updater --config path/to/config.yaml
+cargo run -- serve --config path/to/config.yaml
+```
+
+You can specify only the parts of the config file that you need changed.
+Missing values will keep their defaults.
+
+For example, if you wish to run with default settings but alter only `log_level`,
+your config file would be:
+```yaml
+# config.yaml
+log_level: "info"
+```
+
+### Available Endpoints
+As beacon.watch continues to develop, more endpoints will be added.
+
+> In these examples, any data containing information from blockprint has either been redacted or fabricated.
+
+#### `/v1/slots/{slot}`
+```bash
+curl "http://localhost:5059/v1/slots/4635296"
+```
+```json
+{
+    "slot": "4635296",
+    "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
+    "skipped": false,
+    "beacon_block": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62"
+}
+```
+
+#### `/v1/slots?start_slot={}&end_slot={}`
+```bash
+curl "http://localhost:5059/v1/slots?start_slot=4635296&end_slot=4635297"
+```
+```json
+[
+    {
+        "slot": "4635297",
+        "root": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182",
+        "skipped": false,
+        "beacon_block": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182"
+    },
+    {
+        "slot": "4635296",
+        "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
+        "skipped": false,
+        "beacon_block": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62"
+    }
+]
+```
+
+#### `/v1/slots/lowest`
+```bash
+curl "http://localhost:5059/v1/slots/lowest"
+```
+```json
+{
+    "slot": "4635296",
+    "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
+    "skipped": false,
+    "beacon_block": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62"
+}
+```
+
+#### `/v1/slots/highest`
+```bash
+curl "http://localhost:5059/v1/slots/highest"
+```
+```json
+{
+    "slot": "4635358",
+    "root": "0xe9eff13560688f1bf15cf07b60c84963d4d04a4a885ed0eb19ceb8450011894b",
+    "skipped": false,
+    "beacon_block": "0xe9eff13560688f1bf15cf07b60c84963d4d04a4a885ed0eb19ceb8450011894b"
+}
+```
+
+#### `/v1/slots/{slot}/block`
+```bash
+curl "http://localhost:5059/v1/slots/4635296/block"
+```
+```json
+{
+    "slot": "4635296",
+    "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
+    "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b"
+}
+```
+
+#### `/v1/blocks/{block_id}`
+```bash
+curl "http://localhost:5059/v1/blocks/4635296"
+# OR
+curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62"
+```
+```json
+{
+    "slot": "4635296",
+    "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
+    "parent_root":
"0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" +} +``` + +#### `/v1/blocks?start_slot={}&end_slot={}` +```bash +curl "http://localhost:5059/v1/blocks?start_slot=4635296&end_slot=4635297" +``` +```json +[ + { + "slot": "4635297", + "root": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182", + "parent_root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" + }, + { + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" + } +] +``` + +#### `/v1/blocks/{block_id}/previous` +```bash +curl "http://localhost:5059/v1/blocks/4635297/previous" +# OR +curl "http://localhost:5059/v1/blocks/0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182/previous" +``` +```json +{ + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" +} +``` + +#### `/v1/blocks/{block_id}/next` +```bash +curl "http://localhost:5059/v1/blocks/4635296/next" +# OR +curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/next" +``` +```json +{ + "slot": "4635297", + "root": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182", + "parent_root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62" +} +``` + +#### `/v1/blocks/lowest` +```bash +curl "http://localhost:5059/v1/blocks/lowest" +``` +```json +{ + "slot": "4635296", + "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62", + "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b" +} +``` + +#### `/v1/blocks/highest` +```bash +curl "http://localhost:5059/v1/blocks/highest" +``` +```json +{ + "slot": "4635358", + "root": "0xe9eff13560688f1bf15cf07b60c84963d4d04a4a885ed0eb19ceb8450011894b", + "parent_root": "0xb66e05418bb5b1d4a965c994e1f0e5b5f0d7b780e0df12f3f6321510654fa1d2" +} +``` + +#### `/v1/blocks/{block_id}/proposer` +```bash +curl "http://localhost:5059/v1/blocks/4635296/proposer" +# OR +curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/proposer" + +``` +```json +{ + "slot": "4635296", + "proposer_index": 223126, + "graffiti": "" +} +``` + +#### `/v1/blocks/{block_id}/rewards` +```bash +curl "http://localhost:5059/v1/blocks/4635296/reward" +# OR +curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/reward" + +``` +```json +{ + "slot": "4635296", + "total": 25380059, + "attestation_reward": 24351867, + "sync_committee_reward": 1028192 +} +``` + +#### `/v1/blocks/{block_id}/packing` +```bash +curl "http://localhost:5059/v1/blocks/4635296/packing" +# OR +curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/packing" + +``` +```json +{ + "slot": "4635296", + "available": 16152, + "included": 13101, + "prior_skip_slots": 0 +} +``` + +#### `/v1/validators/{validator}` +```bash +curl "http://localhost:5059/v1/validators/1" +# OR +curl "http://localhost:5059/v1/validators/0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c" +``` +```json +{ + "index": 1, + "public_key": "0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c", + "status": "active_ongoing", + "client": null, + 
"activation_epoch": 0, + "exit_epoch": null +} +``` + +#### `/v1/validators/{validator}/attestation/{epoch}` +```bash +curl "http://localhost:5059/v1/validators/1/attestation/144853" +# OR +curl "http://localhost:5059/v1/validators/0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c/attestation/144853" +``` +```json +{ + "index": 1, + "epoch": "144853", + "source": true, + "head": true, + "target": true +} +``` + +#### `/v1/validators/missed/{vote}/{epoch}` +```bash +curl "http://localhost:5059/v1/validators/missed/head/144853" +``` +```json +[ + 63, + 67, + 98, + ... +] +``` + +#### `/v1/validators/missed/{vote}/{epoch}/graffiti` +```bash +curl "http://localhost:5059/v1/validators/missed/head/144853/graffiti" +``` +```json +{ + "Mr F was here": 3, + "Lighthouse/v3.1.0-aa022f4": 5, + ... +} +``` + +#### `/v1/clients/missed/{vote}/{epoch}` +```bash +curl "http://localhost:5059/v1/clients/missed/source/144853" +``` +```json +{ + "Lighthouse": 100, + "Lodestar": 100, + "Nimbus": 100, + "Prysm": 100, + "Teku": 100, + "Unknown": 100 +} +``` + +#### `/v1/clients/missed/{vote}/{epoch}/percentages` +Note that this endpoint expresses the following: +``` +What percentage of each client implementation missed this vote? +``` + +```bash +curl "http://localhost:5059/v1/clients/missed/target/144853/percentages" +``` +```json +{ + "Lighthouse": 0.51234567890, + "Lodestar": 0.51234567890, + "Nimbus": 0.51234567890, + "Prysm": 0.09876543210, + "Teku": 0.09876543210, + "Unknown": 0.05647382910 +} +``` + +#### `/v1/clients/missed/{vote}/{epoch}/percentages/relative` +Note that this endpoint expresses the following: +``` +For the validators which did miss this vote, what percentage of them were from each client implementation? +``` +You can check these values against the output of `/v1/clients/percentages` to see any discrepancies. + +```bash +curl "http://localhost:5059/v1/clients/missed/target/144853/percentages/relative" +``` +```json +{ + "Lighthouse": 11.11111111111111, + "Lodestar": 11.11111111111111, + "Nimbus": 11.11111111111111, + "Prysm": 16.66666666666667, + "Teku": 16.66666666666667, + "Unknown": 33.33333333333333 +} + +``` + +#### `/v1/clients` +```bash +curl "http://localhost:5059/v1/clients" +``` +```json +{ + "Lighthouse": 5000, + "Lodestar": 5000, + "Nimbus": 5000, + "Prysm": 5000, + "Teku": 5000, + "Unknown": 5000 +} +``` + +#### `/v1/clients/percentages` +```bash +curl "http://localhost:5059/v1/clients/percentages" +``` +```json +{ + "Lighthouse": 16.66666666666667, + "Lodestar": 16.66666666666667, + "Nimbus": 16.66666666666667, + "Prysm": 16.66666666666667, + "Teku": 16.66666666666667, + "Unknown": 16.66666666666667 +} +``` + +### Future work +- New tables + - `skip_slots`? + + +- More API endpoints + - `/v1/proposers?start_epoch={}&end_epoch={}` and similar + - `/v1/validators/{status}/count` + + +- Concurrently backfill and forwards fill, so forwards fill is not bottlenecked by large backfills. + + +- Better/prettier (async?) logging. + + +- Connect to a range of beacon_nodes to sync different components concurrently. +Generally, processing certain api queries such as `block_packing` and `attestation_performance` take the longest to sync. 
+ + +### Architecture +Connection Pooling: +- 1 Pool for Updater (read and write) +- 1 Pool for HTTP Server (should be read only, although not sure if we can enforce this) diff --git a/watch/config.yaml.default b/watch/config.yaml.default new file mode 100644 index 000000000..131609237 --- /dev/null +++ b/watch/config.yaml.default @@ -0,0 +1,49 @@ +--- +database: + user: "postgres" + password: "postgres" + dbname: "dev" + default_dbname: "postgres" + host: "localhost" + port: 5432 + connect_timeout_millis: 2000 + +server: + listen_addr: "127.0.0.1" + listen_port: 5059 + +updater: + # The URL of the Beacon Node to perform sync tasks with. + # Cannot yet accept multiple beacon nodes. + beacon_node_url: "http://localhost:5052" + # The number of epochs to backfill. Must be below 100. + max_backfill_size_epochs: 2 + # The epoch at which to stop backfilling. + backfill_stop_epoch: 0 + # Whether to sync the attestations table. + attestations: true + # Whether to sync the proposer_info table. + proposer_info: true + # Whether to sync the block_rewards table. + block_rewards: true + # Whether to sync the block_packing table. + block_packing: true + +blockprint: + # Whether to sync client information from blockprint. + enabled: false + # The URL of the blockprint server. + url: "" + # The username used to authenticate to the blockprint server. + username: "" + # The password used to authenticate to the blockprint server. + password: "" + +# Log level. +# Valid options are: +# - "trace" +# - "debug" +# - "info" +# - "warn" +# - "error" +log_level: "debug" diff --git a/watch/diesel.toml b/watch/diesel.toml new file mode 100644 index 000000000..bfb01bccf --- /dev/null +++ b/watch/diesel.toml @@ -0,0 +1,5 @@ +# For documentation on how to configure this file, +# see diesel.rs/guides/configuring-diesel-cli + +[print_schema] +file = "src/database/schema.rs" diff --git a/watch/migrations/.gitkeep b/watch/migrations/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/watch/migrations/00000000000000_diesel_initial_setup/down.sql b/watch/migrations/00000000000000_diesel_initial_setup/down.sql new file mode 100644 index 000000000..a9f526091 --- /dev/null +++ b/watch/migrations/00000000000000_diesel_initial_setup/down.sql @@ -0,0 +1,6 @@ +-- This file was automatically created by Diesel to setup helper functions +-- and other internal bookkeeping. This file is safe to edit, any future +-- changes will be added to existing projects as new migrations. + +DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass); +DROP FUNCTION IF EXISTS diesel_set_updated_at(); diff --git a/watch/migrations/00000000000000_diesel_initial_setup/up.sql b/watch/migrations/00000000000000_diesel_initial_setup/up.sql new file mode 100644 index 000000000..d68895b1a --- /dev/null +++ b/watch/migrations/00000000000000_diesel_initial_setup/up.sql @@ -0,0 +1,36 @@ +-- This file was automatically created by Diesel to setup helper functions +-- and other internal bookkeeping. This file is safe to edit, any future +-- changes will be added to existing projects as new migrations. 
+ + + + +-- Sets up a trigger for the given table to automatically set a column called +-- `updated_at` whenever the row is modified (unless `updated_at` was included +-- in the modified columns) +-- +-- # Example +-- +-- ```sql +-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW()); +-- +-- SELECT diesel_manage_updated_at('users'); +-- ``` +CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$ +BEGIN + EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s + FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl); +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$ +BEGIN + IF ( + NEW IS DISTINCT FROM OLD AND + NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at + ) THEN + NEW.updated_at := current_timestamp; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; diff --git a/watch/migrations/2022-01-01-000000_canonical_slots/down.sql b/watch/migrations/2022-01-01-000000_canonical_slots/down.sql new file mode 100644 index 000000000..551ed6605 --- /dev/null +++ b/watch/migrations/2022-01-01-000000_canonical_slots/down.sql @@ -0,0 +1 @@ +DROP TABLE canonical_slots diff --git a/watch/migrations/2022-01-01-000000_canonical_slots/up.sql b/watch/migrations/2022-01-01-000000_canonical_slots/up.sql new file mode 100644 index 000000000..2629f11a4 --- /dev/null +++ b/watch/migrations/2022-01-01-000000_canonical_slots/up.sql @@ -0,0 +1,6 @@ +CREATE TABLE canonical_slots ( + slot integer PRIMARY KEY, + root bytea NOT NULL, + skipped boolean NOT NULL, + beacon_block bytea UNIQUE +) diff --git a/watch/migrations/2022-01-01-000001_beacon_blocks/down.sql b/watch/migrations/2022-01-01-000001_beacon_blocks/down.sql new file mode 100644 index 000000000..8901956f4 --- /dev/null +++ b/watch/migrations/2022-01-01-000001_beacon_blocks/down.sql @@ -0,0 +1 @@ +DROP TABLE beacon_blocks diff --git a/watch/migrations/2022-01-01-000001_beacon_blocks/up.sql b/watch/migrations/2022-01-01-000001_beacon_blocks/up.sql new file mode 100644 index 000000000..250c667b2 --- /dev/null +++ b/watch/migrations/2022-01-01-000001_beacon_blocks/up.sql @@ -0,0 +1,7 @@ +CREATE TABLE beacon_blocks ( + slot integer PRIMARY KEY REFERENCES canonical_slots(slot) ON DELETE CASCADE, + root bytea REFERENCES canonical_slots(beacon_block) NOT NULL, + parent_root bytea NOT NULL, + attestation_count integer NOT NULL, + transaction_count integer +) diff --git a/watch/migrations/2022-01-01-000002_validators/down.sql b/watch/migrations/2022-01-01-000002_validators/down.sql new file mode 100644 index 000000000..17819fc34 --- /dev/null +++ b/watch/migrations/2022-01-01-000002_validators/down.sql @@ -0,0 +1 @@ +DROP TABLE validators diff --git a/watch/migrations/2022-01-01-000002_validators/up.sql b/watch/migrations/2022-01-01-000002_validators/up.sql new file mode 100644 index 000000000..69cfef677 --- /dev/null +++ b/watch/migrations/2022-01-01-000002_validators/up.sql @@ -0,0 +1,7 @@ +CREATE TABLE validators ( + index integer PRIMARY KEY, + public_key bytea NOT NULL, + status text NOT NULL, + activation_epoch integer, + exit_epoch integer +) diff --git a/watch/migrations/2022-01-01-000003_proposer_info/down.sql b/watch/migrations/2022-01-01-000003_proposer_info/down.sql new file mode 100644 index 000000000..d61330be5 --- /dev/null +++ b/watch/migrations/2022-01-01-000003_proposer_info/down.sql @@ -0,0 +1 @@ +DROP TABLE proposer_info diff --git a/watch/migrations/2022-01-01-000003_proposer_info/up.sql 
b/watch/migrations/2022-01-01-000003_proposer_info/up.sql new file mode 100644 index 000000000..488aedb27 --- /dev/null +++ b/watch/migrations/2022-01-01-000003_proposer_info/up.sql @@ -0,0 +1,5 @@ +CREATE TABLE proposer_info ( + slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE, + proposer_index integer REFERENCES validators(index) ON DELETE CASCADE NOT NULL, + graffiti text NOT NULL +) diff --git a/watch/migrations/2022-01-01-000004_active_config/down.sql b/watch/migrations/2022-01-01-000004_active_config/down.sql new file mode 100644 index 000000000..b4304eb7b --- /dev/null +++ b/watch/migrations/2022-01-01-000004_active_config/down.sql @@ -0,0 +1 @@ +DROP TABLE active_config diff --git a/watch/migrations/2022-01-01-000004_active_config/up.sql b/watch/migrations/2022-01-01-000004_active_config/up.sql new file mode 100644 index 000000000..476a09116 --- /dev/null +++ b/watch/migrations/2022-01-01-000004_active_config/up.sql @@ -0,0 +1,5 @@ +CREATE TABLE active_config ( + id integer PRIMARY KEY CHECK (id=1), + config_name text NOT NULL, + slots_per_epoch integer NOT NULL +) diff --git a/watch/migrations/2022-01-01-000010_blockprint/down.sql b/watch/migrations/2022-01-01-000010_blockprint/down.sql new file mode 100644 index 000000000..fa53325da --- /dev/null +++ b/watch/migrations/2022-01-01-000010_blockprint/down.sql @@ -0,0 +1 @@ +DROP TABLE blockprint diff --git a/watch/migrations/2022-01-01-000010_blockprint/up.sql b/watch/migrations/2022-01-01-000010_blockprint/up.sql new file mode 100644 index 000000000..2d5741f50 --- /dev/null +++ b/watch/migrations/2022-01-01-000010_blockprint/up.sql @@ -0,0 +1,4 @@ +CREATE TABLE blockprint ( + slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE, + best_guess text NOT NULL +) diff --git a/watch/migrations/2022-01-01-000011_block_rewards/down.sql b/watch/migrations/2022-01-01-000011_block_rewards/down.sql new file mode 100644 index 000000000..2dc87995c --- /dev/null +++ b/watch/migrations/2022-01-01-000011_block_rewards/down.sql @@ -0,0 +1 @@ +DROP TABLE block_rewards diff --git a/watch/migrations/2022-01-01-000011_block_rewards/up.sql b/watch/migrations/2022-01-01-000011_block_rewards/up.sql new file mode 100644 index 000000000..47cb4304f --- /dev/null +++ b/watch/migrations/2022-01-01-000011_block_rewards/up.sql @@ -0,0 +1,6 @@ +CREATE TABLE block_rewards ( + slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE, + total integer NOT NULL, + attestation_reward integer NOT NULL, + sync_committee_reward integer NOT NULL +) diff --git a/watch/migrations/2022-01-01-000012_block_packing/down.sql b/watch/migrations/2022-01-01-000012_block_packing/down.sql new file mode 100644 index 000000000..e9e7755e3 --- /dev/null +++ b/watch/migrations/2022-01-01-000012_block_packing/down.sql @@ -0,0 +1 @@ +DROP TABLE block_packing diff --git a/watch/migrations/2022-01-01-000012_block_packing/up.sql b/watch/migrations/2022-01-01-000012_block_packing/up.sql new file mode 100644 index 000000000..63a9925f9 --- /dev/null +++ b/watch/migrations/2022-01-01-000012_block_packing/up.sql @@ -0,0 +1,6 @@ +CREATE TABLE block_packing ( + slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE, + available integer NOT NULL, + included integer NOT NULL, + prior_skip_slots integer NOT NULL +) diff --git a/watch/migrations/2022-01-01-000013_suboptimal_attestations/down.sql b/watch/migrations/2022-01-01-000013_suboptimal_attestations/down.sql new file mode 100644 index 000000000..0f32b6b4f --- /dev/null 
+++ b/watch/migrations/2022-01-01-000013_suboptimal_attestations/down.sql @@ -0,0 +1 @@ +DROP TABLE suboptimal_attestations diff --git a/watch/migrations/2022-01-01-000013_suboptimal_attestations/up.sql b/watch/migrations/2022-01-01-000013_suboptimal_attestations/up.sql new file mode 100644 index 000000000..5352afefc --- /dev/null +++ b/watch/migrations/2022-01-01-000013_suboptimal_attestations/up.sql @@ -0,0 +1,8 @@ +CREATE TABLE suboptimal_attestations ( + epoch_start_slot integer CHECK (epoch_start_slot % 32 = 0) REFERENCES canonical_slots(slot) ON DELETE CASCADE, + index integer NOT NULL REFERENCES validators(index) ON DELETE CASCADE, + source boolean NOT NULL, + head boolean NOT NULL, + target boolean NOT NULL, + PRIMARY KEY(epoch_start_slot, index) +) diff --git a/watch/migrations/2022-01-01-000020_capella/down.sql b/watch/migrations/2022-01-01-000020_capella/down.sql new file mode 100644 index 000000000..5903b351d --- /dev/null +++ b/watch/migrations/2022-01-01-000020_capella/down.sql @@ -0,0 +1,2 @@ +ALTER TABLE beacon_blocks +DROP COLUMN withdrawal_count; diff --git a/watch/migrations/2022-01-01-000020_capella/up.sql b/watch/migrations/2022-01-01-000020_capella/up.sql new file mode 100644 index 000000000..b52b4b009 --- /dev/null +++ b/watch/migrations/2022-01-01-000020_capella/up.sql @@ -0,0 +1,3 @@ +ALTER TABLE beacon_blocks +ADD COLUMN withdrawal_count integer; + diff --git a/watch/postgres_docker_compose/compose.yml b/watch/postgres_docker_compose/compose.yml new file mode 100644 index 000000000..eae4de4a2 --- /dev/null +++ b/watch/postgres_docker_compose/compose.yml @@ -0,0 +1,16 @@ +version: "3" + +services: + postgres: + image: postgres:12.3-alpine + restart: always + environment: + POSTGRES_PASSWORD: postgres + POSTGRES_USER: postgres + volumes: + - postgres:/var/lib/postgresql/data + ports: + - 127.0.0.1:5432:5432 + +volumes: + postgres: diff --git a/watch/src/block_packing/database.rs b/watch/src/block_packing/database.rs new file mode 100644 index 000000000..f7375431c --- /dev/null +++ b/watch/src/block_packing/database.rs @@ -0,0 +1,140 @@ +use crate::database::{ + schema::{beacon_blocks, block_packing}, + watch_types::{WatchHash, WatchSlot}, + Error, PgConn, MAX_SIZE_BATCH_INSERT, +}; + +use diesel::prelude::*; +use diesel::{Insertable, Queryable}; +use log::debug; +use serde::{Deserialize, Serialize}; +use std::time::Instant; + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = block_packing)] +pub struct WatchBlockPacking { + pub slot: WatchSlot, + pub available: i32, + pub included: i32, + pub prior_skip_slots: i32, +} + +/// Insert a batch of values into the `block_packing` table. +/// +/// On a conflict, it will do nothing, leaving the old value. +pub fn insert_batch_block_packing( + conn: &mut PgConn, + packing: Vec, +) -> Result<(), Error> { + use self::block_packing::dsl::*; + + let mut count = 0; + let timer = Instant::now(); + + for chunk in packing.chunks(MAX_SIZE_BATCH_INSERT) { + count += diesel::insert_into(block_packing) + .values(chunk) + .on_conflict_do_nothing() + .execute(conn)?; + } + + let time_taken = timer.elapsed(); + debug!("Block packing inserted, count: {count}, time taken: {time_taken:?}"); + Ok(()) +} + +/// Selects the row from the `block_packing` table where `slot` is minimum. 
+pub fn get_lowest_block_packing(conn: &mut PgConn) -> Result, Error> { + use self::block_packing::dsl::*; + let timer = Instant::now(); + + let result = block_packing + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block packing requested: lowest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects the row from the `block_packing` table where `slot` is maximum. +pub fn get_highest_block_packing(conn: &mut PgConn) -> Result, Error> { + use self::block_packing::dsl::*; + let timer = Instant::now(); + + let result = block_packing + .order_by(slot.desc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block packing requested: highest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `block_packing` table corresponding to a given `root_query`. +pub fn get_block_packing_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root}; + use self::block_packing::dsl::*; + let timer = Instant::now(); + + let join = beacon_blocks.inner_join(block_packing); + + let result = join + .select((slot, available, included, prior_skip_slots)) + .filter(root.eq(root_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block packing requested: {root_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `block_packing` table corresponding to a given `slot_query`. +pub fn get_block_packing_by_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::block_packing::dsl::*; + let timer = Instant::now(); + + let result = block_packing + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block packing requested: {slot_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding +/// row in `block_packing`. +#[allow(dead_code)] +pub fn get_unknown_block_packing( + conn: &mut PgConn, + slots_per_epoch: u64, +) -> Result>, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root, slot}; + use self::block_packing::dsl::block_packing; + + let join = beacon_blocks.left_join(block_packing); + + let result = join + .select(slot) + .filter(root.is_null()) + // Block packing cannot be retrieved for epoch 0 so we need to exclude them. + .filter(slot.ge(slots_per_epoch as i32)) + .order_by(slot.desc()) + .nullable() + .load::>(conn)?; + + Ok(result) +} diff --git a/watch/src/block_packing/mod.rs b/watch/src/block_packing/mod.rs new file mode 100644 index 000000000..5d74fc597 --- /dev/null +++ b/watch/src/block_packing/mod.rs @@ -0,0 +1,38 @@ +pub mod database; +pub mod server; +pub mod updater; + +use crate::database::watch_types::WatchSlot; +use crate::updater::error::Error; + +pub use database::{ + get_block_packing_by_root, get_block_packing_by_slot, get_highest_block_packing, + get_lowest_block_packing, get_unknown_block_packing, insert_batch_block_packing, + WatchBlockPacking, +}; +pub use server::block_packing_routes; + +use eth2::BeaconNodeHttpClient; +use types::Epoch; + +/// Sends a request to `lighthouse/analysis/block_packing`. +/// Formats the response into a vector of `WatchBlockPacking`. +/// +/// Will fail if `start_epoch == 0`. 
+pub async fn get_block_packing( + bn: &BeaconNodeHttpClient, + start_epoch: Epoch, + end_epoch: Epoch, +) -> Result, Error> { + Ok(bn + .get_lighthouse_analysis_block_packing(start_epoch, end_epoch) + .await? + .into_iter() + .map(|data| WatchBlockPacking { + slot: WatchSlot::from_slot(data.slot), + available: data.available_attestations as i32, + included: data.included_attestations as i32, + prior_skip_slots: data.prior_skip_slots as i32, + }) + .collect()) +} diff --git a/watch/src/block_packing/server.rs b/watch/src/block_packing/server.rs new file mode 100644 index 000000000..819144562 --- /dev/null +++ b/watch/src/block_packing/server.rs @@ -0,0 +1,31 @@ +use crate::block_packing::database::{ + get_block_packing_by_root, get_block_packing_by_slot, WatchBlockPacking, +}; +use crate::database::{get_connection, PgPool, WatchHash, WatchSlot}; +use crate::server::Error; + +use axum::{extract::Path, routing::get, Extension, Json, Router}; +use eth2::types::BlockId; +use std::str::FromStr; + +pub async fn get_block_packing( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? { + BlockId::Root(root) => Ok(Json(get_block_packing_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(get_block_packing_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub fn block_packing_routes() -> Router { + Router::new().route("/v1/blocks/:block/packing", get(get_block_packing)) +} diff --git a/watch/src/block_packing/updater.rs b/watch/src/block_packing/updater.rs new file mode 100644 index 000000000..215964901 --- /dev/null +++ b/watch/src/block_packing/updater.rs @@ -0,0 +1,211 @@ +use crate::database::{self, Error as DbError}; +use crate::updater::{Error, UpdateHandler}; + +use crate::block_packing::get_block_packing; + +use eth2::types::{Epoch, EthSpec}; +use log::{debug, error, warn}; + +const MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING: u64 = 50; + +impl UpdateHandler { + /// Forward fills the `block_packing` table starting from the entry with the + /// highest slot. + /// + /// It constructs a request to the `get_block_packing` API with: + /// `start_epoch` -> highest completely filled epoch + 1 (or epoch of lowest beacon block) + /// `end_epoch` -> epoch of highest beacon block + /// + /// It will resync the latest epoch if it is not fully filled. + /// That is, `if highest_filled_slot % slots_per_epoch != 31` + /// This means that if the last slot of an epoch is a skip slot, the whole epoch will be + //// resynced during the next head update. + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`. + pub async fn fill_block_packing(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + + // Get the slot of the highest entry in the `block_packing` table. + let highest_filled_slot_opt = if self.config.block_packing { + database::get_highest_block_packing(&mut conn)?.map(|packing| packing.slot) + } else { + return Err(Error::NotEnabled("block_packing".to_string())); + }; + + let mut start_epoch = if let Some(highest_filled_slot) = highest_filled_slot_opt { + if highest_filled_slot.as_slot() % self.slots_per_epoch + == self.slots_per_epoch.saturating_sub(1) + { + // The whole epoch is filled so we can begin syncing the next one. 
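+ // Illustrative example (assuming 32 slots per epoch): if the highest filled slot is 95,
+ // the last slot of epoch 2 (95 % 32 == 31), forward filling resumes from epoch 3.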
+ highest_filled_slot.as_slot().epoch(self.slots_per_epoch) + 1 + } else { + // The epoch is only partially synced. Try to sync it fully. + highest_filled_slot.as_slot().epoch(self.slots_per_epoch) + } + } else { + // No entries in the `block_packing` table. Use `beacon_blocks` instead. + if let Some(lowest_beacon_block) = database::get_lowest_beacon_block(&mut conn)? { + lowest_beacon_block + .slot + .as_slot() + .epoch(self.slots_per_epoch) + } else { + // There are no blocks in the database, do not fill the `block_packing` table. + warn!("Refusing to fill block packing as there are no blocks in the database"); + return Ok(()); + } + }; + + // The `get_block_packing` API endpoint cannot accept `start_epoch == 0`. + if start_epoch == 0 { + start_epoch += 1 + } + + if let Some(highest_block_slot) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot()) + { + let mut end_epoch = highest_block_slot.epoch(self.slots_per_epoch); + + if start_epoch > end_epoch { + debug!("Block packing is up to date with the head of the database"); + return Ok(()); + } + + // Ensure the size of the request does not exceed the maximum allowed value. + if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING) { + end_epoch = start_epoch + MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING + } + + if let Some(lowest_block_slot) = + database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot()) + { + let mut packing = get_block_packing(&self.bn, start_epoch, end_epoch).await?; + + // Since we pull a full epoch of data but are not guaranteed to have all blocks of + // that epoch available, only insert blocks with corresponding `beacon_block`s. + packing.retain(|packing| { + packing.slot.as_slot() >= lowest_block_slot + && packing.slot.as_slot() <= highest_block_slot + }); + database::insert_batch_block_packing(&mut conn, packing)?; + } else { + return Err(Error::Database(DbError::Other( + "Database did not return a lowest block when one exists".to_string(), + ))); + } + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the + // `block_packing` table. This is a critical failure. It usually means someone has + // manually tampered with the database tables and should not occur during normal + // operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } + + /// Backfill the `block_packing` table starting from the entry with the lowest slot. + /// + /// It constructs a request to the `get_block_packing` function with: + /// `start_epoch` -> epoch of lowest_beacon_block + /// `end_epoch` -> epoch of lowest filled `block_packing` - 1 (or epoch of highest beacon block) + /// + /// It will resync the lowest epoch if it is not fully filled. + /// That is, `if lowest_filled_slot % slots_per_epoch != 0` + /// This means that if the last slot of an epoch is a skip slot, the whole epoch will be + //// resynced during the next head update. + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`. + pub async fn backfill_block_packing(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let max_block_packing_backfill = self.config.max_backfill_size_epochs; + + // Get the slot of the lowest entry in the `block_packing` table. 
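+ // Illustrative example (assuming 32 slots per epoch): a lowest filled slot of 64 is the
+ // first slot of epoch 2, so backfilling continues downwards from epoch 1; a lowest filled
+ // slot of 70 means epoch 2 is only partially filled, so epoch 2 is requested again.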
+ let lowest_filled_slot_opt = if self.config.block_packing { + database::get_lowest_block_packing(&mut conn)?.map(|packing| packing.slot) + } else { + return Err(Error::NotEnabled("block_packing".to_string())); + }; + + let end_epoch = if let Some(lowest_filled_slot) = lowest_filled_slot_opt { + if lowest_filled_slot.as_slot() % self.slots_per_epoch == 0 { + lowest_filled_slot + .as_slot() + .epoch(self.slots_per_epoch) + .saturating_sub(Epoch::new(1)) + } else { + // The epoch is only partially synced. Try to sync it fully. + lowest_filled_slot.as_slot().epoch(self.slots_per_epoch) + } + } else { + // No entries in the `block_packing` table. Use `beacon_blocks` instead. + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + highest_beacon_block.as_slot().epoch(self.slots_per_epoch) + } else { + // There are no blocks in the database, do not backfill the `block_packing` table. + warn!("Refusing to backfill block packing as there are no blocks in the database"); + return Ok(()); + } + }; + + if end_epoch <= 1 { + debug!("Block packing backfill is complete"); + return Ok(()); + } + + if let Some(lowest_block_slot) = + database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot()) + { + let mut start_epoch = lowest_block_slot.epoch(self.slots_per_epoch); + + if start_epoch >= end_epoch { + debug!("Block packing is up to date with the base of the database"); + return Ok(()); + } + + // Ensure that the request range does not exceed `max_block_packing_backfill` or + // `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`. + if start_epoch < end_epoch.saturating_sub(max_block_packing_backfill) { + start_epoch = end_epoch.saturating_sub(max_block_packing_backfill) + } + if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING) { + start_epoch = end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING) + } + + // The `block_packing` API cannot accept `start_epoch == 0`. + if start_epoch == 0 { + start_epoch += 1 + } + + if let Some(highest_block_slot) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot()) + { + let mut packing = get_block_packing(&self.bn, start_epoch, end_epoch).await?; + + // Only insert blocks with corresponding `beacon_block`s. + packing.retain(|packing| { + packing.slot.as_slot() >= lowest_block_slot + && packing.slot.as_slot() <= highest_block_slot + }); + + database::insert_batch_block_packing(&mut conn, packing)?; + } else { + return Err(Error::Database(DbError::Other( + "Database did not return a lowest block when one exists".to_string(), + ))); + } + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the + // `block_packing` table. This is a critical failure. It usually means someone has + // manually tampered with the database tables and should not occur during normal + // operation. + error!("Database is corrupted. 
Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } +} diff --git a/watch/src/block_rewards/database.rs b/watch/src/block_rewards/database.rs new file mode 100644 index 000000000..a2bf49f3e --- /dev/null +++ b/watch/src/block_rewards/database.rs @@ -0,0 +1,137 @@ +use crate::database::{ + schema::{beacon_blocks, block_rewards}, + watch_types::{WatchHash, WatchSlot}, + Error, PgConn, MAX_SIZE_BATCH_INSERT, +}; + +use diesel::prelude::*; +use diesel::{Insertable, Queryable}; +use log::debug; +use serde::{Deserialize, Serialize}; +use std::time::Instant; + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = block_rewards)] +pub struct WatchBlockRewards { + pub slot: WatchSlot, + pub total: i32, + pub attestation_reward: i32, + pub sync_committee_reward: i32, +} + +/// Insert a batch of values into the `block_rewards` table. +/// +/// On a conflict, it will do nothing, leaving the old value. +pub fn insert_batch_block_rewards( + conn: &mut PgConn, + rewards: Vec, +) -> Result<(), Error> { + use self::block_rewards::dsl::*; + + let mut count = 0; + let timer = Instant::now(); + + for chunk in rewards.chunks(MAX_SIZE_BATCH_INSERT) { + count += diesel::insert_into(block_rewards) + .values(chunk) + .on_conflict_do_nothing() + .execute(conn)?; + } + + let time_taken = timer.elapsed(); + debug!("Block rewards inserted, count: {count}, time_taken: {time_taken:?}"); + Ok(()) +} + +/// Selects the row from the `block_rewards` table where `slot` is minimum. +pub fn get_lowest_block_rewards(conn: &mut PgConn) -> Result, Error> { + use self::block_rewards::dsl::*; + let timer = Instant::now(); + + let result = block_rewards + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block rewards requested: lowest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects the row from the `block_rewards` table where `slot` is maximum. +pub fn get_highest_block_rewards(conn: &mut PgConn) -> Result, Error> { + use self::block_rewards::dsl::*; + let timer = Instant::now(); + + let result = block_rewards + .order_by(slot.desc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block rewards requested: highest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `block_rewards` table corresponding to a given `root_query`. +pub fn get_block_rewards_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root}; + use self::block_rewards::dsl::*; + let timer = Instant::now(); + + let join = beacon_blocks.inner_join(block_rewards); + + let result = join + .select((slot, total, attestation_reward, sync_committee_reward)) + .filter(root.eq(root_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block rewards requested: {root_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `block_rewards` table corresponding to a given `slot_query`. 
+pub fn get_block_rewards_by_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::block_rewards::dsl::*; + let timer = Instant::now(); + + let result = block_rewards + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Block rewards requested: {slot_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding +/// row in `block_rewards`. +#[allow(dead_code)] +pub fn get_unknown_block_rewards(conn: &mut PgConn) -> Result>, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root, slot}; + use self::block_rewards::dsl::block_rewards; + + let join = beacon_blocks.left_join(block_rewards); + + let result = join + .select(slot) + .filter(root.is_null()) + // Block rewards cannot be retrieved for `slot == 0` so we need to exclude it. + .filter(slot.ne(0)) + .order_by(slot.desc()) + .nullable() + .load::>(conn)?; + + Ok(result) +} diff --git a/watch/src/block_rewards/mod.rs b/watch/src/block_rewards/mod.rs new file mode 100644 index 000000000..0dac88ea5 --- /dev/null +++ b/watch/src/block_rewards/mod.rs @@ -0,0 +1,38 @@ +pub mod database; +mod server; +mod updater; + +use crate::database::watch_types::WatchSlot; +use crate::updater::error::Error; + +pub use database::{ + get_block_rewards_by_root, get_block_rewards_by_slot, get_highest_block_rewards, + get_lowest_block_rewards, get_unknown_block_rewards, insert_batch_block_rewards, + WatchBlockRewards, +}; +pub use server::block_rewards_routes; + +use eth2::BeaconNodeHttpClient; +use types::Slot; + +/// Sends a request to `lighthouse/analysis/block_rewards`. +/// Formats the response into a vector of `WatchBlockRewards`. +/// +/// Will fail if `start_slot == 0`. +pub async fn get_block_rewards( + bn: &BeaconNodeHttpClient, + start_slot: Slot, + end_slot: Slot, +) -> Result, Error> { + Ok(bn + .get_lighthouse_analysis_block_rewards(start_slot, end_slot) + .await? + .into_iter() + .map(|data| WatchBlockRewards { + slot: WatchSlot::from_slot(data.meta.slot), + total: data.total as i32, + attestation_reward: data.attestation_rewards.total as i32, + sync_committee_reward: data.sync_committee_rewards as i32, + }) + .collect()) +} diff --git a/watch/src/block_rewards/server.rs b/watch/src/block_rewards/server.rs new file mode 100644 index 000000000..480346e25 --- /dev/null +++ b/watch/src/block_rewards/server.rs @@ -0,0 +1,31 @@ +use crate::block_rewards::database::{ + get_block_rewards_by_root, get_block_rewards_by_slot, WatchBlockRewards, +}; +use crate::database::{get_connection, PgPool, WatchHash, WatchSlot}; +use crate::server::Error; + +use axum::{extract::Path, routing::get, Extension, Json, Router}; +use eth2::types::BlockId; +use std::str::FromStr; + +pub async fn get_block_rewards( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? 
{ + BlockId::Root(root) => Ok(Json(get_block_rewards_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(get_block_rewards_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub fn block_rewards_routes() -> Router { + Router::new().route("/v1/blocks/:block/rewards", get(get_block_rewards)) +} diff --git a/watch/src/block_rewards/updater.rs b/watch/src/block_rewards/updater.rs new file mode 100644 index 000000000..ad34b1f07 --- /dev/null +++ b/watch/src/block_rewards/updater.rs @@ -0,0 +1,157 @@ +use crate::database::{self, Error as DbError}; +use crate::updater::{Error, UpdateHandler}; + +use crate::block_rewards::get_block_rewards; + +use eth2::types::EthSpec; +use log::{debug, error, warn}; + +const MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS: u64 = 1600; + +impl UpdateHandler { + /// Forward fills the `block_rewards` table starting from the entry with the + /// highest slot. + /// + /// It constructs a request to the `get_block_rewards` API with: + /// `start_slot` -> highest filled `block_rewards` + 1 (or lowest beacon block) + /// `end_slot` -> highest beacon block + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS`. + pub async fn fill_block_rewards(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + + // Get the slot of the highest entry in the `block_rewards` table. + let highest_filled_slot_opt = if self.config.block_rewards { + database::get_highest_block_rewards(&mut conn)?.map(|reward| reward.slot) + } else { + return Err(Error::NotEnabled("block_rewards".to_string())); + }; + + let mut start_slot = if let Some(highest_filled_slot) = highest_filled_slot_opt { + highest_filled_slot.as_slot() + 1 + } else { + // No entries in the `block_rewards` table. Use `beacon_blocks` instead. + if let Some(lowest_beacon_block) = + database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot) + { + lowest_beacon_block.as_slot() + } else { + // There are no blocks in the database, do not fill the `block_rewards` table. + warn!("Refusing to fill block rewards as there are no blocks in the database"); + return Ok(()); + } + }; + + // The `block_rewards` API cannot accept `start_slot == 0`. + if start_slot == 0 { + start_slot += 1; + } + + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + let mut end_slot = highest_beacon_block.as_slot(); + + if start_slot > end_slot { + debug!("Block rewards are up to date with the head of the database"); + return Ok(()); + } + + // Ensure the size of the request does not exceed the maximum allowed value. + if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS) { + end_slot = start_slot + MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS + } + + let rewards = get_block_rewards(&self.bn, start_slot, end_slot).await?; + database::insert_batch_block_rewards(&mut conn, rewards)?; + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the + // `block_rewards` table. This is a critical failure. It usually means someone has + // manually tampered with the database tables and should not occur during normal + // operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } + + /// Backfill the `block_rewards` tables starting from the entry with the + /// lowest slot. 
+ /// + /// It constructs a request to the `get_block_rewards` API with: + /// `start_slot` -> lowest_beacon_block + /// `end_slot` -> lowest filled `block_rewards` - 1 (or highest beacon block) + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS`. + pub async fn backfill_block_rewards(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let max_block_reward_backfill = self.config.max_backfill_size_epochs * self.slots_per_epoch; + + // Get the slot of the lowest entry in the `block_rewards` table. + let lowest_filled_slot_opt = if self.config.block_rewards { + database::get_lowest_block_rewards(&mut conn)?.map(|reward| reward.slot) + } else { + return Err(Error::NotEnabled("block_rewards".to_string())); + }; + + let end_slot = if let Some(lowest_filled_slot) = lowest_filled_slot_opt { + lowest_filled_slot.as_slot().saturating_sub(1_u64) + } else { + // No entries in the `block_rewards` table. Use `beacon_blocks` instead. + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + highest_beacon_block.as_slot() + } else { + // There are no blocks in the database, do not backfill the `block_rewards` table. + warn!("Refusing to backfill block rewards as there are no blocks in the database"); + return Ok(()); + } + }; + + if end_slot <= 1 { + debug!("Block rewards backfill is complete"); + return Ok(()); + } + + if let Some(lowest_block_slot) = database::get_lowest_beacon_block(&mut conn)? { + let mut start_slot = lowest_block_slot.slot.as_slot(); + + if start_slot >= end_slot { + debug!("Block rewards are up to date with the base of the database"); + return Ok(()); + } + + // Ensure that the request range does not exceed `max_block_reward_backfill` or + // `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS`. + if start_slot < end_slot.saturating_sub(max_block_reward_backfill) { + start_slot = end_slot.saturating_sub(max_block_reward_backfill) + } + + if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS) { + start_slot = end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS) + } + + // The `block_rewards` API cannot accept `start_slot == 0`. + if start_slot == 0 { + start_slot += 1 + } + + let rewards = get_block_rewards(&self.bn, start_slot, end_slot).await?; + + if self.config.block_rewards { + database::insert_batch_block_rewards(&mut conn, rewards)?; + } + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the + // `block_rewards` table. This is a critical failure. It usually means someone has + // manually tampered with the database tables and should not occur during normal + // operation. + error!("Database is corrupted. 
Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } +} diff --git a/watch/src/blockprint/config.rs b/watch/src/blockprint/config.rs new file mode 100644 index 000000000..721fa7cb1 --- /dev/null +++ b/watch/src/blockprint/config.rs @@ -0,0 +1,40 @@ +use serde::{Deserialize, Serialize}; + +pub const fn enabled() -> bool { + false +} + +pub const fn url() -> Option { + None +} + +pub const fn username() -> Option { + None +} + +pub const fn password() -> Option { + None +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + #[serde(default = "enabled")] + pub enabled: bool, + #[serde(default = "url")] + pub url: Option, + #[serde(default = "username")] + pub username: Option, + #[serde(default = "password")] + pub password: Option, +} + +impl Default for Config { + fn default() -> Self { + Config { + enabled: enabled(), + url: url(), + username: username(), + password: password(), + } + } +} diff --git a/watch/src/blockprint/database.rs b/watch/src/blockprint/database.rs new file mode 100644 index 000000000..afa35c81b --- /dev/null +++ b/watch/src/blockprint/database.rs @@ -0,0 +1,224 @@ +use crate::database::{ + self, + schema::{beacon_blocks, blockprint}, + watch_types::{WatchHash, WatchSlot}, + Error, PgConn, MAX_SIZE_BATCH_INSERT, +}; + +use diesel::prelude::*; +use diesel::sql_types::{Integer, Text}; +use diesel::{Insertable, Queryable}; +use log::debug; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::time::Instant; + +type WatchConsensusClient = String; +pub fn list_consensus_clients() -> Vec { + vec![ + "Lighthouse".to_string(), + "Lodestar".to_string(), + "Nimbus".to_string(), + "Prysm".to_string(), + "Teku".to_string(), + "Unknown".to_string(), + ] +} + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = blockprint)] +pub struct WatchBlockprint { + pub slot: WatchSlot, + pub best_guess: WatchConsensusClient, +} + +#[derive(Debug, QueryableByName, diesel::FromSqlRow)] +pub struct WatchValidatorBlockprint { + #[diesel(sql_type = Integer)] + pub proposer_index: i32, + #[diesel(sql_type = Text)] + pub best_guess: WatchConsensusClient, + #[diesel(sql_type = Integer)] + pub slot: WatchSlot, +} + +/// Insert a batch of values into the `blockprint` table. +/// +/// On a conflict, it will do nothing, leaving the old value. +pub fn insert_batch_blockprint( + conn: &mut PgConn, + prints: Vec, +) -> Result<(), Error> { + use self::blockprint::dsl::*; + + let mut count = 0; + let timer = Instant::now(); + + for chunk in prints.chunks(MAX_SIZE_BATCH_INSERT) { + count += diesel::insert_into(blockprint) + .values(chunk) + .on_conflict_do_nothing() + .execute(conn)?; + } + + let time_taken = timer.elapsed(); + debug!("Blockprint inserted, count: {count}, time_taken: {time_taken:?}"); + Ok(()) +} + +/// Selects the row from the `blockprint` table where `slot` is minimum. +pub fn get_lowest_blockprint(conn: &mut PgConn) -> Result, Error> { + use self::blockprint::dsl::*; + let timer = Instant::now(); + + let result = blockprint + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Blockprint requested: lowest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects the row from the `blockprint` table where `slot` is maximum. 
+pub fn get_highest_blockprint(conn: &mut PgConn) -> Result, Error> { + use self::blockprint::dsl::*; + let timer = Instant::now(); + + let result = blockprint + .order_by(slot.desc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Blockprint requested: highest, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `blockprint` table corresponding to a given `root_query`. +pub fn get_blockprint_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root}; + use self::blockprint::dsl::*; + let timer = Instant::now(); + + let join = beacon_blocks.inner_join(blockprint); + + let result = join + .select((slot, best_guess)) + .filter(root.eq(root_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Blockprint requested: {root_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `blockprint` table corresponding to a given `slot_query`. +pub fn get_blockprint_by_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::blockprint::dsl::*; + let timer = Instant::now(); + + let result = blockprint + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Blockprint requested: {slot_query}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding +/// row in `blockprint`. +#[allow(dead_code)] +pub fn get_unknown_blockprint(conn: &mut PgConn) -> Result>, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root, slot}; + use self::blockprint::dsl::blockprint; + + let join = beacon_blocks.left_join(blockprint); + + let result = join + .select(slot) + .filter(root.is_null()) + .order_by(slot.desc()) + .nullable() + .load::>(conn)?; + + Ok(result) +} + +/// Constructs a HashMap of `index` -> `best_guess` for each validator's latest proposal at or before +/// `target_slot`. +/// Inserts `"Unknown" if no prior proposals exist. +pub fn construct_validator_blockprints_at_slot( + conn: &mut PgConn, + target_slot: WatchSlot, + slots_per_epoch: u64, +) -> Result, Error> { + use self::blockprint::dsl::{blockprint, slot}; + + let total_validators = + database::count_validators_activated_before_slot(conn, target_slot, slots_per_epoch)? + as usize; + + let mut blockprint_map = HashMap::with_capacity(total_validators); + + let latest_proposals = + database::get_all_validators_latest_proposer_info_at_slot(conn, target_slot)?; + + let latest_proposal_slots: Vec = latest_proposals.clone().into_keys().collect(); + + let result = blockprint + .filter(slot.eq_any(latest_proposal_slots)) + .load::(conn)?; + + // Insert the validators which have available blockprints. + for print in result { + if let Some(proposer) = latest_proposals.get(&print.slot) { + blockprint_map.insert(*proposer, print.best_guess); + } + } + + // Insert the rest of the unknown validators. + for validator_index in 0..total_validators { + blockprint_map + .entry(validator_index as i32) + .or_insert_with(|| "Unknown".to_string()); + } + + Ok(blockprint_map) +} + +/// Counts the number of occurances of each `client` present in the `validators` table at or before some +/// `target_slot`. 
+pub fn get_validators_clients_at_slot( + conn: &mut PgConn, + target_slot: WatchSlot, + slots_per_epoch: u64, +) -> Result, Error> { + let mut client_map: HashMap = HashMap::new(); + + // This includes all validators which were activated at or before `target_slot`. + let validator_blockprints = + construct_validator_blockprints_at_slot(conn, target_slot, slots_per_epoch)?; + + for client in list_consensus_clients() { + let count = validator_blockprints + .iter() + .filter(|(_, v)| (*v).clone() == client) + .count(); + client_map.insert(client, count); + } + + Ok(client_map) +} diff --git a/watch/src/blockprint/mod.rs b/watch/src/blockprint/mod.rs new file mode 100644 index 000000000..b8107e5bf --- /dev/null +++ b/watch/src/blockprint/mod.rs @@ -0,0 +1,149 @@ +pub mod database; +pub mod server; +pub mod updater; + +mod config; + +use crate::database::WatchSlot; + +use eth2::SensitiveUrl; +use reqwest::{Client, Response, Url}; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::time::Duration; +use types::Slot; + +pub use config::Config; +pub use database::{ + get_blockprint_by_root, get_blockprint_by_slot, get_highest_blockprint, get_lowest_blockprint, + get_unknown_blockprint, get_validators_clients_at_slot, insert_batch_blockprint, + list_consensus_clients, WatchBlockprint, +}; +pub use server::blockprint_routes; + +const TIMEOUT: Duration = Duration::from_secs(50); + +#[derive(Debug)] +pub enum Error { + Reqwest(reqwest::Error), + Url(url::ParseError), + BlockprintNotSynced, + Other(String), +} + +impl From for Error { + fn from(e: reqwest::Error) -> Self { + Error::Reqwest(e) + } +} + +impl From for Error { + fn from(e: url::ParseError) -> Self { + Error::Url(e) + } +} + +pub struct WatchBlockprintClient { + pub client: Client, + pub server: SensitiveUrl, + pub username: Option, + pub password: Option, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct BlockprintSyncingResponse { + pub greatest_block_slot: Slot, + pub synced: bool, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct BlockprintResponse { + pub proposer_index: i32, + pub slot: Slot, + pub best_guess_single: String, +} + +impl WatchBlockprintClient { + async fn get(&self, url: Url) -> Result { + let mut builder = self.client.get(url).timeout(TIMEOUT); + if let Some(username) = &self.username { + builder = builder.basic_auth(username, self.password.as_ref()); + } + let response = builder.send().await.map_err(Error::Reqwest)?; + + if !response.status().is_success() { + return Err(Error::Other(response.text().await?)); + } + + Ok(response) + } + + // Returns the `greatest_block_slot` as reported by the Blockprint server. + // Will error if the Blockprint server is not synced. + #[allow(dead_code)] + pub async fn ensure_synced(&self) -> Result { + let url = self.server.full.join("sync/")?.join("status")?; + + let response = self.get(url).await?; + + let result = response.json::().await?; + if !result.synced { + return Err(Error::BlockprintNotSynced); + } + + Ok(result.greatest_block_slot) + } + + // Pulls the latest blockprint for all validators. + #[allow(dead_code)] + pub async fn blockprint_all_validators( + &self, + highest_validator: i32, + ) -> Result, Error> { + let url = self + .server + .full + .join("validator/")? + .join("blocks/")? 
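The `get` helper above combines an optional HTTP basic-auth header with a per-request timeout. A standalone sketch of the same reqwest calls; the URL and credentials below are placeholders, not values from this patch:

```rust
use reqwest::{Client, Url};
use std::time::Duration;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let client = Client::new();
    // Placeholder endpoint; the real base URL comes from `blockprint::Config::url`.
    let url = Url::parse("http://localhost:8989/sync/status").expect("valid URL");
    let username: Option<String> = Some("watch".to_string());
    let password: Option<String> = None;

    let mut builder = client.get(url).timeout(Duration::from_secs(50));
    // Only attach credentials when a username is configured, mirroring the client above.
    if let Some(username) = &username {
        builder = builder.basic_auth(username, password.as_ref());
    }

    let response = builder.send().await?;
    println!("blockprint responded with status {}", response.status());
    Ok(())
}
```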
+ .join("latest")?; + + let response = self.get(url).await?; + + let mut result = response.json::>().await?; + result.retain(|print| print.proposer_index <= highest_validator); + + let mut map: HashMap = HashMap::with_capacity(result.len()); + for print in result { + map.insert(print.proposer_index, print.best_guess_single); + } + + Ok(map) + } + + // Construct a request to the Blockprint server for a range of slots between `start_slot` and + // `end_slot`. + pub async fn get_blockprint( + &self, + start_slot: Slot, + end_slot: Slot, + ) -> Result, Error> { + let url = self + .server + .full + .join("blocks/")? + .join(&format!("{start_slot}/{end_slot}"))?; + + let response = self.get(url).await?; + + let result = response + .json::>() + .await? + .iter() + .map(|response| WatchBlockprint { + slot: WatchSlot::from_slot(response.slot), + best_guess: response.best_guess_single.clone(), + }) + .collect(); + Ok(result) + } +} diff --git a/watch/src/blockprint/server.rs b/watch/src/blockprint/server.rs new file mode 100644 index 000000000..488af1571 --- /dev/null +++ b/watch/src/blockprint/server.rs @@ -0,0 +1,31 @@ +use crate::blockprint::database::{ + get_blockprint_by_root, get_blockprint_by_slot, WatchBlockprint, +}; +use crate::database::{get_connection, PgPool, WatchHash, WatchSlot}; +use crate::server::Error; + +use axum::{extract::Path, routing::get, Extension, Json, Router}; +use eth2::types::BlockId; +use std::str::FromStr; + +pub async fn get_blockprint( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? { + BlockId::Root(root) => Ok(Json(get_blockprint_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(get_blockprint_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub fn blockprint_routes() -> Router { + Router::new().route("/v1/blocks/:block/blockprint", get(get_blockprint)) +} diff --git a/watch/src/blockprint/updater.rs b/watch/src/blockprint/updater.rs new file mode 100644 index 000000000..28c318455 --- /dev/null +++ b/watch/src/blockprint/updater.rs @@ -0,0 +1,172 @@ +use crate::database::{self, Error as DbError}; +use crate::updater::{Error, UpdateHandler}; + +use eth2::types::EthSpec; +use log::{debug, error, warn}; + +const MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT: u64 = 1600; + +impl UpdateHandler { + /// Forward fills the `blockprint` table starting from the entry with the + /// highest slot. + /// + /// It constructs a request to the `get_blockprint` API with: + /// `start_slot` -> highest filled `blockprint` + 1 (or lowest beacon block) + /// `end_slot` -> highest beacon block + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT`. + pub async fn fill_blockprint(&mut self) -> Result<(), Error> { + // Ensure blockprint in enabled. + if let Some(blockprint_client) = &self.blockprint { + let mut conn = database::get_connection(&self.pool)?; + + // Get the slot of the highest entry in the `blockprint` table. + let mut start_slot = if let Some(highest_filled_slot) = + database::get_highest_blockprint(&mut conn)?.map(|print| print.slot) + { + highest_filled_slot.as_slot() + 1 + } else { + // No entries in the `blockprint` table. Use `beacon_blocks` instead. 
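The `blockprint_routes` router above follows the same shape as the rest of the `watch` HTTP API: a `:block` path parameter extracted with `Path` and shared state injected through an `Extension` layer. A minimal sketch assuming the same axum 0.6-era API these modules import; the state type and response body are placeholders (the real handlers share a `PgPool`), and `127.0.0.1:5059` is the server's default listen address:

```rust
use axum::{extract::Path, routing::get, Extension, Json, Router};

#[derive(Clone)]
struct AppState {
    greeting: String,
}

async fn get_block_stub(
    Path(block): Path<String>,
    Extension(state): Extension<AppState>,
) -> Json<String> {
    Json(format!("{}: {block}", state.greeting))
}

#[tokio::main]
async fn main() {
    let app = Router::new()
        .route("/v1/blocks/:block/blockprint", get(get_block_stub))
        .layer(Extension(AppState {
            greeting: "blockprint requested for block".to_string(),
        }));

    axum::Server::bind(&"127.0.0.1:5059".parse().unwrap())
        .serve(app.into_make_service())
        .await
        .unwrap();
}
```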
+ if let Some(lowest_beacon_block) = + database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot) + { + lowest_beacon_block.as_slot() + } else { + // There are no blocks in the database, do not fill the `blockprint` table. + warn!("Refusing to fill blockprint as there are no blocks in the database"); + return Ok(()); + } + }; + + // The `blockprint` API cannot accept `start_slot == 0`. + if start_slot == 0 { + start_slot += 1; + } + + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + let mut end_slot = highest_beacon_block.as_slot(); + + if start_slot > end_slot { + debug!("Blockprint is up to date with the head of the database"); + return Ok(()); + } + + // Ensure the size of the request does not exceed the maximum allowed value. + if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT) { + end_slot = start_slot + MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT + } + + let mut prints = blockprint_client + .get_blockprint(start_slot, end_slot) + .await?; + + // Ensure the prints returned from blockprint are for slots which exist in the + // `beacon_blocks` table. + prints.retain(|print| { + database::get_beacon_block_by_slot(&mut conn, print.slot) + .ok() + .flatten() + .is_some() + }); + + database::insert_batch_blockprint(&mut conn, prints)?; + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in either + // `blockprint` table. This is a critical failure. It usually means + // someone has manually tampered with the database tables and should not occur during + // normal operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + } + + Ok(()) + } + + /// Backfill the `blockprint` table starting from the entry with the lowest slot. + /// + /// It constructs a request to the `get_blockprint` API with: + /// `start_slot` -> lowest_beacon_block + /// `end_slot` -> lowest filled `blockprint` - 1 (or highest beacon block) + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT`. + pub async fn backfill_blockprint(&mut self) -> Result<(), Error> { + // Ensure blockprint in enabled. + if let Some(blockprint_client) = &self.blockprint { + let mut conn = database::get_connection(&self.pool)?; + let max_blockprint_backfill = + self.config.max_backfill_size_epochs * self.slots_per_epoch; + + // Get the slot of the lowest entry in the `blockprint` table. + let end_slot = if let Some(lowest_filled_slot) = + database::get_lowest_blockprint(&mut conn)?.map(|print| print.slot) + { + lowest_filled_slot.as_slot().saturating_sub(1_u64) + } else { + // No entries in the `blockprint` table. Use `beacon_blocks` instead. + if let Some(highest_beacon_block) = + database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot) + { + highest_beacon_block.as_slot() + } else { + // There are no blocks in the database, do not backfill the `blockprint` table. + warn!("Refusing to backfill blockprint as there are no blocks in the database"); + return Ok(()); + } + }; + + if end_slot <= 1 { + debug!("Blockprint backfill is complete"); + return Ok(()); + } + + if let Some(lowest_block_slot) = database::get_lowest_beacon_block(&mut conn)? 
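Reduced to plain integers, the forward-fill range selection above (resume one slot after the highest filled entry, never start at slot 0, cap the request at `MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT`) looks like the sketch below; the slot numbers in the assertions are made up:

```rust
const MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT: u64 = 1600;

/// Returns `Some((start_slot, end_slot))` for the next request, or `None` if already up to date.
fn next_fill_range(highest_filled: u64, highest_block: u64) -> Option<(u64, u64)> {
    // Resume one slot after the highest filled entry; the API rejects `start_slot == 0`.
    let start_slot = (highest_filled + 1).max(1);
    let mut end_slot = highest_block;

    if start_slot > end_slot {
        return None;
    }

    // Never request more than `MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT` slots at once.
    if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT) {
        end_slot = start_slot + MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT;
    }

    Some((start_slot, end_slot))
}

fn main() {
    // Far behind the head: the request is capped at 1600 slots.
    assert_eq!(next_fill_range(0, 10_000), Some((1, 1_601)));
    // Close to the head: the request covers the remaining gap.
    assert_eq!(next_fill_range(9_000, 10_000), Some((9_001, 10_000)));
    // Already up to date: nothing to request.
    assert_eq!(next_fill_range(10_000, 10_000), None);
}
```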
{ + let mut start_slot = lowest_block_slot.slot.as_slot(); + + if start_slot >= end_slot { + debug!("Blockprint are up to date with the base of the database"); + return Ok(()); + } + + // Ensure that the request range does not exceed `max_blockprint_backfill` or + // `MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT`. + if start_slot < end_slot.saturating_sub(max_blockprint_backfill) { + start_slot = end_slot.saturating_sub(max_blockprint_backfill) + } + + if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT) { + start_slot = end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT) + } + + // The `blockprint` API cannot accept `start_slot == 0`. + if start_slot == 0 { + start_slot += 1 + } + + let mut prints = blockprint_client + .get_blockprint(start_slot, end_slot) + .await?; + + // Ensure the prints returned from blockprint are for slots which exist in the + // `beacon_blocks` table. + prints.retain(|print| { + database::get_beacon_block_by_slot(&mut conn, print.slot) + .ok() + .flatten() + .is_some() + }); + + database::insert_batch_blockprint(&mut conn, prints)?; + } else { + // There are no blocks in the `beacon_blocks` database, but there are entries in the `blockprint` + // table. This is a critical failure. It usually means someone has manually tampered with the + // database tables and should not occur during normal operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + } + Ok(()) + } +} diff --git a/watch/src/cli.rs b/watch/src/cli.rs new file mode 100644 index 000000000..a8e5f3716 --- /dev/null +++ b/watch/src/cli.rs @@ -0,0 +1,55 @@ +use crate::{config::Config, logger, server, updater}; +use clap::{App, Arg}; +use tokio::sync::oneshot; + +pub const SERVE: &str = "serve"; +pub const RUN_UPDATER: &str = "run-updater"; +pub const CONFIG: &str = "config"; + +fn run_updater<'a, 'b>() -> App<'a, 'b> { + App::new(RUN_UPDATER).setting(clap::AppSettings::ColoredHelp) +} + +fn serve<'a, 'b>() -> App<'a, 'b> { + App::new(SERVE).setting(clap::AppSettings::ColoredHelp) +} + +pub fn app<'a, 'b>() -> App<'a, 'b> { + App::new("beacon_watch_daemon") + .author("Sigma Prime ") + .setting(clap::AppSettings::ColoredHelp) + .arg( + Arg::with_name(CONFIG) + .long(CONFIG) + .value_name("PATH_TO_CONFIG") + .help("Path to configuration file") + .takes_value(true) + .global(true), + ) + .subcommand(run_updater()) + .subcommand(serve()) +} + +pub async fn run() -> Result<(), String> { + let matches = app().get_matches(); + + let config = match matches.value_of(CONFIG) { + Some(path) => Config::load_from_file(path.to_string())?, + None => Config::default(), + }; + + logger::init_logger(&config.log_level); + + match matches.subcommand() { + (RUN_UPDATER, Some(_)) => updater::run_updater(config) + .await + .map_err(|e| format!("Failure: {:?}", e)), + (SERVE, Some(_)) => { + let (_shutdown_tx, shutdown_rx) = oneshot::channel(); + server::serve(config, shutdown_rx) + .await + .map_err(|e| format!("Failure: {:?}", e)) + } + _ => Err("Unsupported subcommand. 
See --help".into()), + } +} diff --git a/watch/src/client.rs b/watch/src/client.rs new file mode 100644 index 000000000..43aaccde3 --- /dev/null +++ b/watch/src/client.rs @@ -0,0 +1,178 @@ +use crate::block_packing::WatchBlockPacking; +use crate::block_rewards::WatchBlockRewards; +use crate::database::models::{ + WatchBeaconBlock, WatchCanonicalSlot, WatchProposerInfo, WatchValidator, +}; +use crate::suboptimal_attestations::WatchAttestation; + +use eth2::types::BlockId; +use reqwest::Client; +use serde::de::DeserializeOwned; +use types::Hash256; +use url::Url; + +#[derive(Debug)] +pub enum Error { + Reqwest(reqwest::Error), + Url(url::ParseError), +} + +impl From for Error { + fn from(e: reqwest::Error) -> Self { + Error::Reqwest(e) + } +} + +impl From for Error { + fn from(e: url::ParseError) -> Self { + Error::Url(e) + } +} + +pub struct WatchHttpClient { + pub client: Client, + pub server: Url, +} + +impl WatchHttpClient { + async fn get_opt(&self, url: Url) -> Result, Error> { + let response = self.client.get(url).send().await?; + + if response.status() == 404 { + Ok(None) + } else { + response + .error_for_status()? + .json() + .await + .map_err(Into::into) + } + } + + pub async fn get_beacon_blocks( + &self, + block_id: BlockId, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? + .join(&block_id.to_string())?; + + self.get_opt(url).await + } + + pub async fn get_lowest_canonical_slot(&self) -> Result, Error> { + let url = self.server.join("v1/")?.join("slots/")?.join("lowest")?; + + self.get_opt(url).await + } + + pub async fn get_highest_canonical_slot(&self) -> Result, Error> { + let url = self.server.join("v1/")?.join("slots/")?.join("highest")?; + + self.get_opt(url).await + } + + pub async fn get_lowest_beacon_block(&self) -> Result, Error> { + let url = self.server.join("v1/")?.join("blocks/")?.join("lowest")?; + + self.get_opt(url).await + } + + pub async fn get_highest_beacon_block(&self) -> Result, Error> { + let url = self.server.join("v1/")?.join("blocks/")?.join("highest")?; + + self.get_opt(url).await + } + + pub async fn get_next_beacon_block( + &self, + parent: Hash256, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? + .join(&format!("{parent:?}/"))? + .join("next")?; + + self.get_opt(url).await + } + + pub async fn get_validator_by_index( + &self, + index: i32, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("validators/")? + .join(&format!("{index}"))?; + + self.get_opt(url).await + } + + pub async fn get_proposer_info( + &self, + block_id: BlockId, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? + .join(&format!("{block_id}/"))? + .join("proposer")?; + + self.get_opt(url).await + } + + pub async fn get_block_reward( + &self, + block_id: BlockId, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? + .join(&format!("{block_id}/"))? + .join("rewards")?; + + self.get_opt(url).await + } + + pub async fn get_block_packing( + &self, + block_id: BlockId, + ) -> Result, Error> { + let url = self + .server + .join("v1/")? + .join("blocks/")? + .join(&format!("{block_id}/"))? 
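A usage sketch for `WatchHttpClient`, assuming a `watch` server is listening on its default `127.0.0.1:5059` address and that the `watch` crate (plus `reqwest`, `url` and `tokio`) is available as a dependency. Note that the trailing slashes in the client's `join` chains matter: `Url::join` appends a segment only when the base path ends with `/`, otherwise it replaces the final segment.

```rust
use reqwest::Client;
use url::Url;
use watch::client::{Error, WatchHttpClient};

#[tokio::main]
async fn main() -> Result<(), Error> {
    let client = WatchHttpClient {
        client: Client::new(),
        server: Url::parse("http://127.0.0.1:5059/").expect("valid URL"),
    };

    // `get_opt` maps a 404 onto `Ok(None)`, so a missing block is not an error.
    if let Some(block) = client.get_highest_beacon_block().await? {
        println!("highest known block is at slot {}", block.slot);
    } else {
        println!("the watch database has no blocks yet");
    }

    Ok(())
}
```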
+ .join("packing")?; + + self.get_opt(url).await + } + + pub async fn get_all_validators(&self) -> Result>, Error> { + let url = self.server.join("v1/")?.join("validators/")?.join("all")?; + + self.get_opt(url).await + } + + pub async fn get_attestations( + &self, + epoch: i32, + ) -> Result>, Error> { + let url = self + .server + .join("v1/")? + .join("validators/")? + .join("all/")? + .join("attestation/")? + .join(&format!("{epoch}"))?; + + self.get_opt(url).await + } +} diff --git a/watch/src/config.rs b/watch/src/config.rs new file mode 100644 index 000000000..4e61f9df9 --- /dev/null +++ b/watch/src/config.rs @@ -0,0 +1,50 @@ +use crate::blockprint::Config as BlockprintConfig; +use crate::database::Config as DatabaseConfig; +use crate::server::Config as ServerConfig; +use crate::updater::Config as UpdaterConfig; + +use serde::{Deserialize, Serialize}; +use std::fs::File; + +pub const LOG_LEVEL: &str = "debug"; + +fn log_level() -> String { + LOG_LEVEL.to_string() +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + #[serde(default)] + pub blockprint: BlockprintConfig, + #[serde(default)] + pub database: DatabaseConfig, + #[serde(default)] + pub server: ServerConfig, + #[serde(default)] + pub updater: UpdaterConfig, + /// The minimum severity for log messages. + #[serde(default = "log_level")] + pub log_level: String, +} + +impl Default for Config { + fn default() -> Self { + Self { + blockprint: BlockprintConfig::default(), + database: DatabaseConfig::default(), + server: ServerConfig::default(), + updater: UpdaterConfig::default(), + log_level: log_level(), + } + } +} + +impl Config { + pub fn load_from_file(path_to_file: String) -> Result { + let file = + File::open(path_to_file).map_err(|e| format!("Error reading config file: {:?}", e))?; + let config: Config = serde_yaml::from_reader(file) + .map_err(|e| format!("Error parsing config file: {:?}", e))?; + Ok(config) + } +} diff --git a/watch/src/database/compat.rs b/watch/src/database/compat.rs new file mode 100644 index 000000000..b8cda0b21 --- /dev/null +++ b/watch/src/database/compat.rs @@ -0,0 +1,49 @@ +//! Implementations of PostgreSQL compatibility traits. +use crate::database::watch_types::{WatchHash, WatchPK, WatchSlot}; +use diesel::deserialize::{self, FromSql}; +use diesel::pg::{Pg, PgValue}; +use diesel::serialize::{self, Output, ToSql}; +use diesel::sql_types::{Binary, Integer}; + +use std::convert::TryFrom; + +macro_rules! impl_to_from_sql_int { + ($type:ty) => { + impl ToSql for $type + where + i32: ToSql, + { + fn to_sql<'a>(&'a self, out: &mut Output<'a, '_, Pg>) -> serialize::Result { + let v = i32::try_from(self.as_u64()).map_err(|e| Box::new(e))?; + >::to_sql(&v, &mut out.reborrow()) + } + } + + impl FromSql for $type { + fn from_sql(bytes: PgValue<'_>) -> deserialize::Result { + Ok(Self::new(i32::from_sql(bytes)? as u64)) + } + } + }; +} + +macro_rules! 
impl_to_from_sql_binary { + ($type:ty) => { + impl ToSql for $type { + fn to_sql<'a>(&'a self, out: &mut Output<'a, '_, Pg>) -> serialize::Result { + let b = self.as_bytes(); + <&[u8] as ToSql>::to_sql(&b, &mut out.reborrow()) + } + } + + impl FromSql for $type { + fn from_sql(bytes: PgValue<'_>) -> deserialize::Result { + Self::from_bytes(bytes.as_bytes()).map_err(|e| e.to_string().into()) + } + } + }; +} + +impl_to_from_sql_int!(WatchSlot); +impl_to_from_sql_binary!(WatchHash); +impl_to_from_sql_binary!(WatchPK); diff --git a/watch/src/database/config.rs b/watch/src/database/config.rs new file mode 100644 index 000000000..dc0c70832 --- /dev/null +++ b/watch/src/database/config.rs @@ -0,0 +1,74 @@ +use serde::{Deserialize, Serialize}; + +pub const USER: &str = "postgres"; +pub const PASSWORD: &str = "postgres"; +pub const DBNAME: &str = "dev"; +pub const DEFAULT_DBNAME: &str = "postgres"; +pub const HOST: &str = "localhost"; +pub const fn port() -> u16 { + 5432 +} +pub const fn connect_timeout_millis() -> u64 { + 2_000 // 2s +} + +fn user() -> String { + USER.to_string() +} + +fn password() -> String { + PASSWORD.to_string() +} + +fn dbname() -> String { + DBNAME.to_string() +} + +fn default_dbname() -> String { + DEFAULT_DBNAME.to_string() +} + +fn host() -> String { + HOST.to_string() +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + #[serde(default = "user")] + pub user: String, + #[serde(default = "password")] + pub password: String, + #[serde(default = "dbname")] + pub dbname: String, + #[serde(default = "default_dbname")] + pub default_dbname: String, + #[serde(default = "host")] + pub host: String, + #[serde(default = "port")] + pub port: u16, + #[serde(default = "connect_timeout_millis")] + pub connect_timeout_millis: u64, +} + +impl Default for Config { + fn default() -> Self { + Self { + user: user(), + password: password(), + dbname: dbname(), + default_dbname: default_dbname(), + host: host(), + port: port(), + connect_timeout_millis: connect_timeout_millis(), + } + } +} + +impl Config { + pub fn build_database_url(&self) -> String { + format!( + "postgres://{}:{}@{}:{}/{}", + self.user, self.password, self.host, self.port, self.dbname + ) + } +} diff --git a/watch/src/database/error.rs b/watch/src/database/error.rs new file mode 100644 index 000000000..8c5088fa1 --- /dev/null +++ b/watch/src/database/error.rs @@ -0,0 +1,55 @@ +use bls::Error as BlsError; +use diesel::result::{ConnectionError, Error as PgError}; +use eth2::SensitiveError; +use r2d2::Error as PoolError; +use std::fmt; +use types::BeaconStateError; + +#[derive(Debug)] +pub enum Error { + BeaconState(BeaconStateError), + Database(PgError), + DatabaseCorrupted, + InvalidSig(BlsError), + PostgresConnection(ConnectionError), + Pool(PoolError), + SensitiveUrl(SensitiveError), + InvalidRoot, + Other(String), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +impl From for Error { + fn from(e: BeaconStateError) -> Self { + Error::BeaconState(e) + } +} + +impl From for Error { + fn from(e: ConnectionError) -> Self { + Error::PostgresConnection(e) + } +} + +impl From for Error { + fn from(e: PgError) -> Self { + Error::Database(e) + } +} + +impl From for Error { + fn from(e: PoolError) -> Self { + Error::Pool(e) + } +} + +impl From for Error { + fn from(e: BlsError) -> Self { + Error::InvalidSig(e) + } +} diff --git a/watch/src/database/mod.rs b/watch/src/database/mod.rs new file mode 100644 index 
000000000..b9a7a900a --- /dev/null +++ b/watch/src/database/mod.rs @@ -0,0 +1,782 @@ +mod config; +mod error; + +pub mod compat; +pub mod models; +pub mod schema; +pub mod utils; +pub mod watch_types; + +use self::schema::{ + active_config, beacon_blocks, canonical_slots, proposer_info, suboptimal_attestations, + validators, +}; + +use diesel::dsl::max; +use diesel::pg::PgConnection; +use diesel::prelude::*; +use diesel::r2d2::{Builder, ConnectionManager, Pool, PooledConnection}; +use diesel::upsert::excluded; +use log::{debug, info}; +use std::collections::HashMap; +use std::time::Instant; +use types::{EthSpec, SignedBeaconBlock}; + +pub use self::error::Error; +pub use self::models::{WatchBeaconBlock, WatchCanonicalSlot, WatchProposerInfo, WatchValidator}; +pub use self::watch_types::{WatchHash, WatchPK, WatchSlot}; + +pub use crate::block_rewards::{ + get_block_rewards_by_root, get_block_rewards_by_slot, get_highest_block_rewards, + get_lowest_block_rewards, get_unknown_block_rewards, insert_batch_block_rewards, + WatchBlockRewards, +}; + +pub use crate::block_packing::{ + get_block_packing_by_root, get_block_packing_by_slot, get_highest_block_packing, + get_lowest_block_packing, get_unknown_block_packing, insert_batch_block_packing, + WatchBlockPacking, +}; + +pub use crate::suboptimal_attestations::{ + get_all_suboptimal_attestations_for_epoch, get_attestation_by_index, get_attestation_by_pubkey, + get_highest_attestation, get_lowest_attestation, insert_batch_suboptimal_attestations, + WatchAttestation, WatchSuboptimalAttestation, +}; + +pub use crate::blockprint::{ + get_blockprint_by_root, get_blockprint_by_slot, get_highest_blockprint, get_lowest_blockprint, + get_unknown_blockprint, get_validators_clients_at_slot, insert_batch_blockprint, + WatchBlockprint, +}; + +pub use config::Config; + +/// Batch inserts cannot exceed a certain size. +/// See https://github.com/diesel-rs/diesel/issues/2414. +/// For some reason, this seems to translate to 65535 / 5 (13107) records. +pub const MAX_SIZE_BATCH_INSERT: usize = 13107; + +pub type PgPool = Pool>; +pub type PgConn = PooledConnection>; + +/// Connect to a Postgresql database and build a connection pool. +pub fn build_connection_pool(config: &Config) -> Result { + let database_url = config.clone().build_database_url(); + info!("Building connection pool at: {database_url}"); + let pg = ConnectionManager::::new(&database_url); + Builder::new().build(pg).map_err(Error::Pool) +} + +/// Retrieve an idle connection from the pool. +pub fn get_connection(pool: &PgPool) -> Result { + pool.get().map_err(Error::Pool) +} + +/// Insert the active config into the database. This is used to check if the connected beacon node +/// is compatible with the database. These values will not change (except +/// `current_blockprint_checkpoint`). +pub fn insert_active_config( + conn: &mut PgConn, + new_config_name: String, + new_slots_per_epoch: u64, +) -> Result<(), Error> { + use self::active_config::dsl::*; + + diesel::insert_into(active_config) + .values(&vec![( + id.eq(1), + config_name.eq(new_config_name), + slots_per_epoch.eq(new_slots_per_epoch as i32), + )]) + .on_conflict_do_nothing() + .execute(conn)?; + + Ok(()) +} + +/// Get the active config from the database. +pub fn get_active_config(conn: &mut PgConn) -> Result, Error> { + use self::active_config::dsl::*; + Ok(active_config + .select((config_name, slots_per_epoch)) + .filter(id.eq(1)) + .first::<(String, i32)>(conn) + .optional()?) 
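A usage sketch for the pool helpers above, assuming a reachable Postgres instance that matches `database::Config::default()` (i.e. `postgres://postgres:postgres@localhost:5432/dev`) and the `watch` crate as a dependency:

```rust
use watch::database::{self, Config};

fn main() -> Result<(), database::Error> {
    let config = Config::default();

    // Build an r2d2 pool of Postgres connections...
    let pool = database::build_connection_pool(&config)?;

    // ...then check out an idle connection and run a query with it.
    let mut conn = database::get_connection(&pool)?;
    if let Some((config_name, slots_per_epoch)) = database::get_active_config(&mut conn)? {
        println!("database is synced for {config_name} with {slots_per_epoch} slots per epoch");
    }

    Ok(())
}
```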
+} + +/// +/// INSERT statements +/// + +/// Inserts a single row into the `canonical_slots` table. +/// If `new_slot.beacon_block` is `None`, the value in the row will be `null`. +/// +/// On a conflict, it will do nothing, leaving the old value. +pub fn insert_canonical_slot(conn: &mut PgConn, new_slot: WatchCanonicalSlot) -> Result<(), Error> { + diesel::insert_into(canonical_slots::table) + .values(&new_slot) + .on_conflict_do_nothing() + .execute(conn)?; + + debug!("Canonical slot inserted: {}", new_slot.slot); + Ok(()) +} + +pub fn insert_beacon_block( + conn: &mut PgConn, + block: SignedBeaconBlock, + root: WatchHash, +) -> Result<(), Error> { + use self::canonical_slots::dsl::{beacon_block, slot as canonical_slot}; + + let block_message = block.message(); + + // Pull out relevant values from the block. + let slot = WatchSlot::from_slot(block.slot()); + let parent_root = WatchHash::from_hash(block.parent_root()); + let proposer_index = block_message.proposer_index() as i32; + let graffiti = block_message.body().graffiti().as_utf8_lossy(); + let attestation_count = block_message.body().attestations().len() as i32; + + let full_payload = block_message.execution_payload().ok(); + + let transaction_count: Option = if let Some(bellatrix_payload) = + full_payload.and_then(|payload| payload.execution_payload_merge().ok()) + { + Some(bellatrix_payload.transactions.len() as i32) + } else { + full_payload + .and_then(|payload| payload.execution_payload_capella().ok()) + .map(|payload| payload.transactions.len() as i32) + }; + + let withdrawal_count: Option = full_payload + .and_then(|payload| payload.execution_payload_capella().ok()) + .map(|payload| payload.withdrawals.len() as i32); + + let block_to_add = WatchBeaconBlock { + slot, + root, + parent_root, + attestation_count, + transaction_count, + withdrawal_count, + }; + + let proposer_info_to_add = WatchProposerInfo { + slot, + proposer_index, + graffiti, + }; + + // Update the canonical slots table. + diesel::update(canonical_slots::table) + .set(beacon_block.eq(root)) + .filter(canonical_slot.eq(slot)) + // Do not overwrite the value if it already exists. + .filter(beacon_block.is_null()) + .execute(conn)?; + + diesel::insert_into(beacon_blocks::table) + .values(block_to_add) + .on_conflict_do_nothing() + .execute(conn)?; + + diesel::insert_into(proposer_info::table) + .values(proposer_info_to_add) + .on_conflict_do_nothing() + .execute(conn)?; + + debug!("Beacon block inserted at slot: {slot}, root: {root}, parent: {parent_root}"); + Ok(()) +} + +/// Insert a validator into the `validators` table +/// +/// On a conflict, it will only overwrite `status`, `activation_epoch` and `exit_epoch`. +pub fn insert_validator(conn: &mut PgConn, validator: WatchValidator) -> Result<(), Error> { + use self::validators::dsl::*; + let new_index = validator.index; + let new_public_key = validator.public_key; + + diesel::insert_into(validators) + .values(validator) + .on_conflict(index) + .do_update() + .set(( + status.eq(excluded(status)), + activation_epoch.eq(excluded(activation_epoch)), + exit_epoch.eq(excluded(exit_epoch)), + )) + .execute(conn)?; + + debug!("Validator inserted, index: {new_index}, public_key: {new_public_key}"); + Ok(()) +} + +/// Insert a batch of values into the `validators` table. +/// +/// On a conflict, it will do nothing. +/// +/// Should not be used when updating validators. +/// Validators should be updated through the `insert_validator` function which contains the correct +/// `on_conflict` clauses. 
+pub fn insert_batch_validators( + conn: &mut PgConn, + all_validators: Vec, +) -> Result<(), Error> { + use self::validators::dsl::*; + + let mut count = 0; + + for chunk in all_validators.chunks(1000) { + count += diesel::insert_into(validators) + .values(chunk) + .on_conflict_do_nothing() + .execute(conn)?; + } + + debug!("Validators inserted, count: {count}"); + Ok(()) +} + +/// +/// SELECT statements +/// + +/// Selects a single row of the `canonical_slots` table corresponding to a given `slot_query`. +pub fn get_canonical_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical slot requested: {slot_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `canonical_slots` table corresponding to a given `root_query`. +/// Only returns the non-skipped slot which matches `root`. +pub fn get_canonical_slot_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .filter(root.eq(root_query)) + .filter(skipped.eq(false)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical root requested: {root_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `root` from a single row of the `canonical_slots` table corresponding to a given +/// `slot_query`. +#[allow(dead_code)] +pub fn get_root_at_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .select(root) + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical slot requested: {slot_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `slot` from the row of the `canonical_slots` table corresponding to the minimum value +/// of `slot`. +pub fn get_lowest_canonical_slot(conn: &mut PgConn) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical slot requested: lowest, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `slot` from the row of the `canonical_slots` table corresponding to the minimum value +/// of `slot` and where `skipped == false`. +pub fn get_lowest_non_skipped_canonical_slot( + conn: &mut PgConn, +) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .filter(skipped.eq(false)) + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical slot requested: lowest_non_skipped, time taken: {time_taken:?})"); + Ok(result) +} + +/// Select 'slot' from the row of the `canonical_slots` table corresponding to the maximum value +/// of `slot`. 
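The `insert_batch_*` helpers, like `insert_batch_validators` above, all share one shape: split the rows into fixed-size chunks so a single statement never exceeds Postgres' bind-parameter limit, then sum the per-chunk insert counts. A database-free sketch of that loop, with a closure standing in for the diesel insert:

```rust
/// Insert `rows` in chunks of `chunk_size`, returning the total number of rows inserted.
/// The closure stands in for one `diesel::insert_into(...).values(chunk).execute(conn)` call.
fn insert_in_chunks<T>(
    rows: &[T],
    chunk_size: usize,
    mut insert: impl FnMut(&[T]) -> usize,
) -> usize {
    let mut count = 0;
    for chunk in rows.chunks(chunk_size) {
        count += insert(chunk);
    }
    count
}

fn main() {
    let rows: Vec<u32> = (0..2_500).collect();
    // With a chunk size of 1_000 this performs three inserts: 1_000 + 1_000 + 500 rows.
    let inserted = insert_in_chunks(&rows, 1_000, |chunk| chunk.len());
    assert_eq!(inserted, 2_500);
}
```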
+pub fn get_highest_canonical_slot(conn: &mut PgConn) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .order_by(slot.desc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical slot requested: highest, time taken: {time_taken:?}"); + Ok(result) +} + +/// Select 'slot' from the row of the `canonical_slots` table corresponding to the maximum value +/// of `slot` and where `skipped == false`. +pub fn get_highest_non_skipped_canonical_slot( + conn: &mut PgConn, +) -> Result, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .filter(skipped.eq(false)) + .order_by(slot.desc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Canonical slot requested: highest_non_skipped, time taken: {time_taken:?}"); + Ok(result) +} + +/// Select all rows of the `canonical_slots` table where `slot >= `start_slot && slot <= +/// `end_slot`. +pub fn get_canonical_slots_by_range( + conn: &mut PgConn, + start_slot: WatchSlot, + end_slot: WatchSlot, +) -> Result>, Error> { + use self::canonical_slots::dsl::*; + let timer = Instant::now(); + + let result = canonical_slots + .filter(slot.ge(start_slot)) + .filter(slot.le(end_slot)) + .load::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!( + "Canonical slots by range requested, start_slot: {}, end_slot: {}, time_taken: {:?}", + start_slot.as_u64(), + end_slot.as_u64(), + time_taken + ); + Ok(result) +} + +/// Selects `root` from all rows of the `canonical_slots` table which have `beacon_block == null` +/// and `skipped == false` +pub fn get_unknown_canonical_blocks(conn: &mut PgConn) -> Result, Error> { + use self::canonical_slots::dsl::*; + + let result = canonical_slots + .select(root) + .filter(beacon_block.is_null()) + .filter(skipped.eq(false)) + .order_by(slot.desc()) + .load::(conn)?; + + Ok(result) +} + +/// Selects the row from the `beacon_blocks` table where `slot` is minimum. +pub fn get_lowest_beacon_block(conn: &mut PgConn) -> Result, Error> { + use self::beacon_blocks::dsl::*; + let timer = Instant::now(); + + let result = beacon_blocks + .order_by(slot.asc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Beacon block requested: lowest, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects the row from the `beacon_blocks` table where `slot` is maximum. +pub fn get_highest_beacon_block(conn: &mut PgConn) -> Result, Error> { + use self::beacon_blocks::dsl::*; + let timer = Instant::now(); + + let result = beacon_blocks + .order_by(slot.desc()) + .limit(1) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Beacon block requested: highest, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row from the `beacon_blocks` table corresponding to a given `root_query`. +pub fn get_beacon_block_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result, Error> { + use self::beacon_blocks::dsl::*; + let timer = Instant::now(); + + let result = beacon_blocks + .filter(root.eq(root_query)) + .first::(conn) + .optional()?; + let time_taken = timer.elapsed(); + debug!("Beacon block requested: {root_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row from the `beacon_blocks` table corresponding to a given `slot_query`. 
+pub fn get_beacon_block_by_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::beacon_blocks::dsl::*; + let timer = Instant::now(); + + let result = beacon_blocks + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + let time_taken = timer.elapsed(); + debug!("Beacon block requested: {slot_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects the row from the `beacon_blocks` table where `parent_root` equals the given `parent`. +/// This fetches the next block in the database. +/// +/// Will return `Ok(None)` if there are no matching blocks (e.g. the tip of the chain). +pub fn get_beacon_block_with_parent( + conn: &mut PgConn, + parent: WatchHash, +) -> Result, Error> { + use self::beacon_blocks::dsl::*; + let timer = Instant::now(); + + let result = beacon_blocks + .filter(parent_root.eq(parent)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Next beacon block requested: {parent}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Select all rows of the `beacon_blocks` table where `slot >= `start_slot && slot <= +/// `end_slot`. +pub fn get_beacon_blocks_by_range( + conn: &mut PgConn, + start_slot: WatchSlot, + end_slot: WatchSlot, +) -> Result>, Error> { + use self::beacon_blocks::dsl::*; + let timer = Instant::now(); + + let result = beacon_blocks + .filter(slot.ge(start_slot)) + .filter(slot.le(end_slot)) + .load::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Beacon blocks by range requested, start_slot: {start_slot}, end_slot: {end_slot}, time_taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `proposer_info` table corresponding to a given `root_query`. +pub fn get_proposer_info_by_root( + conn: &mut PgConn, + root_query: WatchHash, +) -> Result, Error> { + use self::beacon_blocks::dsl::{beacon_blocks, root}; + use self::proposer_info::dsl::*; + let timer = Instant::now(); + + let join = beacon_blocks.inner_join(proposer_info); + + let result = join + .select((slot, proposer_index, graffiti)) + .filter(root.eq(root_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Proposer info requested for block: {root_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row of the `proposer_info` table corresponding to a given `slot_query`. +pub fn get_proposer_info_by_slot( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result, Error> { + use self::proposer_info::dsl::*; + let timer = Instant::now(); + + let result = proposer_info + .filter(slot.eq(slot_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Proposer info requested for slot: {slot_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects multiple rows of the `proposer_info` table between `start_slot` and `end_slot`. +/// Selects a single row of the `proposer_info` table corresponding to a given `slot_query`. 
+#[allow(dead_code)] +pub fn get_proposer_info_by_range( + conn: &mut PgConn, + start_slot: WatchSlot, + end_slot: WatchSlot, +) -> Result>, Error> { + use self::proposer_info::dsl::*; + let timer = Instant::now(); + + let result = proposer_info + .filter(slot.ge(start_slot)) + .filter(slot.le(end_slot)) + .load::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!( + "Proposer info requested for range: {start_slot} to {end_slot}, time taken: {time_taken:?}" + ); + Ok(result) +} + +pub fn get_validators_latest_proposer_info( + conn: &mut PgConn, + indices_query: Vec, +) -> Result, Error> { + use self::proposer_info::dsl::*; + + let proposers = proposer_info + .filter(proposer_index.eq_any(indices_query)) + .load::(conn)?; + + let mut result = HashMap::new(); + for proposer in proposers { + result + .entry(proposer.proposer_index) + .or_insert_with(|| proposer.clone()); + let entry = result + .get_mut(&proposer.proposer_index) + .ok_or_else(|| Error::Other("An internal error occured".to_string()))?; + if proposer.slot > entry.slot { + entry.slot = proposer.slot + } + } + + Ok(result) +} + +/// Selects the max(`slot`) and `proposer_index` of each unique index in the +/// `proposer_info` table and returns them formatted as a `HashMap`. +/// Only returns rows which have `slot <= target_slot`. +/// +/// Ideally, this would return the full row, but I have not found a way to do that without using +/// a much more expensive SQL query. +pub fn get_all_validators_latest_proposer_info_at_slot( + conn: &mut PgConn, + target_slot: WatchSlot, +) -> Result, Error> { + use self::proposer_info::dsl::*; + + let latest_proposals: Vec<(i32, Option)> = proposer_info + .group_by(proposer_index) + .select((proposer_index, max(slot))) + .filter(slot.le(target_slot)) + .load::<(i32, Option)>(conn)?; + + let mut result = HashMap::new(); + + for proposal in latest_proposals { + if let Some(latest_slot) = proposal.1 { + result.insert(latest_slot, proposal.0); + } + } + + Ok(result) +} + +/// Selects a single row from the `validators` table corresponding to a given +/// `validator_index_query`. +pub fn get_validator_by_index( + conn: &mut PgConn, + validator_index_query: i32, +) -> Result, Error> { + use self::validators::dsl::*; + let timer = Instant::now(); + + let result = validators + .filter(index.eq(validator_index_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Validator requested: {validator_index_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row from the `validators` table corresponding to a given +/// `public_key_query`. +pub fn get_validator_by_public_key( + conn: &mut PgConn, + public_key_query: WatchPK, +) -> Result, Error> { + use self::validators::dsl::*; + let timer = Instant::now(); + + let result = validators + .filter(public_key.eq(public_key_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Validator requested: {public_key_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects all rows from the `validators` table which have an `index` contained in +/// the `indices_query`. 
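The fold inside `get_validators_latest_proposer_info` above keeps, for each proposer index, the row with the highest slot. A pure-Rust sketch of that reduction, using `(index, slot)` pairs in place of full `WatchProposerInfo` rows:

```rust
use std::collections::HashMap;

/// For each proposer index, keep the proposal with the highest slot.
fn latest_per_proposer(rows: &[(i32, u64)]) -> HashMap<i32, u64> {
    let mut latest = HashMap::new();
    for &(proposer_index, slot) in rows {
        latest
            .entry(proposer_index)
            .and_modify(|existing| {
                if slot > *existing {
                    *existing = slot;
                }
            })
            .or_insert(slot);
    }
    latest
}

fn main() {
    let rows: [(i32, u64); 3] = [(7, 32), (7, 96), (11, 64)];
    let latest = latest_per_proposer(&rows);
    assert_eq!(latest[&7], 96);
    assert_eq!(latest[&11], 64);
}
```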
+#[allow(dead_code)] +pub fn get_validators_by_indices( + conn: &mut PgConn, + indices_query: Vec, +) -> Result, Error> { + use self::validators::dsl::*; + let timer = Instant::now(); + + let query_len = indices_query.len(); + let result = validators + .filter(index.eq_any(indices_query)) + .load::(conn)?; + + let time_taken = timer.elapsed(); + debug!("{query_len} validators requested, time taken: {time_taken:?}"); + Ok(result) +} + +// Selects all rows from the `validators` table. +pub fn get_all_validators(conn: &mut PgConn) -> Result, Error> { + use self::validators::dsl::*; + let timer = Instant::now(); + + let result = validators.load::(conn)?; + + let time_taken = timer.elapsed(); + debug!("All validators requested, time taken: {time_taken:?}"); + Ok(result) +} + +/// Counts the number of rows in the `validators` table. +#[allow(dead_code)] +pub fn count_validators(conn: &mut PgConn) -> Result { + use self::validators::dsl::*; + + validators.count().get_result(conn).map_err(Error::Database) +} + +/// Counts the number of rows in the `validators` table where +/// `activation_epoch <= target_slot.epoch()`. +pub fn count_validators_activated_before_slot( + conn: &mut PgConn, + target_slot: WatchSlot, + slots_per_epoch: u64, +) -> Result { + use self::validators::dsl::*; + + let target_epoch = target_slot.epoch(slots_per_epoch); + + validators + .count() + .filter(activation_epoch.le(target_epoch.as_u64() as i32)) + .get_result(conn) + .map_err(Error::Database) +} + +/// +/// DELETE statements. +/// + +/// Deletes all rows of the `canonical_slots` table which have `slot` greater than `slot_query`. +/// +/// Due to the ON DELETE CASCADE clause present in the database migration SQL, deleting rows from +/// `canonical_slots` will delete all corresponding rows in `beacon_blocks, `block_rewards`, +/// `block_packing` and `proposer_info`. +pub fn delete_canonical_slots_above( + conn: &mut PgConn, + slot_query: WatchSlot, +) -> Result { + use self::canonical_slots::dsl::*; + + let result = diesel::delete(canonical_slots) + .filter(slot.gt(slot_query)) + .execute(conn)?; + + debug!("Deleted canonical slots above {slot_query}: {result} rows deleted"); + Ok(result) +} + +/// Deletes all rows of the `suboptimal_attestations` table which have `epoch_start_slot` greater +/// than `epoch_start_slot_query`. 
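The epoch comparison in `count_validators_activated_before_slot` above relies on `WatchSlot::epoch`, which is plain integer division of the slot by `slots_per_epoch` (32 on mainnet). For reference:

```rust
/// `WatchSlot::epoch` delegates to `Slot::epoch`: epoch = slot / slots_per_epoch.
fn epoch_of(slot: u64, slots_per_epoch: u64) -> u64 {
    slot / slots_per_epoch
}

fn main() {
    assert_eq!(epoch_of(0, 32), 0);
    assert_eq!(epoch_of(31, 32), 0);
    assert_eq!(epoch_of(32, 32), 1);
    assert_eq!(epoch_of(6_400_000, 32), 200_000);
}
```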
+pub fn delete_suboptimal_attestations_above( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result { + use self::suboptimal_attestations::dsl::*; + + let result = diesel::delete(suboptimal_attestations) + .filter(epoch_start_slot.gt(epoch_start_slot_query)) + .execute(conn)?; + + debug!("Deleted attestations above: {epoch_start_slot_query}, rows deleted: {result}"); + Ok(result) +} diff --git a/watch/src/database/models.rs b/watch/src/database/models.rs new file mode 100644 index 000000000..f42444d66 --- /dev/null +++ b/watch/src/database/models.rs @@ -0,0 +1,67 @@ +use crate::database::{ + schema::{beacon_blocks, canonical_slots, proposer_info, validators}, + watch_types::{WatchHash, WatchPK, WatchSlot}, +}; +use diesel::{Insertable, Queryable}; +use serde::{Deserialize, Serialize}; +use std::hash::{Hash, Hasher}; + +pub type WatchEpoch = i32; + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = canonical_slots)] +pub struct WatchCanonicalSlot { + pub slot: WatchSlot, + pub root: WatchHash, + pub skipped: bool, + pub beacon_block: Option, +} + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = beacon_blocks)] +pub struct WatchBeaconBlock { + pub slot: WatchSlot, + pub root: WatchHash, + pub parent_root: WatchHash, + pub attestation_count: i32, + pub transaction_count: Option, + pub withdrawal_count: Option, +} + +#[derive(Clone, Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = validators)] +pub struct WatchValidator { + pub index: i32, + pub public_key: WatchPK, + pub status: String, + pub activation_epoch: Option, + pub exit_epoch: Option, +} + +// Implement a minimal version of `Hash` and `Eq` so that we know if a validator status has changed. +impl Hash for WatchValidator { + fn hash(&self, state: &mut H) { + self.index.hash(state); + self.status.hash(state); + self.activation_epoch.hash(state); + self.exit_epoch.hash(state); + } +} + +impl PartialEq for WatchValidator { + fn eq(&self, other: &Self) -> bool { + self.index == other.index + && self.status == other.status + && self.activation_epoch == other.activation_epoch + && self.exit_epoch == other.exit_epoch + } +} +impl Eq for WatchValidator {} + +#[derive(Clone, Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = proposer_info)] +pub struct WatchProposerInfo { + pub slot: WatchSlot, + pub proposer_index: i32, + pub graffiti: String, +} diff --git a/watch/src/database/schema.rs b/watch/src/database/schema.rs new file mode 100644 index 000000000..32f22d506 --- /dev/null +++ b/watch/src/database/schema.rs @@ -0,0 +1,102 @@ +// @generated automatically by Diesel CLI. + +diesel::table! { + active_config (id) { + id -> Int4, + config_name -> Text, + slots_per_epoch -> Int4, + } +} + +diesel::table! { + beacon_blocks (slot) { + slot -> Int4, + root -> Bytea, + parent_root -> Bytea, + attestation_count -> Int4, + transaction_count -> Nullable, + withdrawal_count -> Nullable, + } +} + +diesel::table! { + block_packing (slot) { + slot -> Int4, + available -> Int4, + included -> Int4, + prior_skip_slots -> Int4, + } +} + +diesel::table! { + block_rewards (slot) { + slot -> Int4, + total -> Int4, + attestation_reward -> Int4, + sync_committee_reward -> Int4, + } +} + +diesel::table! { + blockprint (slot) { + slot -> Int4, + best_guess -> Text, + } +} + +diesel::table! 
{ + canonical_slots (slot) { + slot -> Int4, + root -> Bytea, + skipped -> Bool, + beacon_block -> Nullable, + } +} + +diesel::table! { + proposer_info (slot) { + slot -> Int4, + proposer_index -> Int4, + graffiti -> Text, + } +} + +diesel::table! { + suboptimal_attestations (epoch_start_slot, index) { + epoch_start_slot -> Int4, + index -> Int4, + source -> Bool, + head -> Bool, + target -> Bool, + } +} + +diesel::table! { + validators (index) { + index -> Int4, + public_key -> Bytea, + status -> Text, + activation_epoch -> Nullable, + exit_epoch -> Nullable, + } +} + +diesel::joinable!(block_packing -> beacon_blocks (slot)); +diesel::joinable!(block_rewards -> beacon_blocks (slot)); +diesel::joinable!(blockprint -> beacon_blocks (slot)); +diesel::joinable!(proposer_info -> beacon_blocks (slot)); +diesel::joinable!(proposer_info -> validators (proposer_index)); +diesel::joinable!(suboptimal_attestations -> canonical_slots (epoch_start_slot)); +diesel::joinable!(suboptimal_attestations -> validators (index)); + +diesel::allow_tables_to_appear_in_same_query!( + active_config, + beacon_blocks, + block_packing, + block_rewards, + blockprint, + canonical_slots, + proposer_info, + suboptimal_attestations, + validators, +); diff --git a/watch/src/database/utils.rs b/watch/src/database/utils.rs new file mode 100644 index 000000000..7e450f0ce --- /dev/null +++ b/watch/src/database/utils.rs @@ -0,0 +1,29 @@ +#![allow(dead_code)] +use crate::database::config::Config; +use diesel::pg::PgConnection; +use diesel::prelude::*; +use diesel_migrations::{FileBasedMigrations, MigrationHarness}; + +/// Sets `config.dbname` to `config.default_dbname` and returns `(new_config, old_dbname)`. +/// +/// This is useful for creating or dropping databases, since these actions must be done by +/// logging into another database. +pub fn get_config_using_default_db(config: &Config) -> (Config, String) { + let mut config = config.clone(); + let new_dbname = std::mem::replace(&mut config.dbname, config.default_dbname.clone()); + (config, new_dbname) +} + +/// Runs the set of migrations as detected in the local directory. +/// Equivalent to `diesel migration run`. +/// +/// Contains `unwrap`s so is only suitable for test code. 
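A usage sketch for `get_config_using_default_db`, assuming the `watch` crate as a dependency and a reachable Postgres instance: connect to the maintenance database (`postgres` by default) in order to create the target database, since that cannot be done while connected to the target itself. The `CREATE DATABASE` statement is illustrative of the intended use, not code from this patch, and the `expect`s make it suitable for test or setup code only:

```rust
use diesel::pg::PgConnection;
use diesel::prelude::*;
use diesel::sql_query;
use watch::database::utils::get_config_using_default_db;
use watch::database::Config;

fn main() {
    let config = Config::default();
    let (admin_config, new_dbname) = get_config_using_default_db(&config);

    // `admin_config.dbname` is now the default database (`postgres`); `new_dbname` is the
    // database we actually want to create (`dev` with the default config).
    let mut conn = PgConnection::establish(&admin_config.build_database_url())
        .expect("the default database must be reachable");

    sql_query(format!("CREATE DATABASE {new_dbname}"))
        .execute(&mut conn)
        .expect("failed to create the database");
}
```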
+/// TODO(mac) refactor to return Result +pub fn run_migrations(config: &Config) -> PgConnection { + let database_url = config.clone().build_database_url(); + let mut conn = PgConnection::establish(&database_url).unwrap(); + let migrations = FileBasedMigrations::find_migrations_directory().unwrap(); + conn.run_pending_migrations(migrations).unwrap(); + conn.begin_test_transaction().unwrap(); + conn +} diff --git a/watch/src/database/watch_types.rs b/watch/src/database/watch_types.rs new file mode 100644 index 000000000..0b3ba2c30 --- /dev/null +++ b/watch/src/database/watch_types.rs @@ -0,0 +1,119 @@ +use crate::database::error::Error; +use diesel::{ + sql_types::{Binary, Integer}, + AsExpression, FromSqlRow, +}; +use serde::{Deserialize, Serialize}; +use std::fmt; +use std::str::FromStr; +use types::{Epoch, Hash256, PublicKeyBytes, Slot}; +#[derive( + Clone, + Copy, + Debug, + AsExpression, + FromSqlRow, + Deserialize, + Serialize, + Hash, + PartialEq, + Eq, + PartialOrd, + Ord, +)] +#[diesel(sql_type = Integer)] +pub struct WatchSlot(Slot); + +impl fmt::Display for WatchSlot { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl WatchSlot { + pub fn new(slot: u64) -> Self { + Self(Slot::new(slot)) + } + + pub fn from_slot(slot: Slot) -> Self { + Self(slot) + } + + pub fn as_slot(self) -> Slot { + self.0 + } + + pub fn as_u64(self) -> u64 { + self.0.as_u64() + } + + pub fn epoch(self, slots_per_epoch: u64) -> Epoch { + self.as_slot().epoch(slots_per_epoch) + } +} + +#[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, Deserialize, Serialize)] +#[diesel(sql_type = Binary)] +pub struct WatchHash(Hash256); + +impl fmt::Display for WatchHash { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self.0) + } +} + +impl WatchHash { + pub fn as_hash(&self) -> Hash256 { + self.0 + } + + pub fn from_hash(hash: Hash256) -> Self { + WatchHash(hash) + } + + pub fn as_bytes(&self) -> &[u8] { + self.0.as_bytes() + } + + pub fn from_bytes(src: &[u8]) -> Result { + if src.len() == 32 { + Ok(WatchHash(Hash256::from_slice(src))) + } else { + Err(Error::InvalidRoot) + } + } +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq, AsExpression, FromSqlRow, Serialize, Deserialize)] +#[diesel(sql_type = Binary)] +pub struct WatchPK(PublicKeyBytes); + +impl fmt::Display for WatchPK { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self.0) + } +} + +impl WatchPK { + pub fn as_bytes(&self) -> &[u8] { + self.0.as_serialized() + } + + pub fn from_bytes(src: &[u8]) -> Result { + Ok(WatchPK(PublicKeyBytes::deserialize(src)?)) + } + + pub fn from_pubkey(key: PublicKeyBytes) -> Self { + WatchPK(key) + } +} + +impl FromStr for WatchPK { + type Err = String; + + fn from_str(s: &str) -> Result { + Ok(WatchPK( + PublicKeyBytes::from_str(s).map_err(|e| format!("Cannot be parsed: {}", e))?, + )) + } +} diff --git a/watch/src/lib.rs b/watch/src/lib.rs new file mode 100644 index 000000000..664c94516 --- /dev/null +++ b/watch/src/lib.rs @@ -0,0 +1,12 @@ +#![cfg(unix)] +pub mod block_packing; +pub mod block_rewards; +pub mod blockprint; +pub mod cli; +pub mod client; +pub mod config; +pub mod database; +pub mod logger; +pub mod server; +pub mod suboptimal_attestations; +pub mod updater; diff --git a/watch/src/logger.rs b/watch/src/logger.rs new file mode 100644 index 000000000..49310b42a --- /dev/null +++ b/watch/src/logger.rs @@ -0,0 +1,24 @@ +use env_logger::Builder; +use log::{info, LevelFilter}; +use std::process; + +pub 
fn init_logger(log_level: &str) { + let log_level = match log_level.to_lowercase().as_str() { + "trace" => LevelFilter::Trace, + "debug" => LevelFilter::Debug, + "info" => LevelFilter::Info, + "warn" => LevelFilter::Warn, + "error" => LevelFilter::Error, + _ => { + eprintln!("Unsupported log level"); + process::exit(1) + } + }; + + let mut builder = Builder::new(); + builder.filter(Some("watch"), log_level); + + builder.init(); + + info!("Logger initialized with log-level: {log_level}"); +} diff --git a/watch/src/main.rs b/watch/src/main.rs new file mode 100644 index 000000000..f971747da --- /dev/null +++ b/watch/src/main.rs @@ -0,0 +1,41 @@ +#[cfg(unix)] +use std::process; + +#[cfg(unix)] +mod block_packing; +#[cfg(unix)] +mod block_rewards; +#[cfg(unix)] +mod blockprint; +#[cfg(unix)] +mod cli; +#[cfg(unix)] +mod config; +#[cfg(unix)] +mod database; +#[cfg(unix)] +mod logger; +#[cfg(unix)] +mod server; +#[cfg(unix)] +mod suboptimal_attestations; +#[cfg(unix)] +mod updater; + +#[cfg(unix)] +#[tokio::main] +async fn main() { + match cli::run().await { + Ok(()) => process::exit(0), + Err(e) => { + eprintln!("Command failed with: {}", e); + drop(e); + process::exit(1) + } + } +} + +#[cfg(windows)] +fn main() { + eprintln!("Windows is not supported. Exiting."); +} diff --git a/watch/src/server/config.rs b/watch/src/server/config.rs new file mode 100644 index 000000000..a7d38e706 --- /dev/null +++ b/watch/src/server/config.rs @@ -0,0 +1,28 @@ +use serde::{Deserialize, Serialize}; +use std::net::IpAddr; + +pub const LISTEN_ADDR: &str = "127.0.0.1"; + +pub const fn listen_port() -> u16 { + 5059 +} +fn listen_addr() -> IpAddr { + LISTEN_ADDR.parse().expect("Server address is not valid") +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + #[serde(default = "listen_addr")] + pub listen_addr: IpAddr, + #[serde(default = "listen_port")] + pub listen_port: u16, +} + +impl Default for Config { + fn default() -> Self { + Self { + listen_addr: listen_addr(), + listen_port: listen_port(), + } + } +} diff --git a/watch/src/server/error.rs b/watch/src/server/error.rs new file mode 100644 index 000000000..d1542f784 --- /dev/null +++ b/watch/src/server/error.rs @@ -0,0 +1,50 @@ +use crate::database::Error as DbError; +use axum::Error as AxumError; +use axum::{http::StatusCode, response::IntoResponse, Json}; +use hyper::Error as HyperError; +use serde_json::json; + +#[derive(Debug)] +pub enum Error { + Axum(AxumError), + Hyper(HyperError), + Database(DbError), + BadRequest, + NotFound, + Other(String), +} + +impl IntoResponse for Error { + fn into_response(self) -> axum::response::Response { + let (status, error_message) = match self { + Self::BadRequest => (StatusCode::BAD_REQUEST, "Bad Request"), + Self::NotFound => (StatusCode::NOT_FOUND, "Not Found"), + _ => (StatusCode::INTERNAL_SERVER_ERROR, "Internal Server Error"), + }; + (status, Json(json!({ "error": error_message }))).into_response() + } +} + +impl From for Error { + fn from(e: HyperError) -> Self { + Error::Hyper(e) + } +} + +impl From for Error { + fn from(e: AxumError) -> Self { + Error::Axum(e) + } +} + +impl From for Error { + fn from(e: DbError) -> Self { + Error::Database(e) + } +} + +impl From for Error { + fn from(e: String) -> Self { + Error::Other(e) + } +} diff --git a/watch/src/server/handler.rs b/watch/src/server/handler.rs new file mode 100644 index 000000000..677702686 --- /dev/null +++ b/watch/src/server/handler.rs @@ -0,0 +1,266 @@ +use crate::database::{ + self, Error as DbError, PgPool, WatchBeaconBlock, 
WatchCanonicalSlot, WatchHash, WatchPK, + WatchProposerInfo, WatchSlot, WatchValidator, +}; +use crate::server::Error; +use axum::{ + extract::{Path, Query}, + Extension, Json, +}; +use eth2::types::BlockId; +use std::collections::HashMap; +use std::str::FromStr; + +pub async fn get_slot( + Path(slot): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_canonical_slot( + &mut conn, + WatchSlot::new(slot), + )?)) +} + +pub async fn get_slot_lowest( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_lowest_canonical_slot(&mut conn)?)) +} + +pub async fn get_slot_highest( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_highest_canonical_slot(&mut conn)?)) +} + +pub async fn get_slots_by_range( + Query(query): Query>, + Extension(pool): Extension, +) -> Result>>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + if let Some(start_slot) = query.get("start_slot") { + if let Some(end_slot) = query.get("end_slot") { + if start_slot > end_slot { + Err(Error::BadRequest) + } else { + Ok(Json(database::get_canonical_slots_by_range( + &mut conn, + WatchSlot::new(*start_slot), + WatchSlot::new(*end_slot), + )?)) + } + } else { + Err(Error::BadRequest) + } + } else { + Err(Error::BadRequest) + } +} + +pub async fn get_block( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + let block_id: BlockId = BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)?; + match block_id { + BlockId::Slot(slot) => Ok(Json(database::get_beacon_block_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + BlockId::Root(root) => Ok(Json(database::get_beacon_block_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_block_lowest( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_lowest_beacon_block(&mut conn)?)) +} + +pub async fn get_block_highest( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_highest_beacon_block(&mut conn)?)) +} + +pub async fn get_block_previous( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? { + BlockId::Root(root) => { + if let Some(block) = + database::get_beacon_block_by_root(&mut conn, WatchHash::from_hash(root))? 
+ .map(|block| block.parent_root) + { + Ok(Json(database::get_beacon_block_by_root(&mut conn, block)?)) + } else { + Err(Error::NotFound) + } + } + BlockId::Slot(slot) => Ok(Json(database::get_beacon_block_by_slot( + &mut conn, + WatchSlot::new(slot.as_u64().checked_sub(1_u64).ok_or(Error::NotFound)?), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_block_next( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? { + BlockId::Root(root) => Ok(Json(database::get_beacon_block_with_parent( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(database::get_beacon_block_by_slot( + &mut conn, + WatchSlot::from_slot(slot + 1_u64), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_blocks_by_range( + Query(query): Query>, + Extension(pool): Extension, +) -> Result>>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + if let Some(start_slot) = query.get("start_slot") { + if let Some(end_slot) = query.get("end_slot") { + if start_slot > end_slot { + Err(Error::BadRequest) + } else { + Ok(Json(database::get_beacon_blocks_by_range( + &mut conn, + WatchSlot::new(*start_slot), + WatchSlot::new(*end_slot), + )?)) + } + } else { + Err(Error::BadRequest) + } + } else { + Err(Error::BadRequest) + } +} + +pub async fn get_block_proposer( + Path(block_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? { + BlockId::Root(root) => Ok(Json(database::get_proposer_info_by_root( + &mut conn, + WatchHash::from_hash(root), + )?)), + BlockId::Slot(slot) => Ok(Json(database::get_proposer_info_by_slot( + &mut conn, + WatchSlot::from_slot(slot), + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_validator( + Path(validator_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + if validator_query.starts_with("0x") { + let pubkey = WatchPK::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + Ok(Json(database::get_validator_by_public_key( + &mut conn, pubkey, + )?)) + } else { + let index = i32::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + Ok(Json(database::get_validator_by_index(&mut conn, index)?)) + } +} + +pub async fn get_all_validators( + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + Ok(Json(database::get_all_validators(&mut conn)?)) +} + +pub async fn get_validator_latest_proposal( + Path(validator_query): Path, + Extension(pool): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + if validator_query.starts_with("0x") { + let pubkey = WatchPK::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + let validator = + database::get_validator_by_public_key(&mut conn, pubkey)?.ok_or(Error::NotFound)?; + Ok(Json(database::get_validators_latest_proposer_info( + &mut conn, + vec![validator.index], + )?)) + } else { + let index = i32::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + Ok(Json(database::get_validators_latest_proposer_info( + &mut conn, + vec![index], + )?)) + } +} + +pub async fn 
get_client_breakdown( + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + + if let Some(target_slot) = database::get_highest_canonical_slot(&mut conn)? { + Ok(Json(database::get_validators_clients_at_slot( + &mut conn, + target_slot.slot, + slots_per_epoch, + )?)) + } else { + Err(Error::Database(DbError::Other( + "No slots found in database.".to_string(), + ))) + } +} + +pub async fn get_client_breakdown_percentages( + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = database::get_connection(&pool).map_err(Error::Database)?; + + let mut result = HashMap::new(); + if let Some(target_slot) = database::get_highest_canonical_slot(&mut conn)? { + let total = database::count_validators_activated_before_slot( + &mut conn, + target_slot.slot, + slots_per_epoch, + )?; + let clients = + database::get_validators_clients_at_slot(&mut conn, target_slot.slot, slots_per_epoch)?; + for (client, number) in clients.iter() { + let percentage: f64 = *number as f64 / total as f64 * 100.0; + result.insert(client.to_string(), percentage); + } + } + + Ok(Json(result)) +} diff --git a/watch/src/server/mod.rs b/watch/src/server/mod.rs new file mode 100644 index 000000000..09d5ec6aa --- /dev/null +++ b/watch/src/server/mod.rs @@ -0,0 +1,134 @@ +use crate::block_packing::block_packing_routes; +use crate::block_rewards::block_rewards_routes; +use crate::blockprint::blockprint_routes; +use crate::config::Config as FullConfig; +use crate::database::{self, PgPool}; +use crate::suboptimal_attestations::{attestation_routes, blockprint_attestation_routes}; +use axum::{ + handler::Handler, + http::{StatusCode, Uri}, + routing::get, + Extension, Json, Router, +}; +use eth2::types::ErrorMessage; +use log::info; +use std::future::Future; +use std::net::SocketAddr; +use tokio::sync::oneshot; + +pub use config::Config; +pub use error::Error; + +mod config; +mod error; +mod handler; + +pub async fn serve(config: FullConfig, shutdown: oneshot::Receiver<()>) -> Result<(), Error> { + let db = database::build_connection_pool(&config.database)?; + let (_, slots_per_epoch) = database::get_active_config(&mut database::get_connection(&db)?)? + .ok_or_else(|| { + Error::Other( + "Database not found. Please run the updater prior to starting the server" + .to_string(), + ) + })?; + + let server = start_server(&config, slots_per_epoch as u64, db, async { + let _ = shutdown.await; + })?; + + server.await?; + + Ok(()) +} + +/// Creates a server that will serve requests using information from `config`. +/// +/// The server will create its own connection pool to serve connections to the database. +/// This is separate to the connection pool that is used for the `updater`. +/// +/// The server will shut down gracefully when the `shutdown` future resolves. +/// +/// ## Returns +/// +/// This function will bind the server to the address specified in the config and then return a +/// Future representing the actual server that will need to be awaited. +/// +/// ## Errors +/// +/// Returns an error if the server is unable to bind or there is another error during +/// configuration. 
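As a rough sketch of the shutdown wiring described above (the wrapper function and the Ctrl-C handling are illustrative assumptions, not part of this patch; `start_server`, `FullConfig`, `PgPool` and `Error` are the items already in scope in this file):

```rust
// Illustrative sketch only: bind the server, then drive it until Ctrl-C.
async fn run_until_ctrl_c(
    config: &FullConfig,
    slots_per_epoch: u64,
    pool: PgPool,
) -> Result<(), Error> {
    let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel::<()>();

    // Binding happens here; the returned value is a future that must be
    // awaited before any requests are actually served.
    let server = start_server(config, slots_per_epoch, pool, async {
        let _ = shutdown_rx.await;
    })?;

    // Hypothetical trigger for the graceful shutdown path.
    tokio::spawn(async move {
        let _ = tokio::signal::ctrl_c().await;
        let _ = shutdown_tx.send(());
    });

    server.await?;
    Ok(())
}
```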
+pub fn start_server( + config: &FullConfig, + slots_per_epoch: u64, + pool: PgPool, + shutdown: impl Future + Send + Sync + 'static, +) -> Result> + 'static, Error> { + let mut routes = Router::new() + .route("/v1/slots", get(handler::get_slots_by_range)) + .route("/v1/slots/:slot", get(handler::get_slot)) + .route("/v1/slots/lowest", get(handler::get_slot_lowest)) + .route("/v1/slots/highest", get(handler::get_slot_highest)) + .route("/v1/slots/:slot/block", get(handler::get_block)) + .route("/v1/blocks", get(handler::get_blocks_by_range)) + .route("/v1/blocks/:block", get(handler::get_block)) + .route("/v1/blocks/lowest", get(handler::get_block_lowest)) + .route("/v1/blocks/highest", get(handler::get_block_highest)) + .route( + "/v1/blocks/:block/previous", + get(handler::get_block_previous), + ) + .route("/v1/blocks/:block/next", get(handler::get_block_next)) + .route( + "/v1/blocks/:block/proposer", + get(handler::get_block_proposer), + ) + .route("/v1/validators/:validator", get(handler::get_validator)) + .route("/v1/validators/all", get(handler::get_all_validators)) + .route( + "/v1/validators/:validator/latest_proposal", + get(handler::get_validator_latest_proposal), + ) + .route("/v1/clients", get(handler::get_client_breakdown)) + .route( + "/v1/clients/percentages", + get(handler::get_client_breakdown_percentages), + ) + .merge(attestation_routes()) + .merge(blockprint_routes()) + .merge(block_packing_routes()) + .merge(block_rewards_routes()); + + if config.blockprint.enabled && config.updater.attestations { + routes = routes.merge(blockprint_attestation_routes()) + } + + let app = routes + .fallback(route_not_found.into_service()) + .layer(Extension(pool)) + .layer(Extension(slots_per_epoch)); + + let addr = SocketAddr::new(config.server.listen_addr, config.server.listen_port); + + let server = axum::Server::try_bind(&addr)?.serve(app.into_make_service()); + + let server = server.with_graceful_shutdown(async { + shutdown.await; + }); + + info!("HTTP server listening on {}", addr); + + Ok(server) +} + +// The default route indicating that no available routes matched the request. 
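A quick way to exercise one of the routes registered above, purely illustrative: it assumes the default `127.0.0.1:5059` listen address from `server/config.rs` and leans on `reqwest`, which this crate already pulls in for the blockprint client.

```rust
// Illustrative only: fetch the highest canonical slot over the HTTP API.
async fn print_highest_slot() -> Result<(), reqwest::Error> {
    let body = reqwest::get("http://127.0.0.1:5059/v1/slots/highest")
        .await?
        .text()
        .await?;
    println!("highest canonical slot: {body}");
    Ok(())
}
```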
+async fn route_not_found(uri: Uri) -> (StatusCode, Json) { + ( + StatusCode::METHOD_NOT_ALLOWED, + Json(ErrorMessage { + code: StatusCode::METHOD_NOT_ALLOWED.as_u16(), + message: format!("No route for {uri}"), + stacktraces: vec![], + }), + ) +} diff --git a/watch/src/suboptimal_attestations/database.rs b/watch/src/suboptimal_attestations/database.rs new file mode 100644 index 000000000..cb947d250 --- /dev/null +++ b/watch/src/suboptimal_attestations/database.rs @@ -0,0 +1,224 @@ +use crate::database::{ + schema::{suboptimal_attestations, validators}, + watch_types::{WatchPK, WatchSlot}, + Error, PgConn, MAX_SIZE_BATCH_INSERT, +}; + +use diesel::prelude::*; +use diesel::{Insertable, Queryable}; +use log::debug; +use serde::{Deserialize, Serialize}; +use std::time::Instant; + +use types::Epoch; + +#[derive(Clone, Copy, Debug, Serialize, Deserialize)] +pub struct WatchAttestation { + pub index: i32, + pub epoch: Epoch, + pub source: bool, + pub head: bool, + pub target: bool, +} + +impl WatchAttestation { + pub fn optimal(index: i32, epoch: Epoch) -> WatchAttestation { + WatchAttestation { + index, + epoch, + source: true, + head: true, + target: true, + } + } +} + +#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)] +#[diesel(table_name = suboptimal_attestations)] +pub struct WatchSuboptimalAttestation { + pub epoch_start_slot: WatchSlot, + pub index: i32, + pub source: bool, + pub head: bool, + pub target: bool, +} + +impl WatchSuboptimalAttestation { + pub fn to_attestation(&self, slots_per_epoch: u64) -> WatchAttestation { + WatchAttestation { + index: self.index, + epoch: self.epoch_start_slot.epoch(slots_per_epoch), + source: self.source, + head: self.head, + target: self.target, + } + } +} + +/// Insert a batch of values into the `suboptimal_attestations` table +/// +/// Since attestations technically occur per-slot but we only store them per-epoch (via its +/// `start_slot`) so if any slot in the epoch changes, we need to resync the whole epoch as a +/// 'suboptimal' attestation could now be 'optimal'. +/// +/// This is handled in the update code, where in the case of a re-org, the affected epoch is +/// deleted completely. +/// +/// On a conflict, it will do nothing. +pub fn insert_batch_suboptimal_attestations( + conn: &mut PgConn, + attestations: Vec, +) -> Result<(), Error> { + use self::suboptimal_attestations::dsl::*; + + let mut count = 0; + let timer = Instant::now(); + + for chunk in attestations.chunks(MAX_SIZE_BATCH_INSERT) { + count += diesel::insert_into(suboptimal_attestations) + .values(chunk) + .on_conflict_do_nothing() + .execute(conn)?; + } + + let time_taken = timer.elapsed(); + debug!("Attestations inserted, count: {count}, time taken: {time_taken:?}"); + Ok(()) +} + +/// Selects the row from the `suboptimal_attestations` table where `epoch_start_slot` is minimum. +pub fn get_lowest_attestation( + conn: &mut PgConn, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .order_by(epoch_start_slot.asc()) + .limit(1) + .first::(conn) + .optional()?) +} + +/// Selects the row from the `suboptimal_attestations` table where `epoch_start_slot` is maximum. +pub fn get_highest_attestation( + conn: &mut PgConn, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .order_by(epoch_start_slot.desc()) + .limit(1) + .first::(conn) + .optional()?) 
+} + +/// Selects a single row from the `suboptimal_attestations` table corresponding to a given +/// `index_query` and `epoch_query`. +pub fn get_attestation_by_index( + conn: &mut PgConn, + index_query: i32, + epoch_query: Epoch, + slots_per_epoch: u64, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + let timer = Instant::now(); + + let result = suboptimal_attestations + .filter(epoch_start_slot.eq(WatchSlot::from_slot( + epoch_query.start_slot(slots_per_epoch), + ))) + .filter(index.eq(index_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Attestation requested for validator: {index_query}, epoch: {epoch_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects a single row from the `suboptimal_attestations` table corresponding +/// to a given `pubkey_query` and `epoch_query`. +#[allow(dead_code)] +pub fn get_attestation_by_pubkey( + conn: &mut PgConn, + pubkey_query: WatchPK, + epoch_query: Epoch, + slots_per_epoch: u64, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + use self::validators::dsl::{public_key, validators}; + let timer = Instant::now(); + + let join = validators.inner_join(suboptimal_attestations); + + let result = join + .select((epoch_start_slot, index, source, head, target)) + .filter(epoch_start_slot.eq(WatchSlot::from_slot( + epoch_query.start_slot(slots_per_epoch), + ))) + .filter(public_key.eq(pubkey_query)) + .first::(conn) + .optional()?; + + let time_taken = timer.elapsed(); + debug!("Attestation requested for validator: {pubkey_query}, epoch: {epoch_query}, time taken: {time_taken:?}"); + Ok(result) +} + +/// Selects `index` for all validators in the suboptimal_attestations table +/// that have `source == false` for the corresponding `epoch_start_slot_query`. +pub fn get_validators_missed_source( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .select(index) + .filter(epoch_start_slot.eq(epoch_start_slot_query)) + .filter(source.eq(false)) + .load::(conn)?) +} + +/// Selects `index` for all validators in the suboptimal_attestations table +/// that have `head == false` for the corresponding `epoch_start_slot_query`. +pub fn get_validators_missed_head( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .select(index) + .filter(epoch_start_slot.eq(epoch_start_slot_query)) + .filter(head.eq(false)) + .load::(conn)?) +} + +/// Selects `index` for all validators in the suboptimal_attestations table +/// that have `target == false` for the corresponding `epoch_start_slot_query`. +pub fn get_validators_missed_target( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .select(index) + .filter(epoch_start_slot.eq(epoch_start_slot_query)) + .filter(target.eq(false)) + .load::(conn)?) +} + +/// Selects all rows from the `suboptimal_attestations` table for the given +/// `epoch_start_slot_query`. +pub fn get_all_suboptimal_attestations_for_epoch( + conn: &mut PgConn, + epoch_start_slot_query: WatchSlot, +) -> Result, Error> { + use self::suboptimal_attestations::dsl::*; + + Ok(suboptimal_attestations + .filter(epoch_start_slot.eq(epoch_start_slot_query)) + .load::(conn)?) 
+} diff --git a/watch/src/suboptimal_attestations/mod.rs b/watch/src/suboptimal_attestations/mod.rs new file mode 100644 index 000000000..a94532e8a --- /dev/null +++ b/watch/src/suboptimal_attestations/mod.rs @@ -0,0 +1,56 @@ +pub mod database; +pub mod server; +pub mod updater; + +use crate::database::watch_types::WatchSlot; +use crate::updater::error::Error; + +pub use database::{ + get_all_suboptimal_attestations_for_epoch, get_attestation_by_index, get_attestation_by_pubkey, + get_highest_attestation, get_lowest_attestation, insert_batch_suboptimal_attestations, + WatchAttestation, WatchSuboptimalAttestation, +}; + +pub use server::{attestation_routes, blockprint_attestation_routes}; + +use eth2::BeaconNodeHttpClient; +use types::Epoch; + +/// Sends a request to `lighthouse/analysis/attestation_performance`. +/// Formats the response into a vector of `WatchSuboptimalAttestation`. +/// +/// Any attestations with `source == true && head == true && target == true` are ignored. +pub async fn get_attestation_performances( + bn: &BeaconNodeHttpClient, + start_epoch: Epoch, + end_epoch: Epoch, + slots_per_epoch: u64, +) -> Result, Error> { + let mut output = Vec::new(); + let result = bn + .get_lighthouse_analysis_attestation_performance( + start_epoch, + end_epoch, + "global".to_string(), + ) + .await?; + for index in result { + for epoch in index.epochs { + if epoch.1.active { + // Check if the attestation is suboptimal. + if !epoch.1.source || !epoch.1.head || !epoch.1.target { + output.push(WatchSuboptimalAttestation { + epoch_start_slot: WatchSlot::from_slot( + Epoch::new(epoch.0).start_slot(slots_per_epoch), + ), + index: index.index as i32, + source: epoch.1.source, + head: epoch.1.head, + target: epoch.1.target, + }) + } + } + } + } + Ok(output) +} diff --git a/watch/src/suboptimal_attestations/server.rs b/watch/src/suboptimal_attestations/server.rs new file mode 100644 index 000000000..391db9a41 --- /dev/null +++ b/watch/src/suboptimal_attestations/server.rs @@ -0,0 +1,299 @@ +use crate::database::{ + get_canonical_slot, get_connection, get_validator_by_index, get_validator_by_public_key, + get_validators_clients_at_slot, get_validators_latest_proposer_info, PgPool, WatchPK, + WatchSlot, +}; + +use crate::blockprint::database::construct_validator_blockprints_at_slot; +use crate::server::Error; +use crate::suboptimal_attestations::database::{ + get_all_suboptimal_attestations_for_epoch, get_attestation_by_index, + get_validators_missed_head, get_validators_missed_source, get_validators_missed_target, + WatchAttestation, WatchSuboptimalAttestation, +}; + +use axum::{extract::Path, routing::get, Extension, Json, Router}; +use std::collections::{HashMap, HashSet}; +use std::str::FromStr; +use types::Epoch; + +// Will return Ok(None) if the epoch is not synced or if the validator does not exist. +// In the future it might be worth differentiating these events. +pub async fn get_validator_attestation( + Path((validator_query, epoch_query)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + let epoch = Epoch::new(epoch_query); + + // Ensure the database has synced the target epoch. + if get_canonical_slot( + &mut conn, + WatchSlot::from_slot(epoch.end_slot(slots_per_epoch)), + )? + .is_none() + { + // Epoch is not fully synced. 
+ return Ok(Json(None)); + } + + let index = if validator_query.starts_with("0x") { + let pubkey = WatchPK::from_str(&validator_query).map_err(|_| Error::BadRequest)?; + get_validator_by_public_key(&mut conn, pubkey)? + .ok_or(Error::NotFound)? + .index + } else { + i32::from_str(&validator_query).map_err(|_| Error::BadRequest)? + }; + let attestation = if let Some(suboptimal_attestation) = + get_attestation_by_index(&mut conn, index, epoch, slots_per_epoch)? + { + Some(suboptimal_attestation.to_attestation(slots_per_epoch)) + } else { + // Attestation was not in database. Check if the validator was active. + match get_validator_by_index(&mut conn, index)? { + Some(validator) => { + if let Some(activation_epoch) = validator.activation_epoch { + if activation_epoch <= epoch.as_u64() as i32 { + if let Some(exit_epoch) = validator.exit_epoch { + if exit_epoch > epoch.as_u64() as i32 { + // Validator is active and has not yet exited. + Some(WatchAttestation::optimal(index, epoch)) + } else { + // Validator has exited. + None + } + } else { + // Validator is active and has not yet exited. + Some(WatchAttestation::optimal(index, epoch)) + } + } else { + // Validator is not yet active. + None + } + } else { + // Validator is not yet active. + None + } + } + None => return Err(Error::Other("Validator index does not exist".to_string())), + } + }; + Ok(Json(attestation)) +} + +pub async fn get_all_validators_attestations( + Path(epoch): Path, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + + let epoch_start_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch)); + + Ok(Json(get_all_suboptimal_attestations_for_epoch( + &mut conn, + epoch_start_slot, + )?)) +} + +pub async fn get_validators_missed_vote( + Path((vote, epoch)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + + let epoch_start_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch)); + match vote.to_lowercase().as_str() { + "source" => Ok(Json(get_validators_missed_source( + &mut conn, + epoch_start_slot, + )?)), + "head" => Ok(Json(get_validators_missed_head( + &mut conn, + epoch_start_slot, + )?)), + "target" => Ok(Json(get_validators_missed_target( + &mut conn, + epoch_start_slot, + )?)), + _ => Err(Error::BadRequest), + } +} + +pub async fn get_validators_missed_vote_graffiti( + Path((vote, epoch)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + + let Json(indices) = get_validators_missed_vote( + Path((vote, epoch)), + Extension(pool), + Extension(slots_per_epoch), + ) + .await?; + + let graffitis = get_validators_latest_proposer_info(&mut conn, indices)? + .values() + .map(|info| info.graffiti.clone()) + .collect::>(); + + let mut result = HashMap::new(); + for graffiti in graffitis { + if !result.contains_key(&graffiti) { + result.insert(graffiti.clone(), 0); + } + *result + .get_mut(&graffiti) + .ok_or_else(|| Error::Other("An unexpected error occurred".to_string()))? 
+= 1; + } + + Ok(Json(result)) +} + +pub fn attestation_routes() -> Router { + Router::new() + .route( + "/v1/validators/:validator/attestation/:epoch", + get(get_validator_attestation), + ) + .route( + "/v1/validators/all/attestation/:epoch", + get(get_all_validators_attestations), + ) + .route( + "/v1/validators/missed/:vote/:epoch", + get(get_validators_missed_vote), + ) + .route( + "/v1/validators/missed/:vote/:epoch/graffiti", + get(get_validators_missed_vote_graffiti), + ) +} + +/// The functions below are dependent on Blockprint and if it is disabled, the endpoints will be +/// disabled. +pub async fn get_clients_missed_vote( + Path((vote, epoch)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let mut conn = get_connection(&pool).map_err(Error::Database)?; + + let Json(indices) = get_validators_missed_vote( + Path((vote, epoch)), + Extension(pool), + Extension(slots_per_epoch), + ) + .await?; + + // All validators which missed the vote. + let indices_map = indices.into_iter().collect::>(); + + let target_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch)); + + // All validators. + let client_map = + construct_validator_blockprints_at_slot(&mut conn, target_slot, slots_per_epoch)?; + + let mut result = HashMap::new(); + + for index in indices_map { + if let Some(print) = client_map.get(&index) { + if !result.contains_key(print) { + result.insert(print.clone(), 0); + } + *result + .get_mut(print) + .ok_or_else(|| Error::Other("An unexpected error occurred".to_string()))? += 1; + } + } + + Ok(Json(result)) +} + +pub async fn get_clients_missed_vote_percentages( + Path((vote, epoch)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let Json(clients_counts) = get_clients_missed_vote( + Path((vote, epoch)), + Extension(pool.clone()), + Extension(slots_per_epoch), + ) + .await?; + + let target_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch)); + + let mut conn = get_connection(&pool)?; + let totals = get_validators_clients_at_slot(&mut conn, target_slot, slots_per_epoch)?; + + let mut result = HashMap::new(); + for (client, count) in clients_counts.iter() { + let client_total: f64 = *totals + .get(client) + .ok_or_else(|| Error::Other("Client type mismatch".to_string()))? + as f64; + // `client_total` should never be `0`, but if it is, return `0` instead of `inf`. + if client_total == 0.0 { + result.insert(client.to_string(), 0.0); + } else { + let percentage: f64 = *count as f64 / client_total * 100.0; + result.insert(client.to_string(), percentage); + } + } + + Ok(Json(result)) +} + +pub async fn get_clients_missed_vote_percentages_relative( + Path((vote, epoch)): Path<(String, u64)>, + Extension(pool): Extension, + Extension(slots_per_epoch): Extension, +) -> Result>, Error> { + let Json(clients_counts) = get_clients_missed_vote( + Path((vote, epoch)), + Extension(pool), + Extension(slots_per_epoch), + ) + .await?; + + let mut total: u64 = 0; + for (_, count) in clients_counts.iter() { + total += *count + } + + let mut result = HashMap::new(); + for (client, count) in clients_counts.iter() { + // `total` should never be 0, but if it is, return `-` instead of `inf`. 
+ if total == 0 { + result.insert(client.to_string(), 0.0); + } else { + let percentage: f64 = *count as f64 / total as f64 * 100.0; + result.insert(client.to_string(), percentage); + } + } + + Ok(Json(result)) +} + +pub fn blockprint_attestation_routes() -> Router { + Router::new() + .route( + "/v1/clients/missed/:vote/:epoch", + get(get_clients_missed_vote), + ) + .route( + "/v1/clients/missed/:vote/:epoch/percentages", + get(get_clients_missed_vote_percentages), + ) + .route( + "/v1/clients/missed/:vote/:epoch/percentages/relative", + get(get_clients_missed_vote_percentages_relative), + ) +} diff --git a/watch/src/suboptimal_attestations/updater.rs b/watch/src/suboptimal_attestations/updater.rs new file mode 100644 index 000000000..aeabff203 --- /dev/null +++ b/watch/src/suboptimal_attestations/updater.rs @@ -0,0 +1,236 @@ +use crate::database::{self, Error as DbError}; +use crate::updater::{Error, UpdateHandler}; + +use crate::suboptimal_attestations::get_attestation_performances; + +use eth2::types::EthSpec; +use log::{debug, error, warn}; + +const MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS: u64 = 50; + +impl UpdateHandler { + /// Forward fills the `suboptimal_attestations` table starting from the entry with the highest + /// slot. + /// + /// It construts a request to the `attestation_performance` API endpoint with: + /// `start_epoch` -> highest completely filled epoch + 1 (or epoch of lowest canonical slot) + /// `end_epoch` -> epoch of highest canonical slot + /// + /// It will resync the latest epoch if it is not fully filled but will not overwrite existing + /// values unless there is a re-org. + /// That is, `if highest_filled_slot % slots_per_epoch != 31`. + /// + /// In the event the most recent epoch has no suboptimal attestations, it will attempt to + /// resync that epoch. The odds of this occuring on mainnet are vanishingly small so it is not + /// accounted for. + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS`. + pub async fn fill_suboptimal_attestations(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + + let highest_filled_slot_opt = if self.config.attestations { + database::get_highest_attestation(&mut conn)? + .map(|attestation| attestation.epoch_start_slot.as_slot()) + } else { + return Err(Error::NotEnabled("attestations".to_string())); + }; + + let start_epoch = if let Some(highest_filled_slot) = highest_filled_slot_opt { + if highest_filled_slot % self.slots_per_epoch == self.slots_per_epoch.saturating_sub(1) + { + // The whole epoch is filled so we can begin syncing the next one. + highest_filled_slot.epoch(self.slots_per_epoch) + 1 + } else { + // The epoch is only partially synced. Try to sync it fully. + highest_filled_slot.epoch(self.slots_per_epoch) + } + } else { + // No rows present in the `suboptimal_attestations` table. Use `canonical_slots` + // instead. + if let Some(lowest_canonical_slot) = database::get_lowest_canonical_slot(&mut conn)? { + lowest_canonical_slot + .slot + .as_slot() + .epoch(self.slots_per_epoch) + } else { + // There are no slots in the database, do not fill the `suboptimal_attestations` + // table. 
+ warn!("Refusing to fill the `suboptimal_attestations` table as there are no slots in the database"); + return Ok(()); + } + }; + + if let Some(highest_canonical_slot) = + database::get_highest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + let mut end_epoch = highest_canonical_slot.epoch(self.slots_per_epoch); + + // The `lighthouse/analysis/attestation_performance` endpoint can only retrieve attestations + // which are more than 1 epoch old. + // We assume that `highest_canonical_slot` is near the head of the chain. + end_epoch = end_epoch.saturating_sub(2_u64); + + // If end_epoch == 0 then the chain just started so we need to wait until + // `current_epoch >= 2`. + if end_epoch == 0 { + debug!("Chain just begun, refusing to sync attestations"); + return Ok(()); + } + + if start_epoch > end_epoch { + debug!("Attestations are up to date with the head of the database"); + return Ok(()); + } + + // Ensure the size of the request does not exceed the maximum allowed value. + if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS) { + end_epoch = start_epoch + MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS + } + + if let Some(lowest_canonical_slot) = + database::get_lowest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + let mut attestations = get_attestation_performances( + &self.bn, + start_epoch, + end_epoch, + self.slots_per_epoch, + ) + .await?; + + // Only insert attestations with corresponding `canonical_slot`s. + attestations.retain(|attestation| { + attestation.epoch_start_slot.as_slot() >= lowest_canonical_slot + && attestation.epoch_start_slot.as_slot() <= highest_canonical_slot + }); + database::insert_batch_suboptimal_attestations(&mut conn, attestations)?; + } else { + return Err(Error::Database(DbError::Other( + "Database did not return a lowest canonical slot when one exists".to_string(), + ))); + } + } else { + // There are no slots in the `canonical_slots` table, but there are entries in the + // `suboptimal_attestations` table. This is a critical failure. It usually means + // someone has manually tampered with the database tables and should not occur during + // normal operation. + error!("Database is corrupted. Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } + + /// Backfill the `suboptimal_attestations` table starting from the entry with the lowest slot. + /// + /// It constructs a request to the `attestation_performance` API endpoint with: + /// `start_epoch` -> epoch of the lowest `canonical_slot`. + /// `end_epoch` -> epoch of the lowest filled `suboptimal_attestation` - 1 (or epoch of highest + /// canonical slot) + /// + /// It will resync the lowest epoch if it is not fully filled. + /// That is, `if lowest_filled_slot % slots_per_epoch != 0` + /// + /// In the event there are no suboptimal attestations present in the lowest epoch, it will attempt to + /// resync the epoch. The odds of this occuring on mainnet are vanishingly small so it is not + /// accounted for. + /// + /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS`. + pub async fn backfill_suboptimal_attestations(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let max_attestation_backfill = self.config.max_backfill_size_epochs; + + // Get the slot of the lowest entry in the `suboptimal_attestations` table. + let lowest_filled_slot_opt = if self.config.attestations { + database::get_lowest_attestation(&mut conn)? 
+ .map(|attestation| attestation.epoch_start_slot.as_slot()) + } else { + return Err(Error::NotEnabled("attestations".to_string())); + }; + + let end_epoch = if let Some(lowest_filled_slot) = lowest_filled_slot_opt { + if lowest_filled_slot % self.slots_per_epoch == 0 { + lowest_filled_slot + .epoch(self.slots_per_epoch) + .saturating_sub(1_u64) + } else { + // The epoch is only partially synced. Try to sync it fully. + lowest_filled_slot.epoch(self.slots_per_epoch) + } + } else { + // No entries in the `suboptimal_attestations` table. Use `canonical_slots` instead. + if let Some(highest_canonical_slot) = + database::get_highest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + // Subtract 2 since `end_epoch` must be less than the current epoch - 1. + // We assume that `highest_canonical_slot` is near the head of the chain. + highest_canonical_slot + .epoch(self.slots_per_epoch) + .saturating_sub(2_u64) + } else { + // There are no slots in the database, do not backfill the + // `suboptimal_attestations` table. + warn!("Refusing to backfill attestations as there are no slots in the database"); + return Ok(()); + } + }; + + if end_epoch == 0 { + debug!("Attestations backfill is complete"); + return Ok(()); + } + + if let Some(lowest_canonical_slot) = + database::get_lowest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + let mut start_epoch = lowest_canonical_slot.epoch(self.slots_per_epoch); + + if start_epoch > end_epoch { + debug!("Attestations are up to date with the base of the database"); + return Ok(()); + } + + // Ensure the request range does not exceed `max_attestation_backfill` or + // `MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS`. + if start_epoch < end_epoch.saturating_sub(max_attestation_backfill) { + start_epoch = end_epoch.saturating_sub(max_attestation_backfill) + } + if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS) { + start_epoch = end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS) + } + + if let Some(highest_canonical_slot) = + database::get_highest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot()) + { + let mut attestations = get_attestation_performances( + &self.bn, + start_epoch, + end_epoch, + self.slots_per_epoch, + ) + .await?; + + // Only insert `suboptimal_attestations` with corresponding `canonical_slots`. + attestations.retain(|attestation| { + attestation.epoch_start_slot.as_slot() >= lowest_canonical_slot + && attestation.epoch_start_slot.as_slot() <= highest_canonical_slot + }); + + database::insert_batch_suboptimal_attestations(&mut conn, attestations)?; + } else { + return Err(Error::Database(DbError::Other( + "Database did not return a lowest slot when one exists".to_string(), + ))); + } + } else { + // There are no slots in the `canonical_slot` table, but there are entries in the + // `suboptimal_attestations` table. This is a critical failure. It usually means + // someone has manually tampered with the database tables and should not occur during + // normal operation. + error!("Database is corrupted. 
Please re-sync the database"); + return Err(Error::Database(DbError::DatabaseCorrupted)); + } + + Ok(()) + } +} diff --git a/watch/src/updater/config.rs b/watch/src/updater/config.rs new file mode 100644 index 000000000..0179be73d --- /dev/null +++ b/watch/src/updater/config.rs @@ -0,0 +1,65 @@ +use serde::{Deserialize, Serialize}; + +pub const BEACON_NODE_URL: &str = "http://127.0.0.1:5052"; + +pub const fn max_backfill_size_epochs() -> u64 { + 2 +} +pub const fn backfill_stop_epoch() -> u64 { + 0 +} +pub const fn attestations() -> bool { + true +} +pub const fn proposer_info() -> bool { + true +} +pub const fn block_rewards() -> bool { + true +} +pub const fn block_packing() -> bool { + true +} + +fn beacon_node_url() -> String { + BEACON_NODE_URL.to_string() +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + /// The URL of the beacon you wish to sync from. + #[serde(default = "beacon_node_url")] + pub beacon_node_url: String, + /// The maximum size each backfill iteration will allow per request (in epochs). + #[serde(default = "max_backfill_size_epochs")] + pub max_backfill_size_epochs: u64, + /// The epoch at which to never backfill past. + #[serde(default = "backfill_stop_epoch")] + pub backfill_stop_epoch: u64, + /// Whether to sync the suboptimal_attestations table. + #[serde(default = "attestations")] + pub attestations: bool, + /// Whether to sync the proposer_info table. + #[serde(default = "proposer_info")] + pub proposer_info: bool, + /// Whether to sync the block_rewards table. + #[serde(default = "block_rewards")] + pub block_rewards: bool, + /// Whether to sync the block_packing table. + #[serde(default = "block_packing")] + pub block_packing: bool, +} + +impl Default for Config { + fn default() -> Self { + Self { + beacon_node_url: beacon_node_url(), + max_backfill_size_epochs: max_backfill_size_epochs(), + backfill_stop_epoch: backfill_stop_epoch(), + attestations: attestations(), + proposer_info: proposer_info(), + block_rewards: block_rewards(), + block_packing: block_packing(), + } + } +} diff --git a/watch/src/updater/error.rs b/watch/src/updater/error.rs new file mode 100644 index 000000000..74091c8f2 --- /dev/null +++ b/watch/src/updater/error.rs @@ -0,0 +1,56 @@ +use crate::blockprint::Error as BlockprintError; +use crate::database::Error as DbError; +use beacon_node::beacon_chain::BeaconChainError; +use eth2::{Error as Eth2Error, SensitiveError}; +use std::fmt; + +#[derive(Debug)] +pub enum Error { + BeaconChain(BeaconChainError), + Eth2(Eth2Error), + SensitiveUrl(SensitiveError), + Database(DbError), + Blockprint(BlockprintError), + UnableToGetRemoteHead, + BeaconNodeSyncing, + NotEnabled(String), + NoValidatorsFound, + BeaconNodeNotCompatible(String), + InvalidConfig(String), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +impl From for Error { + fn from(e: BeaconChainError) -> Self { + Error::BeaconChain(e) + } +} + +impl From for Error { + fn from(e: Eth2Error) -> Self { + Error::Eth2(e) + } +} + +impl From for Error { + fn from(e: SensitiveError) -> Self { + Error::SensitiveUrl(e) + } +} + +impl From for Error { + fn from(e: DbError) -> Self { + Error::Database(e) + } +} + +impl From for Error { + fn from(e: BlockprintError) -> Self { + Error::Blockprint(e) + } +} diff --git a/watch/src/updater/handler.rs b/watch/src/updater/handler.rs new file mode 100644 index 000000000..1e1662bf7 --- /dev/null +++ b/watch/src/updater/handler.rs @@ -0,0 +1,471 @@ 
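Because every field of the updater `Config` above carries a `#[serde(default = "...")]` attribute, a config file only needs to list the values it overrides. A minimal sketch of that behaviour, assuming `serde_yaml` is available (in line with the `config.yaml.default` shipped in this PR); the URL is a placeholder:

```rust
// Illustrative: fields omitted from the YAML fall back to the default
// functions defined above (`serde_yaml` is assumed to be a dependency).
fn parse_partial_updater_config() -> Result<(), serde_yaml::Error> {
    let yaml = r#"
beacon_node_url: "http://192.0.2.1:5052"
block_packing: false
"#;
    let config: Config = serde_yaml::from_str(yaml)?;
    assert_eq!(config.beacon_node_url, "http://192.0.2.1:5052");
    assert!(!config.block_packing);
    // Unspecified fields keep their defaults.
    assert!(config.attestations);
    assert_eq!(config.max_backfill_size_epochs, 2);
    Ok(())
}
```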
+use crate::blockprint::WatchBlockprintClient; +use crate::config::Config as FullConfig; +use crate::database::{self, PgPool, WatchCanonicalSlot, WatchHash, WatchSlot}; +use crate::updater::{Config, Error, WatchSpec}; +use beacon_node::beacon_chain::BeaconChainError; +use eth2::{ + types::{BlockId, SyncingData}, + BeaconNodeHttpClient, SensitiveUrl, +}; +use log::{debug, error, info, warn}; +use std::collections::HashSet; +use std::iter::FromIterator; +use types::{BeaconBlockHeader, EthSpec, Hash256, SignedBeaconBlock, Slot}; + +use crate::updater::{get_beacon_block, get_header, get_validators}; + +const MAX_EXPECTED_REORG_LENGTH: u64 = 32; + +/// Ensure the existing database is valid for this run. +pub async fn ensure_valid_database( + spec: &WatchSpec, + pool: &mut PgPool, +) -> Result<(), Error> { + let mut conn = database::get_connection(pool)?; + + let bn_slots_per_epoch = spec.slots_per_epoch(); + let bn_config_name = spec.network.clone(); + + if let Some((db_config_name, db_slots_per_epoch)) = database::get_active_config(&mut conn)? { + if db_config_name != bn_config_name || db_slots_per_epoch != bn_slots_per_epoch as i32 { + Err(Error::InvalidConfig( + "The config stored in the database does not match the beacon node.".to_string(), + )) + } else { + // Configs match. + Ok(()) + } + } else { + // No config exists in the DB. + database::insert_active_config(&mut conn, bn_config_name, bn_slots_per_epoch)?; + Ok(()) + } +} + +pub struct UpdateHandler { + pub pool: PgPool, + pub bn: BeaconNodeHttpClient, + pub blockprint: Option, + pub config: Config, + pub slots_per_epoch: u64, + pub spec: WatchSpec, +} + +impl UpdateHandler { + pub async fn new( + bn: BeaconNodeHttpClient, + spec: WatchSpec, + config: FullConfig, + ) -> Result, Error> { + let blockprint = if config.blockprint.enabled { + if let Some(server) = config.blockprint.url { + let blockprint_url = SensitiveUrl::parse(&server).map_err(Error::SensitiveUrl)?; + Some(WatchBlockprintClient { + client: reqwest::Client::new(), + server: blockprint_url, + username: config.blockprint.username, + password: config.blockprint.password, + }) + } else { + return Err(Error::NotEnabled( + "blockprint was enabled but url was not set".to_string(), + )); + } + } else { + None + }; + + let mut pool = database::build_connection_pool(&config.database)?; + + ensure_valid_database(&spec, &mut pool).await?; + + Ok(Self { + pool, + bn, + blockprint, + config: config.updater, + slots_per_epoch: spec.slots_per_epoch(), + spec, + }) + } + + /// Gets the syncing status of the connected beacon node. + pub async fn get_bn_syncing_status(&mut self) -> Result { + Ok(self.bn.get_node_syncing().await?.data) + } + + /// Gets a list of block roots from the database which do not yet contain a corresponding + /// entry in the `beacon_blocks` table and inserts them. + pub async fn update_unknown_blocks(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let roots = database::get_unknown_canonical_blocks(&mut conn)?; + for root in roots { + let block_opt: Option> = + get_beacon_block(&self.bn, BlockId::Root(root.as_hash())).await?; + if let Some(block) = block_opt { + database::insert_beacon_block(&mut conn, block, root)?; + } + } + + Ok(()) + } + + /// Performs a head update with the following steps: + /// 1. Pull the latest header from the beacon node and the latest canonical slot from the + /// database. + /// 2. Loop back through the beacon node and database to find the first matching slot -> root + /// pair. + /// 3. 
Go back `MAX_EXPECTED_REORG_LENGTH` slots through the database ensuring it is + /// consistent with the beacon node. If a re-org occurs beyond this range, we cannot recover. + /// 4. Remove any invalid slots from the database. + /// 5. Sync all blocks between the first valid block of the database and the head of the beacon + /// chain. + /// + /// In the event there are no slots present in the database, it will sync from the head block + /// block back to the first slot of the epoch. + /// This will ensure backfills are always done in full epochs (which helps keep certain syncing + /// tasks efficient). + pub async fn perform_head_update(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + // Load the head from the beacon node. + let bn_header = get_header(&self.bn, BlockId::Head) + .await? + .ok_or(Error::UnableToGetRemoteHead)?; + let header_root = bn_header.canonical_root(); + + if let Some(latest_matching_canonical_slot) = + self.get_first_matching_block(bn_header.clone()).await? + { + // Check for reorgs. + let latest_db_slot = self.check_for_reorg(latest_matching_canonical_slot).await?; + + // Remove all slots above `latest_db_slot` from the database. + let result = database::delete_canonical_slots_above( + &mut conn, + WatchSlot::from_slot(latest_db_slot), + )?; + info!("{result} old records removed during head update"); + + if result > 0 { + // If slots were removed, we need to resync the suboptimal_attestations table for + // the epoch since they will have changed and cannot be fixed by a simple update. + let epoch = latest_db_slot + .epoch(self.slots_per_epoch) + .saturating_sub(1_u64); + debug!("Preparing to resync attestations above epoch {epoch}"); + database::delete_suboptimal_attestations_above( + &mut conn, + WatchSlot::from_slot(epoch.start_slot(self.slots_per_epoch)), + )?; + } + + // Since we are syncing backwards, `start_slot > `end_slot`. + let start_slot = bn_header.slot; + let end_slot = latest_db_slot + 1; + self.reverse_fill_canonical_slots(bn_header, header_root, false, start_slot, end_slot) + .await?; + info!("Reverse sync begun at slot {start_slot} and stopped at slot {end_slot}"); + + // Attempt to sync new blocks with blockprint. + //self.sync_blockprint_until(start_slot).await?; + } else { + // There are no matching parent blocks. Sync from the head block back until the first + // block of the epoch. + let start_slot = bn_header.slot; + let end_slot = start_slot.saturating_sub(start_slot % self.slots_per_epoch); + self.reverse_fill_canonical_slots(bn_header, header_root, false, start_slot, end_slot) + .await?; + info!("Reverse sync begun at slot {start_slot} and stopped at slot {end_slot}"); + } + + Ok(()) + } + + /// Attempt to find a row in the `canonical_slots` table which matches the `canonical_root` of + /// the block header as reported by the beacon node. + /// + /// Any blocks above this value are not canonical according to the beacon node. + /// + /// Note: In the event that there are skip slots above the slot returned by the function, + /// they will not be returned, so may be pruned or re-synced by other code despite being + /// canonical. + pub async fn get_first_matching_block( + &mut self, + mut bn_header: BeaconBlockHeader, + ) -> Result, Error> { + let mut conn = database::get_connection(&self.pool)?; + + // Load latest non-skipped canonical slot from database. + if let Some(db_canonical_slot) = + database::get_highest_non_skipped_canonical_slot(&mut conn)? 
+ { + // Check if the header or parent root matches the entry in the database. + if bn_header.parent_root == db_canonical_slot.root.as_hash() + || bn_header.canonical_root() == db_canonical_slot.root.as_hash() + { + Ok(Some(db_canonical_slot)) + } else { + // Header is not the child of the highest entry in the database. + // From here we need to iterate backwards through the database until we find + // a slot -> root pair that matches the beacon node. + loop { + // Store working `parent_root`. + let parent_root = bn_header.parent_root; + + // Try the next header. + let next_header = get_header(&self.bn, BlockId::Root(parent_root)).await?; + if let Some(header) = next_header { + bn_header = header.clone(); + if let Some(db_canonical_slot) = database::get_canonical_slot_by_root( + &mut conn, + WatchHash::from_hash(header.parent_root), + )? { + // Check if the entry in the database matches the parent of + // the header. + if header.parent_root == db_canonical_slot.root.as_hash() { + return Ok(Some(db_canonical_slot)); + } else { + // Move on to the next header. + continue; + } + } else { + // Database does not have the referenced root. Try the next header. + continue; + } + } else { + // If we get this error it means that the `parent_root` of the header + // did not reference a canonical block. + return Err(Error::BeaconChain(BeaconChainError::MissingBeaconBlock( + parent_root, + ))); + } + } + } + } else { + // There are no non-skipped blocks present in the database. + Ok(None) + } + } + + /// Given the latest slot in the database which matches a root in the beacon node, + /// traverse back through the database for `MAX_EXPECTED_REORG_LENGTH` slots to ensure the tip + /// of the database is consistent with the beacon node (in the case that reorgs have occured). + /// + /// Returns the slot before the oldest canonical_slot which has an invalid child. + pub async fn check_for_reorg( + &mut self, + latest_canonical_slot: WatchCanonicalSlot, + ) -> Result { + let mut conn = database::get_connection(&self.pool)?; + + let end_slot = latest_canonical_slot.slot.as_u64(); + let start_slot = end_slot.saturating_sub(MAX_EXPECTED_REORG_LENGTH); + + for i in start_slot..end_slot { + let slot = Slot::new(i); + let db_canonical_slot_opt = + database::get_canonical_slot(&mut conn, WatchSlot::from_slot(slot))?; + if let Some(db_canonical_slot) = db_canonical_slot_opt { + let header_opt = get_header(&self.bn, BlockId::Slot(slot)).await?; + if let Some(header) = header_opt { + if header.canonical_root() == db_canonical_slot.root.as_hash() { + // The roots match (or are both skip slots). + continue; + } else { + // The block roots do not match. We need to re-sync from here. + warn!("Block {slot} does not match the beacon node. Resyncing"); + return Ok(slot.saturating_sub(1_u64)); + } + } else if !db_canonical_slot.skipped { + // The block exists in the database, but does not exist on the beacon node. + // We need to re-sync from here. + warn!("Block {slot} does not exist on the beacon node. Resyncing"); + return Ok(slot.saturating_sub(1_u64)); + } + } else { + // This slot does not exist in the database. + let lowest_slot = database::get_lowest_canonical_slot(&mut conn)? + .map(|canonical_slot| canonical_slot.slot.as_slot()); + if lowest_slot > Some(slot) { + // The database has not back-filled this slot yet, so skip it. + continue; + } else { + // The database does not contain this block, but has back-filled past it. + // We need to resync from here. + warn!("Slot {slot} missing from database. 
Resyncing"); + return Ok(slot.saturating_sub(1_u64)); + } + } + } + + // The database is consistent with the beacon node, so return the head of the database. + Ok(latest_canonical_slot.slot.as_slot()) + } + + /// Fills the canonical slots table beginning from `start_slot` and ending at `end_slot`. + /// It fills in reverse order, that is, `start_slot` is higher than `end_slot`. + /// + /// Skip slots set `root` to the root of the previous non-skipped slot and also sets + /// `skipped == true`. + /// + /// Since it uses `insert_canonical_slot` to interact with the database, it WILL NOT overwrite + /// existing rows. This means that any part of the chain within `end_slot..=start_slot` that + /// needs to be resynced, must first be deleted from the database. + pub async fn reverse_fill_canonical_slots( + &mut self, + mut header: BeaconBlockHeader, + mut header_root: Hash256, + mut skipped: bool, + start_slot: Slot, + end_slot: Slot, + ) -> Result { + let mut count = 0; + + let mut conn = database::get_connection(&self.pool)?; + + // Iterate, descending from `start_slot` (higher) to `end_slot` (lower). + for slot in (end_slot.as_u64()..=start_slot.as_u64()).rev() { + // Insert header. + database::insert_canonical_slot( + &mut conn, + WatchCanonicalSlot { + slot: WatchSlot::new(slot), + root: WatchHash::from_hash(header_root), + skipped, + beacon_block: None, + }, + )?; + count += 1; + + // Load the next header: + // We must use BlockId::Slot since we want to include skip slots. + header = if let Some(new_header) = get_header( + &self.bn, + BlockId::Slot(Slot::new(slot.saturating_sub(1_u64))), + ) + .await? + { + header_root = new_header.canonical_root(); + skipped = false; + new_header + } else { + if header.slot == 0 { + info!("Reverse fill exhausted at slot 0"); + break; + } + // Slot was skipped, so use the parent_root (most recent non-skipped block). + skipped = true; + header_root = header.parent_root; + header + }; + } + + Ok(count) + } + + /// Backfills the `canonical_slots` table starting from the lowest non-skipped slot and + /// stopping after `max_backfill_size_epochs` epochs. + pub async fn backfill_canonical_slots(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + let backfill_stop_slot = self.config.backfill_stop_epoch * self.slots_per_epoch; + // Check to see if we have finished backfilling. + if let Some(lowest_slot) = database::get_lowest_canonical_slot(&mut conn)? { + if lowest_slot.slot.as_slot() == backfill_stop_slot { + debug!("Backfill sync complete, all slots filled"); + return Ok(()); + } + } + + let backfill_slot_count = self.config.max_backfill_size_epochs * self.slots_per_epoch; + + if let Some(lowest_non_skipped_canonical_slot) = + database::get_lowest_non_skipped_canonical_slot(&mut conn)? + { + // Set `start_slot` equal to the lowest non-skipped slot in the database. + // While this will attempt to resync some parts of the bottom of the chain, it reduces + // complexity when dealing with skip slots. 
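+ // For example, with 32 slots per epoch and the default `max_backfill_size_epochs` of 2, a lowest + // non-skipped slot of 1000 yields `start_slot == 1000` and `end_slot == 936` (raised to + // `backfill_stop_slot` if the subtraction goes below it).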
+ let start_slot = lowest_non_skipped_canonical_slot.slot.as_slot(); + let mut end_slot = lowest_non_skipped_canonical_slot + .slot + .as_slot() + .saturating_sub(backfill_slot_count); + + // Ensure end_slot doesn't go below `backfill_stop_epoch` + if end_slot <= backfill_stop_slot { + end_slot = Slot::new(backfill_stop_slot); + } + + let header_opt = get_header(&self.bn, BlockId::Slot(start_slot)).await?; + + if let Some(header) = header_opt { + let header_root = header.canonical_root(); + let count = self + .reverse_fill_canonical_slots(header, header_root, false, start_slot, end_slot) + .await?; + + info!("Backfill completed to slot: {end_slot}, records added: {count}"); + } else { + // The lowest slot of the database is inconsistent with the beacon node. + // Currently we have no way to recover from this. The entire database will need to + // be re-synced. + error!( + "Database is inconsistent with the beacon node. \ + Please ensure your beacon node is set to the right network, \ + otherwise you may need to resync" + ); + } + } else { + // There are no blocks in the database. Forward sync needs to happen first. + info!("Backfill was not performed since there are no blocks in the database"); + return Ok(()); + }; + + Ok(()) + } + + // Attempt to update the validator set. + // This downloads the latest validator set from the beacon node, and pulls the known validator + // set from the database. + // We then take any new or updated validators and insert them into the database (overwriting + // exiting validators). + // + // In the event there are no validators in the database, it will initialize the validator set. + pub async fn update_validator_set(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + + let current_validators = database::get_all_validators(&mut conn)?; + + if !current_validators.is_empty() { + let old_validators = HashSet::from_iter(current_validators); + + // Pull the new validator set from the beacon node. + let new_validators = get_validators(&self.bn).await?; + + // The difference should only contain validators that contain either a new `exit_epoch` (implying an + // exit) or a new `index` (implying a validator activation). + let val_diff = new_validators.difference(&old_validators); + + for diff in val_diff { + database::insert_validator(&mut conn, diff.clone())?; + } + } else { + info!("No validators present in database. Initializing the validator set"); + self.initialize_validator_set().await?; + } + + Ok(()) + } + + // Initialize the validator set by downloading it from the beacon node, inserting blockprint + // data (if required) and writing it to the database. + pub async fn initialize_validator_set(&mut self) -> Result<(), Error> { + let mut conn = database::get_connection(&self.pool)?; + + // Pull all validators from the beacon node. 
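+ // `get_validators` returns a set, so collect it into a `Vec` for the batch insert helper.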
+ let validators = Vec::from_iter(get_validators(&self.bn).await?);
+
+ database::insert_batch_validators(&mut conn, validators)?;
+
+ Ok(())
+ }
+}
diff --git a/watch/src/updater/mod.rs b/watch/src/updater/mod.rs
new file mode 100644
index 000000000..1fbb0107a
--- /dev/null
+++ b/watch/src/updater/mod.rs
@@ -0,0 +1,234 @@
+use crate::config::Config as FullConfig;
+use crate::database::{WatchPK, WatchValidator};
+use eth2::{
+ types::{BlockId, StateId},
+ BeaconNodeHttpClient, SensitiveUrl, Timeouts,
+};
+use log::{debug, error, info};
+use std::collections::{HashMap, HashSet};
+use std::marker::PhantomData;
+use std::time::{Duration, Instant};
+use types::{BeaconBlockHeader, EthSpec, GnosisEthSpec, MainnetEthSpec, SignedBeaconBlock};
+
+pub use config::Config;
+pub use error::Error;
+pub use handler::UpdateHandler;
+
+mod config;
+pub mod error;
+pub mod handler;
+
+const FAR_FUTURE_EPOCH: u64 = u64::MAX;
+const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5);
+
+const MAINNET: &str = "mainnet";
+const GNOSIS: &str = "gnosis";
+
+pub struct WatchSpec<T: EthSpec> {
+ network: String,
+ spec: PhantomData<T>,
+}
+
+impl<T: EthSpec> WatchSpec<T> {
+ fn slots_per_epoch(&self) -> u64 {
+ T::slots_per_epoch()
+ }
+}
+
+impl WatchSpec<MainnetEthSpec> {
+ pub fn mainnet(network: String) -> Self {
+ Self {
+ network,
+ spec: PhantomData,
+ }
+ }
+}
+
+impl WatchSpec<GnosisEthSpec> {
+ fn gnosis(network: String) -> Self {
+ Self {
+ network,
+ spec: PhantomData,
+ }
+ }
+}
+
+pub async fn run_updater(config: FullConfig) -> Result<(), Error> {
+ let beacon_node_url =
+ SensitiveUrl::parse(&config.updater.beacon_node_url).map_err(Error::SensitiveUrl)?;
+ let bn = BeaconNodeHttpClient::new(beacon_node_url, Timeouts::set_all(DEFAULT_TIMEOUT));
+
+ let config_map = bn.get_config_spec::<HashMap<String, String>>().await?.data;
+
+ let config_name = config_map
+ .get("CONFIG_NAME")
+ .ok_or_else(|| {
+ Error::BeaconNodeNotCompatible("No field CONFIG_NAME on beacon node spec".to_string())
+ })?
+ .clone();
+
+ match config_map
+ .get("PRESET_BASE")
+ .ok_or_else(|| {
+ Error::BeaconNodeNotCompatible("No field PRESET_BASE on beacon node spec".to_string())
+ })?
+ .to_lowercase()
+ .as_str()
+ {
+ MAINNET => {
+ let spec = WatchSpec::mainnet(config_name);
+ run_once(bn, spec, config).await
+ }
+ GNOSIS => {
+ let spec = WatchSpec::gnosis(config_name);
+ run_once(bn, spec, config).await
+ }
+ _ => unimplemented!("unsupported PRESET_BASE"),
+ }
+}
+
+pub async fn run_once<T: EthSpec>(
+ bn: BeaconNodeHttpClient,
+ spec: WatchSpec<T>,
+ config: FullConfig,
+) -> Result<(), Error> {
+ let mut watch = UpdateHandler::new(bn, spec, config.clone()).await?;
+
+ let sync_data = watch.get_bn_syncing_status().await?;
+ if sync_data.is_syncing {
+ error!(
+ "Connected beacon node is still syncing: head_slot => {:?}, distance => {}",
+ sync_data.head_slot, sync_data.sync_distance
+ );
+ return Err(Error::BeaconNodeSyncing);
+ }
+
+ info!("Performing head update");
+ let head_timer = Instant::now();
+ watch.perform_head_update().await?;
+ let head_timer_elapsed = head_timer.elapsed();
+ debug!("Head update complete, time taken: {head_timer_elapsed:?}");
+
+ info!("Performing block backfill");
+ let block_backfill_timer = Instant::now();
+ watch.backfill_canonical_slots().await?;
+ let block_backfill_timer_elapsed = block_backfill_timer.elapsed();
+ debug!("Block backfill complete, time taken: {block_backfill_timer_elapsed:?}");
+
+ info!("Updating validator set");
+ let validator_timer = Instant::now();
+ watch.update_validator_set().await?;
+ let validator_timer_elapsed = validator_timer.elapsed();
+ debug!("Validator update complete, time taken: {validator_timer_elapsed:?}");
+
+ // Update blocks after updating the validator set since the `proposer_index` must exist in the
+ // `validators` table.
+ info!("Updating unknown blocks");
+ let unknown_block_timer = Instant::now();
+ watch.update_unknown_blocks().await?;
+ let unknown_block_timer_elapsed = unknown_block_timer.elapsed();
+ debug!("Unknown block update complete, time taken: {unknown_block_timer_elapsed:?}");
+
+ // Run additional modules
+ if config.updater.attestations {
+ info!("Updating suboptimal attestations");
+ let attestation_timer = Instant::now();
+ watch.fill_suboptimal_attestations().await?;
+ watch.backfill_suboptimal_attestations().await?;
+ let attestation_timer_elapsed = attestation_timer.elapsed();
+ debug!("Attestation update complete, time taken: {attestation_timer_elapsed:?}");
+ }
+
+ if config.updater.block_rewards {
+ info!("Updating block rewards");
+ let rewards_timer = Instant::now();
+ watch.fill_block_rewards().await?;
+ watch.backfill_block_rewards().await?;
+ let rewards_timer_elapsed = rewards_timer.elapsed();
+ debug!("Block Rewards update complete, time taken: {rewards_timer_elapsed:?}");
+ }
+
+ if config.updater.block_packing {
+ info!("Updating block packing statistics");
+ let packing_timer = Instant::now();
+ watch.fill_block_packing().await?;
+ watch.backfill_block_packing().await?;
+ let packing_timer_elapsed = packing_timer.elapsed();
+ debug!("Block packing update complete, time taken: {packing_timer_elapsed:?}");
+ }
+
+ if config.blockprint.enabled {
+ info!("Updating blockprint");
+ let blockprint_timer = Instant::now();
+ watch.fill_blockprint().await?;
+ watch.backfill_blockprint().await?;
+ let blockprint_timer_elapsed = blockprint_timer.elapsed();
+ debug!("Blockprint update complete, time taken: {blockprint_timer_elapsed:?}");
+ }
+
+ Ok(())
+}
+
+/// Queries the beacon node for a given `BlockId` and returns the `BeaconBlockHeader` if it exists.
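+/// Returns `Ok(None)` if no header is found, or if the returned block root equals the header's
+/// `parent_root` (the zero-root edge case handled below).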
+pub async fn get_header(
+ bn: &BeaconNodeHttpClient,
+ block_id: BlockId,
+) -> Result<Option<BeaconBlockHeader>, Error> {
+ let resp = bn
+ .get_beacon_headers_block_id(block_id)
+ .await?
+ .map(|resp| (resp.data.root, resp.data.header.message));
+ // When querying with root == 0x000..., slot 0 will be returned with parent_root == 0x0000...
+ // This check escapes the loop.
+ if let Some((root, header)) = resp {
+ if root == header.parent_root {
+ return Ok(None);
+ } else {
+ return Ok(Some(header));
+ }
+ }
+ Ok(None)
+}
+
+pub async fn get_beacon_block<T: EthSpec>(
+ bn: &BeaconNodeHttpClient,
+ block_id: BlockId,
+) -> Result<Option<SignedBeaconBlock<T>>, Error> {
+ let block = bn.get_beacon_blocks(block_id).await?.map(|resp| resp.data);
+
+ Ok(block)
+}
+
+/// Queries the beacon node for the current validator set.
+pub async fn get_validators(bn: &BeaconNodeHttpClient) -> Result<HashSet<WatchValidator>, Error> {
+ let mut validator_map = HashSet::new();
+
+ let validators = bn
+ .get_beacon_states_validators(StateId::Head, None, None)
+ .await?
+ .ok_or(Error::NoValidatorsFound)?
+ .data;
+
+ for val in validators {
+ // Only store `activation_epoch` if it is not the `FAR_FUTURE_EPOCH`.
+ let activation_epoch = if val.validator.activation_epoch.as_u64() == FAR_FUTURE_EPOCH {
+ None
+ } else {
+ Some(val.validator.activation_epoch.as_u64() as i32)
+ };
+ // Only store `exit_epoch` if it is not the `FAR_FUTURE_EPOCH`.
+ let exit_epoch = if val.validator.exit_epoch.as_u64() == FAR_FUTURE_EPOCH {
+ None
+ } else {
+ Some(val.validator.exit_epoch.as_u64() as i32)
+ };
+ validator_map.insert(WatchValidator {
+ index: val.index as i32,
+ public_key: WatchPK::from_pubkey(val.validator.pubkey),
+ status: val.status.to_string(),
+ activation_epoch,
+ exit_epoch,
+ });
+ }
+ Ok(validator_map)
+}
diff --git a/watch/tests/tests.rs b/watch/tests/tests.rs
new file mode 100644
index 000000000..acdda8c30
--- /dev/null
+++ b/watch/tests/tests.rs
@@ -0,0 +1,1254 @@
+#![recursion_limit = "256"]
+#![cfg(unix)]
+
+use beacon_chain::test_utils::{
+ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
+};
+use eth2::{types::BlockId, BeaconNodeHttpClient, SensitiveUrl, Timeouts};
+use http_api::test_utils::{create_api_server, ApiServer};
+use network::NetworkReceivers;
+
+use rand::distributions::Alphanumeric;
+use rand::{thread_rng, Rng};
+use tokio::sync::oneshot;
+use types::{Hash256, MainnetEthSpec, Slot};
+use url::Url;
+use watch::{
+ client::WatchHttpClient,
+ config::Config,
+ database::{self, Config as DatabaseConfig, PgPool, WatchSlot},
+ server::{start_server, Config as ServerConfig},
+ updater::{handler::*, run_updater, Config as UpdaterConfig, WatchSpec},
+};
+
+use log::error;
+use std::net::SocketAddr;
+use std::time::Duration;
+use tokio::{runtime, task::JoinHandle};
+use tokio_postgres::{config::Config as PostgresConfig, Client, NoTls};
+use unused_port::unused_tcp4_port;
+
+use testcontainers::{clients::Cli, images::postgres::Postgres, RunnableImage};
+
+type E = MainnetEthSpec;
+
+const VALIDATOR_COUNT: usize = 32;
+const SLOTS_PER_EPOCH: u64 = 32;
+const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5);
+
+fn build_test_config(config: &DatabaseConfig) -> PostgresConfig {
+ let mut postgres_config = PostgresConfig::new();
+ postgres_config
+ .user(&config.user)
+ .password(&config.password)
+ .dbname(&config.default_dbname)
+ .host(&config.host)
+ .port(config.port)
+ .connect_timeout(Duration::from_millis(config.connect_timeout_millis));
+ postgres_config
+}
+
+async fn connect(config: &DatabaseConfig) -> (Client, JoinHandle<()>) {
+ let db_config = build_test_config(config);
+ let (client, conn) = db_config
+ .connect(NoTls)
+ .await
+ .expect("Could not connect to db");
+ let connection = runtime::Handle::current().spawn(async move {
+ if let Err(e) = conn.await {
+ error!("Connection error {:?}", e);
+ }
+ });
+
+ (client, connection)
+}
+
+pub async fn create_test_database(config: &DatabaseConfig) {
+ let (db, _) = connect(config).await;
+
+ db.execute(&format!("CREATE DATABASE {};", config.dbname), &[])
+ .await
+ .expect("Database creation failed");
+}
+
+struct TesterBuilder {
+ pub harness: BeaconChainHarness<EphemeralHarnessType<E>>,
+ pub config: Config,
+ _bn_network_rx: NetworkReceivers,
+ _bn_api_shutdown_tx: oneshot::Sender<()>,
+}
+
+impl TesterBuilder {
+ pub async fn new() -> TesterBuilder {
+ let harness = BeaconChainHarness::builder(E::default())
+ .default_spec()
+ .deterministic_keypairs(VALIDATOR_COUNT)
+ .fresh_ephemeral_store()
+ .build();
+
+ /*
+ * Spawn a Beacon Node HTTP API.
+ */
+ let ApiServer {
+ server,
+ listening_socket: bn_api_listening_socket,
+ shutdown_tx: _bn_api_shutdown_tx,
+ network_rx: _bn_network_rx,
+ ..
+ } = create_api_server(harness.chain.clone(), harness.logger().clone()).await;
+ tokio::spawn(server);
+
+ /*
+ * Create a watch configuration
+ */
+ let database_port = unused_tcp4_port().expect("Unable to find unused port.");
+ let server_port = unused_tcp4_port().expect("Unable to find unused port.");
+ let config = Config {
+ database: DatabaseConfig {
+ dbname: random_dbname(),
+ port: database_port,
+ ..Default::default()
+ },
+ server: ServerConfig {
+ listen_port: server_port,
+ ..Default::default()
+ },
+ updater: UpdaterConfig {
+ beacon_node_url: format!(
+ "http://{}:{}",
+ bn_api_listening_socket.ip(),
+ bn_api_listening_socket.port()
+ ),
+ ..Default::default()
+ },
+ ..Default::default()
+ };
+
+ Self {
+ harness,
+ config,
+ _bn_network_rx,
+ _bn_api_shutdown_tx,
+ }
+ }
+ pub async fn build(self, pool: PgPool) -> Tester {
+ /*
+ * Spawn a Watch HTTP API.
+ */
+ let (_watch_shutdown_tx, watch_shutdown_rx) = oneshot::channel();
+ let watch_server = start_server(&self.config, SLOTS_PER_EPOCH, pool, async {
+ let _ = watch_shutdown_rx.await;
+ })
+ .unwrap();
+ tokio::spawn(watch_server);
+
+ let addr = SocketAddr::new(
+ self.config.server.listen_addr,
+ self.config.server.listen_port,
+ );
+
+ /*
+ * Create a HTTP client to talk to the watch HTTP API.
+ */
+ let client = WatchHttpClient {
+ client: reqwest::Client::new(),
+ server: Url::parse(&format!("http://{}:{}", addr.ip(), addr.port())).unwrap(),
+ };
+
+ /*
+ * Create a HTTP client to talk to the Beacon Node API.
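+ * This is the same `beacon_node_url` that the updater is configured to use.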
+ */
+ let beacon_node_url = SensitiveUrl::parse(&self.config.updater.beacon_node_url).unwrap();
+ let bn = BeaconNodeHttpClient::new(beacon_node_url, Timeouts::set_all(DEFAULT_TIMEOUT));
+ let spec = WatchSpec::mainnet("mainnet".to_string());
+
+ /*
+ * Build update service
+ */
+ let updater = UpdateHandler::new(bn, spec, self.config.clone())
+ .await
+ .unwrap();
+
+ Tester {
+ harness: self.harness,
+ client,
+ config: self.config,
+ updater,
+ _bn_network_rx: self._bn_network_rx,
+ _bn_api_shutdown_tx: self._bn_api_shutdown_tx,
+ _watch_shutdown_tx,
+ }
+ }
+ async fn initialize_database(&self) -> PgPool {
+ create_test_database(&self.config.database).await;
+ database::utils::run_migrations(&self.config.database);
+ database::build_connection_pool(&self.config.database)
+ .expect("Could not build connection pool")
+ }
+}
+
+struct Tester {
+ pub harness: BeaconChainHarness<EphemeralHarnessType<E>>,
+ pub client: WatchHttpClient,
+ pub config: Config,
+ pub updater: UpdateHandler<E>,
+ _bn_network_rx: NetworkReceivers,
+ _bn_api_shutdown_tx: oneshot::Sender<()>,
+ _watch_shutdown_tx: oneshot::Sender<()>,
+}
+
+impl Tester {
+ /// Extend the chain on the beacon chain harness. Do not update the beacon watch database.
+ pub async fn extend_chain(&mut self, num_blocks: u64) -> &mut Self {
+ self.harness.advance_slot();
+ self.harness
+ .extend_chain(
+ num_blocks as usize,
+ BlockStrategy::OnCanonicalHead,
+ AttestationStrategy::AllValidators,
+ )
+ .await;
+ self
+ }
+
+ // Advance the slot clock without a block. This results in a skipped slot.
+ pub fn skip_slot(&mut self) -> &mut Self {
+ self.harness.advance_slot();
+ self
+ }
+
+ // Perform a single slot re-org.
+ pub async fn reorg_chain(&mut self) -> &mut Self {
+ let previous_slot = self.harness.get_current_slot();
+ self.harness.advance_slot();
+ let first_slot = self.harness.get_current_slot();
+ self.harness
+ .extend_chain(
+ 1,
+ BlockStrategy::ForkCanonicalChainAt {
+ previous_slot,
+ first_slot,
+ },
+ AttestationStrategy::AllValidators,
+ )
+ .await;
+ self
+ }
+
+ /// Run the watch updater service.
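+ /// Each run performs a head update, canonical slot backfill, validator set update and
+ /// unknown block update, plus any optional modules enabled in the config (see `run_once`).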
+ pub async fn run_update_service(&mut self, num_runs: usize) -> &mut Self { + for _ in 0..num_runs { + run_updater(self.config.clone()).await.unwrap(); + } + self + } + + pub async fn perform_head_update(&mut self) -> &mut Self { + self.updater.perform_head_update().await.unwrap(); + self + } + + pub async fn perform_backfill(&mut self) -> &mut Self { + self.updater.backfill_canonical_slots().await.unwrap(); + self + } + + pub async fn update_unknown_blocks(&mut self) -> &mut Self { + self.updater.update_unknown_blocks().await.unwrap(); + self + } + + pub async fn update_validator_set(&mut self) -> &mut Self { + self.updater.update_validator_set().await.unwrap(); + self + } + + pub async fn fill_suboptimal_attestations(&mut self) -> &mut Self { + self.updater.fill_suboptimal_attestations().await.unwrap(); + + self + } + + pub async fn backfill_suboptimal_attestations(&mut self) -> &mut Self { + self.updater + .backfill_suboptimal_attestations() + .await + .unwrap(); + + self + } + + pub async fn fill_block_rewards(&mut self) -> &mut Self { + self.updater.fill_block_rewards().await.unwrap(); + + self + } + + pub async fn backfill_block_rewards(&mut self) -> &mut Self { + self.updater.backfill_block_rewards().await.unwrap(); + + self + } + + pub async fn fill_block_packing(&mut self) -> &mut Self { + self.updater.fill_block_packing().await.unwrap(); + + self + } + + pub async fn backfill_block_packing(&mut self) -> &mut Self { + self.updater.backfill_block_packing().await.unwrap(); + + self + } + + pub async fn assert_canonical_slots_empty(&mut self) -> &mut Self { + let lowest_slot = self + .client + .get_lowest_canonical_slot() + .await + .unwrap() + .map(|slot| slot.slot.as_slot()); + + assert_eq!(lowest_slot, None); + + self + } + + pub async fn assert_lowest_canonical_slot(&mut self, expected: u64) -> &mut Self { + let slot = self + .client + .get_lowest_canonical_slot() + .await + .unwrap() + .unwrap() + .slot + .as_slot(); + + assert_eq!(slot, Slot::new(expected)); + + self + } + + pub async fn assert_highest_canonical_slot(&mut self, expected: u64) -> &mut Self { + let slot = self + .client + .get_highest_canonical_slot() + .await + .unwrap() + .unwrap() + .slot + .as_slot(); + + assert_eq!(slot, Slot::new(expected)); + + self + } + + pub async fn assert_canonical_slots_not_empty(&mut self) -> &mut Self { + self.client + .get_lowest_canonical_slot() + .await + .unwrap() + .unwrap(); + + self + } + + pub async fn assert_slot_is_skipped(&mut self, slot: u64) -> &mut Self { + assert!(self + .client + .get_beacon_blocks(BlockId::Slot(Slot::new(slot))) + .await + .unwrap() + .is_none()); + self + } + + pub async fn assert_all_validators_exist(&mut self) -> &mut Self { + assert_eq!( + self.client + .get_all_validators() + .await + .unwrap() + .unwrap() + .len(), + VALIDATOR_COUNT + ); + self + } + + pub async fn assert_lowest_block_has_proposer_info(&mut self) -> &mut Self { + let mut block = self + .client + .get_lowest_beacon_block() + .await + .unwrap() + .unwrap(); + + if block.slot.as_slot() == 0 { + block = self + .client + .get_next_beacon_block(block.root.as_hash()) + .await + .unwrap() + .unwrap() + } + + self.client + .get_proposer_info(BlockId::Root(block.root.as_hash())) + .await + .unwrap() + .unwrap(); + + self + } + + pub async fn assert_highest_block_has_proposer_info(&mut self) -> &mut Self { + let block = self + .client + .get_highest_beacon_block() + .await + .unwrap() + .unwrap(); + + self.client + .get_proposer_info(BlockId::Root(block.root.as_hash())) + .await + 
.unwrap() + .unwrap(); + + self + } + + pub async fn assert_lowest_block_has_block_rewards(&mut self) -> &mut Self { + let mut block = self + .client + .get_lowest_beacon_block() + .await + .unwrap() + .unwrap(); + + if block.slot.as_slot() == 0 { + block = self + .client + .get_next_beacon_block(block.root.as_hash()) + .await + .unwrap() + .unwrap() + } + + self.client + .get_block_reward(BlockId::Root(block.root.as_hash())) + .await + .unwrap() + .unwrap(); + + self + } + + pub async fn assert_highest_block_has_block_rewards(&mut self) -> &mut Self { + let block = self + .client + .get_highest_beacon_block() + .await + .unwrap() + .unwrap(); + + self.client + .get_block_reward(BlockId::Root(block.root.as_hash())) + .await + .unwrap() + .unwrap(); + + self + } + + pub async fn assert_lowest_block_has_block_packing(&mut self) -> &mut Self { + let mut block = self + .client + .get_lowest_beacon_block() + .await + .unwrap() + .unwrap(); + + while block.slot.as_slot() <= SLOTS_PER_EPOCH { + block = self + .client + .get_next_beacon_block(block.root.as_hash()) + .await + .unwrap() + .unwrap() + } + + self.client + .get_block_packing(BlockId::Root(block.root.as_hash())) + .await + .unwrap() + .unwrap(); + + self + } + + pub async fn assert_highest_block_has_block_packing(&mut self) -> &mut Self { + let block = self + .client + .get_highest_beacon_block() + .await + .unwrap() + .unwrap(); + + self.client + .get_block_packing(BlockId::Root(block.root.as_hash())) + .await + .unwrap() + .unwrap(); + + self + } + + /// Check that the canonical chain in watch matches that of the harness. Also check that all + /// canonical blocks can be retrieved. + pub async fn assert_canonical_chain_consistent(&mut self, last_slot: u64) -> &mut Self { + let head_root = self.harness.chain.head_beacon_block_root(); + let mut chain: Vec<(Hash256, Slot)> = self + .harness + .chain + .rev_iter_block_roots_from(head_root) + .unwrap() + .map(Result::unwrap) + .collect(); + + // `chain` contains skip slots, but the `watch` API will not return blocks that do not + // exist. + // We need to filter them out. + chain.reverse(); + chain.dedup_by(|(hash1, _), (hash2, _)| hash1 == hash2); + + // Remove any slots below `last_slot` since it is known that the database has not + // backfilled past it. + chain.retain(|(_, slot)| slot.as_u64() >= last_slot); + + for (root, slot) in &chain { + let block = self + .client + .get_beacon_blocks(BlockId::Root(*root)) + .await + .unwrap() + .unwrap(); + assert_eq!(block.slot.as_slot(), *slot); + } + + self + } + + /// Check that every block in the `beacon_blocks` table has corresponding entries in the + /// `proposer_info`, `block_rewards` and `block_packing` tables. 
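+ /// Skipped slots are excluded, since they have no block and therefore no metadata.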
+ pub async fn assert_all_blocks_have_metadata(&mut self) -> &mut Self { + let pool = database::build_connection_pool(&self.config.database).unwrap(); + + let mut conn = database::get_connection(&pool).unwrap(); + let highest_block_slot = database::get_highest_beacon_block(&mut conn) + .unwrap() + .unwrap() + .slot + .as_slot(); + let lowest_block_slot = database::get_lowest_beacon_block(&mut conn) + .unwrap() + .unwrap() + .slot + .as_slot(); + for slot in lowest_block_slot.as_u64()..=highest_block_slot.as_u64() { + let canonical_slot = database::get_canonical_slot(&mut conn, WatchSlot::new(slot)) + .unwrap() + .unwrap(); + if !canonical_slot.skipped { + database::get_block_rewards_by_slot(&mut conn, WatchSlot::new(slot)) + .unwrap() + .unwrap(); + database::get_proposer_info_by_slot(&mut conn, WatchSlot::new(slot)) + .unwrap() + .unwrap(); + database::get_block_packing_by_slot(&mut conn, WatchSlot::new(slot)) + .unwrap() + .unwrap(); + } + } + + self + } +} + +pub fn random_dbname() -> String { + let mut s: String = thread_rng() + .sample_iter(&Alphanumeric) + .take(8) + .map(char::from) + .collect(); + // Postgres gets weird about capitals in database names. + s.make_ascii_lowercase(); + format!("test_{}", s) +} + +#[cfg(unix)] +#[tokio::test] +async fn short_chain() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + tester + .extend_chain(16) + .await + .assert_canonical_slots_empty() + .await + .run_update_service(1) + .await + .assert_all_validators_exist() + .await + .assert_canonical_slots_not_empty() + .await + .assert_canonical_chain_consistent(0) + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn short_chain_sync_starts_on_skip_slot() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + tester + .skip_slot() + .skip_slot() + .extend_chain(6) + .await + .skip_slot() + .extend_chain(6) + .await + .skip_slot() + .assert_canonical_slots_empty() + .await + .run_update_service(1) + .await + .assert_all_validators_exist() + .await + .assert_canonical_slots_not_empty() + .await + .assert_canonical_chain_consistent(0) + .await + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn short_chain_with_skip_slot() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + tester + .extend_chain(5) + .await + .assert_canonical_slots_empty() + .await + .run_update_service(1) + .await + .assert_all_validators_exist() + .await + .assert_canonical_slots_not_empty() + .await + .assert_highest_canonical_slot(5) + .await + .assert_lowest_canonical_slot(0) + .await + .assert_canonical_chain_consistent(0) + .await + .skip_slot() + .extend_chain(1) + .await + .run_update_service(1) + .await + 
.assert_all_validators_exist() + .await + .assert_highest_canonical_slot(7) + .await + .assert_slot_is_skipped(6) + .await + .assert_canonical_chain_consistent(0) + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn short_chain_with_reorg() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + tester + .extend_chain(5) + .await + .assert_canonical_slots_empty() + .await + .run_update_service(1) + .await + .assert_all_validators_exist() + .await + .assert_canonical_slots_not_empty() + .await + .assert_highest_canonical_slot(5) + .await + .assert_lowest_canonical_slot(0) + .await + .assert_canonical_chain_consistent(0) + .await + .skip_slot() + .reorg_chain() + .await + .extend_chain(1) + .await + .run_update_service(1) + .await + .assert_all_validators_exist() + .await + .assert_highest_canonical_slot(8) + .await + .assert_slot_is_skipped(6) + .await + .assert_canonical_chain_consistent(0) + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn chain_grows() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + // Apply four blocks to the chain. + tester + .extend_chain(4) + .await + .perform_head_update() + .await + // Head update should insert the head block. + .assert_highest_canonical_slot(4) + .await + // And also backfill to the epoch boundary. + .assert_lowest_canonical_slot(0) + .await + // Fill back to genesis. + .perform_backfill() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(4) + .await + // Apply one block to the chain. + .extend_chain(1) + .await + .perform_head_update() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(5) + .await + // Apply two blocks to the chain. + .extend_chain(2) + .await + // Update the head. + .perform_head_update() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(7) + .await + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // Check the chain is consistent + .assert_canonical_chain_consistent(0) + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn chain_grows_with_metadata() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + tester + // Apply four blocks to the chain. + .extend_chain(4) + .await + .perform_head_update() + .await + // Head update should insert the head block. + .assert_highest_canonical_slot(4) + .await + // And also backfill to the epoch boundary. + .assert_lowest_canonical_slot(0) + .await + // Fill back to genesis. + .perform_backfill() + .await + // Insert all validators + .update_validator_set() + .await + // Insert all blocks. 
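+ // That is, resolve canonical slots that were inserted with `beacon_block: None`.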
+ .update_unknown_blocks() + .await + // All validators should be present. + .assert_all_validators_exist() + .await + // Check the chain is consistent + .assert_canonical_chain_consistent(0) + .await + // Get other chain data. + // Backfill before forward fill to ensure order is arbitrary. + .backfill_block_rewards() + .await + .fill_block_rewards() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(4) + .await + // All rewards should be present. + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await + // All proposers should be present. + .assert_lowest_block_has_proposer_info() + .await + .assert_highest_block_has_proposer_info() + .await + // Apply one block to the chain. + .extend_chain(1) + .await + .perform_head_update() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(5) + .await + // Apply two blocks to the chain. + .extend_chain(2) + .await + // Update the head. + .perform_head_update() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(7) + .await + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // Check the chain is consistent + .assert_canonical_chain_consistent(0) + .await + // Get other chain data. + .fill_block_rewards() + .await + .backfill_block_rewards() + .await + // All rewards should be present. + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await + // All proposers should be present. + .assert_lowest_block_has_proposer_info() + .await + .assert_highest_block_has_proposer_info() + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn chain_grows_with_metadata_and_multiple_skip_slots() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + + // Apply four blocks to the chain. + tester + .extend_chain(4) + .await + .perform_head_update() + .await + // Head update should insert the head block. + .assert_highest_canonical_slot(4) + // And also backfill to the epoch boundary. + .await + .assert_lowest_canonical_slot(0) + .await + // Fill back to genesis. + .perform_backfill() + .await + // Insert all validators + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // All validators should be present. + .assert_all_validators_exist() + .await + // Check the chain is consistent. + .assert_canonical_chain_consistent(0) + .await + // Get other chain data. + .fill_block_rewards() + .await + .backfill_block_rewards() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(4) + .await + // All rewards should be present. + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await + // All proposers should be present. + .assert_lowest_block_has_proposer_info() + .await + .assert_highest_block_has_proposer_info() + .await + // Add multiple skip slots. + .skip_slot() + .skip_slot() + .skip_slot() + // Apply one block to the chain. 
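+ // Together with the three skipped slots above, this block lands at slot 8.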
+ .extend_chain(1) + .await + .perform_head_update() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(8) + .await + // Apply two blocks to the chain. + .extend_chain(2) + .await + // Update the head. + .perform_head_update() + .await + // All validators should be present. + .assert_all_validators_exist() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(10) + .await + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // Check the chain is consistent + .assert_canonical_chain_consistent(0) + .await + // Get other chain data. + // Backfill before forward fill to ensure order is arbitrary. + .backfill_block_rewards() + .await + .fill_block_rewards() + .await + // All rewards should be present. + .assert_lowest_block_has_block_rewards() + .await + .assert_highest_block_has_block_rewards() + .await + // All proposers should be present. + .assert_lowest_block_has_proposer_info() + .await + .assert_highest_block_has_proposer_info() + .await; +} + +#[cfg(unix)] +#[tokio::test] +async fn chain_grows_to_second_epoch() { + let builder = TesterBuilder::new().await; + + let docker = Cli::default(); + let image = RunnableImage::from(Postgres::default()) + .with_mapped_port((builder.config.database.port, 5432)); + let _node = docker.run(image); + + let pool = builder.initialize_database().await; + let mut tester = builder.build(pool).await; + // Apply 40 blocks to the chain. + tester + .extend_chain(40) + .await + .perform_head_update() + .await + // Head update should insert the head block. + .assert_highest_canonical_slot(40) + .await + // And also backfill to the epoch boundary. + .assert_lowest_canonical_slot(32) + .await + // Fill back to genesis. + .perform_backfill() + .await + // Insert all validators + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // All validators should be present. + .assert_all_validators_exist() + .await + // Check the chain is consistent. + .assert_canonical_chain_consistent(0) + .await + // Get block packings. + .fill_block_packing() + .await + .backfill_block_packing() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(40) + .await + // All packings should be present. + .assert_lowest_block_has_block_packing() + .await + .assert_highest_block_has_block_packing() + .await + // Skip a slot + .skip_slot() + // Apply two blocks to the chain. + .extend_chain(2) + .await + // Update the head. + .perform_head_update() + .await + // All blocks should be present. + .assert_lowest_canonical_slot(0) + .await + .assert_highest_canonical_slot(43) + .await + .update_validator_set() + .await + // Insert all blocks. + .update_unknown_blocks() + .await + // Update new block_packing + // Backfill before forward fill to ensure order is arbitrary + .backfill_block_packing() + .await + .fill_block_packing() + .await + // All packings should be present. 
+ .assert_lowest_block_has_block_packing()
+ .await
+ .assert_highest_block_has_block_packing()
+ .await
+ // Check the chain is consistent
+ .assert_canonical_chain_consistent(0)
+ .await;
+}
+
+#[cfg(unix)]
+#[tokio::test]
+async fn large_chain() {
+ let builder = TesterBuilder::new().await;
+
+ let docker = Cli::default();
+ let image = RunnableImage::from(Postgres::default())
+ .with_mapped_port((builder.config.database.port, 5432));
+ let _node = docker.run(image);
+
+ let pool = builder.initialize_database().await;
+ let mut tester = builder.build(pool).await;
+ // Apply 400 blocks to the chain.
+ tester
+ .extend_chain(400)
+ .await
+ .perform_head_update()
+ .await
+ // Head update should insert the head block.
+ .assert_highest_canonical_slot(400)
+ .await
+ // And also backfill to the epoch boundary.
+ .assert_lowest_canonical_slot(384)
+ .await
+ // Backfill 2 epochs as per default config.
+ .perform_backfill()
+ .await
+ // Insert all validators
+ .update_validator_set()
+ .await
+ // Insert all blocks.
+ .update_unknown_blocks()
+ .await
+ // All validators should be present.
+ .assert_all_validators_exist()
+ .await
+ // Check the chain is consistent.
+ .assert_canonical_chain_consistent(384)
+ .await
+ // Get block rewards and proposer info.
+ .fill_block_rewards()
+ .await
+ .backfill_block_rewards()
+ .await
+ // Get block packings.
+ .fill_block_packing()
+ .await
+ .backfill_block_packing()
+ .await
+ // Should have backfilled 2 more epochs.
+ .assert_lowest_canonical_slot(320)
+ .await
+ .assert_highest_canonical_slot(400)
+ .await
+ // All rewards should be present.
+ .assert_lowest_block_has_block_rewards()
+ .await
+ .assert_highest_block_has_block_rewards()
+ .await
+ // All proposers should be present.
+ .assert_lowest_block_has_proposer_info()
+ .await
+ .assert_highest_block_has_proposer_info()
+ .await
+ // All packings should be present.
+ .assert_lowest_block_has_block_packing()
+ .await
+ .assert_highest_block_has_block_packing()
+ .await
+ // Skip a slot
+ .skip_slot()
+ // Apply two blocks to the chain.
+ .extend_chain(2)
+ .await
+ // Update the head.
+ .perform_head_update()
+ .await
+ .perform_backfill()
+ .await
+ // Should have backfilled 2 more epochs
+ .assert_lowest_canonical_slot(256)
+ .await
+ .assert_highest_canonical_slot(403)
+ .await
+ // Update validators
+ .update_validator_set()
+ .await
+ // Insert all blocks.
+ .update_unknown_blocks()
+ .await
+ // All validators should be present.
+ .assert_all_validators_exist()
+ .await
+ // Get suboptimal attestations.
+ .fill_suboptimal_attestations()
+ .await
+ .backfill_suboptimal_attestations()
+ .await
+ // Get block rewards and proposer info.
+ .fill_block_rewards()
+ .await
+ .backfill_block_rewards()
+ .await
+ // Get block packing.
+ // Backfill before forward fill to ensure order is arbitrary.
+ .backfill_block_packing()
+ .await
+ .fill_block_packing()
+ .await
+ // All rewards should be present.
+ .assert_lowest_block_has_block_rewards()
+ .await
+ .assert_highest_block_has_block_rewards()
+ .await
+ // All proposers should be present.
+ .assert_lowest_block_has_proposer_info()
+ .await
+ .assert_highest_block_has_proposer_info()
+ .await
+ // All packings should be present.
+ .assert_lowest_block_has_block_packing()
+ .await
+ .assert_highest_block_has_block_packing()
+ .await
+ // Check the chain is consistent.
+ .assert_canonical_chain_consistent(256)
+ .await
+ // Check every block has rewards, proposer info and packing statistics.
+ .assert_all_blocks_have_metadata()
+ .await;
+}