Implement standard eth2.0 API (#1569)

- Resolves #1550
- Resolves #824
- Resolves #825
- Resolves #1131
- Resolves #1411
- Resolves #1256
- Resolves #1177

- Include the `ShufflingId` struct initially defined in #1492. That PR is now closed and its changes are included here, with significant bug fixes.
- Implement the standard API (https://github.com/ethereum/eth2.0-APIs) in a new `http_api` crate using `warp`. This replaces the `rest_api` crate.
- Add a new `common/eth2` crate that wraps `reqwest` to provide the HTTP client used by the validator client and for testing. This replaces the `common/remote_beacon_node` crate. A client sketch follows this list.
- Create an `http_metrics` crate: a dedicated server for Prometheus metrics (they are no longer served on the same port as the REST API). There are now flags for `--metrics`, `--metrics-address`, etc.
- Allow the `subnet_id` to be an optional parameter for `VerifiedUnaggregatedAttestation::verify`, so the validator client no longer needs to supply it unnecessarily.
- Move `fn map_attestation_committee` in `mod beacon_chain::attestation_verification` to a new `fn with_committee_cache` on the `BeaconChain` so the same cache can be used for obtaining validator duties.
- Add some other helpers to `BeaconChain` to assist with common API duties (e.g., `block_root_at_slot`, `head_beacon_block_root`).
- Change the `NaiveAggregationPool` so it indexes attestations by `hash_tree_root(attestation.data)`, as required by the API. A toy sketch of the keyed pool follows this list.
- Add functions to `BeaconChainHarness` to allow it to create slashings and exits.
- Allow `eth1::Eth1NetworkId` to be converted to/from a `String`.
- Add functions to the `OperationPool` for retrieving all objects in the pool.
- Add a function to `BeaconState` to check whether a committee cache is initialized.
- Fix bug where `seconds_per_eth1_block` was not transferring over from `YamlConfig` to `ChainSpec`.
- Add the `deposit_contract_address` to `YamlConfig` and `ChainSpec`. We needed to be able to return it in an API response.
- Change some paired uses of serde `serialize_with` and `deserialize_with` to a single `with` attribute (code quality). A generic sketch follows this list.
- Impl `Display` and `FromStr` for several BLS fields (a hex round-trip sketch follows this list).
- Check for clock discrepancy when the VC polls the BN for sync state, with a +/- 1 slot tolerance (sketch below). This is not intended to be comprehensive; it was simply easy to add.
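
The sketch below shows the kind of request the new `common/eth2` client wraps, hitting the standard `/eth/v1/node/version` endpoint with `reqwest` directly. It is only an illustration: the port (5052) and the response structs are assumptions made here, and the real crate exposes typed client methods rather than raw URLs.

```rust
use serde::Deserialize;

#[derive(Deserialize)]
struct VersionResponse {
    data: VersionData,
}

#[derive(Deserialize)]
struct VersionData {
    version: String,
}

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    // The standard API serves JSON under /eth/v1/...; the `eth2` crate wraps
    // requests like this one behind typed client methods.
    let url = "http://localhost:5052/eth/v1/node/version";
    let resp: VersionResponse = reqwest::get(url).await?.json().await?;
    println!("beacon node version: {}", resp.data.version);
    Ok(())
}
```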
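
As a toy model of the `NaiveAggregationPool` change, the map below keys aggregates by the hash tree root of `attestation.data`, so an aggregate can be fetched by `(slot, attestation_data_root)` without rebuilding the full `AttestationData`. The types are simplified stand-ins, not the real Lighthouse types.

```rust
use std::collections::HashMap;

/// Simplified stand-in for `hash_tree_root(attestation.data)`.
type AttestationDataRoot = [u8; 32];

/// Simplified stand-in for `Attestation<E>`; only the parts needed here.
#[derive(Clone)]
struct Attestation {
    data_root: AttestationDataRoot,
    aggregation_bits: Vec<bool>,
}

#[derive(Default)]
struct NaiveAggregationMap {
    map: HashMap<AttestationDataRoot, Attestation>,
}

impl NaiveAggregationMap {
    /// Insert an unaggregated attestation, merging its bits into any existing
    /// aggregate with the same data root (bitfields assumed equal length).
    fn insert(&mut self, a: &Attestation) {
        self.map
            .entry(a.data_root)
            .and_modify(|existing| {
                for (i, bit) in a.aggregation_bits.iter().enumerate() {
                    existing.aggregation_bits[i] |= *bit;
                }
            })
            .or_insert_with(|| a.clone());
    }

    /// Look an aggregate up directly by its data root, as the API now does.
    fn get_by_root(&self, root: &AttestationDataRoot) -> Option<&Attestation> {
        self.map.get(root)
    }
}

fn main() {
    let root = [0u8; 32];
    let a = Attestation { data_root: root, aggregation_bits: vec![true, false] };
    let b = Attestation { data_root: root, aggregation_bits: vec![false, true] };

    let mut pool = NaiveAggregationMap::default();
    pool.insert(&a);
    pool.insert(&b);

    // Both bits are now set on the single aggregate stored under `root`.
    assert_eq!(pool.get_by_root(&root).unwrap().aggregation_bits, vec![true, true]);
}
```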
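
The serde `with` consolidation is a plain serde pattern; a generic sketch is below. The `hex_bytes` module and the `hex` crate usage here are illustrative, not necessarily the exact helpers used in this PR.

```rust
use serde::{Deserialize, Serialize};

// One module providing both halves, so a field needs a single
// `#[serde(with = "...")]` attribute instead of separate
// `serialize_with`/`deserialize_with` attributes.
mod hex_bytes {
    use serde::{Deserialize, Deserializer, Serializer};

    pub fn serialize<S: Serializer>(bytes: &[u8; 32], s: S) -> Result<S::Ok, S::Error> {
        s.serialize_str(&format!("0x{}", hex::encode(bytes)))
    }

    pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<[u8; 32], D::Error> {
        let s = String::deserialize(d)?;
        let raw = hex::decode(s.strip_prefix("0x").unwrap_or(s.as_str()))
            .map_err(serde::de::Error::custom)?;
        let mut out = [0u8; 32];
        if raw.len() != out.len() {
            return Err(serde::de::Error::custom("expected 32 bytes"));
        }
        out.copy_from_slice(&raw);
        Ok(out)
    }
}

#[derive(Serialize, Deserialize)]
struct Example {
    // Before: #[serde(serialize_with = "...", deserialize_with = "...")]
    // After: a single attribute pointing at a module with both functions.
    #[serde(with = "hex_bytes")]
    block_root: [u8; 32],
}

fn main() {
    let json = serde_json::to_string(&Example { block_root: [0xab; 32] }).unwrap();
    println!("{}", json); // {"block_root":"0xabab...ab"}
    let back: Example = serde_json::from_str(&json).unwrap();
    assert_eq!(back.block_root, [0xab; 32]);
}
```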
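
For the `Display`/`FromStr` item, here is a hedged sketch over a hypothetical fixed-size public-key wrapper using the `hex` crate. The real types live in the `bls` crate; this only shows the 0x-prefixed hex round-trip shape.

```rust
use std::fmt;
use std::str::FromStr;

/// Stand-in for a BLS public key: 48 fixed bytes rendered as 0x-prefixed hex.
pub struct PublicKeyBytes([u8; 48]);

impl fmt::Display for PublicKeyBytes {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "0x{}", hex::encode(self.0))
    }
}

impl FromStr for PublicKeyBytes {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let stripped = s.strip_prefix("0x").unwrap_or(s);
        let bytes = hex::decode(stripped).map_err(|e| e.to_string())?;
        let mut out = [0u8; 48];
        if bytes.len() != out.len() {
            return Err(format!("expected 48 bytes, got {}", bytes.len()));
        }
        out.copy_from_slice(&bytes);
        Ok(Self(out))
    }
}

fn main() {
    let hex_str = format!("0x{}", "ab".repeat(48));
    let key: PublicKeyBytes = hex_str.parse().unwrap();
    assert_eq!(key.to_string(), hex_str);
}
```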
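
For the clock-discrepancy item, a minimal sketch of the +/- 1 slot tolerance check. The function name and shape are illustrative; the real check lives in the validator client's sync-state polling.

```rust
/// Returns `true` if the beacon node's reported head slot is within one slot
/// of the slot implied by the validator client's local clock.
fn clocks_agree(local_slot: u64, beacon_node_slot: u64) -> bool {
    // Tolerate +/- 1 slot of skew; anything larger suggests a misconfigured
    // clock on one side.
    local_slot.saturating_sub(beacon_node_slot) <= 1
        && beacon_node_slot.saturating_sub(local_slot) <= 1
}

fn main() {
    assert!(clocks_agree(100, 101));
    assert!(clocks_agree(100, 99));
    assert!(!clocks_agree(100, 103));
}
```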

- See #1434 for a per-endpoint overview.
- Seeking clarity here: https://github.com/ethereum/eth2.0-APIs/issues/75

- [x] Add docs for the Prometheus metrics port to close #1256
- [x] Follow up on #1177
- [x] ~~Follow up with #1424~~ Will fix in future PR.
- [x] Follow up with #1411
- [x] ~~Follow up with #1260~~ Will fix in future PR.
- [x] Add quotes to all integers. A serde sketch follows this list.
- [x] Remove `rest_types`
- [x] Address missing beacon block error. (#1629)
- [x] ~~Add tests for lighthouse/peers endpoints~~ Wontfix
- [x] ~~Follow up with validator status proposal~~ Tracked in #1434
- [x] Unify graffiti structs
- [x] ~~Start server when waiting for genesis?~~ Will fix in future PR.
- [x] Address the TODO in the `http_api` tests
- [x] Move the `lighthouse` endpoints off `/eth/v1`
- [x] Update the docs to link to the standard API
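
For the quoted-integer item, the intent is that integers appear as strings on the wire, per the standard API. A minimal `serde_json` sketch (the struct and field names are illustrative):

```rust
use serde::Serialize;

fn quoted<S: serde::Serializer>(v: &u64, s: S) -> Result<S::Ok, S::Error> {
    s.serialize_str(&v.to_string())
}

#[derive(Serialize)]
struct HeaderSummary {
    // Emitted as "12345" rather than 12345, matching the standard API's
    // string-encoded integers.
    #[serde(serialize_with = "quoted")]
    slot: u64,
}

fn main() {
    let json = serde_json::to_string(&HeaderSummary { slot: 12345 }).unwrap();
    assert_eq!(json, r#"{"slot":"12345"}"#);
}
```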

- ~~Blocked on #1586~~

Co-authored-by: Michael Sproul <michael@sigmaprime.io>
Paul Hauner 2020-09-29 03:46:54 +00:00
parent 8e20176337
commit cdec3cec18
156 changed files with 8862 additions and 8916 deletions

Cargo.lock (generated, 452 lines changed)

@ -227,12 +227,6 @@ dependencies = [
"syn", "syn",
] ]
[[package]]
name = "assert_matches"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7deb0a829ca7bcfaf5da70b073a8d128619259a7be8216a355e23f00763059e5"
[[package]] [[package]]
name = "async-tls" name = "async-tls"
version = "0.8.0" version = "0.8.0"
@ -349,7 +343,6 @@ dependencies = [
"rand 0.7.3", "rand 0.7.3",
"rand_core 0.5.1", "rand_core 0.5.1",
"rayon", "rayon",
"regex",
"safe_arith", "safe_arith",
"serde", "serde",
"serde_derive", "serde_derive",
@ -519,7 +512,7 @@ dependencies = [
"rand 0.7.3", "rand 0.7.3",
"serde", "serde",
"serde_derive", "serde_derive",
"serde_hex", "serde_utils",
"tree_hash", "tree_hash",
"zeroize", "zeroize",
] ]
@ -575,6 +568,16 @@ dependencies = [
"serde", "serde",
] ]
[[package]]
name = "buf_redux"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f"
dependencies = [
"memchr",
"safemem",
]
[[package]] [[package]]
name = "bumpalo" name = "bumpalo"
version = "3.4.0" version = "3.4.0"
@ -770,13 +773,14 @@ dependencies = [
"eth2_ssz", "eth2_ssz",
"futures 0.3.5", "futures 0.3.5",
"genesis", "genesis",
"http_api",
"http_metrics",
"lazy_static", "lazy_static",
"lighthouse_metrics", "lighthouse_metrics",
"network", "network",
"parking_lot 0.11.0", "parking_lot 0.11.0",
"prometheus", "prometheus",
"reqwest", "reqwest",
"rest_api",
"serde", "serde",
"serde_derive", "serde_derive",
"serde_yaml", "serde_yaml",
@ -1460,6 +1464,22 @@ dependencies = [
"web3", "web3",
] ]
[[package]]
name = "eth2"
version = "0.1.0"
dependencies = [
"eth2_libp2p",
"hex 0.4.2",
"procinfo",
"proto_array",
"psutil",
"reqwest",
"serde",
"serde_json",
"serde_utils",
"types",
]
[[package]] [[package]]
name = "eth2_config" name = "eth2_config"
version = "0.2.0" version = "0.2.0"
@ -1600,7 +1620,7 @@ dependencies = [
"eth2_ssz", "eth2_ssz",
"serde", "serde",
"serde_derive", "serde_derive",
"serde_hex", "serde_utils",
"tree_hash", "tree_hash",
"tree_hash_derive", "tree_hash_derive",
"typenum", "typenum",
@ -2148,6 +2168,31 @@ dependencies = [
"tokio 0.2.22", "tokio 0.2.22",
] ]
[[package]]
name = "headers"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed18eb2459bf1a09ad2d6b1547840c3e5e62882fa09b9a6a20b1de8e3228848f"
dependencies = [
"base64 0.12.3",
"bitflags 1.2.1",
"bytes 0.5.6",
"headers-core",
"http 0.2.1",
"mime 0.3.16",
"sha-1 0.8.2",
"time 0.1.44",
]
[[package]]
name = "headers-core"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429"
dependencies = [
"http 0.2.1",
]
[[package]] [[package]]
name = "heck" name = "heck"
version = "0.3.1" version = "0.3.1"
@ -2269,6 +2314,58 @@ dependencies = [
"http 0.2.1", "http 0.2.1",
] ]
[[package]]
name = "http_api"
version = "0.1.0"
dependencies = [
"beacon_chain",
"discv5",
"environment",
"eth1",
"eth2",
"eth2_libp2p",
"fork_choice",
"hex 0.4.2",
"lazy_static",
"lighthouse_metrics",
"lighthouse_version",
"network",
"parking_lot 0.11.0",
"serde",
"slog",
"slot_clock",
"state_processing",
"store",
"tokio 0.2.22",
"tree_hash",
"types",
"warp",
"warp_utils",
]
[[package]]
name = "http_metrics"
version = "0.1.0"
dependencies = [
"beacon_chain",
"environment",
"eth2",
"eth2_libp2p",
"lazy_static",
"lighthouse_metrics",
"lighthouse_version",
"prometheus",
"reqwest",
"serde",
"slog",
"slot_clock",
"store",
"tokio 0.2.22",
"types",
"warp",
"warp_utils",
]
[[package]] [[package]]
name = "httparse" name = "httparse"
version = "1.3.4" version = "1.3.4"
@ -2448,6 +2545,15 @@ dependencies = [
"hashbrown 0.9.1", "hashbrown 0.9.1",
] ]
[[package]]
name = "input_buffer"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19a8a95243d5a0398cae618ec29477c6e3cb631152be5c19481f80bc71559754"
dependencies = [
"bytes 0.5.6",
]
[[package]] [[package]]
name = "instant" name = "instant"
version = "0.1.7" version = "0.1.7"
@ -3259,6 +3365,24 @@ version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333" checksum = "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333"
[[package]]
name = "multipart"
version = "0.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8209c33c951f07387a8497841122fc6f712165e3f9bda3e6be4645b58188f676"
dependencies = [
"buf_redux",
"httparse",
"log 0.4.11",
"mime 0.3.16",
"mime_guess",
"quick-error",
"rand 0.6.5",
"safemem",
"tempfile",
"twoway",
]
[[package]] [[package]]
name = "multistream-select" name = "multistream-select"
version = "0.8.2" version = "0.8.2"
@ -3339,7 +3463,6 @@ dependencies = [
"num_cpus", "num_cpus",
"parking_lot 0.11.0", "parking_lot 0.11.0",
"rand 0.7.3", "rand 0.7.3",
"rest_types",
"rlp", "rlp",
"slog", "slog",
"sloggers", "sloggers",
@ -3372,10 +3495,10 @@ version = "0.2.0"
dependencies = [ dependencies = [
"beacon_node", "beacon_node",
"environment", "environment",
"eth2",
"eth2_config", "eth2_config",
"futures 0.3.5", "futures 0.3.5",
"genesis", "genesis",
"remote_beacon_node",
"reqwest", "reqwest",
"serde", "serde",
"tempdir", "tempdir",
@ -4054,6 +4177,25 @@ dependencies = [
"winapi 0.3.9", "winapi 0.3.9",
] ]
[[package]]
name = "rand"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca"
dependencies = [
"autocfg 0.1.7",
"libc",
"rand_chacha 0.1.1",
"rand_core 0.4.2",
"rand_hc 0.1.0",
"rand_isaac",
"rand_jitter",
"rand_os",
"rand_pcg",
"rand_xorshift 0.1.1",
"winapi 0.3.9",
]
[[package]] [[package]]
name = "rand" name = "rand"
version = "0.7.3" version = "0.7.3"
@ -4062,9 +4204,19 @@ checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
dependencies = [ dependencies = [
"getrandom", "getrandom",
"libc", "libc",
"rand_chacha", "rand_chacha 0.2.2",
"rand_core 0.5.1", "rand_core 0.5.1",
"rand_hc", "rand_hc 0.2.0",
]
[[package]]
name = "rand_chacha"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef"
dependencies = [
"autocfg 0.1.7",
"rand_core 0.3.1",
] ]
[[package]] [[package]]
@ -4101,6 +4253,15 @@ dependencies = [
"getrandom", "getrandom",
] ]
[[package]]
name = "rand_hc"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4"
dependencies = [
"rand_core 0.3.1",
]
[[package]] [[package]]
name = "rand_hc" name = "rand_hc"
version = "0.2.0" version = "0.2.0"
@ -4110,6 +4271,59 @@ dependencies = [
"rand_core 0.5.1", "rand_core 0.5.1",
] ]
[[package]]
name = "rand_isaac"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08"
dependencies = [
"rand_core 0.3.1",
]
[[package]]
name = "rand_jitter"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b"
dependencies = [
"libc",
"rand_core 0.4.2",
"winapi 0.3.9",
]
[[package]]
name = "rand_os"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071"
dependencies = [
"cloudabi 0.0.3",
"fuchsia-cprng",
"libc",
"rand_core 0.4.2",
"rdrand",
"winapi 0.3.9",
]
[[package]]
name = "rand_pcg"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44"
dependencies = [
"autocfg 0.1.7",
"rand_core 0.4.2",
]
[[package]]
name = "rand_xorshift"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c"
dependencies = [
"rand_core 0.3.1",
]
[[package]] [[package]]
name = "rand_xorshift" name = "rand_xorshift"
version = "0.2.0" version = "0.2.0"
@ -4197,24 +4411,6 @@ version = "0.6.18"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8" checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8"
[[package]]
name = "remote_beacon_node"
version = "0.2.0"
dependencies = [
"eth2_config",
"eth2_ssz",
"futures 0.3.5",
"hex 0.4.2",
"operation_pool",
"proto_array",
"reqwest",
"rest_types",
"serde",
"serde_json",
"types",
"url 2.1.1",
]
[[package]] [[package]]
name = "remove_dir_all" name = "remove_dir_all"
version = "0.5.3" version = "0.5.3"
@ -4260,73 +4456,6 @@ dependencies = [
"winreg", "winreg",
] ]
[[package]]
name = "rest_api"
version = "0.2.0"
dependencies = [
"assert_matches",
"beacon_chain",
"bls",
"bus",
"environment",
"eth2_config",
"eth2_libp2p",
"eth2_ssz",
"eth2_ssz_derive",
"futures 0.3.5",
"hex 0.4.2",
"http 0.2.1",
"hyper 0.13.8",
"itertools 0.9.0",
"lazy_static",
"lighthouse_metrics",
"lighthouse_version",
"network",
"node_test_rig",
"operation_pool",
"parking_lot 0.11.0",
"remote_beacon_node",
"rest_types",
"serde",
"serde_json",
"serde_yaml",
"slog",
"slog-async",
"slog-term",
"slot_clock",
"state_processing",
"store",
"tokio 0.2.22",
"tree_hash",
"types",
"uhttp_sse",
"url 2.1.1",
]
[[package]]
name = "rest_types"
version = "0.2.0"
dependencies = [
"beacon_chain",
"bls",
"environment",
"eth2_hashing",
"eth2_ssz",
"eth2_ssz_derive",
"hyper 0.13.8",
"procinfo",
"psutil",
"rayon",
"serde",
"serde_json",
"serde_yaml",
"state_processing",
"store",
"tokio 0.2.22",
"tree_hash",
"types",
]
[[package]] [[package]]
name = "ring" name = "ring"
version = "0.16.12" version = "0.16.12"
@ -4615,14 +4744,6 @@ dependencies = [
"syn", "syn",
] ]
[[package]]
name = "serde_hex"
version = "0.2.0"
dependencies = [
"hex 0.4.2",
"serde",
]
[[package]] [[package]]
name = "serde_json" name = "serde_json"
version = "1.0.57" version = "1.0.57"
@ -4661,6 +4782,7 @@ dependencies = [
name = "serde_utils" name = "serde_utils"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"hex 0.4.2",
"serde", "serde",
"serde_derive", "serde_derive",
"serde_json", "serde_json",
@ -5668,6 +5790,19 @@ dependencies = [
"tokio 0.2.22", "tokio 0.2.22",
] ]
[[package]]
name = "tokio-tungstenite"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d9e878ad426ca286e4dcae09cbd4e1973a7f8987d97570e2469703dd7f5720c"
dependencies = [
"futures-util",
"log 0.4.11",
"pin-project",
"tokio 0.2.22",
"tungstenite",
]
[[package]] [[package]]
name = "tokio-udp" name = "tokio-udp"
version = "0.1.6" version = "0.1.6"
@ -5769,6 +5904,16 @@ dependencies = [
"lazy_static", "lazy_static",
] ]
[[package]]
name = "tracing-futures"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab7bb6f14721aa00656086e9335d363c5c8747bae02ebe32ea2c7dece5689b4c"
dependencies = [
"pin-project",
"tracing",
]
[[package]] [[package]]
name = "trackable" name = "trackable"
version = "1.0.0" version = "1.0.0"
@ -5822,6 +5967,34 @@ version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642"
[[package]]
name = "tungstenite"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0308d80d86700c5878b9ef6321f020f29b1bb9d5ff3cab25e75e23f3a492a23"
dependencies = [
"base64 0.12.3",
"byteorder",
"bytes 0.5.6",
"http 0.2.1",
"httparse",
"input_buffer",
"log 0.4.11",
"rand 0.7.3",
"sha-1 0.9.1",
"url 2.1.1",
"utf-8",
]
[[package]]
name = "twoway"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1"
dependencies = [
"memchr",
]
[[package]] [[package]]
name = "typeable" name = "typeable"
version = "0.1.2" version = "0.1.2"
@ -5856,13 +6029,15 @@ dependencies = [
"log 0.4.11", "log 0.4.11",
"merkle_proof", "merkle_proof",
"rand 0.7.3", "rand 0.7.3",
"rand_xorshift", "rand_xorshift 0.2.0",
"rayon", "rayon",
"regex",
"rusqlite", "rusqlite",
"safe_arith", "safe_arith",
"serde", "serde",
"serde_derive", "serde_derive",
"serde_json", "serde_json",
"serde_utils",
"serde_yaml", "serde_yaml",
"slog", "slog",
"swap_or_not_shuffle", "swap_or_not_shuffle",
@ -5872,12 +6047,6 @@ dependencies = [
"tree_hash_derive", "tree_hash_derive",
] ]
[[package]]
name = "uhttp_sse"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c6ff93345ba2206230b1bb1aa3ece1a63dd9443b7531024575d16a0680a59444"
[[package]] [[package]]
name = "uint" name = "uint"
version = "0.8.5" version = "0.8.5"
@ -6018,6 +6187,18 @@ dependencies = [
"percent-encoding 2.1.0", "percent-encoding 2.1.0",
] ]
[[package]]
name = "urlencoding"
version = "1.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c9232eb53352b4442e40d7900465dfc534e8cb2dc8f18656fcb2ac16112b5593"
[[package]]
name = "utf-8"
version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05e42f7c18b8f902290b009cde6d651262f956c98bc51bca4cd1d511c9cd85c7"
[[package]] [[package]]
name = "uuid" name = "uuid"
version = "0.8.1" version = "0.8.1"
@ -6040,6 +6221,7 @@ dependencies = [
"directory", "directory",
"dirs", "dirs",
"environment", "environment",
"eth2",
"eth2_config", "eth2_config",
"eth2_interop_keypairs", "eth2_interop_keypairs",
"eth2_keystore", "eth2_keystore",
@ -6052,8 +6234,6 @@ dependencies = [
"logging", "logging",
"parking_lot 0.11.0", "parking_lot 0.11.0",
"rayon", "rayon",
"remote_beacon_node",
"rest_types",
"serde", "serde",
"serde_derive", "serde_derive",
"serde_json", "serde_json",
@ -6148,6 +6328,46 @@ dependencies = [
"try-lock", "try-lock",
] ]
[[package]]
name = "warp"
version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f41be6df54c97904af01aa23e613d4521eed7ab23537cede692d4058f6449407"
dependencies = [
"bytes 0.5.6",
"futures 0.3.5",
"headers",
"http 0.2.1",
"hyper 0.13.8",
"log 0.4.11",
"mime 0.3.16",
"mime_guess",
"multipart",
"pin-project",
"scoped-tls 1.0.0",
"serde",
"serde_json",
"serde_urlencoded",
"tokio 0.2.22",
"tokio-tungstenite",
"tower-service",
"tracing",
"tracing-futures",
"urlencoding",
]
[[package]]
name = "warp_utils"
version = "0.1.0"
dependencies = [
"beacon_chain",
"eth2",
"safe_arith",
"state_processing",
"types",
"warp",
]
[[package]] [[package]]
name = "wasi" name = "wasi"
version = "0.9.0+wasi-snapshot-preview1" version = "0.9.0+wasi-snapshot-preview1"


@ -7,8 +7,9 @@ members = [
"beacon_node/client", "beacon_node/client",
"beacon_node/eth1", "beacon_node/eth1",
"beacon_node/eth2_libp2p", "beacon_node/eth2_libp2p",
"beacon_node/http_api",
"beacon_node/http_metrics",
"beacon_node/network", "beacon_node/network",
"beacon_node/rest_api",
"beacon_node/store", "beacon_node/store",
"beacon_node/timer", "beacon_node/timer",
"beacon_node/websocket_server", "beacon_node/websocket_server",
@ -21,6 +22,7 @@ members = [
"common/compare_fields_derive", "common/compare_fields_derive",
"common/deposit_contract", "common/deposit_contract",
"common/directory", "common/directory",
"common/eth2",
"common/eth2_config", "common/eth2_config",
"common/eth2_interop_keypairs", "common/eth2_interop_keypairs",
"common/eth2_testnet_config", "common/eth2_testnet_config",
@ -30,10 +32,9 @@ members = [
"common/lighthouse_version", "common/lighthouse_version",
"common/logging", "common/logging",
"common/lru_cache", "common/lru_cache",
"common/remote_beacon_node",
"common/rest_types",
"common/slot_clock", "common/slot_clock",
"common/test_random_derive", "common/test_random_derive",
"common/warp_utils",
"common/validator_dir", "common/validator_dir",
"consensus/cached_tree_hash", "consensus/cached_tree_hash",
@ -44,7 +45,6 @@ members = [
"consensus/ssz", "consensus/ssz",
"consensus/ssz_derive", "consensus/ssz_derive",
"consensus/ssz_types", "consensus/ssz_types",
"consensus/serde_hex",
"consensus/serde_utils", "consensus/serde_utils",
"consensus/state_processing", "consensus/state_processing",
"consensus/swap_or_not_shuffle", "consensus/swap_or_not_shuffle",


@ -58,4 +58,3 @@ environment = { path = "../../lighthouse/environment" }
bus = "2.2.3" bus = "2.2.3"
derivative = "2.1.1" derivative = "2.1.1"
itertools = "0.9.0" itertools = "0.9.0"
regex = "1.3.9"


@ -28,8 +28,7 @@
use crate::{ use crate::{
beacon_chain::{ beacon_chain::{
ATTESTATION_CACHE_LOCK_TIMEOUT, HEAD_LOCK_TIMEOUT, MAXIMUM_GOSSIP_CLOCK_DISPARITY, HEAD_LOCK_TIMEOUT, MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT,
VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT,
}, },
metrics, metrics,
observed_attestations::ObserveOutcome, observed_attestations::ObserveOutcome,
@ -38,12 +37,10 @@ use crate::{
}; };
use bls::verify_signature_sets; use bls::verify_signature_sets;
use proto_array::Block as ProtoBlock; use proto_array::Block as ProtoBlock;
use slog::debug;
use slot_clock::SlotClock; use slot_clock::SlotClock;
use state_processing::{ use state_processing::{
common::get_indexed_attestation, common::get_indexed_attestation,
per_block_processing::errors::AttestationValidationError, per_block_processing::errors::AttestationValidationError,
per_slot_processing,
signature_sets::{ signature_sets::{
indexed_attestation_signature_set_from_pubkeys, indexed_attestation_signature_set_from_pubkeys,
signed_aggregate_selection_proof_signature_set, signed_aggregate_signature_set, signed_aggregate_selection_proof_signature_set, signed_aggregate_signature_set,
@ -53,7 +50,7 @@ use std::borrow::Cow;
use tree_hash::TreeHash; use tree_hash::TreeHash;
use types::{ use types::{
Attestation, BeaconCommittee, CommitteeIndex, Epoch, EthSpec, Hash256, IndexedAttestation, Attestation, BeaconCommittee, CommitteeIndex, Epoch, EthSpec, Hash256, IndexedAttestation,
RelativeEpoch, SelectionProof, SignedAggregateAndProof, Slot, SubnetId, SelectionProof, SignedAggregateAndProof, Slot, SubnetId,
}; };
/// Returned when an attestation was not successfully verified. It might not have been verified for /// Returned when an attestation was not successfully verified. It might not have been verified for
@ -267,6 +264,7 @@ pub struct VerifiedAggregatedAttestation<T: BeaconChainTypes> {
pub struct VerifiedUnaggregatedAttestation<T: BeaconChainTypes> { pub struct VerifiedUnaggregatedAttestation<T: BeaconChainTypes> {
attestation: Attestation<T::EthSpec>, attestation: Attestation<T::EthSpec>,
indexed_attestation: IndexedAttestation<T::EthSpec>, indexed_attestation: IndexedAttestation<T::EthSpec>,
subnet_id: SubnetId,
} }
/// Custom `Clone` implementation is to avoid the restrictive trait bounds applied by the usual derive /// Custom `Clone` implementation is to avoid the restrictive trait bounds applied by the usual derive
@ -276,6 +274,7 @@ impl<T: BeaconChainTypes> Clone for VerifiedUnaggregatedAttestation<T> {
Self { Self {
attestation: self.attestation.clone(), attestation: self.attestation.clone(),
indexed_attestation: self.indexed_attestation.clone(), indexed_attestation: self.indexed_attestation.clone(),
subnet_id: self.subnet_id,
} }
} }
} }
@ -428,6 +427,11 @@ impl<T: BeaconChainTypes> VerifiedAggregatedAttestation<T> {
pub fn attestation(&self) -> &Attestation<T::EthSpec> { pub fn attestation(&self) -> &Attestation<T::EthSpec> {
&self.signed_aggregate.message.aggregate &self.signed_aggregate.message.aggregate
} }
/// Returns the underlying `signed_aggregate`.
pub fn aggregate(&self) -> &SignedAggregateAndProof<T::EthSpec> {
&self.signed_aggregate
}
} }
impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> { impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
@ -438,7 +442,7 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
/// verify that it was received on the correct subnet. /// verify that it was received on the correct subnet.
pub fn verify( pub fn verify(
attestation: Attestation<T::EthSpec>, attestation: Attestation<T::EthSpec>,
subnet_id: SubnetId, subnet_id: Option<SubnetId>,
chain: &BeaconChain<T>, chain: &BeaconChain<T>,
) -> Result<Self, Error> { ) -> Result<Self, Error> {
let attestation_epoch = attestation.data.slot.epoch(T::EthSpec::slots_per_epoch()); let attestation_epoch = attestation.data.slot.epoch(T::EthSpec::slots_per_epoch());
@ -513,13 +517,15 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
) )
.map_err(BeaconChainError::from)?; .map_err(BeaconChainError::from)?;
// Ensure the attestation is from the correct subnet. // If a subnet was specified, ensure that subnet is correct.
if let Some(subnet_id) = subnet_id {
if subnet_id != expected_subnet_id { if subnet_id != expected_subnet_id {
return Err(Error::InvalidSubnetId { return Err(Error::InvalidSubnetId {
received: subnet_id, received: subnet_id,
expected: expected_subnet_id, expected: expected_subnet_id,
}); });
} }
};
let validator_index = *indexed_attestation let validator_index = *indexed_attestation
.attesting_indices .attesting_indices
@ -564,6 +570,7 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
Ok(Self { Ok(Self {
attestation, attestation,
indexed_attestation, indexed_attestation,
subnet_id: expected_subnet_id,
}) })
} }
@ -572,6 +579,11 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
chain.add_to_naive_aggregation_pool(self) chain.add_to_naive_aggregation_pool(self)
} }
/// Returns the correct subnet for the attestation.
pub fn subnet_id(&self) -> SubnetId {
self.subnet_id
}
/// Returns the wrapped `attestation`. /// Returns the wrapped `attestation`.
pub fn attestation(&self) -> &Attestation<T::EthSpec> { pub fn attestation(&self) -> &Attestation<T::EthSpec> {
&self.attestation &self.attestation
@ -587,6 +599,7 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
} }
/// Returns `Ok(())` if the `attestation.data.beacon_block_root` is known to this chain. /// Returns `Ok(())` if the `attestation.data.beacon_block_root` is known to this chain.
/// You can use this `shuffling_id` to read from the shuffling cache.
/// ///
/// The block root may not be known for two reasons: /// The block root may not be known for two reasons:
/// ///
@ -615,6 +628,7 @@ fn verify_head_block_is_known<T: BeaconChainTypes>(
}); });
} }
} }
Ok(block) Ok(block)
} else { } else {
Err(Error::UnknownHeadBlock { Err(Error::UnknownHeadBlock {
@ -770,7 +784,7 @@ type CommitteesPerSlot = u64;
/// Returns the `indexed_attestation` and committee count per slot for the `attestation` using the /// Returns the `indexed_attestation` and committee count per slot for the `attestation` using the
/// public keys cached in the `chain`. /// public keys cached in the `chain`.
pub fn obtain_indexed_attestation_and_committees_per_slot<T: BeaconChainTypes>( fn obtain_indexed_attestation_and_committees_per_slot<T: BeaconChainTypes>(
chain: &BeaconChain<T>, chain: &BeaconChain<T>,
attestation: &Attestation<T::EthSpec>, attestation: &Attestation<T::EthSpec>,
) -> Result<(IndexedAttestation<T::EthSpec>, CommitteesPerSlot), Error> { ) -> Result<(IndexedAttestation<T::EthSpec>, CommitteesPerSlot), Error> {
@ -790,8 +804,8 @@ pub fn obtain_indexed_attestation_and_committees_per_slot<T: BeaconChainTypes>(
/// ///
/// If the committee for `attestation` isn't found in the `shuffling_cache`, we will read a state /// If the committee for `attestation` isn't found in the `shuffling_cache`, we will read a state
/// from disk and then update the `shuffling_cache`. /// from disk and then update the `shuffling_cache`.
pub fn map_attestation_committee<'a, T, F, R>( fn map_attestation_committee<T, F, R>(
chain: &'a BeaconChain<T>, chain: &BeaconChain<T>,
attestation: &Attestation<T::EthSpec>, attestation: &Attestation<T::EthSpec>,
map_fn: F, map_fn: F,
) -> Result<R, Error> ) -> Result<R, Error>
@ -809,97 +823,15 @@ where
// processing an attestation that does not include our latest finalized block in its chain. // processing an attestation that does not include our latest finalized block in its chain.
// //
// We do not delay consideration for later, we simply drop the attestation. // We do not delay consideration for later, we simply drop the attestation.
let target_block = chain if !chain.fork_choice.read().contains_block(&target.root) {
.fork_choice return Err(Error::UnknownTargetRoot(target.root));
.read()
.get_block(&target.root)
.ok_or_else(|| Error::UnknownTargetRoot(target.root))?;
// Obtain the shuffling cache, timing how long we wait.
let cache_wait_timer =
metrics::start_timer(&metrics::ATTESTATION_PROCESSING_SHUFFLING_CACHE_WAIT_TIMES);
let mut shuffling_cache = chain
.shuffling_cache
.try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
.ok_or_else(|| BeaconChainError::AttestationCacheLockTimeout)?;
metrics::stop_timer(cache_wait_timer);
if let Some(committee_cache) = shuffling_cache.get(attestation_epoch, target.root) {
let committees_per_slot = committee_cache.committees_per_slot();
committee_cache
.get_beacon_committee(attestation.data.slot, attestation.data.index)
.map(|committee| map_fn((committee, committees_per_slot)))
.unwrap_or_else(|| {
Err(Error::NoCommitteeForSlotAndIndex {
slot: attestation.data.slot,
index: attestation.data.index,
})
})
} else {
// Drop the shuffling cache to avoid holding the lock for any longer than
// required.
drop(shuffling_cache);
debug!(
chain.log,
"Attestation processing cache miss";
"attn_epoch" => attestation_epoch.as_u64(),
"target_block_epoch" => target_block.slot.epoch(T::EthSpec::slots_per_epoch()).as_u64(),
);
let state_read_timer =
metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_READ_TIMES);
let mut state = chain
.store
.get_inconsistent_state_for_attestation_verification_only(
&target_block.state_root,
Some(target_block.slot),
)
.map_err(BeaconChainError::from)?
.ok_or_else(|| BeaconChainError::MissingBeaconState(target_block.state_root))?;
metrics::stop_timer(state_read_timer);
let state_skip_timer =
metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_SKIP_TIMES);
while state.current_epoch() + 1 < attestation_epoch {
// Here we tell `per_slot_processing` to skip hashing the state and just
// use the zero hash instead.
//
// The state roots are not useful for the shuffling, so there's no need to
// compute them.
per_slot_processing(&mut state, Some(Hash256::zero()), &chain.spec)
.map_err(BeaconChainError::from)?;
} }
metrics::stop_timer(state_skip_timer);
let committee_building_timer =
metrics::start_timer(&metrics::ATTESTATION_PROCESSING_COMMITTEE_BUILDING_TIMES);
let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), attestation_epoch)
.map_err(BeaconChainError::IncorrectStateForAttestation)?;
state
.build_committee_cache(relative_epoch, &chain.spec)
.map_err(BeaconChainError::from)?;
let committee_cache = state
.committee_cache(relative_epoch)
.map_err(BeaconChainError::from)?;
chain chain
.shuffling_cache .with_committee_cache(target.root, attestation_epoch, |committee_cache| {
.try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
.ok_or_else(|| BeaconChainError::AttestationCacheLockTimeout)?
.insert(attestation_epoch, target.root, committee_cache);
metrics::stop_timer(committee_building_timer);
let committees_per_slot = committee_cache.committees_per_slot(); let committees_per_slot = committee_cache.committees_per_slot();
committee_cache
Ok(committee_cache
.get_beacon_committee(attestation.data.slot, attestation.data.index) .get_beacon_committee(attestation.data.slot, attestation.data.index)
.map(|committee| map_fn((committee, committees_per_slot))) .map(|committee| map_fn((committee, committees_per_slot)))
.unwrap_or_else(|| { .unwrap_or_else(|| {
@ -907,6 +839,7 @@ where
slot: attestation.data.slot, slot: attestation.data.slot,
index: attestation.data.index, index: attestation.data.index,
}) })
}))
}) })
} .map_err(BeaconChainError::from)?
} }


@ -21,7 +21,7 @@ use crate::observed_block_producers::ObservedBlockProducers;
use crate::observed_operations::{ObservationOutcome, ObservedOperations}; use crate::observed_operations::{ObservationOutcome, ObservedOperations};
use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::persisted_beacon_chain::PersistedBeaconChain;
use crate::persisted_fork_choice::PersistedForkChoice; use crate::persisted_fork_choice::PersistedForkChoice;
use crate::shuffling_cache::ShufflingCache; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache};
use crate::snapshot_cache::SnapshotCache; use crate::snapshot_cache::SnapshotCache;
use crate::timeout_rw_lock::TimeoutRwLock; use crate::timeout_rw_lock::TimeoutRwLock;
use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::validator_pubkey_cache::ValidatorPubkeyCache;
@ -31,7 +31,6 @@ use fork_choice::ForkChoice;
use itertools::process_results; use itertools::process_results;
use operation_pool::{OperationPool, PersistedOperationPool}; use operation_pool::{OperationPool, PersistedOperationPool};
use parking_lot::RwLock; use parking_lot::RwLock;
use regex::bytes::Regex;
use slog::{crit, debug, error, info, trace, warn, Logger}; use slog::{crit, debug, error, info, trace, warn, Logger};
use slot_clock::SlotClock; use slot_clock::SlotClock;
use state_processing::{ use state_processing::{
@ -201,6 +200,8 @@ pub struct BeaconChain<T: BeaconChainTypes> {
pub(crate) canonical_head: TimeoutRwLock<BeaconSnapshot<T::EthSpec>>, pub(crate) canonical_head: TimeoutRwLock<BeaconSnapshot<T::EthSpec>>,
/// The root of the genesis block. /// The root of the genesis block.
pub genesis_block_root: Hash256, pub genesis_block_root: Hash256,
/// The root of the genesis state.
pub genesis_state_root: Hash256,
/// The root of the list of genesis validators, used during syncing. /// The root of the list of genesis validators, used during syncing.
pub genesis_validators_root: Hash256, pub genesis_validators_root: Hash256,
@ -459,6 +460,30 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
} }
} }
/// Returns the block at the given slot, if any. Only returns blocks in the canonical chain.
///
/// ## Errors
///
/// May return a database error.
pub fn state_root_at_slot(&self, slot: Slot) -> Result<Option<Hash256>, Error> {
process_results(self.rev_iter_state_roots()?, |mut iter| {
iter.find(|(_, this_slot)| *this_slot == slot)
.map(|(root, _)| root)
})
}
/// Returns the block root at the given slot, if any. Only returns roots in the canonical chain.
///
/// ## Errors
///
/// May return a database error.
pub fn block_root_at_slot(&self, slot: Slot) -> Result<Option<Hash256>, Error> {
process_results(self.rev_iter_block_roots()?, |mut iter| {
iter.find(|(_, this_slot)| *this_slot == slot)
.map(|(root, _)| root)
})
}
/// Returns the block at the given root, if any. /// Returns the block at the given root, if any.
/// ///
/// ## Errors /// ## Errors
@ -506,6 +531,30 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
f(&head_lock) f(&head_lock)
} }
/// Returns the beacon block root at the head of the canonical chain.
///
/// See `Self::head` for more information.
pub fn head_beacon_block_root(&self) -> Result<Hash256, Error> {
self.with_head(|s| Ok(s.beacon_block_root))
}
/// Returns the beacon block at the head of the canonical chain.
///
/// See `Self::head` for more information.
pub fn head_beacon_block(&self) -> Result<SignedBeaconBlock<T::EthSpec>, Error> {
self.with_head(|s| Ok(s.beacon_block.clone()))
}
/// Returns the beacon state at the head of the canonical chain.
///
/// See `Self::head` for more information.
pub fn head_beacon_state(&self) -> Result<BeaconState<T::EthSpec>, Error> {
self.with_head(|s| {
Ok(s.beacon_state
.clone_with(CloneConfig::committee_caches_only()))
})
}
/// Returns info representing the head block and state. /// Returns info representing the head block and state.
/// ///
/// A summarized version of `Self::head` that involves less cloning. /// A summarized version of `Self::head` that involves less cloning.
@ -719,46 +768,20 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.map_err(Into::into) .map_err(Into::into)
} }
/// Returns the attestation slot and committee index for a given validator index. /// Returns the attestation duties for a given validator index.
/// ///
/// Information is read from the current state, so only information from the present and prior /// Information is read from the current state, so only information from the present and prior
/// epoch is available. /// epoch is available.
pub fn validator_attestation_slot_and_index( pub fn validator_attestation_duty(
&self, &self,
validator_index: usize, validator_index: usize,
epoch: Epoch, epoch: Epoch,
) -> Result<Option<(Slot, u64)>, Error> { ) -> Result<Option<AttestationDuty>, Error> {
let as_epoch = |slot: Slot| slot.epoch(T::EthSpec::slots_per_epoch()); let head_block_root = self.head_beacon_block_root()?;
let head_state = &self.head()?.beacon_state;
let mut state = if epoch == as_epoch(head_state.slot) { self.with_committee_cache(head_block_root, epoch, |committee_cache| {
self.head()?.beacon_state Ok(committee_cache.get_attestation_duties(validator_index))
} else { })
// The block proposer shuffling is not affected by the state roots, so we don't need to
// calculate them.
self.state_at_slot(
epoch.start_slot(T::EthSpec::slots_per_epoch()),
StateSkipConfig::WithoutStateRoots,
)?
};
state.build_committee_cache(RelativeEpoch::Current, &self.spec)?;
if as_epoch(state.slot) != epoch {
return Err(Error::InvariantViolated(format!(
"Epochs in consistent in attestation duties lookup: state: {}, requested: {}",
as_epoch(state.slot),
epoch
)));
}
if let Some(attestation_duty) =
state.get_attestation_duties(validator_index, RelativeEpoch::Current)?
{
Ok(Some((attestation_duty.slot, attestation_duty.index)))
} else {
Ok(None)
}
} }
/// Returns an aggregated `Attestation`, if any, that has a matching `attestation.data`. /// Returns an aggregated `Attestation`, if any, that has a matching `attestation.data`.
@ -767,11 +790,22 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
pub fn get_aggregated_attestation( pub fn get_aggregated_attestation(
&self, &self,
data: &AttestationData, data: &AttestationData,
) -> Result<Option<Attestation<T::EthSpec>>, Error> { ) -> Option<Attestation<T::EthSpec>> {
self.naive_aggregation_pool.read().get(data)
}
/// Returns an aggregated `Attestation`, if any, that has a matching
/// `attestation.data.tree_hash_root()`.
///
/// The attestation will be obtained from `self.naive_aggregation_pool`.
pub fn get_aggregated_attestation_by_slot_and_root(
&self,
slot: Slot,
attestation_data_root: &Hash256,
) -> Option<Attestation<T::EthSpec>> {
self.naive_aggregation_pool self.naive_aggregation_pool
.read() .read()
.get(data) .get_by_slot_and_root(slot, attestation_data_root)
.map_err(Into::into)
} }
/// Produce an unaggregated `Attestation` that is valid for the given `slot` and `index`. /// Produce an unaggregated `Attestation` that is valid for the given `slot` and `index`.
@ -898,7 +932,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
pub fn verify_unaggregated_attestation_for_gossip( pub fn verify_unaggregated_attestation_for_gossip(
&self, &self,
attestation: Attestation<T::EthSpec>, attestation: Attestation<T::EthSpec>,
subnet_id: SubnetId, subnet_id: Option<SubnetId>,
) -> Result<VerifiedUnaggregatedAttestation<T>, AttestationError> { ) -> Result<VerifiedUnaggregatedAttestation<T>, AttestationError> {
metrics::inc_counter(&metrics::UNAGGREGATED_ATTESTATION_PROCESSING_REQUESTS); metrics::inc_counter(&metrics::UNAGGREGATED_ATTESTATION_PROCESSING_REQUESTS);
let _timer = let _timer =
@ -1320,11 +1354,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
block: SignedBeaconBlock<T::EthSpec>, block: SignedBeaconBlock<T::EthSpec>,
) -> Result<GossipVerifiedBlock<T>, BlockError<T::EthSpec>> { ) -> Result<GossipVerifiedBlock<T>, BlockError<T::EthSpec>> {
let slot = block.message.slot; let slot = block.message.slot;
#[allow(clippy::invalid_regex)] let graffiti_string = block.message.body.graffiti.as_utf8_lossy();
let re = Regex::new("\\p{C}").expect("regex is valid");
let graffiti_string =
String::from_utf8_lossy(&re.replace_all(&block.message.body.graffiti[..], &b""[..]))
.to_string();
match GossipVerifiedBlock::new(block, self) { match GossipVerifiedBlock::new(block, self) {
Ok(verified) => { Ok(verified) => {
@ -1449,8 +1479,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
) -> Result<Hash256, BlockError<T::EthSpec>> { ) -> Result<Hash256, BlockError<T::EthSpec>> {
let signed_block = fully_verified_block.block; let signed_block = fully_verified_block.block;
let block_root = fully_verified_block.block_root; let block_root = fully_verified_block.block_root;
let state = fully_verified_block.state; let mut state = fully_verified_block.state;
let parent_block = fully_verified_block.parent_block;
let current_slot = self.slot()?; let current_slot = self.slot()?;
let mut ops = fully_verified_block.intermediate_states; let mut ops = fully_verified_block.intermediate_states;
@ -1482,29 +1511,25 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.ok_or_else(|| Error::ValidatorPubkeyCacheLockTimeout)? .ok_or_else(|| Error::ValidatorPubkeyCacheLockTimeout)?
.import_new_pubkeys(&state)?; .import_new_pubkeys(&state)?;
// If the imported block is in the previous or current epochs (according to the // For the current and next epoch of this state, ensure we have the shuffling from this
// wall-clock), check to see if this is the first block of the epoch. If so, add the // block in our cache.
// committee to the shuffling cache. for relative_epoch in &[RelativeEpoch::Current, RelativeEpoch::Next] {
if state.current_epoch() + 1 >= self.epoch()? let shuffling_id = ShufflingId::new(block_root, &state, *relative_epoch)?;
&& parent_block.slot().epoch(T::EthSpec::slots_per_epoch()) != state.current_epoch()
{ let shuffling_is_cached = self
let mut shuffling_cache = self
.shuffling_cache .shuffling_cache
.try_read_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
.ok_or_else(|| Error::AttestationCacheLockTimeout)?
.contains(&shuffling_id);
if !shuffling_is_cached {
state.build_committee_cache(*relative_epoch, &self.spec)?;
let committee_cache = state.committee_cache(*relative_epoch)?;
self.shuffling_cache
.try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
.ok_or_else(|| Error::AttestationCacheLockTimeout)?; .ok_or_else(|| Error::AttestationCacheLockTimeout)?
.insert(shuffling_id, committee_cache);
let committee_cache = state.committee_cache(RelativeEpoch::Current)?; }
let epoch_start_slot = state
.current_epoch()
.start_slot(T::EthSpec::slots_per_epoch());
let target_root = if state.slot == epoch_start_slot {
block_root
} else {
*state.get_block_root(epoch_start_slot)?
};
shuffling_cache.insert(state.current_epoch(), target_root, committee_cache);
} }
let mut fork_choice = self.fork_choice.write(); let mut fork_choice = self.fork_choice.write();
@ -1992,6 +2017,129 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
Ok(()) Ok(())
} }
/// Runs the `map_fn` with the committee cache for `shuffling_epoch` from the chain with head
/// `head_block_root`.
///
/// It's not necessary that `head_block_root` matches our current view of the chain, it can be
/// any block that is:
///
/// - Known to us.
/// - The finalized block or a descendant of the finalized block.
///
/// It would be quite common for attestation verification operations to use a `head_block_root`
/// that differs from our view of the head.
///
/// ## Important
///
/// This function is **not** suitable for determining proposer duties.
///
/// ## Notes
///
/// This function exists in this odd "map" pattern because efficiently obtaining a committee
/// can be complex. It might involve reading straight from the `beacon_chain.shuffling_cache`
/// or it might involve reading it from a state from the DB. Due to the complexities of
/// `RwLock`s on the shuffling cache, a simple `Cow` isn't suitable here.
///
/// If the committee for `(head_block_root, shuffling_epoch)` isn't found in the
/// `shuffling_cache`, we will read a state from disk and then update the `shuffling_cache`.
pub(crate) fn with_committee_cache<F, R>(
&self,
head_block_root: Hash256,
shuffling_epoch: Epoch,
map_fn: F,
) -> Result<R, Error>
where
F: Fn(&CommitteeCache) -> Result<R, Error>,
{
let head_block = self
.fork_choice
.read()
.get_block(&head_block_root)
.ok_or_else(|| Error::MissingBeaconBlock(head_block_root))?;
let shuffling_id = BlockShufflingIds {
current: head_block.current_epoch_shuffling_id.clone(),
next: head_block.next_epoch_shuffling_id.clone(),
block_root: head_block.root,
}
.id_for_epoch(shuffling_epoch)
.ok_or_else(|| Error::InvalidShufflingId {
shuffling_epoch,
head_block_epoch: head_block.slot.epoch(T::EthSpec::slots_per_epoch()),
})?;
// Obtain the shuffling cache, timing how long we wait.
let cache_wait_timer =
metrics::start_timer(&metrics::ATTESTATION_PROCESSING_SHUFFLING_CACHE_WAIT_TIMES);
let mut shuffling_cache = self
.shuffling_cache
.try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
.ok_or_else(|| Error::AttestationCacheLockTimeout)?;
metrics::stop_timer(cache_wait_timer);
if let Some(committee_cache) = shuffling_cache.get(&shuffling_id) {
map_fn(committee_cache)
} else {
// Drop the shuffling cache to avoid holding the lock for any longer than
// required.
drop(shuffling_cache);
debug!(
self.log,
"Committee cache miss";
"shuffling_epoch" => shuffling_epoch.as_u64(),
"head_block_root" => head_block_root.to_string(),
);
let state_read_timer =
metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_READ_TIMES);
let mut state = self
.store
.get_inconsistent_state_for_attestation_verification_only(
&head_block.state_root,
Some(head_block.slot),
)?
.ok_or_else(|| Error::MissingBeaconState(head_block.state_root))?;
metrics::stop_timer(state_read_timer);
let state_skip_timer =
metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_SKIP_TIMES);
while state.current_epoch() + 1 < shuffling_epoch {
// Here we tell `per_slot_processing` to skip hashing the state and just
// use the zero hash instead.
//
// The state roots are not useful for the shuffling, so there's no need to
// compute them.
per_slot_processing(&mut state, Some(Hash256::zero()), &self.spec)
.map_err(Error::from)?;
}
metrics::stop_timer(state_skip_timer);
let committee_building_timer =
metrics::start_timer(&metrics::ATTESTATION_PROCESSING_COMMITTEE_BUILDING_TIMES);
let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), shuffling_epoch)
.map_err(Error::IncorrectStateForAttestation)?;
state.build_committee_cache(relative_epoch, &self.spec)?;
let committee_cache = state.committee_cache(relative_epoch)?;
self.shuffling_cache
.try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
.ok_or_else(|| Error::AttestationCacheLockTimeout)?
.insert(shuffling_id, committee_cache);
metrics::stop_timer(committee_building_timer);
map_fn(&committee_cache)
}
}
/// Returns `true` if the given block root has not been processed. /// Returns `true` if the given block root has not been processed.
pub fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result<bool, Error> { pub fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result<bool, Error> {
Ok(!self Ok(!self


@ -374,7 +374,12 @@ where
let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis); let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis);
let fork_choice = ForkChoice::from_genesis(fc_store, &genesis.beacon_block.message) let fork_choice = ForkChoice::from_genesis(
fc_store,
genesis.beacon_block_root,
&genesis.beacon_block.message,
&genesis.beacon_state,
)
.map_err(|e| format!("Unable to build initialize ForkChoice: {:?}", e))?; .map_err(|e| format!("Unable to build initialize ForkChoice: {:?}", e))?;
self.fork_choice = Some(fork_choice); self.fork_choice = Some(fork_choice);
@ -561,6 +566,7 @@ where
observed_attester_slashings: <_>::default(), observed_attester_slashings: <_>::default(),
eth1_chain: self.eth1_chain, eth1_chain: self.eth1_chain,
genesis_validators_root: canonical_head.beacon_state.genesis_validators_root, genesis_validators_root: canonical_head.beacon_state.genesis_validators_root,
genesis_state_root: canonical_head.beacon_state_root,
canonical_head: TimeoutRwLock::new(canonical_head.clone()), canonical_head: TimeoutRwLock::new(canonical_head.clone()),
genesis_block_root, genesis_block_root,
fork_choice: RwLock::new(fork_choice), fork_choice: RwLock::new(fork_choice),


@ -83,6 +83,10 @@ pub enum BeaconChainError {
ObservedBlockProducersError(ObservedBlockProducersError), ObservedBlockProducersError(ObservedBlockProducersError),
PruningError(PruningError), PruningError(PruningError),
ArithError(ArithError), ArithError(ArithError),
InvalidShufflingId {
shuffling_epoch: Epoch,
head_block_epoch: Epoch,
},
} }
easy_from_to!(SlotProcessingError, BeaconChainError); easy_from_to!(SlotProcessingError, BeaconChainError);


@ -1,7 +1,9 @@
use crate::metrics; use crate::metrics;
use std::collections::HashMap; use std::collections::HashMap;
use types::{Attestation, AttestationData, EthSpec, Slot}; use tree_hash::TreeHash;
use types::{Attestation, AttestationData, EthSpec, Hash256, Slot};
type AttestationDataRoot = Hash256;
/// The number of slots that will be stored in the pool. /// The number of slots that will be stored in the pool.
/// ///
/// For example, if `SLOTS_RETAINED == 3` and the pool is pruned at slot `6`, then all attestations /// For example, if `SLOTS_RETAINED == 3` and the pool is pruned at slot `6`, then all attestations
@ -53,7 +55,7 @@ pub enum Error {
/// A collection of `Attestation` objects, keyed by their `attestation.data`. Enforces that all /// A collection of `Attestation` objects, keyed by their `attestation.data`. Enforces that all
/// `attestation` are from the same slot. /// `attestation` are from the same slot.
struct AggregatedAttestationMap<E: EthSpec> { struct AggregatedAttestationMap<E: EthSpec> {
map: HashMap<AttestationData, Attestation<E>>, map: HashMap<AttestationDataRoot, Attestation<E>>,
} }
impl<E: EthSpec> AggregatedAttestationMap<E> { impl<E: EthSpec> AggregatedAttestationMap<E> {
@ -87,7 +89,9 @@ impl<E: EthSpec> AggregatedAttestationMap<E> {
return Err(Error::MoreThanOneAggregationBitSet(set_bits.len())); return Err(Error::MoreThanOneAggregationBitSet(set_bits.len()));
} }
if let Some(existing_attestation) = self.map.get_mut(&a.data) { let attestation_data_root = a.data.tree_hash_root();
if let Some(existing_attestation) = self.map.get_mut(&attestation_data_root) {
if existing_attestation if existing_attestation
.aggregation_bits .aggregation_bits
.get(committee_index) .get(committee_index)
@ -107,7 +111,7 @@ impl<E: EthSpec> AggregatedAttestationMap<E> {
)); ));
} }
self.map.insert(a.data.clone(), a.clone()); self.map.insert(attestation_data_root, a.clone());
Ok(InsertOutcome::NewAttestationData { committee_index }) Ok(InsertOutcome::NewAttestationData { committee_index })
} }
} }
@ -115,8 +119,13 @@ impl<E: EthSpec> AggregatedAttestationMap<E> {
/// Returns an aggregated `Attestation` with the given `data`, if any. /// Returns an aggregated `Attestation` with the given `data`, if any.
/// ///
/// The given `a.data.slot` must match the slot that `self` was initialized with. /// The given `a.data.slot` must match the slot that `self` was initialized with.
pub fn get(&self, data: &AttestationData) -> Result<Option<Attestation<E>>, Error> { pub fn get(&self, data: &AttestationData) -> Option<Attestation<E>> {
Ok(self.map.get(data).cloned()) self.map.get(&data.tree_hash_root()).cloned()
}
/// Returns an aggregated `Attestation` with the given `root`, if any.
pub fn get_by_root(&self, root: &AttestationDataRoot) -> Option<&Attestation<E>> {
self.map.get(root)
} }
/// Iterate all attestations in `self`. /// Iterate all attestations in `self`.
@ -220,12 +229,19 @@ impl<E: EthSpec> NaiveAggregationPool<E> {
} }
/// Returns an aggregated `Attestation` with the given `data`, if any. /// Returns an aggregated `Attestation` with the given `data`, if any.
pub fn get(&self, data: &AttestationData) -> Result<Option<Attestation<E>>, Error> { pub fn get(&self, data: &AttestationData) -> Option<Attestation<E>> {
self.maps.get(&data.slot).and_then(|map| map.get(data))
}
/// Returns an aggregated `Attestation` with the given `data`, if any.
pub fn get_by_slot_and_root(
&self,
slot: Slot,
root: &AttestationDataRoot,
) -> Option<Attestation<E>> {
self.maps self.maps
.iter() .get(&slot)
.find(|(slot, _map)| **slot == data.slot) .and_then(|map| map.get_by_root(root).cloned())
.map(|(_slot, map)| map.get(data))
.unwrap_or_else(|| Ok(None))
} }
/// Iterate all attestations in all slots of `self`. /// Iterate all attestations in all slots of `self`.
@ -338,8 +354,7 @@ mod tests {
let retrieved = pool let retrieved = pool
.get(&a.data) .get(&a.data)
.expect("should not error while getting attestation") .expect("should not error while getting attestation");
.expect("should get an attestation");
assert_eq!( assert_eq!(
retrieved, a, retrieved, a,
"retrieved attestation should equal the one inserted" "retrieved attestation should equal the one inserted"
@ -378,8 +393,7 @@ mod tests {
let retrieved = pool let retrieved = pool
.get(&a_0.data) .get(&a_0.data)
.expect("should not error while getting attestation") .expect("should not error while getting attestation");
.expect("should get an attestation");
let mut a_01 = a_0.clone(); let mut a_01 = a_0.clone();
a_01.aggregate(&a_1); a_01.aggregate(&a_1);
@ -408,8 +422,7 @@ mod tests {
assert_eq!( assert_eq!(
pool.get(&a_0.data) pool.get(&a_0.data)
.expect("should not error while getting attestation") .expect("should not error while getting attestation"),
.expect("should get an attestation"),
retrieved, retrieved,
"should not have aggregated different attestation data" "should not have aggregated different attestation data"
); );


@ -1,6 +1,6 @@
use crate::metrics; use crate::metrics;
use lru::LruCache; use lru::LruCache;
use types::{beacon_state::CommitteeCache, Epoch, Hash256}; use types::{beacon_state::CommitteeCache, Epoch, Hash256, ShufflingId};
/// The size of the LRU cache that stores committee caches for quicker verification. /// The size of the LRU cache that stores committee caches for quicker verification.
/// ///
@ -14,7 +14,7 @@ const CACHE_SIZE: usize = 16;
/// It has been named `ShufflingCache` because `CommitteeCacheCache` is a bit weird and looks like /// It has been named `ShufflingCache` because `CommitteeCacheCache` is a bit weird and looks like
/// a find/replace error. /// a find/replace error.
pub struct ShufflingCache { pub struct ShufflingCache {
cache: LruCache<(Epoch, Hash256), CommitteeCache>, cache: LruCache<ShufflingId, CommitteeCache>,
} }
impl ShufflingCache { impl ShufflingCache {
@ -24,8 +24,8 @@ impl ShufflingCache {
} }
} }
pub fn get(&mut self, epoch: Epoch, root: Hash256) -> Option<&CommitteeCache> { pub fn get(&mut self, key: &ShufflingId) -> Option<&CommitteeCache> {
let opt = self.cache.get(&(epoch, root)); let opt = self.cache.get(key);
if opt.is_some() { if opt.is_some() {
metrics::inc_counter(&metrics::SHUFFLING_CACHE_HITS); metrics::inc_counter(&metrics::SHUFFLING_CACHE_HITS);
@ -36,11 +36,37 @@ impl ShufflingCache {
opt opt
} }
pub fn insert(&mut self, epoch: Epoch, root: Hash256, committee_cache: &CommitteeCache) { pub fn contains(&self, key: &ShufflingId) -> bool {
let key = (epoch, root); self.cache.contains(key)
}
pub fn insert(&mut self, key: ShufflingId, committee_cache: &CommitteeCache) {
if !self.cache.contains(&key) { if !self.cache.contains(&key) {
self.cache.put(key, committee_cache.clone()); self.cache.put(key, committee_cache.clone());
} }
} }
} }
/// Contains the shuffling IDs for a beacon block.
pub struct BlockShufflingIds {
pub current: ShufflingId,
pub next: ShufflingId,
pub block_root: Hash256,
}
impl BlockShufflingIds {
/// Returns the shuffling ID for the given epoch.
///
/// Returns `None` if `epoch` is prior to `self.current.shuffling_epoch`.
pub fn id_for_epoch(&self, epoch: Epoch) -> Option<ShufflingId> {
if epoch == self.current.shuffling_epoch {
Some(self.current.clone())
} else if epoch == self.next.shuffling_epoch {
Some(self.next.clone())
} else if epoch > self.next.shuffling_epoch {
Some(ShufflingId::from_components(epoch, self.block_root))
} else {
None
}
}
}


@ -26,9 +26,11 @@ use store::{config::StoreConfig, BlockReplay, HotColdDB, ItemStore, LevelDB, Mem
use tempfile::{tempdir, TempDir}; use tempfile::{tempdir, TempDir};
use tree_hash::TreeHash; use tree_hash::TreeHash;
use types::{ use types::{
AggregateSignature, Attestation, BeaconState, BeaconStateHash, ChainSpec, Domain, Epoch, AggregateSignature, Attestation, AttestationData, AttesterSlashing, BeaconState,
EthSpec, Hash256, Keypair, SelectionProof, SignedAggregateAndProof, SignedBeaconBlock, BeaconStateHash, ChainSpec, Checkpoint, Domain, Epoch, EthSpec, Hash256, IndexedAttestation,
SignedBeaconBlockHash, SignedRoot, Slot, SubnetId, Keypair, ProposerSlashing, SelectionProof, SignedAggregateAndProof, SignedBeaconBlock,
SignedBeaconBlockHash, SignedRoot, SignedVoluntaryExit, Slot, SubnetId, VariableList,
VoluntaryExit,
}; };
pub use types::test_utils::generate_deterministic_keypairs; pub use types::test_utils::generate_deterministic_keypairs;
@ -129,7 +131,7 @@ impl<E: EthSpec> BeaconChainHarness<BlockingMigratorEphemeralHarnessType<E>> {
let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter); let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter);
let drain = slog_term::FullFormat::new(decorator).build(); let drain = slog_term::FullFormat::new(decorator).build();
let debug_level = slog::LevelFilter::new(drain, slog::Level::Debug); let debug_level = slog::LevelFilter::new(drain, slog::Level::Critical);
let log = slog::Logger::root(std::sync::Mutex::new(debug_level).fuse(), o!()); let log = slog::Logger::root(std::sync::Mutex::new(debug_level).fuse(), o!());
let config = StoreConfig::default(); let config = StoreConfig::default();
@ -193,7 +195,7 @@ impl<E: EthSpec> BeaconChainHarness<NullMigratorEphemeralHarnessType<E>> {
let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter); let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter);
let drain = slog_term::FullFormat::new(decorator).build(); let drain = slog_term::FullFormat::new(decorator).build();
let debug_level = slog::LevelFilter::new(drain, slog::Level::Debug); let debug_level = slog::LevelFilter::new(drain, slog::Level::Critical);
let log = slog::Logger::root(std::sync::Mutex::new(debug_level).fuse(), o!()); let log = slog::Logger::root(std::sync::Mutex::new(debug_level).fuse(), o!());
let store = HotColdDB::open_ephemeral(config, spec.clone(), log.clone()).unwrap(); let store = HotColdDB::open_ephemeral(config, spec.clone(), log.clone()).unwrap();
@ -238,7 +240,7 @@ impl<E: EthSpec> BeaconChainHarness<BlockingMigratorDiskHarnessType<E>> {
let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter); let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter);
let drain = slog_term::FullFormat::new(decorator).build(); let drain = slog_term::FullFormat::new(decorator).build();
let debug_level = slog::LevelFilter::new(drain, slog::Level::Debug); let debug_level = slog::LevelFilter::new(drain, slog::Level::Critical);
let log = slog::Logger::root(std::sync::Mutex::new(debug_level).fuse(), o!()); let log = slog::Logger::root(std::sync::Mutex::new(debug_level).fuse(), o!());
let chain = BeaconChainBuilder::new(eth_spec_instance) let chain = BeaconChainBuilder::new(eth_spec_instance)
@ -397,7 +399,7 @@ where
// If we produce two blocks for the same slot, they hash up to the same value and // If we produce two blocks for the same slot, they hash up to the same value and
// BeaconChain errors out with `BlockIsAlreadyKnown`. Vary the graffiti so that we produce // BeaconChain errors out with `BlockIsAlreadyKnown`. Vary the graffiti so that we produce
// different blocks each time. // different blocks each time.
self.chain.set_graffiti(self.rng.gen::<[u8; 32]>()); self.chain.set_graffiti(self.rng.gen::<[u8; 32]>().into());
let randao_reveal = { let randao_reveal = {
let epoch = slot.epoch(E::slots_per_epoch()); let epoch = slot.epoch(E::slots_per_epoch());
@ -442,8 +444,8 @@ where
let committee_count = state.get_committee_count_at_slot(state.slot).unwrap(); let committee_count = state.get_committee_count_at_slot(state.slot).unwrap();
state state
.get_beacon_committees_at_slot(state.slot) .get_beacon_committees_at_slot(attestation_slot)
.unwrap() .expect("should get committees")
.iter() .iter()
.map(|bc| { .map(|bc| {
bc.committee bc.committee
@ -570,7 +572,6 @@ where
let aggregate = self let aggregate = self
.chain .chain
.get_aggregated_attestation(&attestation.data) .get_aggregated_attestation(&attestation.data)
.unwrap()
.unwrap_or_else(|| { .unwrap_or_else(|| {
committee_attestations.iter().skip(1).fold(attestation.clone(), |mut agg, (att, _)| { committee_attestations.iter().skip(1).fold(attestation.clone(), |mut agg, (att, _)| {
agg.aggregate(att); agg.aggregate(att);
@ -601,6 +602,94 @@ where
.collect() .collect()
} }
pub fn make_attester_slashing(&self, validator_indices: Vec<u64>) -> AttesterSlashing<E> {
let mut attestation_1 = IndexedAttestation {
attesting_indices: VariableList::new(validator_indices).unwrap(),
data: AttestationData {
slot: Slot::new(0),
index: 0,
beacon_block_root: Hash256::zero(),
target: Checkpoint {
root: Hash256::zero(),
epoch: Epoch::new(0),
},
source: Checkpoint {
root: Hash256::zero(),
epoch: Epoch::new(0),
},
},
signature: AggregateSignature::infinity(),
};
let mut attestation_2 = attestation_1.clone();
attestation_2.data.index += 1;
for attestation in &mut [&mut attestation_1, &mut attestation_2] {
for &i in &attestation.attesting_indices {
let sk = &self.validators_keypairs[i as usize].sk;
let fork = self.chain.head_info().unwrap().fork;
let genesis_validators_root = self.chain.genesis_validators_root;
let domain = self.chain.spec.get_domain(
attestation.data.target.epoch,
Domain::BeaconAttester,
&fork,
genesis_validators_root,
);
let message = attestation.data.signing_root(domain);
attestation.signature.add_assign(&sk.sign(message));
}
}
AttesterSlashing {
attestation_1,
attestation_2,
}
}
pub fn make_proposer_slashing(&self, validator_index: u64) -> ProposerSlashing {
let mut block_header_1 = self
.chain
.head_beacon_block()
.unwrap()
.message
.block_header();
block_header_1.proposer_index = validator_index;
let mut block_header_2 = block_header_1.clone();
block_header_2.state_root = Hash256::zero();
let sk = &self.validators_keypairs[validator_index as usize].sk;
let fork = self.chain.head_info().unwrap().fork;
let genesis_validators_root = self.chain.genesis_validators_root;
let mut signed_block_headers = vec![block_header_1, block_header_2]
.into_iter()
.map(|block_header| {
block_header.sign::<E>(&sk, &fork, genesis_validators_root, &self.chain.spec)
})
.collect::<Vec<_>>();
ProposerSlashing {
signed_header_2: signed_block_headers.remove(1),
signed_header_1: signed_block_headers.remove(0),
}
}
pub fn make_voluntary_exit(&self, validator_index: u64, epoch: Epoch) -> SignedVoluntaryExit {
let sk = &self.validators_keypairs[validator_index as usize].sk;
let fork = self.chain.head_info().unwrap().fork;
let genesis_validators_root = self.chain.genesis_validators_root;
VoluntaryExit {
epoch,
validator_index,
}
.sign(sk, &fork, genesis_validators_root, &self.chain.spec)
}
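A hypothetical test sketch (the function name, validator indices and epoch are illustrative, not from the diff) showing how the three new harness helpers might be exercised together:

```rust
use types::{Epoch, MainnetEthSpec};

// Hypothetical usage of the new harness helpers; assumes a harness of the
// ephemeral flavour used by the other tests in this tree.
fn build_example_operations(
    harness: &BeaconChainHarness<BlockingMigratorEphemeralHarnessType<MainnetEthSpec>>,
) {
    // Two attestations with the same indices but conflicting data => slashable.
    let attester_slashing = harness.make_attester_slashing(vec![0, 1, 2]);
    // Two conflicting block headers signed by the same proposer.
    let proposer_slashing = harness.make_proposer_slashing(3);
    // A signed exit for validator 4 taking effect at epoch 0.
    let voluntary_exit = harness.make_voluntary_exit(4, Epoch::new(0));
    let _ = (attester_slashing, proposer_slashing, voluntary_exit);
}
```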
pub fn process_block(&self, slot: Slot, block: SignedBeaconBlock<E>) -> SignedBeaconBlockHash { pub fn process_block(&self, slot: Slot, block: SignedBeaconBlock<E>) -> SignedBeaconBlockHash {
assert_eq!(self.chain.slot().unwrap(), slot); assert_eq!(self.chain.slot().unwrap(), slot);
let block_hash: SignedBeaconBlockHash = self.chain.process_block(block).unwrap().into(); let block_hash: SignedBeaconBlockHash = self.chain.process_block(block).unwrap().into();
@ -612,7 +701,10 @@ where
for (unaggregated_attestations, maybe_signed_aggregate) in attestations.into_iter() { for (unaggregated_attestations, maybe_signed_aggregate) in attestations.into_iter() {
for (attestation, subnet_id) in unaggregated_attestations { for (attestation, subnet_id) in unaggregated_attestations {
self.chain self.chain
.verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id) .verify_unaggregated_attestation_for_gossip(
attestation.clone(),
Some(subnet_id),
)
.unwrap() .unwrap()
.add_to_pool(&self.chain) .add_to_pool(&self.chain)
.unwrap(); .unwrap();


@ -570,7 +570,7 @@ fn unaggregated_gossip_verification() {
matches!( matches!(
harness harness
.chain .chain
.verify_unaggregated_attestation_for_gossip($attn_getter, $subnet_getter) .verify_unaggregated_attestation_for_gossip($attn_getter, Some($subnet_getter))
.err() .err()
.expect(&format!( .expect(&format!(
"{} should error during verify_unaggregated_attestation_for_gossip", "{} should error during verify_unaggregated_attestation_for_gossip",
@ -837,7 +837,7 @@ fn unaggregated_gossip_verification() {
harness harness
.chain .chain
.verify_unaggregated_attestation_for_gossip(valid_attestation.clone(), subnet_id) .verify_unaggregated_attestation_for_gossip(valid_attestation.clone(), Some(subnet_id))
.expect("valid attestation should be verified"); .expect("valid attestation should be verified");
/* /*
@ -926,6 +926,6 @@ fn attestation_that_skips_epochs() {
harness harness
.chain .chain
.verify_unaggregated_attestation_for_gossip(attestation, subnet_id) .verify_unaggregated_attestation_for_gossip(attestation, Some(subnet_id))
.expect("should gossip verify attestation that skips slots"); .expect("should gossip verify attestation that skips slots");
} }


@ -326,7 +326,7 @@ fn epoch_boundary_state_attestation_processing() {
let res = harness let res = harness
.chain .chain
.verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id); .verify_unaggregated_attestation_for_gossip(attestation.clone(), Some(subnet_id));
let current_slot = harness.chain.slot().expect("should get slot"); let current_slot = harness.chain.slot().expect("should get slot");
let expected_attestation_slot = attestation.data.slot; let expected_attestation_slot = attestation.data.slot;


@ -463,7 +463,7 @@ fn attestations_with_increasing_slots() {
for (attestation, subnet_id) in attestations.into_iter().flatten() { for (attestation, subnet_id) in attestations.into_iter().flatten() {
let res = harness let res = harness
.chain .chain
.verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id); .verify_unaggregated_attestation_for_gossip(attestation.clone(), Some(subnet_id));
let current_slot = harness.chain.slot().expect("should get slot"); let current_slot = harness.chain.slot().expect("should get slot");
let expected_attestation_slot = attestation.data.slot; let expected_attestation_slot = attestation.data.slot;


@ -14,7 +14,6 @@ store = { path = "../store" }
network = { path = "../network" } network = { path = "../network" }
timer = { path = "../timer" } timer = { path = "../timer" }
eth2_libp2p = { path = "../eth2_libp2p" } eth2_libp2p = { path = "../eth2_libp2p" }
rest_api = { path = "../rest_api" }
parking_lot = "0.11.0" parking_lot = "0.11.0"
websocket_server = { path = "../websocket_server" } websocket_server = { path = "../websocket_server" }
prometheus = "0.9.0" prometheus = "0.9.0"
@ -42,3 +41,5 @@ lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
time = "0.2.16" time = "0.2.16"
bus = "2.2.3" bus = "2.2.3"
directory = {path = "../../common/directory"} directory = {path = "../../common/directory"}
http_api = { path = "../http_api" }
http_metrics = { path = "../http_metrics" }


@ -13,15 +13,14 @@ use beacon_chain::{
use bus::Bus; use bus::Bus;
use environment::RuntimeContext; use environment::RuntimeContext;
use eth1::{Config as Eth1Config, Service as Eth1Service}; use eth1::{Config as Eth1Config, Service as Eth1Service};
use eth2_config::Eth2Config;
use eth2_libp2p::NetworkGlobals; use eth2_libp2p::NetworkGlobals;
use genesis::{interop_genesis_state, Eth1GenesisService}; use genesis::{interop_genesis_state, Eth1GenesisService};
use network::{NetworkConfig, NetworkMessage, NetworkService}; use network::{NetworkConfig, NetworkMessage, NetworkService};
use parking_lot::Mutex; use parking_lot::Mutex;
use slog::info; use slog::{debug, info};
use ssz::Decode; use ssz::Decode;
use std::net::SocketAddr; use std::net::SocketAddr;
use std::path::Path; use std::path::{Path, PathBuf};
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use timer::spawn_timer; use timer::spawn_timer;
@ -61,7 +60,10 @@ pub struct ClientBuilder<T: BeaconChainTypes> {
event_handler: Option<T::EventHandler>, event_handler: Option<T::EventHandler>,
network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>, network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>,
network_send: Option<UnboundedSender<NetworkMessage<T::EthSpec>>>, network_send: Option<UnboundedSender<NetworkMessage<T::EthSpec>>>,
http_listen_addr: Option<SocketAddr>, db_path: Option<PathBuf>,
freezer_db_path: Option<PathBuf>,
http_api_config: http_api::Config,
http_metrics_config: http_metrics::Config,
websocket_listen_addr: Option<SocketAddr>, websocket_listen_addr: Option<SocketAddr>,
eth_spec_instance: T::EthSpec, eth_spec_instance: T::EthSpec,
} }
@ -103,7 +105,10 @@ where
event_handler: None, event_handler: None,
network_globals: None, network_globals: None,
network_send: None, network_send: None,
http_listen_addr: None, db_path: None,
freezer_db_path: None,
http_api_config: <_>::default(),
http_metrics_config: <_>::default(),
websocket_listen_addr: None, websocket_listen_addr: None,
eth_spec_instance, eth_spec_instance,
} }
@ -280,55 +285,16 @@ where
Ok(self) Ok(self)
} }
/// Immediately starts the beacon node REST API http server. /// Provides configuration for the HTTP API.
pub fn http_server( pub fn http_api_config(mut self, config: http_api::Config) -> Self {
mut self, self.http_api_config = config;
client_config: &ClientConfig, self
eth2_config: &Eth2Config, }
events: Arc<Mutex<Bus<SignedBeaconBlockHash>>>,
) -> Result<Self, String> {
let beacon_chain = self
.beacon_chain
.clone()
.ok_or_else(|| "http_server requires a beacon chain")?;
let context = self
.runtime_context
.as_ref()
.ok_or_else(|| "http_server requires a runtime_context")?
.service_context("http".into());
let network_globals = self
.network_globals
.clone()
.ok_or_else(|| "http_server requires a libp2p network")?;
let network_send = self
.network_send
.clone()
.ok_or_else(|| "http_server requires a libp2p network sender")?;
let network_info = rest_api::NetworkInfo { /// Provides configuration for the HTTP server that serves Prometheus metrics.
network_globals, pub fn http_metrics_config(mut self, config: http_metrics::Config) -> Self {
network_chan: network_send, self.http_metrics_config = config;
}; self
let listening_addr = rest_api::start_server(
context.executor,
&client_config.rest_api,
beacon_chain,
network_info,
client_config
.create_db_path()
.map_err(|_| "unable to read data dir")?,
client_config
.create_freezer_db_path()
.map_err(|_| "unable to read freezer DB dir")?,
eth2_config.clone(),
events,
)
.map_err(|e| format!("Failed to start HTTP API: {:?}", e))?;
self.http_listen_addr = Some(listening_addr);
Ok(self)
} }
/// Immediately starts the service that periodically logs information each slot. /// Immediately starts the service that periodically logs information each slot.
@ -367,9 +333,11 @@ where
/// specified. /// specified.
/// ///
/// If type inference errors are being raised, see the comment on the definition of `Self`. /// If type inference errors are being raised, see the comment on the definition of `Self`.
#[allow(clippy::type_complexity)]
pub fn build( pub fn build(
self, self,
) -> Client< ) -> Result<
Client<
Witness< Witness<
TStoreMigrator, TStoreMigrator,
TSlotClock, TSlotClock,
@ -379,13 +347,71 @@ where
THotStore, THotStore,
TColdStore, TColdStore,
>, >,
>,
String,
> { > {
Client { let runtime_context = self
.runtime_context
.as_ref()
.ok_or_else(|| "build requires a runtime context".to_string())?;
let log = runtime_context.log().clone();
let http_api_listen_addr = if self.http_api_config.enabled {
let ctx = Arc::new(http_api::Context {
config: self.http_api_config.clone(),
chain: self.beacon_chain.clone(),
network_tx: self.network_send.clone(),
network_globals: self.network_globals.clone(),
log: log.clone(),
});
let exit = runtime_context.executor.exit();
let (listen_addr, server) = http_api::serve(ctx, exit)
.map_err(|e| format!("Unable to start HTTP API server: {:?}", e))?;
runtime_context
.clone()
.executor
.spawn_without_exit(async move { server.await }, "http-api");
Some(listen_addr)
} else {
info!(log, "HTTP server is disabled");
None
};
let http_metrics_listen_addr = if self.http_metrics_config.enabled {
let ctx = Arc::new(http_metrics::Context {
config: self.http_metrics_config.clone(),
chain: self.beacon_chain.clone(),
db_path: self.db_path.clone(),
freezer_db_path: self.freezer_db_path.clone(),
log: log.clone(),
});
let exit = runtime_context.executor.exit();
let (listen_addr, server) = http_metrics::serve(ctx, exit)
.map_err(|e| format!("Unable to start HTTP API server: {:?}", e))?;
runtime_context
.executor
.spawn_without_exit(async move { server.await }, "http-api");
Some(listen_addr)
} else {
debug!(log, "Metrics server is disabled");
None
};
Ok(Client {
beacon_chain: self.beacon_chain, beacon_chain: self.beacon_chain,
network_globals: self.network_globals, network_globals: self.network_globals,
http_listen_addr: self.http_listen_addr, http_api_listen_addr,
http_metrics_listen_addr,
websocket_listen_addr: self.websocket_listen_addr, websocket_listen_addr: self.websocket_listen_addr,
} })
} }
} }
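A minimal wiring sketch, assuming an already-initialised `ClientBuilder` named `builder` and a populated client `Config` named `client_config` (both hypothetical names): the HTTP servers are no longer started eagerly via `http_server`; their configs are handed to the builder and the servers are spawned inside `build()` when `enabled` is set.

```rust
// Hypothetical sketch, not part of the diff.
let client = builder
    .http_api_config(client_config.http_api.clone())
    .http_metrics_config(client_config.http_metrics.clone())
    .build()?; // `build()` now returns `Result<Client<_>, String>`.
```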
@ -520,6 +546,9 @@ where
.clone() .clone()
.ok_or_else(|| "disk_store requires a chain spec".to_string())?; .ok_or_else(|| "disk_store requires a chain spec".to_string())?;
self.db_path = Some(hot_path.into());
self.freezer_db_path = Some(cold_path.into());
let store = HotColdDB::open(hot_path, cold_path, config, spec, context.log().clone()) let store = HotColdDB::open(hot_path, cold_path, config, spec, context.log().clone())
.map_err(|e| format!("Unable to open database: {:?}", e))?; .map_err(|e| format!("Unable to open database: {:?}", e))?;
self.store = Some(Arc::new(store)); self.store = Some(Arc::new(store));


@ -62,10 +62,11 @@ pub struct Config {
pub genesis: ClientGenesis, pub genesis: ClientGenesis,
pub store: store::StoreConfig, pub store: store::StoreConfig,
pub network: network::NetworkConfig, pub network: network::NetworkConfig,
pub rest_api: rest_api::Config,
pub chain: beacon_chain::ChainConfig, pub chain: beacon_chain::ChainConfig,
pub websocket_server: websocket_server::Config, pub websocket_server: websocket_server::Config,
pub eth1: eth1::Config, pub eth1: eth1::Config,
pub http_api: http_api::Config,
pub http_metrics: http_metrics::Config,
} }
impl Default for Config { impl Default for Config {
@ -79,7 +80,6 @@ impl Default for Config {
store: <_>::default(), store: <_>::default(),
network: NetworkConfig::default(), network: NetworkConfig::default(),
chain: <_>::default(), chain: <_>::default(),
rest_api: <_>::default(),
websocket_server: <_>::default(), websocket_server: <_>::default(),
spec_constants: TESTNET_SPEC_CONSTANTS.into(), spec_constants: TESTNET_SPEC_CONSTANTS.into(),
dummy_eth1_backend: false, dummy_eth1_backend: false,
@ -87,6 +87,8 @@ impl Default for Config {
eth1: <_>::default(), eth1: <_>::default(),
disabled_forks: Vec::new(), disabled_forks: Vec::new(),
graffiti: Graffiti::default(), graffiti: Graffiti::default(),
http_api: <_>::default(),
http_metrics: <_>::default(),
} }
} }
} }


@ -23,7 +23,10 @@ pub use eth2_config::Eth2Config;
pub struct Client<T: BeaconChainTypes> { pub struct Client<T: BeaconChainTypes> {
beacon_chain: Option<Arc<BeaconChain<T>>>, beacon_chain: Option<Arc<BeaconChain<T>>>,
network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>, network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>,
http_listen_addr: Option<SocketAddr>, /// Listen address for the standard eth2.0 API, if the service was started.
http_api_listen_addr: Option<SocketAddr>,
/// Listen address for the HTTP server which serves Prometheus metrics.
http_metrics_listen_addr: Option<SocketAddr>,
websocket_listen_addr: Option<SocketAddr>, websocket_listen_addr: Option<SocketAddr>,
} }
@ -33,9 +36,14 @@ impl<T: BeaconChainTypes> Client<T> {
self.beacon_chain.clone() self.beacon_chain.clone()
} }
/// Returns the address of the client's HTTP API server, if it was started. /// Returns the address of the client's standard eth2.0 API server, if it was started.
pub fn http_listen_addr(&self) -> Option<SocketAddr> { pub fn http_api_listen_addr(&self) -> Option<SocketAddr> {
self.http_listen_addr self.http_api_listen_addr
}
/// Returns the address of the client's HTTP Prometheus metrics server, if it was started.
pub fn http_metrics_listen_addr(&self) -> Option<SocketAddr> {
self.http_metrics_listen_addr
} }
/// Returns the address of the client's WebSocket API server, if it was started. /// Returns the address of the client's WebSocket API server, if it was started.


@ -39,19 +39,34 @@ pub enum Eth1NetworkId {
Custom(u64), Custom(u64),
} }
impl Into<u64> for Eth1NetworkId {
fn into(self) -> u64 {
match self {
Eth1NetworkId::Mainnet => 1,
Eth1NetworkId::Goerli => 5,
Eth1NetworkId::Custom(id) => id,
}
}
}
impl From<u64> for Eth1NetworkId {
fn from(id: u64) -> Self {
let into = |x: Eth1NetworkId| -> u64 { x.into() };
match id {
id if id == into(Eth1NetworkId::Mainnet) => Eth1NetworkId::Mainnet,
id if id == into(Eth1NetworkId::Goerli) => Eth1NetworkId::Goerli,
id => Eth1NetworkId::Custom(id),
}
}
}
impl FromStr for Eth1NetworkId { impl FromStr for Eth1NetworkId {
type Err = String; type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> { fn from_str(s: &str) -> Result<Self, Self::Err> {
match s { u64::from_str_radix(s, 10)
"1" => Ok(Eth1NetworkId::Mainnet), .map(Into::into)
"5" => Ok(Eth1NetworkId::Goerli), .map_err(|e| format!("Failed to parse eth1 network id {}", e))
custom => {
let network_id = u64::from_str_radix(custom, 10)
.map_err(|e| format!("Failed to parse eth1 network id {}", e))?;
Ok(Eth1NetworkId::Custom(network_id))
}
}
} }
} }
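A small sketch of the intended round-trip behaviour of the new conversions (assuming the enum derives `PartialEq`/`Debug`, as the assertions require):

```rust
use std::str::FromStr;

// Hypothetical checks illustrating the new u64/string conversions.
fn eth1_network_id_round_trip() {
    assert_eq!(Eth1NetworkId::from(1u64), Eth1NetworkId::Mainnet);
    assert_eq!(Eth1NetworkId::from(5u64), Eth1NetworkId::Goerli);
    assert_eq!(Eth1NetworkId::from(42u64), Eth1NetworkId::Custom(42));

    let goerli: u64 = Eth1NetworkId::Goerli.into();
    assert_eq!(goerli, 5);

    assert_eq!(Eth1NetworkId::from_str("5"), Ok(Eth1NetworkId::Goerli));
    assert!(Eth1NetworkId::from_str("not-a-number").is_err());
}
```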


@ -13,4 +13,6 @@ pub use block_cache::{BlockCache, Eth1Block};
pub use deposit_cache::DepositCache; pub use deposit_cache::DepositCache;
pub use deposit_log::DepositLog; pub use deposit_log::DepositLog;
pub use inner::SszEth1Cache; pub use inner::SszEth1Cache;
pub use service::{BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Service}; pub use service::{
BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Service, DEFAULT_NETWORK_ID,
};


@ -1,50 +1,34 @@
[package] [package]
name = "rest_api" name = "http_api"
version = "0.2.0" version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com>", "Luke Anderson <luke@sigmaprime.io>"] authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018" edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
bls = { path = "../../crypto/bls" } warp = "0.2.5"
rest_types = { path = "../../common/rest_types" } serde = { version = "1.0.110", features = ["derive"] }
tokio = { version = "0.2.21", features = ["sync"] }
parking_lot = "0.11.0"
types = { path = "../../consensus/types" }
hex = "0.4.2"
beacon_chain = { path = "../beacon_chain" } beacon_chain = { path = "../beacon_chain" }
eth2 = { path = "../../common/eth2", features = ["lighthouse"] }
slog = "2.5.2"
network = { path = "../network" } network = { path = "../network" }
eth2_libp2p = { path = "../eth2_libp2p" } eth2_libp2p = { path = "../eth2_libp2p" }
store = { path = "../store" } eth1 = { path = "../eth1" }
serde = { version = "1.0.110", features = ["derive"] } fork_choice = { path = "../../consensus/fork_choice" }
serde_json = "1.0.52"
serde_yaml = "0.8.11"
slog = "2.5.2"
slog-term = "2.5.0"
slog-async = "2.5.0"
eth2_ssz = "0.1.2"
eth2_ssz_derive = "0.1.0"
state_processing = { path = "../../consensus/state_processing" } state_processing = { path = "../../consensus/state_processing" }
types = { path = "../../consensus/types" }
http = "0.2.1"
hyper = "0.13.5"
tokio = { version = "0.2.21", features = ["sync"] }
url = "2.1.1"
lazy_static = "1.4.0"
eth2_config = { path = "../../common/eth2_config" }
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
slot_clock = { path = "../../common/slot_clock" }
hex = "0.4.2"
parking_lot = "0.11.0"
futures = "0.3.5"
operation_pool = { path = "../operation_pool" }
environment = { path = "../../lighthouse/environment" }
uhttp_sse = "0.5.1"
bus = "2.2.3"
itertools = "0.9.0"
lighthouse_version = { path = "../../common/lighthouse_version" } lighthouse_version = { path = "../../common/lighthouse_version" }
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
lazy_static = "1.4.0"
warp_utils = { path = "../../common/warp_utils" }
slot_clock = { path = "../../common/slot_clock" }
[dev-dependencies] [dev-dependencies]
assert_matches = "1.3.0" store = { path = "../store" }
remote_beacon_node = { path = "../../common/remote_beacon_node" } environment = { path = "../../lighthouse/environment" }
node_test_rig = { path = "../../testing/node_test_rig" } tree_hash = { path = "../../consensus/tree_hash" }
tree_hash = "0.1.0" discv5 = { version = "0.1.0-alpha.10", features = ["libp2p"] }
[features]
fake_crypto = []


@ -0,0 +1,185 @@
use crate::metrics;
use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes};
use eth2::types::ProposerData;
use fork_choice::ProtoBlock;
use slot_clock::SlotClock;
use state_processing::per_slot_processing;
use types::{BeaconState, Epoch, EthSpec, Hash256, PublicKeyBytes};
/// This sets a maximum bound on the number of epochs to skip whilst instantiating the cache for
/// the first time.
const EPOCHS_TO_SKIP: u64 = 2;
/// Caches the beacon block proposers for a given `epoch` and `decision_block_root`.
///
/// This cache is only able to contain a single set of proposers and is only
/// intended to cache the proposers for the current epoch according to the head
/// of the chain. A change in epoch or re-org to a different chain may cause a
/// cache miss and rebuild.
pub struct BeaconProposerCache {
epoch: Epoch,
decision_block_root: Hash256,
proposers: Vec<ProposerData>,
}
impl BeaconProposerCache {
/// Create a new cache for the current epoch of the `chain`.
pub fn new<T: BeaconChainTypes>(chain: &BeaconChain<T>) -> Result<Self, BeaconChainError> {
let head_root = chain.head_beacon_block_root()?;
let head_block = chain
.fork_choice
.read()
.get_block(&head_root)
.ok_or_else(|| BeaconChainError::MissingBeaconBlock(head_root))?;
// If the head epoch is more than `EPOCHS_TO_SKIP` in the future, just build the cache at
// the epoch of the head. This prevents doing a massive amount of skip slots when starting
// a new database from genesis.
let epoch = {
let epoch_now = chain
.epoch()
.unwrap_or_else(|_| chain.spec.genesis_slot.epoch(T::EthSpec::slots_per_epoch()));
let head_epoch = head_block.slot.epoch(T::EthSpec::slots_per_epoch());
if epoch_now > head_epoch + EPOCHS_TO_SKIP {
head_epoch
} else {
epoch_now
}
};
Self::for_head_block(chain, epoch, head_root, head_block)
}
/// Create a new cache that contains the shuffling for `current_epoch`,
/// assuming that `head_root` and `head_block` represents the most recent
/// canonical block.
fn for_head_block<T: BeaconChainTypes>(
chain: &BeaconChain<T>,
current_epoch: Epoch,
head_root: Hash256,
head_block: ProtoBlock,
) -> Result<Self, BeaconChainError> {
let _timer = metrics::start_timer(&metrics::HTTP_API_BEACON_PROPOSER_CACHE_TIMES);
let mut head_state = chain
.get_state(&head_block.state_root, Some(head_block.slot))?
.ok_or_else(|| BeaconChainError::MissingBeaconState(head_block.state_root))?;
let decision_block_root = Self::decision_block_root(current_epoch, head_root, &head_state)?;
// We *must* skip forward to the current epoch to obtain valid proposer
// duties. We cannot skip to the previous epoch, like we do with
// attester duties.
while head_state.current_epoch() < current_epoch {
// Skip slots until the current epoch, providing `Hash256::zero()` as the state root
// since we don't require it to be valid to identify producers.
per_slot_processing(&mut head_state, Some(Hash256::zero()), &chain.spec)?;
}
let proposers = current_epoch
.slot_iter(T::EthSpec::slots_per_epoch())
.map(|slot| {
head_state
.get_beacon_proposer_index(slot, &chain.spec)
.map_err(BeaconChainError::from)
.and_then(|i| {
let pubkey = chain
.validator_pubkey(i)?
.ok_or_else(|| BeaconChainError::ValidatorPubkeyCacheIncomplete(i))?;
Ok(ProposerData {
pubkey: PublicKeyBytes::from(pubkey),
slot,
})
})
})
.collect::<Result<_, _>>()?;
Ok(Self {
epoch: current_epoch,
decision_block_root,
proposers,
})
}
/// Returns a block root which can be used to key the shuffling obtained from the following
/// parameters:
///
/// - `shuffling_epoch`: the epoch to which the shuffling pertains.
/// - `head_block_root`: the block root at the head of the chain.
/// - `head_block_state`: the state of `head_block_root`.
pub fn decision_block_root<E: EthSpec>(
shuffling_epoch: Epoch,
head_block_root: Hash256,
head_block_state: &BeaconState<E>,
) -> Result<Hash256, BeaconChainError> {
let decision_slot = shuffling_epoch
.start_slot(E::slots_per_epoch())
.saturating_sub(1_u64);
// If decision slot is equal to or ahead of the head, the block root is the head block root
if decision_slot >= head_block_state.slot {
Ok(head_block_root)
} else {
head_block_state
.get_block_root(decision_slot)
.map(|root| *root)
.map_err(Into::into)
}
}
/// Return the proposers for the given `Epoch`.
///
/// The cache may be rebuilt if:
///
/// - The epoch has changed since the last cache build.
/// - There has been a re-org that crosses an epoch boundary.
pub fn get_proposers<T: BeaconChainTypes>(
&mut self,
chain: &BeaconChain<T>,
epoch: Epoch,
) -> Result<Vec<ProposerData>, warp::Rejection> {
let current_epoch = chain
.slot_clock
.now_or_genesis()
.ok_or_else(|| {
warp_utils::reject::custom_server_error("unable to read slot clock".to_string())
})?
.epoch(T::EthSpec::slots_per_epoch());
// Disallow requests that are outside the current epoch. This ensures the cache doesn't get
// washed-out with old values.
if current_epoch != epoch {
return Err(warp_utils::reject::custom_bad_request(format!(
"requested epoch is {} but only current epoch {} is allowed",
epoch, current_epoch
)));
}
let (head_block_root, head_decision_block_root) = chain
.with_head(|head| {
Self::decision_block_root(current_epoch, head.beacon_block_root, &head.beacon_state)
.map(|decision_root| (head.beacon_block_root, decision_root))
})
.map_err(warp_utils::reject::beacon_chain_error)?;
let head_block = chain
.fork_choice
.read()
.get_block(&head_block_root)
.ok_or_else(|| BeaconChainError::MissingBeaconBlock(head_block_root))
.map_err(warp_utils::reject::beacon_chain_error)?;
// Rebuild the cache if this call causes a cache-miss.
if self.epoch != current_epoch || self.decision_block_root != head_decision_block_root {
metrics::inc_counter(&metrics::HTTP_API_BEACON_PROPOSER_CACHE_MISSES_TOTAL);
*self = Self::for_head_block(chain, current_epoch, head_block_root, head_block)
.map_err(warp_utils::reject::beacon_chain_error)?;
} else {
metrics::inc_counter(&metrics::HTTP_API_BEACON_PROPOSER_CACHE_HITS_TOTAL);
}
Ok(self.proposers.clone())
}
}
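To make the keying rule concrete, here is a standalone sketch of the decision-slot calculation used above (only standard `types` arithmetic, no chain access; the function name is illustrative):

```rust
use types::{Epoch, Slot};

// The shuffling for `shuffling_epoch` is fully determined by the block at the
// last slot of the previous epoch, saturating at slot 0 for the genesis epoch.
fn decision_slot(shuffling_epoch: Epoch, slots_per_epoch: u64) -> Slot {
    shuffling_epoch
        .start_slot(slots_per_epoch)
        .saturating_sub(1u64)
}
```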


@ -0,0 +1,87 @@
use beacon_chain::{BeaconChain, BeaconChainTypes};
use eth2::types::BlockId as CoreBlockId;
use std::str::FromStr;
use types::{Hash256, SignedBeaconBlock, Slot};
/// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a given
/// `BlockId`.
#[derive(Debug)]
pub struct BlockId(pub CoreBlockId);
impl BlockId {
pub fn from_slot(slot: Slot) -> Self {
Self(CoreBlockId::Slot(slot))
}
pub fn from_root(root: Hash256) -> Self {
Self(CoreBlockId::Root(root))
}
/// Return the block root identified by `self`.
pub fn root<T: BeaconChainTypes>(
&self,
chain: &BeaconChain<T>,
) -> Result<Hash256, warp::Rejection> {
match &self.0 {
CoreBlockId::Head => chain
.head_info()
.map(|head| head.block_root)
.map_err(warp_utils::reject::beacon_chain_error),
CoreBlockId::Genesis => Ok(chain.genesis_block_root),
CoreBlockId::Finalized => chain
.head_info()
.map(|head| head.finalized_checkpoint.root)
.map_err(warp_utils::reject::beacon_chain_error),
CoreBlockId::Justified => chain
.head_info()
.map(|head| head.current_justified_checkpoint.root)
.map_err(warp_utils::reject::beacon_chain_error),
CoreBlockId::Slot(slot) => chain
.block_root_at_slot(*slot)
.map_err(warp_utils::reject::beacon_chain_error)
.and_then(|root_opt| {
root_opt.ok_or_else(|| {
warp_utils::reject::custom_not_found(format!(
"beacon block at slot {}",
slot
))
})
}),
CoreBlockId::Root(root) => Ok(*root),
}
}
/// Return the `SignedBeaconBlock` identified by `self`.
pub fn block<T: BeaconChainTypes>(
&self,
chain: &BeaconChain<T>,
) -> Result<SignedBeaconBlock<T::EthSpec>, warp::Rejection> {
match &self.0 {
CoreBlockId::Head => chain
.head_beacon_block()
.map_err(warp_utils::reject::beacon_chain_error),
_ => {
let root = self.root(chain)?;
chain
.get_block(&root)
.map_err(warp_utils::reject::beacon_chain_error)
.and_then(|root_opt| {
root_opt.ok_or_else(|| {
warp_utils::reject::custom_not_found(format!(
"beacon block with root {}",
root
))
})
})
}
}
}
}
impl FromStr for BlockId {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
CoreBlockId::from_str(s).map(Self)
}
}
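A hedged usage sketch of the `{block_id}` path parameter (the accepted identifiers follow the standard API spec; the zero root below is a placeholder value):

```rust
use std::str::FromStr;

// Hypothetical parsing examples for the `{block_id}` path parameter.
fn parse_block_ids() -> Result<(), String> {
    let _head = BlockId::from_str("head")?;
    let _genesis = BlockId::from_str("genesis")?;
    let _finalized = BlockId::from_str("finalized")?;
    let _by_slot = BlockId::from_str("4096")?;
    let _by_root = BlockId::from_str(
        "0x0000000000000000000000000000000000000000000000000000000000000000",
    )?;
    Ok(())
}
```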

File diff suppressed because it is too large


@ -0,0 +1,32 @@
pub use lighthouse_metrics::*;
lazy_static::lazy_static! {
pub static ref HTTP_API_PATHS_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
"http_api_paths_total",
"Count of HTTP requests received",
&["path"]
);
pub static ref HTTP_API_STATUS_CODES_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
"http_api_status_codes_total",
"Count of HTTP status codes returned",
&["status"]
);
pub static ref HTTP_API_PATHS_TIMES: Result<HistogramVec> = try_create_histogram_vec(
"http_api_paths_times",
"Duration to process HTTP requests per path",
&["path"]
);
pub static ref HTTP_API_BEACON_PROPOSER_CACHE_TIMES: Result<Histogram> = try_create_histogram(
"http_api_beacon_proposer_cache_build_times",
"Duration to process HTTP requests per path",
);
pub static ref HTTP_API_BEACON_PROPOSER_CACHE_HITS_TOTAL: Result<IntCounter> = try_create_int_counter(
"http_api_beacon_proposer_cache_hits_total",
"Count of times the proposer cache has been hit",
);
pub static ref HTTP_API_BEACON_PROPOSER_CACHE_MISSES_TOTAL: Result<IntCounter> = try_create_int_counter(
"http_api_beacon_proposer_cache_misses_total",
"Count of times the proposer cache has been missed",
);
}
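As a rough illustration (assuming the existing `lighthouse_metrics::inc_counter_vec` helper, which is how other Lighthouse crates bump labelled counters; the wrapper function is hypothetical), a handler would record a request like so:

```rust
// Hypothetical sketch: bump the per-path counter for one request.
pub fn observe_request(path: &str) {
    inc_counter_vec(&HTTP_API_PATHS_TOTAL, &[path]);
}
```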


@ -0,0 +1,118 @@
use beacon_chain::{BeaconChain, BeaconChainTypes};
use eth2::types::StateId as CoreStateId;
use std::str::FromStr;
use types::{BeaconState, EthSpec, Fork, Hash256, Slot};
/// Wraps `eth2::types::StateId` and provides common state-access functionality. E.g., reading
/// states or parts of states from the database.
pub struct StateId(CoreStateId);
impl StateId {
pub fn head() -> Self {
Self(CoreStateId::Head)
}
pub fn slot(slot: Slot) -> Self {
Self(CoreStateId::Slot(slot))
}
/// Return the state root identified by `self`.
pub fn root<T: BeaconChainTypes>(
&self,
chain: &BeaconChain<T>,
) -> Result<Hash256, warp::Rejection> {
let slot = match &self.0 {
CoreStateId::Head => {
return chain
.head_info()
.map(|head| head.state_root)
.map_err(warp_utils::reject::beacon_chain_error)
}
CoreStateId::Genesis => return Ok(chain.genesis_state_root),
CoreStateId::Finalized => chain.head_info().map(|head| {
head.finalized_checkpoint
.epoch
.start_slot(T::EthSpec::slots_per_epoch())
}),
CoreStateId::Justified => chain.head_info().map(|head| {
head.current_justified_checkpoint
.epoch
.start_slot(T::EthSpec::slots_per_epoch())
}),
CoreStateId::Slot(slot) => Ok(*slot),
CoreStateId::Root(root) => return Ok(*root),
}
.map_err(warp_utils::reject::beacon_chain_error)?;
chain
.state_root_at_slot(slot)
.map_err(warp_utils::reject::beacon_chain_error)?
.ok_or_else(|| {
warp_utils::reject::custom_not_found(format!("beacon state at slot {}", slot))
})
}
/// Return the `fork` field of the state identified by `self`.
pub fn fork<T: BeaconChainTypes>(
&self,
chain: &BeaconChain<T>,
) -> Result<Fork, warp::Rejection> {
self.map_state(chain, |state| Ok(state.fork))
}
/// Return the `BeaconState` identified by `self`.
pub fn state<T: BeaconChainTypes>(
&self,
chain: &BeaconChain<T>,
) -> Result<BeaconState<T::EthSpec>, warp::Rejection> {
let (state_root, slot_opt) = match &self.0 {
CoreStateId::Head => {
return chain
.head_beacon_state()
.map_err(warp_utils::reject::beacon_chain_error)
}
CoreStateId::Slot(slot) => (self.root(chain)?, Some(*slot)),
_ => (self.root(chain)?, None),
};
chain
.get_state(&state_root, slot_opt)
.map_err(warp_utils::reject::beacon_chain_error)
.and_then(|opt| {
opt.ok_or_else(|| {
warp_utils::reject::custom_not_found(format!(
"beacon state at root {}",
state_root
))
})
})
}
/// Map a function across the `BeaconState` identified by `self`.
///
/// This function will avoid instantiating/copying a new state when `self` points to the head
/// of the chain.
pub fn map_state<T: BeaconChainTypes, F, U>(
&self,
chain: &BeaconChain<T>,
func: F,
) -> Result<U, warp::Rejection>
where
F: Fn(&BeaconState<T::EthSpec>) -> Result<U, warp::Rejection>,
{
match &self.0 {
CoreStateId::Head => chain
.with_head(|snapshot| Ok(func(&snapshot.beacon_state)))
.map_err(warp_utils::reject::beacon_chain_error)?,
_ => func(&self.state(chain)?),
}
}
}
impl FromStr for StateId {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
CoreStateId::from_str(s).map(Self)
}
}
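A brief sketch (hypothetical handler helper) of `map_state`, which avoids cloning the whole head state when only a single field is needed:

```rust
use beacon_chain::{BeaconChain, BeaconChainTypes};
use types::Slot;

// Hypothetical handler helper: read the head state's slot without cloning it.
fn head_state_slot<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
) -> Result<Slot, warp::Rejection> {
    StateId::head().map_state(chain, |state| Ok(state.slot))
}
```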


@ -0,0 +1,88 @@
use crate::state_id::StateId;
use beacon_chain::{BeaconChain, BeaconChainTypes};
use eth2::{
lighthouse::{GlobalValidatorInclusionData, ValidatorInclusionData},
types::ValidatorId,
};
use state_processing::per_epoch_processing::ValidatorStatuses;
use types::{Epoch, EthSpec};
/// Returns information about *all validators* (i.e., global) and how they performed during a given
/// epoch.
pub fn global_validator_inclusion_data<T: BeaconChainTypes>(
epoch: Epoch,
chain: &BeaconChain<T>,
) -> Result<GlobalValidatorInclusionData, warp::Rejection> {
let target_slot = epoch.end_slot(T::EthSpec::slots_per_epoch());
let state = StateId::slot(target_slot).state(chain)?;
let mut validator_statuses = ValidatorStatuses::new(&state, &chain.spec)
.map_err(warp_utils::reject::beacon_state_error)?;
validator_statuses
.process_attestations(&state, &chain.spec)
.map_err(warp_utils::reject::beacon_state_error)?;
let totals = validator_statuses.total_balances;
Ok(GlobalValidatorInclusionData {
current_epoch_active_gwei: totals.current_epoch(),
previous_epoch_active_gwei: totals.previous_epoch(),
current_epoch_attesting_gwei: totals.current_epoch_attesters(),
current_epoch_target_attesting_gwei: totals.current_epoch_target_attesters(),
previous_epoch_attesting_gwei: totals.previous_epoch_attesters(),
previous_epoch_target_attesting_gwei: totals.previous_epoch_target_attesters(),
previous_epoch_head_attesting_gwei: totals.previous_epoch_head_attesters(),
})
}
/// Returns information about a single validator and how it performed during a given epoch.
pub fn validator_inclusion_data<T: BeaconChainTypes>(
epoch: Epoch,
validator_id: &ValidatorId,
chain: &BeaconChain<T>,
) -> Result<Option<ValidatorInclusionData>, warp::Rejection> {
let target_slot = epoch.end_slot(T::EthSpec::slots_per_epoch());
let mut state = StateId::slot(target_slot).state(chain)?;
let mut validator_statuses = ValidatorStatuses::new(&state, &chain.spec)
.map_err(warp_utils::reject::beacon_state_error)?;
validator_statuses
.process_attestations(&state, &chain.spec)
.map_err(warp_utils::reject::beacon_state_error)?;
state
.update_pubkey_cache()
.map_err(warp_utils::reject::beacon_state_error)?;
let validator_index = match validator_id {
ValidatorId::Index(index) => *index as usize,
ValidatorId::PublicKey(pubkey) => {
if let Some(index) = state
.get_validator_index(pubkey)
.map_err(warp_utils::reject::beacon_state_error)?
{
index
} else {
return Ok(None);
}
}
};
Ok(validator_statuses
.statuses
.get(validator_index)
.map(|vote| ValidatorInclusionData {
is_slashed: vote.is_slashed,
is_withdrawable_in_current_epoch: vote.is_withdrawable_in_current_epoch,
is_active_in_current_epoch: vote.is_active_in_current_epoch,
is_active_in_previous_epoch: vote.is_active_in_previous_epoch,
current_epoch_effective_balance_gwei: vote.current_epoch_effective_balance,
is_current_epoch_attester: vote.is_current_epoch_attester,
is_current_epoch_target_attester: vote.is_current_epoch_target_attester,
is_previous_epoch_attester: vote.is_previous_epoch_attester,
is_previous_epoch_target_attester: vote.is_previous_epoch_target_attester,
is_previous_epoch_head_attester: vote.is_previous_epoch_head_attester,
}))
}
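A hedged example of calling the single-validator variant, as the Lighthouse-specific validator inclusion endpoints would (the epoch and index values are placeholders; the wrapper function is hypothetical):

```rust
use beacon_chain::{BeaconChain, BeaconChainTypes};
use eth2::{lighthouse::ValidatorInclusionData, types::ValidatorId};
use types::Epoch;

// Hypothetical wrapper: fetch inclusion data for validator 0 in epoch 0.
fn example_inclusion<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
) -> Result<Option<ValidatorInclusionData>, warp::Rejection> {
    validator_inclusion_data(Epoch::new(0), &ValidatorId::Index(0), chain)
}
```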

File diff suppressed because it is too large


@ -0,0 +1,28 @@
[package]
name = "http_metrics"
version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
prometheus = "0.9.0"
warp = "0.2.5"
serde = { version = "1.0.110", features = ["derive"] }
slog = "2.5.2"
beacon_chain = { path = "../beacon_chain" }
store = { path = "../store" }
eth2_libp2p = { path = "../eth2_libp2p" }
slot_clock = { path = "../../common/slot_clock" }
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
lazy_static = "1.4.0"
eth2 = { path = "../../common/eth2" }
lighthouse_version = { path = "../../common/lighthouse_version" }
warp_utils = { path = "../../common/warp_utils" }
[dev-dependencies]
tokio = { version = "0.2.21", features = ["sync"] }
reqwest = { version = "0.10.8", features = ["json"] }
environment = { path = "../../lighthouse/environment" }
types = { path = "../../consensus/types" }


@ -0,0 +1,135 @@
//! This crate provides an HTTP server that is solely dedicated to serving the `/metrics` endpoint.
//!
//! For other endpoints, see the `http_api` crate.
#[macro_use]
extern crate lazy_static;
mod metrics;
use beacon_chain::{BeaconChain, BeaconChainTypes};
use lighthouse_version::version_with_platform;
use serde::{Deserialize, Serialize};
use slog::{crit, info, Logger};
use std::future::Future;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use std::path::PathBuf;
use std::sync::Arc;
use warp::{http::Response, Filter};
#[derive(Debug)]
pub enum Error {
Warp(warp::Error),
Other(String),
}
impl From<warp::Error> for Error {
fn from(e: warp::Error) -> Self {
Error::Warp(e)
}
}
impl From<String> for Error {
fn from(e: String) -> Self {
Error::Other(e)
}
}
/// A wrapper around all the items required to spawn the HTTP server.
///
/// The server will gracefully handle the case where any fields are `None`.
pub struct Context<T: BeaconChainTypes> {
pub config: Config,
pub chain: Option<Arc<BeaconChain<T>>>,
pub db_path: Option<PathBuf>,
pub freezer_db_path: Option<PathBuf>,
pub log: Logger,
}
/// Configuration for the HTTP server.
#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)]
pub struct Config {
pub enabled: bool,
pub listen_addr: Ipv4Addr,
pub listen_port: u16,
pub allow_origin: Option<String>,
}
impl Default for Config {
fn default() -> Self {
Self {
enabled: false,
listen_addr: Ipv4Addr::new(127, 0, 0, 1),
listen_port: 5054,
allow_origin: None,
}
}
}
/// Creates a server that will serve requests using information from `ctx`.
///
/// The server will shut down gracefully when the `shutdown` future resolves.
///
/// ## Returns
///
/// This function will bind the server to the provided address and then return a tuple of:
///
/// - `SocketAddr`: the address that the HTTP server will listen on.
/// - `Future`: the actual server future that will need to be awaited.
///
/// ## Errors
///
/// Returns an error if the server is unable to bind or there is another error during
/// configuration.
pub fn serve<T: BeaconChainTypes>(
ctx: Arc<Context<T>>,
shutdown: impl Future<Output = ()> + Send + Sync + 'static,
) -> Result<(SocketAddr, impl Future<Output = ()>), Error> {
let config = &ctx.config;
let log = ctx.log.clone();
let allow_origin = config.allow_origin.clone();
// Sanity check.
if !config.enabled {
crit!(log, "Cannot start disabled metrics HTTP server");
return Err(Error::Other(
"A disabled metrics server should not be started".to_string(),
));
}
let inner_ctx = ctx.clone();
let routes = warp::get()
.and(warp::path("metrics"))
.map(move || inner_ctx.clone())
.and_then(|ctx: Arc<Context<T>>| async move {
Ok::<_, warp::Rejection>(
metrics::gather_prometheus_metrics(&ctx)
.map(|body| Response::builder().status(200).body(body).unwrap())
.unwrap_or_else(|e| {
Response::builder()
.status(500)
.body(format!("Unable to gather metrics: {:?}", e))
.unwrap()
}),
)
})
// Add a `Server` header.
.map(|reply| warp::reply::with_header(reply, "Server", &version_with_platform()))
// Maybe add some CORS headers.
.map(move |reply| warp_utils::reply::maybe_cors(reply, allow_origin.as_ref()));
let (listening_socket, server) = warp::serve(routes).try_bind_with_graceful_shutdown(
SocketAddrV4::new(config.listen_addr, config.listen_port),
async {
shutdown.await;
},
)?;
info!(
log,
"Metrics HTTP server started";
"listen_address" => listening_socket.to_string(),
);
Ok((listening_socket, server))
}


@ -1,38 +1,11 @@
use crate::{ApiError, Context}; use crate::Context;
use beacon_chain::BeaconChainTypes; use beacon_chain::BeaconChainTypes;
use eth2::lighthouse::Health;
use lighthouse_metrics::{Encoder, TextEncoder}; use lighthouse_metrics::{Encoder, TextEncoder};
use rest_types::Health;
use std::sync::Arc;
pub use lighthouse_metrics::*; pub use lighthouse_metrics::*;
lazy_static! { lazy_static! {
pub static ref BEACON_HTTP_API_REQUESTS_TOTAL: Result<IntCounterVec> =
try_create_int_counter_vec(
"beacon_http_api_requests_total",
"Count of HTTP requests received",
&["endpoint"]
);
pub static ref BEACON_HTTP_API_SUCCESS_TOTAL: Result<IntCounterVec> =
try_create_int_counter_vec(
"beacon_http_api_success_total",
"Count of HTTP requests that returned 200 OK",
&["endpoint"]
);
pub static ref BEACON_HTTP_API_ERROR_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
"beacon_http_api_error_total",
"Count of HTTP that did not return 200 OK",
&["endpoint"]
);
pub static ref BEACON_HTTP_API_TIMES_TOTAL: Result<HistogramVec> = try_create_histogram_vec(
"beacon_http_api_times_total",
"Duration to process HTTP requests",
&["endpoint"]
);
pub static ref REQUEST_RESPONSE_TIME: Result<Histogram> = try_create_histogram(
"http_server_request_duration_seconds",
"Time taken to build a response to a HTTP request"
);
pub static ref PROCESS_NUM_THREADS: Result<IntGauge> = try_create_int_gauge( pub static ref PROCESS_NUM_THREADS: Result<IntGauge> = try_create_int_gauge(
"process_num_threads", "process_num_threads",
"Number of threads used by the current process" "Number of threads used by the current process"
@ -67,14 +40,9 @@ lazy_static! {
try_create_float_gauge("system_loadavg_15", "Loadavg over 15 minutes"); try_create_float_gauge("system_loadavg_15", "Loadavg over 15 minutes");
} }
/// Returns the full set of Prometheus metrics for the Beacon Node application. pub fn gather_prometheus_metrics<T: BeaconChainTypes>(
/// ctx: &Context<T>,
/// # Note ) -> std::result::Result<String, String> {
///
/// This is a HTTP handler method.
pub fn get_prometheus<T: BeaconChainTypes>(
ctx: Arc<Context<T>>,
) -> std::result::Result<String, ApiError> {
let mut buffer = vec![]; let mut buffer = vec![];
let encoder = TextEncoder::new(); let encoder = TextEncoder::new();
@ -94,9 +62,17 @@ pub fn get_prometheus<T: BeaconChainTypes>(
// using `lighthouse_metrics::gather(..)` to collect the global `DEFAULT_REGISTRY` metrics into // using `lighthouse_metrics::gather(..)` to collect the global `DEFAULT_REGISTRY` metrics into
// a string that can be returned via HTTP. // a string that can be returned via HTTP.
slot_clock::scrape_for_metrics::<T::EthSpec, T::SlotClock>(&ctx.beacon_chain.slot_clock); if let Some(beacon_chain) = ctx.chain.as_ref() {
store::scrape_for_metrics(&ctx.db_path, &ctx.freezer_db_path); slot_clock::scrape_for_metrics::<T::EthSpec, T::SlotClock>(&beacon_chain.slot_clock);
beacon_chain::scrape_for_metrics(&ctx.beacon_chain); beacon_chain::scrape_for_metrics(beacon_chain);
}
if let (Some(db_path), Some(freezer_db_path)) =
(ctx.db_path.as_ref(), ctx.freezer_db_path.as_ref())
{
store::scrape_for_metrics(db_path, freezer_db_path);
}
eth2_libp2p::scrape_discovery_metrics(); eth2_libp2p::scrape_discovery_metrics();
// This will silently fail if we are unable to observe the health. This is desired behaviour // This will silently fail if we are unable to observe the health. This is desired behaviour
@ -125,6 +101,5 @@ pub fn get_prometheus<T: BeaconChainTypes>(
.encode(&lighthouse_metrics::gather(), &mut buffer) .encode(&lighthouse_metrics::gather(), &mut buffer)
.unwrap(); .unwrap();
String::from_utf8(buffer) String::from_utf8(buffer).map_err(|e| format!("Failed to encode prometheus info: {:?}", e))
.map_err(|e| ApiError::ServerError(format!("Failed to encode prometheus info: {:?}", e)))
} }


@ -0,0 +1,46 @@
use beacon_chain::test_utils::BlockingMigratorEphemeralHarnessType;
use environment::null_logger;
use http_metrics::Config;
use reqwest::StatusCode;
use std::net::Ipv4Addr;
use std::sync::Arc;
use tokio::sync::oneshot;
use types::MainnetEthSpec;
type Context = http_metrics::Context<BlockingMigratorEphemeralHarnessType<MainnetEthSpec>>;
#[tokio::test(core_threads = 2)]
async fn returns_200_ok() {
let log = null_logger().unwrap();
let context = Arc::new(Context {
config: Config {
enabled: true,
listen_addr: Ipv4Addr::new(127, 0, 0, 1),
listen_port: 0,
allow_origin: None,
},
chain: None,
db_path: None,
freezer_db_path: None,
log,
});
let ctx = context.clone();
let (_shutdown_tx, shutdown_rx) = oneshot::channel::<()>();
let server_shutdown = async {
// It's not really interesting why this triggered, just that it happened.
let _ = shutdown_rx.await;
};
let (listening_socket, server) = http_metrics::serve(ctx, server_shutdown).unwrap();
tokio::spawn(async { server.await });
let url = format!(
"http://{}:{}/metrics",
listening_socket.ip(),
listening_socket.port()
);
assert_eq!(reqwest::get(&url).await.unwrap().status(), StatusCode::OK);
}


@ -17,7 +17,6 @@ beacon_chain = { path = "../beacon_chain" }
store = { path = "../store" } store = { path = "../store" }
eth2_libp2p = { path = "../eth2_libp2p" } eth2_libp2p = { path = "../eth2_libp2p" }
hashset_delay = { path = "../../common/hashset_delay" } hashset_delay = { path = "../../common/hashset_delay" }
rest_types = { path = "../../common/rest_types" }
types = { path = "../../consensus/types" } types = { path = "../../consensus/types" }
state_processing = { path = "../../consensus/state_processing" } state_processing = { path = "../../consensus/state_processing" }
slot_clock = { path = "../../common/slot_clock" } slot_clock = { path = "../../common/slot_clock" }


@ -15,9 +15,8 @@ use slog::{debug, error, o, trace, warn};
use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_chain::{BeaconChain, BeaconChainTypes};
use eth2_libp2p::SubnetDiscovery; use eth2_libp2p::SubnetDiscovery;
use hashset_delay::HashSetDelay; use hashset_delay::HashSetDelay;
use rest_types::ValidatorSubscription;
use slot_clock::SlotClock; use slot_clock::SlotClock;
use types::{Attestation, EthSpec, Slot, SubnetId}; use types::{Attestation, EthSpec, Slot, SubnetId, ValidatorSubscription};
use crate::metrics; use crate::metrics;


@ -45,7 +45,7 @@ impl<T: BeaconChainTypes> Worker<T> {
let attestation = match self let attestation = match self
.chain .chain
.verify_unaggregated_attestation_for_gossip(attestation, subnet_id) .verify_unaggregated_attestation_for_gossip(attestation, Some(subnet_id))
{ {
Ok(attestation) => attestation, Ok(attestation) => attestation,
Err(e) => { Err(e) => {


@ -15,13 +15,12 @@ use eth2_libp2p::{
}; };
use eth2_libp2p::{MessageAcceptance, Service as LibP2PService}; use eth2_libp2p::{MessageAcceptance, Service as LibP2PService};
use futures::prelude::*; use futures::prelude::*;
use rest_types::ValidatorSubscription;
use slog::{debug, error, info, o, trace, warn}; use slog::{debug, error, info, o, trace, warn};
use std::{collections::HashMap, sync::Arc, time::Duration}; use std::{collections::HashMap, sync::Arc, time::Duration};
use store::HotColdDB; use store::HotColdDB;
use tokio::sync::mpsc; use tokio::sync::mpsc;
use tokio::time::Delay; use tokio::time::Delay;
use types::EthSpec; use types::{EthSpec, ValidatorSubscription};
mod tests; mod tests;


@ -332,6 +332,51 @@ impl<T: EthSpec> OperationPool<T> {
pub fn num_voluntary_exits(&self) -> usize { pub fn num_voluntary_exits(&self) -> usize {
self.voluntary_exits.read().len() self.voluntary_exits.read().len()
} }
/// Returns all known `Attestation` objects.
///
/// This method may return objects that are invalid for block inclusion.
pub fn get_all_attestations(&self) -> Vec<Attestation<T>> {
self.attestations
.read()
.iter()
.map(|(_, attns)| attns.iter().cloned())
.flatten()
.collect()
}
/// Returns all known `AttesterSlashing` objects.
///
/// This method may return objects that are invalid for block inclusion.
pub fn get_all_attester_slashings(&self) -> Vec<AttesterSlashing<T>> {
self.attester_slashings
.read()
.iter()
.map(|(slashing, _)| slashing.clone())
.collect()
}
/// Returns all known `ProposerSlashing` objects.
///
/// This method may return objects that are invalid for block inclusion.
pub fn get_all_proposer_slashings(&self) -> Vec<ProposerSlashing> {
self.proposer_slashings
.read()
.iter()
.map(|(_, slashing)| slashing.clone())
.collect()
}
/// Returns all known `SignedVoluntaryExit` objects.
///
/// This method may return objects that are invalid for block inclusion.
pub fn get_all_voluntary_exits(&self) -> Vec<SignedVoluntaryExit> {
self.voluntary_exits
.read()
.iter()
.map(|(_, exit)| exit.clone())
.collect()
}
} }
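A short sketch (hypothetical helper) showing the new accessors being used together, e.g. when serving the pool contents over the API; note the returned objects may not be valid for inclusion in a block:

```rust
use types::EthSpec;

// Hypothetical helper: count everything currently sitting in the pool.
fn pool_totals<T: EthSpec>(pool: &OperationPool<T>) -> usize {
    pool.get_all_attestations().len()
        + pool.get_all_attester_slashings().len()
        + pool.get_all_proposer_slashings().len()
        + pool.get_all_voluntary_exits().len()
}
```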
/// Filter up to a maximum number of operations out of an iterator. /// Filter up to a maximum number of operations out of an iterator.


@ -1,499 +0,0 @@
use crate::helpers::*;
use crate::validator::get_state_for_epoch;
use crate::Context;
use crate::{ApiError, UrlQuery};
use beacon_chain::{
observed_operations::ObservationOutcome, BeaconChain, BeaconChainTypes, StateSkipConfig,
};
use futures::executor::block_on;
use hyper::body::Bytes;
use hyper::{Body, Request};
use rest_types::{
BlockResponse, CanonicalHeadResponse, Committee, HeadBeaconBlock, StateResponse,
ValidatorRequest, ValidatorResponse,
};
use std::io::Write;
use std::sync::Arc;
use slog::error;
use types::{
AttesterSlashing, BeaconState, EthSpec, Hash256, ProposerSlashing, PublicKeyBytes,
RelativeEpoch, SignedBeaconBlockHash, Slot,
};
/// Returns a summary of the head of the beacon chain.
pub fn get_head<T: BeaconChainTypes>(
ctx: Arc<Context<T>>,
) -> Result<CanonicalHeadResponse, ApiError> {
let beacon_chain = &ctx.beacon_chain;
let chain_head = beacon_chain.head()?;
Ok(CanonicalHeadResponse {
slot: chain_head.beacon_state.slot,
block_root: chain_head.beacon_block_root,
state_root: chain_head.beacon_state_root,
finalized_slot: chain_head
.beacon_state
.finalized_checkpoint
.epoch
.start_slot(T::EthSpec::slots_per_epoch()),
finalized_block_root: chain_head.beacon_state.finalized_checkpoint.root,
justified_slot: chain_head
.beacon_state
.current_justified_checkpoint
.epoch
.start_slot(T::EthSpec::slots_per_epoch()),
justified_block_root: chain_head.beacon_state.current_justified_checkpoint.root,
previous_justified_slot: chain_head
.beacon_state
.previous_justified_checkpoint
.epoch
.start_slot(T::EthSpec::slots_per_epoch()),
previous_justified_block_root: chain_head.beacon_state.previous_justified_checkpoint.root,
})
}
/// Return the list of heads of the beacon chain.
pub fn get_heads<T: BeaconChainTypes>(ctx: Arc<Context<T>>) -> Vec<HeadBeaconBlock> {
ctx.beacon_chain
.heads()
.into_iter()
.map(|(beacon_block_root, beacon_block_slot)| HeadBeaconBlock {
beacon_block_root,
beacon_block_slot,
})
.collect()
}
/// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`.
pub fn get_block<T: BeaconChainTypes>(
req: Request<Vec<u8>>,
ctx: Arc<Context<T>>,
) -> Result<BlockResponse<T::EthSpec>, ApiError> {
let beacon_chain = &ctx.beacon_chain;
let query_params = ["root", "slot"];
let (key, value) = UrlQuery::from_request(&req)?.first_of(&query_params)?;
let block_root = match (key.as_ref(), value) {
("slot", value) => {
let target = parse_slot(&value)?;
block_root_at_slot(beacon_chain, target)?.ok_or_else(|| {
ApiError::NotFound(format!(
"Unable to find SignedBeaconBlock for slot {:?}",
target
))
})?
}
("root", value) => parse_root(&value)?,
_ => return Err(ApiError::ServerError("Unexpected query parameter".into())),
};
let block = beacon_chain.store.get_block(&block_root)?.ok_or_else(|| {
ApiError::NotFound(format!(
"Unable to find SignedBeaconBlock for root {:?}",
block_root
))
})?;
Ok(BlockResponse {
root: block_root,
beacon_block: block,
})
}
/// HTTP handler to return a `SignedBeaconBlock` root at a given `slot`.
pub fn get_block_root<T: BeaconChainTypes>(
req: Request<Vec<u8>>,
ctx: Arc<Context<T>>,
) -> Result<Hash256, ApiError> {
let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?;
let target = parse_slot(&slot_string)?;
block_root_at_slot(&ctx.beacon_chain, target)?.ok_or_else(|| {
ApiError::NotFound(format!(
"Unable to find SignedBeaconBlock for slot {:?}",
target
))
})
}
fn make_sse_response_chunk(new_head_hash: SignedBeaconBlockHash) -> std::io::Result<Bytes> {
let mut buffer = Vec::new();
{
let mut sse_message = uhttp_sse::SseMessage::new(&mut buffer);
let untyped_hash: Hash256 = new_head_hash.into();
write!(sse_message.data()?, "{:?}", untyped_hash)?;
}
let bytes: Bytes = buffer.into();
Ok(bytes)
}
pub fn stream_forks<T: BeaconChainTypes>(ctx: Arc<Context<T>>) -> Result<Body, ApiError> {
let mut events = ctx.events.lock().add_rx();
let (mut sender, body) = Body::channel();
std::thread::spawn(move || {
while let Ok(new_head_hash) = events.recv() {
let chunk = match make_sse_response_chunk(new_head_hash) {
Ok(chunk) => chunk,
Err(e) => {
error!(ctx.log, "Failed to make SSE chunk"; "error" => e.to_string());
sender.abort();
break;
}
};
match block_on(sender.send_data(chunk)) {
Err(e) if e.is_closed() => break,
Err(e) => error!(ctx.log, "Couldn't stream piece {:?}", e),
Ok(_) => (),
}
}
});
Ok(body)
}
/// HTTP handler which accepts a query string containing a list of validator pubkeys and maps each
/// to a `ValidatorResponse`.
///
/// This method is limited to as many `pubkeys` as can fit in a URL. See `post_validators` for
/// bulk requests.
pub fn get_validators<T: BeaconChainTypes>(
req: Request<Vec<u8>>,
ctx: Arc<Context<T>>,
) -> Result<Vec<ValidatorResponse>, ApiError> {
let query = UrlQuery::from_request(&req)?;
let validator_pubkeys = query
.all_of("validator_pubkeys")?
.iter()
.map(|validator_pubkey_str| parse_pubkey_bytes(validator_pubkey_str))
.collect::<Result<Vec<_>, _>>()?;
let state_root_opt = if let Some((_key, value)) = query.first_of_opt(&["state_root"]) {
Some(parse_root(&value)?)
} else {
None
};
validator_responses_by_pubkey(&ctx.beacon_chain, state_root_opt, validator_pubkeys)
}
/// HTTP handler to return all validators, each as a `ValidatorResponse`.
pub fn get_all_validators<T: BeaconChainTypes>(
req: Request<Vec<u8>>,
ctx: Arc<Context<T>>,
) -> Result<Vec<ValidatorResponse>, ApiError> {
let query = UrlQuery::from_request(&req)?;
let state_root_opt = if let Some((_key, value)) = query.first_of_opt(&["state_root"]) {
Some(parse_root(&value)?)
} else {
None
};
let mut state = get_state_from_root_opt(&ctx.beacon_chain, state_root_opt)?;
let validators = state.validators.clone();
validators
.iter()
.map(|validator| validator_response_by_pubkey(&mut state, validator.pubkey.clone()))
.collect::<Result<Vec<_>, _>>()
}
/// HTTP handler to return all active validators, each as a `ValidatorResponse`.
pub fn get_active_validators<T: BeaconChainTypes>(
req: Request<Vec<u8>>,
ctx: Arc<Context<T>>,
) -> Result<Vec<ValidatorResponse>, ApiError> {
let query = UrlQuery::from_request(&req)?;
let state_root_opt = if let Some((_key, value)) = query.first_of_opt(&["state_root"]) {
Some(parse_root(&value)?)
} else {
None
};
let mut state = get_state_from_root_opt(&ctx.beacon_chain, state_root_opt)?;
let validators = state.validators.clone();
let current_epoch = state.current_epoch();
validators
.iter()
.filter(|validator| validator.is_active_at(current_epoch))
.map(|validator| validator_response_by_pubkey(&mut state, validator.pubkey.clone()))
.collect::<Result<Vec<_>, _>>()
}
/// HTTP handler which accepts a `ValidatorRequest` and returns a `ValidatorResponse` for
/// each of the given `pubkeys`. When `state_root` is `None`, the canonical head is used.
///
/// This method allows for an effectively unbounded list of `pubkeys`, whereas the `get_validators`
/// request is limited by the maximum number of pubkeys that fit in a URL.
pub fn post_validators<T: BeaconChainTypes>(
req: Request<Vec<u8>>,
ctx: Arc<Context<T>>,
) -> Result<Vec<ValidatorResponse>, ApiError> {
serde_json::from_slice::<ValidatorRequest>(&req.into_body())
.map_err(|e| {
ApiError::BadRequest(format!(
"Unable to parse JSON into ValidatorRequest: {:?}",
e
))
})
.and_then(|bulk_request| {
validator_responses_by_pubkey(
&ctx.beacon_chain,
bulk_request.state_root,
bulk_request.pubkeys,
)
})
}
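// Illustrative sketch only: the practical difference between `get_validators` and
// `post_validators`. The GET form packs pubkeys into the query string and is bounded by
// URL length; the POST form sends the same data as a JSON `ValidatorRequest` body. The
// `state_root`/`pubkeys` field names mirror the accesses above and are otherwise an
// assumption about `ValidatorRequest`.
fn example_validator_requests(pubkeys: &[String]) -> (String, serde_json::Value) {
    // GET: /beacon/validators?validator_pubkeys=0x..&validator_pubkeys=0x..
    let query = format!(
        "/beacon/validators?{}",
        pubkeys
            .iter()
            .map(|pk| format!("validator_pubkeys={}", pk))
            .collect::<Vec<_>>()
            .join("&")
    );
    // POST: the same pubkeys in a JSON body, with an optional state root.
    let body = serde_json::json!({
        "state_root": null,
        "pubkeys": pubkeys,
    });
    (query, body)
}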
/// Returns either the state given by `state_root_opt`, or the canonical head state if it is
/// `None`.
fn get_state_from_root_opt<T: BeaconChainTypes>(
beacon_chain: &BeaconChain<T>,
state_root_opt: Option<Hash256>,
) -> Result<BeaconState<T::EthSpec>, ApiError> {
if let Some(state_root) = state_root_opt {
beacon_chain
.get_state(&state_root, None)
.map_err(|e| {
ApiError::ServerError(format!(
"Database error when reading state root {}: {:?}",
state_root, e
))
})?
.ok_or_else(|| ApiError::NotFound(format!("No state exists with root: {}", state_root)))
} else {
Ok(beacon_chain.head()?.beacon_state)
}
}
/// Maps a vec of `validator_pubkey` to a vec of `ValidatorResponse`, using the state at the given
/// `state_root`. If `state_root.is_none()`, uses the canonical head state.
fn validator_responses_by_pubkey<T: BeaconChainTypes>(
beacon_chain: &BeaconChain<T>,
state_root_opt: Option<Hash256>,
validator_pubkeys: Vec<PublicKeyBytes>,
) -> Result<Vec<ValidatorResponse>, ApiError> {
let mut state = get_state_from_root_opt(beacon_chain, state_root_opt)?;
validator_pubkeys
.into_iter()
.map(|validator_pubkey| validator_response_by_pubkey(&mut state, validator_pubkey))
.collect::<Result<Vec<_>, ApiError>>()
}
/// Maps a `validator_pubkey` to a `ValidatorResponse`, using the given state.
///
/// The provided `state` must have a fully up-to-date pubkey cache.
fn validator_response_by_pubkey<E: EthSpec>(
state: &mut BeaconState<E>,
validator_pubkey: PublicKeyBytes,
) -> Result<ValidatorResponse, ApiError> {
let validator_index_opt = state
.get_validator_index(&validator_pubkey)
.map_err(|e| ApiError::ServerError(format!("Unable to read pubkey cache: {:?}", e)))?;
if let Some(validator_index) = validator_index_opt {
let balance = state.balances.get(validator_index).ok_or_else(|| {
ApiError::ServerError(format!("Invalid balances index: {:?}", validator_index))
})?;
let validator = state
.validators
.get(validator_index)
.ok_or_else(|| {
ApiError::ServerError(format!("Invalid validator index: {:?}", validator_index))
})?
.clone();
Ok(ValidatorResponse {
pubkey: validator_pubkey,
validator_index: Some(validator_index),
balance: Some(*balance),
validator: Some(validator),
})
} else {
Ok(ValidatorResponse {
pubkey: validator_pubkey,
validator_index: None,
balance: None,
validator: None,
})
}
}
/// HTTP handler to return the beacon committees for the `epoch` supplied in the query string.
pub fn get_committees<T: BeaconChainTypes>(
req: Request<Vec<u8>>,
ctx: Arc<Context<T>>,
) -> Result<Vec<Committee>, ApiError> {
let query = UrlQuery::from_request(&req)?;
let epoch = query.epoch()?;
let mut state =
get_state_for_epoch(&ctx.beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?;
let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), epoch).map_err(|e| {
ApiError::ServerError(format!("Failed to get state suitable for epoch: {:?}", e))
})?;
state
.build_committee_cache(relative_epoch, &ctx.beacon_chain.spec)
.map_err(|e| ApiError::ServerError(format!("Unable to build committee cache: {:?}", e)))?;
Ok(state
.get_beacon_committees_at_epoch(relative_epoch)
.map_err(|e| ApiError::ServerError(format!("Unable to get all committees: {:?}", e)))?
.into_iter()
.map(|c| Committee {
slot: c.slot,
index: c.index,
committee: c.committee.to_vec(),
})
.collect::<Vec<_>>())
}
/// HTTP handler to return a `BeaconState` at a given `root` or `slot`.
///
/// Will not return a state if the request slot is in the future. Will return states higher than
/// the current head by skipping slots.
pub fn get_state<T: BeaconChainTypes>(
req: Request<Vec<u8>>,
ctx: Arc<Context<T>>,
) -> Result<StateResponse<T::EthSpec>, ApiError> {
let head_state = ctx.beacon_chain.head()?.beacon_state;
let (key, value) = match UrlQuery::from_request(&req) {
Ok(query) => {
// We have *some* parameters, just check them.
let query_params = ["root", "slot"];
query.first_of(&query_params)?
}
Err(ApiError::BadRequest(_)) => {
// No parameters provided at all, use current slot.
(String::from("slot"), head_state.slot.to_string())
}
Err(e) => {
return Err(e);
}
};
let (root, state): (Hash256, BeaconState<T::EthSpec>) = match (key.as_ref(), value) {
("slot", value) => state_at_slot(&ctx.beacon_chain, parse_slot(&value)?)?,
("root", value) => {
let root = &parse_root(&value)?;
let state = ctx
.beacon_chain
.store
.get_state(root, None)?
.ok_or_else(|| ApiError::NotFound(format!("No state for root: {:?}", root)))?;
(*root, state)
}
_ => return Err(ApiError::ServerError("Unexpected query parameter".into())),
};
Ok(StateResponse {
root,
beacon_state: state,
})
}
/// HTTP handler to return a `BeaconState` root at a given `slot`.
///
/// Will not return a state if the request slot is in the future. Will return states higher than
/// the current head by skipping slots.
pub fn get_state_root<T: BeaconChainTypes>(
req: Request<Vec<u8>>,
ctx: Arc<Context<T>>,
) -> Result<Hash256, ApiError> {
let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?;
let slot = parse_slot(&slot_string)?;
state_root_at_slot(&ctx.beacon_chain, slot, StateSkipConfig::WithStateRoots)
}
/// HTTP handler to return a `BeaconState` at the genesis block.
///
/// This is an undocumented convenience method used during testing. For production, simply do a
/// state request at slot 0.
pub fn get_genesis_state<T: BeaconChainTypes>(
ctx: Arc<Context<T>>,
) -> Result<BeaconState<T::EthSpec>, ApiError> {
state_at_slot(&ctx.beacon_chain, Slot::new(0)).map(|(_root, state)| state)
}
pub fn proposer_slashing<T: BeaconChainTypes>(
req: Request<Vec<u8>>,
ctx: Arc<Context<T>>,
) -> Result<bool, ApiError> {
let body = req.into_body();
serde_json::from_slice::<ProposerSlashing>(&body)
.map_err(|e| format!("Unable to parse JSON into ProposerSlashing: {:?}", e))
.and_then(move |proposer_slashing| {
if ctx.beacon_chain.eth1_chain.is_some() {
let obs_outcome = ctx
.beacon_chain
.verify_proposer_slashing_for_gossip(proposer_slashing)
.map_err(|e| format!("Error while verifying proposer slashing: {:?}", e))?;
if let ObservationOutcome::New(verified_proposer_slashing) = obs_outcome {
ctx.beacon_chain
.import_proposer_slashing(verified_proposer_slashing);
Ok(())
} else {
Err("Proposer slashing for that validator index already known".into())
}
} else {
Err("Cannot insert proposer slashing on node without Eth1 connection.".to_string())
}
})
.map_err(ApiError::BadRequest)?;
Ok(true)
}
pub fn attester_slashing<T: BeaconChainTypes>(
req: Request<Vec<u8>>,
ctx: Arc<Context<T>>,
) -> Result<bool, ApiError> {
let body = req.into_body();
serde_json::from_slice::<AttesterSlashing<T::EthSpec>>(&body)
.map_err(|e| {
ApiError::BadRequest(format!(
"Unable to parse JSON into AttesterSlashing: {:?}",
e
))
})
.and_then(move |attester_slashing| {
if ctx.beacon_chain.eth1_chain.is_some() {
ctx.beacon_chain
.verify_attester_slashing_for_gossip(attester_slashing)
.map_err(|e| format!("Error while verifying attester slashing: {:?}", e))
.and_then(|outcome| {
if let ObservationOutcome::New(verified_attester_slashing) = outcome {
ctx.beacon_chain
.import_attester_slashing(verified_attester_slashing)
.map_err(|e| {
format!("Error while importing attester slashing: {:?}", e)
})
} else {
Err("Attester slashing only covers already slashed indices".to_string())
}
})
.map_err(ApiError::BadRequest)
} else {
Err(ApiError::BadRequest(
"Cannot insert attester slashing on node without Eth1 connection.".to_string(),
))
}
})?;
Ok(true)
}

@@ -1,55 +0,0 @@
use serde::{Deserialize, Serialize};
use std::net::Ipv4Addr;
/// Defines the encoding for the API.
#[derive(Clone, Serialize, Deserialize, Copy)]
pub enum ApiEncodingFormat {
JSON,
YAML,
SSZ,
}
impl ApiEncodingFormat {
pub fn get_content_type(&self) -> &str {
match self {
ApiEncodingFormat::JSON => "application/json",
ApiEncodingFormat::YAML => "application/yaml",
ApiEncodingFormat::SSZ => "application/ssz",
}
}
}
impl From<&str> for ApiEncodingFormat {
fn from(f: &str) -> ApiEncodingFormat {
match f {
"application/yaml" => ApiEncodingFormat::YAML,
"application/ssz" => ApiEncodingFormat::SSZ,
_ => ApiEncodingFormat::JSON,
}
}
}
/// HTTP REST API Configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
/// Enable the REST API server.
pub enabled: bool,
/// The IPv4 address the REST API HTTP server will listen on.
pub listen_address: Ipv4Addr,
/// The port the REST API HTTP server will listen on.
pub port: u16,
/// If non-empty, an 'Access-Control-Allow-Origin' header with this value will be present in
/// responses. Use "*" to allow any origin.
pub allow_origin: String,
}
impl Default for Config {
fn default() -> Self {
Config {
enabled: false,
listen_address: Ipv4Addr::new(127, 0, 0, 1),
port: 5052,
allow_origin: "".to_string(),
}
}
}
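// Illustrative sketch only: how the pieces above compose. `ApiEncodingFormat::from`
// falls back to JSON for unknown Accept values, and `Config::default()` listens on
// 127.0.0.1:5052 with CORS disabled (empty `allow_origin`).
fn response_content_type(accept: Option<&str>) -> String {
    ApiEncodingFormat::from(accept.unwrap_or("application/json"))
        .get_content_type()
        .to_string()
}
fn default_listen_addr() -> (std::net::Ipv4Addr, u16) {
    let config = Config::default();
    (config.listen_address, config.port)
}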

@@ -1,126 +0,0 @@
use crate::helpers::*;
use crate::{ApiError, Context, UrlQuery};
use beacon_chain::BeaconChainTypes;
use hyper::Request;
use rest_types::{IndividualVotesRequest, IndividualVotesResponse};
use serde::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
use state_processing::per_epoch_processing::{TotalBalances, ValidatorStatuses};
use std::sync::Arc;
use types::EthSpec;
/// The results of validators voting during an epoch.
///
/// Provides information about the current and previous epochs.
#[derive(Serialize, Deserialize, Encode, Decode)]
pub struct VoteCount {
/// The total effective balance of all active validators during the _current_ epoch.
pub current_epoch_active_gwei: u64,
/// The total effective balance of all active validators during the _previous_ epoch.
pub previous_epoch_active_gwei: u64,
/// The total effective balance of all validators who attested during the _current_ epoch.
pub current_epoch_attesting_gwei: u64,
/// The total effective balance of all validators who attested during the _current_ epoch and
/// agreed with the state about the beacon block at the first slot of the _current_ epoch.
pub current_epoch_target_attesting_gwei: u64,
/// The total effective balance of all validators who attested during the _previous_ epoch.
pub previous_epoch_attesting_gwei: u64,
/// The total effective balance of all validators who attested during the _previous_ epoch and
/// agreed with the state about the beacon block at the first slot of the _previous_ epoch.
pub previous_epoch_target_attesting_gwei: u64,
/// The total effective balance of all validators who attested during the _previous_ epoch and
/// agreed with the state about the beacon block at the time of attestation.
pub previous_epoch_head_attesting_gwei: u64,
}
impl Into<VoteCount> for TotalBalances {
fn into(self) -> VoteCount {
VoteCount {
current_epoch_active_gwei: self.current_epoch(),
previous_epoch_active_gwei: self.previous_epoch(),
current_epoch_attesting_gwei: self.current_epoch_attesters(),
current_epoch_target_attesting_gwei: self.current_epoch_target_attesters(),
previous_epoch_attesting_gwei: self.previous_epoch_attesters(),
previous_epoch_target_attesting_gwei: self.previous_epoch_target_attesters(),
previous_epoch_head_attesting_gwei: self.previous_epoch_head_attesters(),
}
}
}
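// Illustrative sketch only: the kind of derived metric these fields support. Target
// participation for the previous epoch is the target-attesting balance divided by the
// active balance of that epoch.
fn previous_epoch_target_participation(votes: &VoteCount) -> f64 {
    if votes.previous_epoch_active_gwei == 0 {
        0.0
    } else {
        votes.previous_epoch_target_attesting_gwei as f64
            / votes.previous_epoch_active_gwei as f64
    }
}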
/// HTTP handler to return a `VoteCount` for a given `Epoch`.
pub fn get_vote_count<T: BeaconChainTypes>(
req: Request<Vec<u8>>,
ctx: Arc<Context<T>>,
) -> Result<VoteCount, ApiError> {
let query = UrlQuery::from_request(&req)?;
let epoch = query.epoch()?;
// This is the last slot of the given epoch (one prior to the first slot of the next epoch).
let target_slot = (epoch + 1).start_slot(T::EthSpec::slots_per_epoch()) - 1;
let (_root, state) = state_at_slot(&ctx.beacon_chain, target_slot)?;
let spec = &ctx.beacon_chain.spec;
let mut validator_statuses = ValidatorStatuses::new(&state, spec)?;
validator_statuses.process_attestations(&state, spec)?;
Ok(validator_statuses.total_balances.into())
}
pub fn post_individual_votes<T: BeaconChainTypes>(
req: Request<Vec<u8>>,
ctx: Arc<Context<T>>,
) -> Result<Vec<IndividualVotesResponse>, ApiError> {
let body = req.into_body();
serde_json::from_slice::<IndividualVotesRequest>(&body)
.map_err(|e| {
ApiError::BadRequest(format!(
"Unable to parse JSON into ValidatorDutiesRequest: {:?}",
e
))
})
.and_then(move |body| {
let epoch = body.epoch;
// This is the last slot of the given epoch (one prior to the first slot of the next epoch).
let target_slot = (epoch + 1).start_slot(T::EthSpec::slots_per_epoch()) - 1;
let (_root, mut state) = state_at_slot(&ctx.beacon_chain, target_slot)?;
let spec = &ctx.beacon_chain.spec;
let mut validator_statuses = ValidatorStatuses::new(&state, spec)?;
validator_statuses.process_attestations(&state, spec)?;
body.pubkeys
.into_iter()
.map(|pubkey| {
let validator_index_opt = state.get_validator_index(&pubkey).map_err(|e| {
ApiError::ServerError(format!("Unable to read pubkey cache: {:?}", e))
})?;
if let Some(validator_index) = validator_index_opt {
let vote = validator_statuses
.statuses
.get(validator_index)
.cloned()
.map(Into::into);
Ok(IndividualVotesResponse {
epoch,
pubkey,
validator_index: Some(validator_index),
vote,
})
} else {
Ok(IndividualVotesResponse {
epoch,
pubkey,
validator_index: None,
vote: None,
})
}
})
.collect::<Result<Vec<_>, _>>()
})
}

@@ -1,260 +0,0 @@
use crate::{ApiError, NetworkChannel};
use beacon_chain::{BeaconChain, BeaconChainTypes, StateSkipConfig};
use bls::PublicKeyBytes;
use eth2_libp2p::PubsubMessage;
use itertools::process_results;
use network::NetworkMessage;
use ssz::Decode;
use store::iter::AncestorIter;
use types::{
BeaconState, CommitteeIndex, Epoch, EthSpec, Hash256, RelativeEpoch, SignedBeaconBlock, Slot,
};
/// Parse a slot.
///
/// E.g., `"1234"`
pub fn parse_slot(string: &str) -> Result<Slot, ApiError> {
string
.parse::<u64>()
.map(Slot::from)
.map_err(|e| ApiError::BadRequest(format!("Unable to parse slot: {:?}", e)))
}
/// Parse an epoch.
///
/// E.g., `"13"`
pub fn parse_epoch(string: &str) -> Result<Epoch, ApiError> {
string
.parse::<u64>()
.map(Epoch::from)
.map_err(|e| ApiError::BadRequest(format!("Unable to parse epoch: {:?}", e)))
}
/// Parse a CommitteeIndex.
///
/// E.g., `"18"`
pub fn parse_committee_index(string: &str) -> Result<CommitteeIndex, ApiError> {
string
.parse::<CommitteeIndex>()
.map_err(|e| ApiError::BadRequest(format!("Unable to parse committee index: {:?}", e)))
}
/// Parse an SSZ object from some hex-encoded bytes.
///
/// E.g., A signature is `"0x0000000000000000000000000000000000000000000000000000000000000000"`
pub fn parse_hex_ssz_bytes<T: Decode>(string: &str) -> Result<T, ApiError> {
const PREFIX: &str = "0x";
if string.starts_with(PREFIX) {
let trimmed = string.trim_start_matches(PREFIX);
let bytes = hex::decode(trimmed)
.map_err(|e| ApiError::BadRequest(format!("Unable to parse SSZ hex: {:?}", e)))?;
T::from_ssz_bytes(&bytes)
.map_err(|e| ApiError::BadRequest(format!("Unable to parse SSZ bytes: {:?}", e)))
} else {
Err(ApiError::BadRequest(
"Hex bytes must have a 0x prefix".to_string(),
))
}
}
/// Parse a root from a `0x` prefixed string.
///
/// E.g., `"0x0000000000000000000000000000000000000000000000000000000000000000"`
pub fn parse_root(string: &str) -> Result<Hash256, ApiError> {
const PREFIX: &str = "0x";
if string.starts_with(PREFIX) {
let trimmed = string.trim_start_matches(PREFIX);
trimmed
.parse()
.map_err(|e| ApiError::BadRequest(format!("Unable to parse root: {:?}", e)))
} else {
Err(ApiError::BadRequest(
"Root must have a 0x prefix".to_string(),
))
}
}
/// Parse a PublicKey from a `0x` prefixed hex string
pub fn parse_pubkey_bytes(string: &str) -> Result<PublicKeyBytes, ApiError> {
const PREFIX: &str = "0x";
if string.starts_with(PREFIX) {
let pubkey_bytes = hex::decode(string.trim_start_matches(PREFIX))
.map_err(|e| ApiError::BadRequest(format!("Invalid hex string: {:?}", e)))?;
let pubkey = PublicKeyBytes::deserialize(pubkey_bytes.as_slice()).map_err(|e| {
ApiError::BadRequest(format!("Unable to deserialize public key: {:?}.", e))
})?;
Ok(pubkey)
} else {
Err(ApiError::BadRequest(
"Public key must have a 0x prefix".to_string(),
))
}
}
/// Returns the root of the `SignedBeaconBlock` in the canonical chain of `beacon_chain` at the given
/// `slot`, if possible.
///
/// May return a root for a previous slot, in the case of skip slots.
pub fn block_root_at_slot<T: BeaconChainTypes>(
beacon_chain: &BeaconChain<T>,
target: Slot,
) -> Result<Option<Hash256>, ApiError> {
Ok(process_results(
beacon_chain.rev_iter_block_roots()?,
|iter| {
iter.take_while(|(_, slot)| *slot >= target)
.find(|(_, slot)| *slot == target)
.map(|(root, _)| root)
},
)?)
}
/// Returns a `BeaconState` and its root in the canonical chain of `beacon_chain` at the given
/// `slot`, if possible.
///
/// Will not return a state if the request slot is in the future. Will return states higher than
/// the current head by skipping slots.
pub fn state_at_slot<T: BeaconChainTypes>(
beacon_chain: &BeaconChain<T>,
slot: Slot,
) -> Result<(Hash256, BeaconState<T::EthSpec>), ApiError> {
let head = beacon_chain.head()?;
if head.beacon_state.slot == slot {
Ok((head.beacon_state_root, head.beacon_state))
} else {
let root = state_root_at_slot(beacon_chain, slot, StateSkipConfig::WithStateRoots)?;
let state: BeaconState<T::EthSpec> = beacon_chain
.store
.get_state(&root, Some(slot))?
.ok_or_else(|| ApiError::NotFound(format!("Unable to find state at root {}", root)))?;
Ok((root, state))
}
}
/// Returns the root of the `BeaconState` in the canonical chain of `beacon_chain` at the given
/// `slot`, if possible.
///
/// Will not return a state root if the request slot is in the future. Will return state roots
/// higher than the current head by skipping slots.
pub fn state_root_at_slot<T: BeaconChainTypes>(
beacon_chain: &BeaconChain<T>,
slot: Slot,
config: StateSkipConfig,
) -> Result<Hash256, ApiError> {
let head_state = &beacon_chain.head()?.beacon_state;
let current_slot = beacon_chain
.slot()
.map_err(|_| ApiError::ServerError("Unable to read slot clock".to_string()))?;
// There are four scenarios when obtaining a state for a given slot:
//
// 1. The request slot is in the future.
// 2. The request slot is the same as the best block (head) slot.
// 3. The request slot is prior to the head slot.
// 4. The request slot is later than the head slot.
if current_slot < slot {
// 1. The request slot is in the future. Reject the request.
//
// We could actually speculate about future state roots by skipping slots, however that's
// likely to cause confusion for API users.
Err(ApiError::BadRequest(format!(
"Requested slot {} is past the current slot {}",
slot, current_slot
)))
} else if head_state.slot == slot {
// 2. The request slot is the same as the best block (head) slot.
//
// The head state root is stored in memory, so return it directly.
Ok(beacon_chain.head()?.beacon_state_root)
} else if head_state.slot > slot {
// 3. The request slot is prior to the head slot.
//
// Iterate through the state roots on the head state to find the root for that
// slot. Once the root is found, load it from the database.
process_results(
head_state
.try_iter_ancestor_roots(beacon_chain.store.clone())
.ok_or_else(|| {
ApiError::ServerError("Failed to create roots iterator".to_string())
})?,
|mut iter| iter.find(|(_, s)| *s == slot).map(|(root, _)| root),
)?
.ok_or_else(|| ApiError::NotFound(format!("Unable to find state at slot {}", slot)))
} else {
// 4. The request slot is later than the head slot.
//
// Use `per_slot_processing` to advance the head state to the present slot,
// assuming that all slots do not contain a block (i.e., they are skipped slots).
let mut state = beacon_chain.head()?.beacon_state;
let spec = &T::EthSpec::default_spec();
let skip_state_root = match config {
StateSkipConfig::WithStateRoots => None,
StateSkipConfig::WithoutStateRoots => Some(Hash256::zero()),
};
for _ in state.slot.as_u64()..slot.as_u64() {
// Ensure the next epoch state caches are built in case of an epoch transition.
state.build_committee_cache(RelativeEpoch::Next, spec)?;
state_processing::per_slot_processing(&mut state, skip_state_root, spec)?;
}
// Note: this is an expensive operation. Once the tree hash cache is implemented it may be
// used here.
Ok(state.canonical_root())
}
}
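// Illustrative sketch only: the four scenarios above, isolated from the chain as a
// pure function over plain slot numbers. Names are hypothetical.
enum SlotLookup {
    InFuture,   // 1. reject the request
    AtHead,     // 2. head state root is already in memory
    BeforeHead, // 3. walk the head state's ancestor roots, then hit the database
    AfterHead,  // 4. advance the head state with skip slots and re-hash
}
fn classify_requested_slot(requested: u64, current: u64, head: u64) -> SlotLookup {
    if current < requested {
        SlotLookup::InFuture
    } else if head == requested {
        SlotLookup::AtHead
    } else if head > requested {
        SlotLookup::BeforeHead
    } else {
        SlotLookup::AfterHead
    }
}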
pub fn publish_beacon_block_to_network<T: BeaconChainTypes + 'static>(
chan: &NetworkChannel<T::EthSpec>,
block: SignedBeaconBlock<T::EthSpec>,
) -> Result<(), ApiError> {
// send the block via SSZ encoding
let messages = vec![PubsubMessage::BeaconBlock(Box::new(block))];
// Publish the block to the p2p network via gossipsub.
if let Err(e) = chan.send(NetworkMessage::Publish { messages }) {
return Err(ApiError::ServerError(format!(
"Unable to send new block to network: {:?}",
e
)));
}
Ok(())
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn parse_root_works() {
assert_eq!(
parse_root("0x0000000000000000000000000000000000000000000000000000000000000000"),
Ok(Hash256::zero())
);
assert_eq!(
parse_root("0x000000000000000000000000000000000000000000000000000000000000002a"),
Ok(Hash256::from_low_u64_be(42))
);
assert!(
parse_root("0000000000000000000000000000000000000000000000000000000000000042").is_err()
);
assert!(parse_root("0x").is_err());
assert!(parse_root("0x00").is_err());
}
#[test]
fn parse_slot_works() {
assert_eq!(parse_slot("0"), Ok(Slot::new(0)));
assert_eq!(parse_slot("42"), Ok(Slot::new(42)));
assert_eq!(parse_slot("10000000"), Ok(Slot::new(10_000_000)));
assert!(parse_slot("cats").is_err());
}
}

@@ -1,127 +0,0 @@
#[macro_use]
extern crate lazy_static;
mod router;
extern crate network as client_network;
mod beacon;
pub mod config;
mod consensus;
mod helpers;
mod lighthouse;
mod metrics;
mod node;
mod url_query;
mod validator;
use beacon_chain::{BeaconChain, BeaconChainTypes};
use bus::Bus;
use client_network::NetworkMessage;
pub use config::ApiEncodingFormat;
use eth2_config::Eth2Config;
use eth2_libp2p::NetworkGlobals;
use futures::future::TryFutureExt;
use hyper::server::conn::AddrStream;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Request, Server};
use parking_lot::Mutex;
use rest_types::ApiError;
use slog::{info, warn};
use std::net::SocketAddr;
use std::path::PathBuf;
use std::sync::Arc;
use tokio::sync::mpsc;
use types::SignedBeaconBlockHash;
use url_query::UrlQuery;
pub use crate::helpers::parse_pubkey_bytes;
pub use config::Config;
pub use router::Context;
pub type NetworkChannel<T> = mpsc::UnboundedSender<NetworkMessage<T>>;
pub struct NetworkInfo<T: BeaconChainTypes> {
pub network_globals: Arc<NetworkGlobals<T::EthSpec>>,
pub network_chan: NetworkChannel<T::EthSpec>,
}
// Allowing more than 7 arguments.
#[allow(clippy::too_many_arguments)]
pub fn start_server<T: BeaconChainTypes>(
executor: environment::TaskExecutor,
config: &Config,
beacon_chain: Arc<BeaconChain<T>>,
network_info: NetworkInfo<T>,
db_path: PathBuf,
freezer_db_path: PathBuf,
eth2_config: Eth2Config,
events: Arc<Mutex<Bus<SignedBeaconBlockHash>>>,
) -> Result<SocketAddr, hyper::Error> {
let log = executor.log();
let eth2_config = Arc::new(eth2_config);
let context = Arc::new(Context {
executor: executor.clone(),
config: config.clone(),
beacon_chain,
network_globals: network_info.network_globals.clone(),
network_chan: network_info.network_chan,
eth2_config,
log: log.clone(),
db_path,
freezer_db_path,
events,
});
// Define the function that will build the request handler.
let make_service = make_service_fn(move |_socket: &AddrStream| {
let ctx = context.clone();
async move {
Ok::<_, hyper::Error>(service_fn(move |req: Request<Body>| {
router::on_http_request(req, ctx.clone())
}))
}
});
let bind_addr = (config.listen_address, config.port).into();
let server = Server::bind(&bind_addr).serve(make_service);
// Determine the address the server is actually listening on.
//
// This may be different to `bind_addr` if bind port was 0 (this allows the OS to choose a free
// port).
let actual_listen_addr = server.local_addr();
// Build a channel to kill the HTTP server.
let exit = executor.exit();
let inner_log = log.clone();
let server_exit = async move {
let _ = exit.await;
info!(inner_log, "HTTP service shutdown");
};
// Configure the `hyper` server to gracefully shutdown when the shutdown channel is triggered.
let inner_log = log.clone();
let server_future = server
.with_graceful_shutdown(async {
server_exit.await;
})
.map_err(move |e| {
warn!(
inner_log,
"HTTP server failed to start, Unable to bind"; "address" => format!("{:?}", e)
)
})
.unwrap_or_else(|_| ());
info!(
log,
"HTTP API started";
"address" => format!("{}", actual_listen_addr.ip()),
"port" => actual_listen_addr.port(),
);
executor.spawn_without_exit(server_future, "http");
Ok(actual_listen_addr)
}
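// Illustrative sketch only, using std::net directly: binding to port 0 asks the OS for
// a free port, which is why the code above reads `server.local_addr()` back instead of
// trusting `bind_addr`.
fn bind_to_any_free_port() -> std::io::Result<std::net::SocketAddr> {
    let listener = std::net::TcpListener::bind(("127.0.0.1", 0))?;
    listener.local_addr()
}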

@@ -1,48 +0,0 @@
//! This contains a collection of lighthouse specific HTTP endpoints.
use crate::{ApiError, Context};
use beacon_chain::BeaconChainTypes;
use eth2_libp2p::PeerInfo;
use serde::Serialize;
use std::sync::Arc;
use types::EthSpec;
/// Returns all known peers and corresponding information
pub fn peers<T: BeaconChainTypes>(ctx: Arc<Context<T>>) -> Result<Vec<Peer<T::EthSpec>>, ApiError> {
Ok(ctx
.network_globals
.peers
.read()
.peers()
.map(|(peer_id, peer_info)| Peer {
peer_id: peer_id.to_string(),
peer_info: peer_info.clone(),
})
.collect())
}
/// Returns all known connected peers and their corresponding information
pub fn connected_peers<T: BeaconChainTypes>(
ctx: Arc<Context<T>>,
) -> Result<Vec<Peer<T::EthSpec>>, ApiError> {
Ok(ctx
.network_globals
.peers
.read()
.connected_peers()
.map(|(peer_id, peer_info)| Peer {
peer_id: peer_id.to_string(),
peer_info: peer_info.clone(),
})
.collect())
}
/// Information returned by `peers` and `connected_peers`.
#[derive(Clone, Debug, Serialize)]
#[serde(bound = "T: EthSpec")]
pub struct Peer<T: EthSpec> {
/// The Peer's ID
peer_id: String,
/// The PeerInfo associated with the peer.
peer_info: PeerInfo<T>,
}

@@ -1,39 +0,0 @@
use crate::{ApiError, Context};
use beacon_chain::BeaconChainTypes;
use eth2_libp2p::types::SyncState;
use rest_types::{SyncingResponse, SyncingStatus};
use std::sync::Arc;
use types::Slot;
/// Returns a syncing status.
pub fn syncing<T: BeaconChainTypes>(ctx: Arc<Context<T>>) -> Result<SyncingResponse, ApiError> {
let current_slot = ctx
.beacon_chain
.head_info()
.map_err(|e| ApiError::ServerError(format!("Unable to read head slot: {:?}", e)))?
.slot;
let (starting_slot, highest_slot) = match ctx.network_globals.sync_state() {
SyncState::SyncingFinalized {
start_slot,
head_slot,
..
}
| SyncState::SyncingHead {
start_slot,
head_slot,
} => (start_slot, head_slot),
SyncState::Synced | SyncState::Stalled => (Slot::from(0u64), current_slot),
};
let sync_status = SyncingStatus {
starting_slot,
current_slot,
highest_slot,
};
Ok(SyncingResponse {
is_syncing: ctx.network_globals.is_syncing(),
sync_status,
})
}

@@ -1,322 +0,0 @@
use crate::{
beacon, config::Config, consensus, lighthouse, metrics, node, validator, NetworkChannel,
};
use beacon_chain::{BeaconChain, BeaconChainTypes};
use bus::Bus;
use environment::TaskExecutor;
use eth2_config::Eth2Config;
use eth2_libp2p::{NetworkGlobals, PeerId};
use hyper::header::HeaderValue;
use hyper::{Body, Method, Request, Response};
use lighthouse_version::version_with_platform;
use operation_pool::PersistedOperationPool;
use parking_lot::Mutex;
use rest_types::{ApiError, Handler, Health};
use slog::debug;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
use types::{EthSpec, SignedBeaconBlockHash};
pub struct Context<T: BeaconChainTypes> {
pub executor: TaskExecutor,
pub config: Config,
pub beacon_chain: Arc<BeaconChain<T>>,
pub network_globals: Arc<NetworkGlobals<T::EthSpec>>,
pub network_chan: NetworkChannel<T::EthSpec>,
pub eth2_config: Arc<Eth2Config>,
pub log: slog::Logger,
pub db_path: PathBuf,
pub freezer_db_path: PathBuf,
pub events: Arc<Mutex<Bus<SignedBeaconBlockHash>>>,
}
pub async fn on_http_request<T: BeaconChainTypes>(
req: Request<Body>,
ctx: Arc<Context<T>>,
) -> Result<Response<Body>, ApiError> {
let path = req.uri().path().to_string();
let _timer = metrics::start_timer_vec(&metrics::BEACON_HTTP_API_TIMES_TOTAL, &[&path]);
metrics::inc_counter_vec(&metrics::BEACON_HTTP_API_REQUESTS_TOTAL, &[&path]);
let received_instant = Instant::now();
let log = ctx.log.clone();
let allow_origin = ctx.config.allow_origin.clone();
match route(req, ctx).await {
Ok(mut response) => {
metrics::inc_counter_vec(&metrics::BEACON_HTTP_API_SUCCESS_TOTAL, &[&path]);
if allow_origin != "" {
let headers = response.headers_mut();
headers.insert(
hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN,
HeaderValue::from_str(&allow_origin)?,
);
headers.insert(hyper::header::VARY, HeaderValue::from_static("Origin"));
}
debug!(
log,
"HTTP API request successful";
"path" => path,
"duration_ms" => Instant::now().duration_since(received_instant).as_millis()
);
Ok(response)
}
Err(error) => {
metrics::inc_counter_vec(&metrics::BEACON_HTTP_API_ERROR_TOTAL, &[&path]);
debug!(
log,
"HTTP API request failure";
"path" => path,
"duration_ms" => Instant::now().duration_since(received_instant).as_millis()
);
Ok(error.into())
}
}
}
async fn route<T: BeaconChainTypes>(
req: Request<Body>,
ctx: Arc<Context<T>>,
) -> Result<Response<Body>, ApiError> {
let path = req.uri().path().to_string();
let ctx = ctx.clone();
let method = req.method().clone();
let executor = ctx.executor.clone();
let handler = Handler::new(req, ctx, executor)?;
match (method, path.as_ref()) {
(Method::GET, "/node/version") => handler
.static_value(version_with_platform())
.await?
.serde_encodings(),
(Method::GET, "/node/health") => handler
.static_value(Health::observe().map_err(ApiError::ServerError)?)
.await?
.serde_encodings(),
(Method::GET, "/node/syncing") => handler
.allow_body()
.in_blocking_task(|_, ctx| node::syncing(ctx))
.await?
.serde_encodings(),
(Method::GET, "/network/enr") => handler
.in_core_task(|_, ctx| Ok(ctx.network_globals.local_enr().to_base64()))
.await?
.serde_encodings(),
(Method::GET, "/network/peer_count") => handler
.in_core_task(|_, ctx| Ok(ctx.network_globals.connected_peers()))
.await?
.serde_encodings(),
(Method::GET, "/network/peer_id") => handler
.in_core_task(|_, ctx| Ok(ctx.network_globals.local_peer_id().to_base58()))
.await?
.serde_encodings(),
(Method::GET, "/network/peers") => handler
.in_blocking_task(|_, ctx| {
Ok(ctx
.network_globals
.peers
.read()
.connected_peer_ids()
.map(PeerId::to_string)
.collect::<Vec<_>>())
})
.await?
.serde_encodings(),
(Method::GET, "/network/listen_port") => handler
.in_core_task(|_, ctx| Ok(ctx.network_globals.listen_port_tcp()))
.await?
.serde_encodings(),
(Method::GET, "/network/listen_addresses") => handler
.in_blocking_task(|_, ctx| Ok(ctx.network_globals.listen_multiaddrs()))
.await?
.serde_encodings(),
(Method::GET, "/beacon/head") => handler
.in_blocking_task(|_, ctx| beacon::get_head(ctx))
.await?
.all_encodings(),
(Method::GET, "/beacon/heads") => handler
.in_blocking_task(|_, ctx| Ok(beacon::get_heads(ctx)))
.await?
.all_encodings(),
(Method::GET, "/beacon/block") => handler
.in_blocking_task(beacon::get_block)
.await?
.all_encodings(),
(Method::GET, "/beacon/block_root") => handler
.in_blocking_task(beacon::get_block_root)
.await?
.all_encodings(),
(Method::GET, "/beacon/fork") => handler
.in_blocking_task(|_, ctx| Ok(ctx.beacon_chain.head_info()?.fork))
.await?
.all_encodings(),
(Method::GET, "/beacon/fork/stream") => {
handler.sse_stream(|_, ctx| beacon::stream_forks(ctx)).await
}
(Method::GET, "/beacon/genesis_time") => handler
.in_blocking_task(|_, ctx| Ok(ctx.beacon_chain.head_info()?.genesis_time))
.await?
.all_encodings(),
(Method::GET, "/beacon/genesis_validators_root") => handler
.in_blocking_task(|_, ctx| Ok(ctx.beacon_chain.head_info()?.genesis_validators_root))
.await?
.all_encodings(),
(Method::GET, "/beacon/validators") => handler
.in_blocking_task(beacon::get_validators)
.await?
.all_encodings(),
(Method::POST, "/beacon/validators") => handler
.allow_body()
.in_blocking_task(beacon::post_validators)
.await?
.all_encodings(),
(Method::GET, "/beacon/validators/all") => handler
.in_blocking_task(beacon::get_all_validators)
.await?
.all_encodings(),
(Method::GET, "/beacon/validators/active") => handler
.in_blocking_task(beacon::get_active_validators)
.await?
.all_encodings(),
(Method::GET, "/beacon/state") => handler
.in_blocking_task(beacon::get_state)
.await?
.all_encodings(),
(Method::GET, "/beacon/state_root") => handler
.in_blocking_task(beacon::get_state_root)
.await?
.all_encodings(),
(Method::GET, "/beacon/state/genesis") => handler
.in_blocking_task(|_, ctx| beacon::get_genesis_state(ctx))
.await?
.all_encodings(),
(Method::GET, "/beacon/committees") => handler
.in_blocking_task(beacon::get_committees)
.await?
.all_encodings(),
(Method::POST, "/beacon/proposer_slashing") => handler
.allow_body()
.in_blocking_task(beacon::proposer_slashing)
.await?
.serde_encodings(),
(Method::POST, "/beacon/attester_slashing") => handler
.allow_body()
.in_blocking_task(beacon::attester_slashing)
.await?
.serde_encodings(),
(Method::POST, "/validator/duties") => handler
.allow_body()
.in_blocking_task(validator::post_validator_duties)
.await?
.serde_encodings(),
(Method::POST, "/validator/subscribe") => handler
.allow_body()
.in_blocking_task(validator::post_validator_subscriptions)
.await?
.serde_encodings(),
(Method::GET, "/validator/duties/all") => handler
.in_blocking_task(validator::get_all_validator_duties)
.await?
.serde_encodings(),
(Method::GET, "/validator/duties/active") => handler
.in_blocking_task(validator::get_active_validator_duties)
.await?
.serde_encodings(),
(Method::GET, "/validator/block") => handler
.in_blocking_task(validator::get_new_beacon_block)
.await?
.serde_encodings(),
(Method::POST, "/validator/block") => handler
.allow_body()
.in_blocking_task(validator::publish_beacon_block)
.await?
.serde_encodings(),
(Method::GET, "/validator/attestation") => handler
.in_blocking_task(validator::get_new_attestation)
.await?
.serde_encodings(),
(Method::GET, "/validator/aggregate_attestation") => handler
.in_blocking_task(validator::get_aggregate_attestation)
.await?
.serde_encodings(),
(Method::POST, "/validator/attestations") => handler
.allow_body()
.in_blocking_task(validator::publish_attestations)
.await?
.serde_encodings(),
(Method::POST, "/validator/aggregate_and_proofs") => handler
.allow_body()
.in_blocking_task(validator::publish_aggregate_and_proofs)
.await?
.serde_encodings(),
(Method::GET, "/consensus/global_votes") => handler
.allow_body()
.in_blocking_task(consensus::get_vote_count)
.await?
.serde_encodings(),
(Method::POST, "/consensus/individual_votes") => handler
.allow_body()
.in_blocking_task(consensus::post_individual_votes)
.await?
.serde_encodings(),
(Method::GET, "/spec") => handler
// TODO: this clone is not ideal.
.in_blocking_task(|_, ctx| Ok(ctx.beacon_chain.spec.clone()))
.await?
.serde_encodings(),
(Method::GET, "/spec/slots_per_epoch") => handler
.static_value(T::EthSpec::slots_per_epoch())
.await?
.serde_encodings(),
(Method::GET, "/spec/eth2_config") => handler
// TODO: this clone is not ideal.
.in_blocking_task(|_, ctx| Ok(ctx.eth2_config.as_ref().clone()))
.await?
.serde_encodings(),
(Method::GET, "/advanced/fork_choice") => handler
.in_blocking_task(|_, ctx| {
Ok(ctx
.beacon_chain
.fork_choice
.read()
.proto_array()
.core_proto_array()
.clone())
})
.await?
.serde_encodings(),
(Method::GET, "/advanced/operation_pool") => handler
.in_blocking_task(|_, ctx| {
Ok(PersistedOperationPool::from_operation_pool(
&ctx.beacon_chain.op_pool,
))
})
.await?
.serde_encodings(),
(Method::GET, "/metrics") => handler
.in_blocking_task(|_, ctx| metrics::get_prometheus(ctx))
.await?
.text_encoding(),
(Method::GET, "/lighthouse/syncing") => handler
.in_blocking_task(|_, ctx| Ok(ctx.network_globals.sync_state()))
.await?
.serde_encodings(),
(Method::GET, "/lighthouse/peers") => handler
.in_blocking_task(|_, ctx| lighthouse::peers(ctx))
.await?
.serde_encodings(),
(Method::GET, "/lighthouse/connected_peers") => handler
.in_blocking_task(|_, ctx| lighthouse::connected_peers(ctx))
.await?
.serde_encodings(),
_ => Err(ApiError::NotFound(
"Request path and/or method not found.".to_owned(),
)),
}
}

@@ -1,166 +0,0 @@
use crate::helpers::{parse_committee_index, parse_epoch, parse_hex_ssz_bytes, parse_slot};
use crate::ApiError;
use hyper::Request;
use types::{AttestationData, CommitteeIndex, Epoch, Signature, Slot};
/// Provides handy functions for parsing the query parameters of a URL.
#[derive(Clone, Copy)]
pub struct UrlQuery<'a>(url::form_urlencoded::Parse<'a>);
impl<'a> UrlQuery<'a> {
/// Instantiate from an existing `Request`.
///
/// Falls back to an empty query string if `req` does not contain any query parameters.
pub fn from_request<T>(req: &'a Request<T>) -> Result<Self, ApiError> {
let query_str = req.uri().query().unwrap_or_else(|| "");
Ok(UrlQuery(url::form_urlencoded::parse(query_str.as_bytes())))
}
/// Returns the first `(key, value)` pair found where the `key` is in `keys`.
///
/// If no match is found, an `InvalidQueryParams` error is returned.
pub fn first_of(mut self, keys: &[&str]) -> Result<(String, String), ApiError> {
self.0
.find(|(key, _value)| keys.contains(&&**key))
.map(|(key, value)| (key.into_owned(), value.into_owned()))
.ok_or_else(|| {
ApiError::BadRequest(format!(
"URL query must be valid and contain at least one of the following keys: {:?}",
keys
))
})
}
/// Returns the first `(key, value)` pair found where the `key` is in `keys`, if any.
///
/// Returns `None` if no match is found.
pub fn first_of_opt(mut self, keys: &[&str]) -> Option<(String, String)> {
self.0
.find(|(key, _value)| keys.contains(&&**key))
.map(|(key, value)| (key.into_owned(), value.into_owned()))
}
/// Returns the value for `key`, if and only if `key` is the only key present in the query
/// parameters.
pub fn only_one(self, key: &str) -> Result<String, ApiError> {
let queries: Vec<_> = self
.0
.map(|(k, v)| (k.into_owned(), v.into_owned()))
.collect();
if queries.len() == 1 {
let (first_key, first_value) = &queries[0]; // Must have 0 index if len is 1.
if first_key == key {
Ok(first_value.to_string())
} else {
Err(ApiError::BadRequest(format!(
"Only the {} query parameter is supported",
key
)))
}
} else {
Err(ApiError::BadRequest(format!(
"Only one query parameter is allowed, {} supplied",
queries.len()
)))
}
}
/// Returns a vector of all values present for the given `key`.
///
/// Returns an empty vector if `key` is not present.
pub fn all_of(self, key: &str) -> Result<Vec<String>, ApiError> {
let queries: Vec<_> = self
.0
.filter_map(|(k, v)| {
if k.eq(key) {
Some(v.into_owned())
} else {
None
}
})
.collect();
Ok(queries)
}
/// Returns the value of the first occurrence of the `epoch` key.
pub fn epoch(self) -> Result<Epoch, ApiError> {
self.first_of(&["epoch"])
.and_then(|(_key, value)| parse_epoch(&value))
}
/// Returns the value of the first occurrence of the `slot` key.
pub fn slot(self) -> Result<Slot, ApiError> {
self.first_of(&["slot"])
.and_then(|(_key, value)| parse_slot(&value))
}
/// Returns the value of the first occurrence of the `committee_index` key.
pub fn committee_index(self) -> Result<CommitteeIndex, ApiError> {
self.first_of(&["committee_index"])
.and_then(|(_key, value)| parse_committee_index(&value))
}
/// Returns the value of the first occurrence of the `randao_reveal` key.
pub fn randao_reveal(self) -> Result<Signature, ApiError> {
self.first_of(&["randao_reveal"])
.and_then(|(_key, value)| parse_hex_ssz_bytes(&value))
}
/// Returns the value of the first occurrence of the `attestation_data` key.
pub fn attestation_data(self) -> Result<AttestationData, ApiError> {
self.first_of(&["attestation_data"])
.and_then(|(_key, value)| parse_hex_ssz_bytes(&value))
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn only_one() {
let get_result = |addr: &str, key: &str| -> Result<String, ApiError> {
UrlQuery(url::Url::parse(addr).unwrap().query_pairs()).only_one(key)
};
assert_eq!(get_result("http://cat.io/?a=42", "a"), Ok("42".to_string()));
assert!(get_result("http://cat.io/?a=42", "b").is_err());
assert!(get_result("http://cat.io/?a=42&b=12", "a").is_err());
assert!(get_result("http://cat.io/", "").is_err());
}
#[test]
fn first_of() {
let url = url::Url::parse("http://lighthouse.io/cats?a=42&b=12&c=100").unwrap();
let get_query = || UrlQuery(url.query_pairs());
assert_eq!(
get_query().first_of(&["a"]),
Ok(("a".to_string(), "42".to_string()))
);
assert_eq!(
get_query().first_of(&["a", "b", "c"]),
Ok(("a".to_string(), "42".to_string()))
);
assert_eq!(
get_query().first_of(&["a", "a", "a"]),
Ok(("a".to_string(), "42".to_string()))
);
assert_eq!(
get_query().first_of(&["a", "b", "c"]),
Ok(("a".to_string(), "42".to_string()))
);
assert_eq!(
get_query().first_of(&["b", "c"]),
Ok(("b".to_string(), "12".to_string()))
);
assert_eq!(
get_query().first_of(&["c"]),
Ok(("c".to_string(), "100".to_string()))
);
assert!(get_query().first_of(&["nothing"]).is_err());
}
}
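// Illustrative sketch only, in the style of the tests above: `all_of` collects every
// occurrence of a repeated key and yields an empty vector (not an error) for a key
// that is absent.
#[test]
fn all_of_collects_repeated_keys() {
    let url = url::Url::parse("http://cat.io/?a=1&a=2&b=3").unwrap();
    assert_eq!(
        UrlQuery(url.query_pairs()).all_of("a"),
        Ok(vec!["1".to_string(), "2".to_string()])
    );
    assert_eq!(UrlQuery(url.query_pairs()).all_of("c"), Ok(vec![]));
}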

@@ -1,747 +0,0 @@
use crate::helpers::{parse_hex_ssz_bytes, publish_beacon_block_to_network};
use crate::{ApiError, Context, NetworkChannel, UrlQuery};
use beacon_chain::{
attestation_verification::Error as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes,
BlockError, ForkChoiceError, StateSkipConfig,
};
use bls::PublicKeyBytes;
use eth2_libp2p::PubsubMessage;
use hyper::Request;
use network::NetworkMessage;
use rest_types::{ValidatorDutiesRequest, ValidatorDutyBytes, ValidatorSubscription};
use slog::{error, info, trace, warn, Logger};
use std::sync::Arc;
use types::beacon_state::EthSpec;
use types::{
Attestation, AttestationData, BeaconBlock, BeaconState, Epoch, RelativeEpoch, SelectionProof,
SignedAggregateAndProof, SignedBeaconBlock, SubnetId,
};
/// HTTP Handler to retrieve the duties for a set of validators during a particular epoch. This
/// method allows for collecting bulk sets of validator duties without risking exceeding the max
/// URL length with query pairs.
pub fn post_validator_duties<T: BeaconChainTypes>(
req: Request<Vec<u8>>,
ctx: Arc<Context<T>>,
) -> Result<Vec<ValidatorDutyBytes>, ApiError> {
let body = req.into_body();
serde_json::from_slice::<ValidatorDutiesRequest>(&body)
.map_err(|e| {
ApiError::BadRequest(format!(
"Unable to parse JSON into ValidatorDutiesRequest: {:?}",
e
))
})
.and_then(|bulk_request| {
return_validator_duties(
&ctx.beacon_chain.clone(),
bulk_request.epoch,
bulk_request.pubkeys.into_iter().map(Into::into).collect(),
)
})
}
/// HTTP Handler to accept subscriptions for a set of validators. This allows the node to
/// organise peer discovery and topic subscription for known validators.
pub fn post_validator_subscriptions<T: BeaconChainTypes>(
req: Request<Vec<u8>>,
ctx: Arc<Context<T>>,
) -> Result<(), ApiError> {
let body = req.into_body();
serde_json::from_slice(&body)
.map_err(|e| {
ApiError::BadRequest(format!(
"Unable to parse JSON into ValidatorSubscriptions: {:?}",
e
))
})
.and_then(move |subscriptions: Vec<ValidatorSubscription>| {
ctx.network_chan
.send(NetworkMessage::Subscribe { subscriptions })
.map_err(|e| {
ApiError::ServerError(format!(
"Unable to subscriptions to the network: {:?}",
e
))
})?;
Ok(())
})
}
/// HTTP Handler to retrieve all validator duties for the given epoch.
pub fn get_all_validator_duties<T: BeaconChainTypes>(
req: Request<Vec<u8>>,
ctx: Arc<Context<T>>,
) -> Result<Vec<ValidatorDutyBytes>, ApiError> {
let query = UrlQuery::from_request(&req)?;
let epoch = query.epoch()?;
let state = get_state_for_epoch(&ctx.beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?;
let validator_pubkeys = state
.validators
.iter()
.map(|validator| validator.pubkey.clone())
.collect();
return_validator_duties(&ctx.beacon_chain, epoch, validator_pubkeys)
}
/// HTTP Handler to retrieve all active validator duties for the given epoch.
pub fn get_active_validator_duties<T: BeaconChainTypes>(
req: Request<Vec<u8>>,
ctx: Arc<Context<T>>,
) -> Result<Vec<ValidatorDutyBytes>, ApiError> {
let query = UrlQuery::from_request(&req)?;
let epoch = query.epoch()?;
let state = get_state_for_epoch(&ctx.beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?;
let validator_pubkeys = state
.validators
.iter()
.filter(|validator| validator.is_active_at(state.current_epoch()))
.map(|validator| validator.pubkey.clone())
.collect();
return_validator_duties(&ctx.beacon_chain, epoch, validator_pubkeys)
}
/// Helper function to return the state that can be used to determine the duties for some `epoch`.
pub fn get_state_for_epoch<T: BeaconChainTypes>(
beacon_chain: &BeaconChain<T>,
epoch: Epoch,
config: StateSkipConfig,
) -> Result<BeaconState<T::EthSpec>, ApiError> {
let slots_per_epoch = T::EthSpec::slots_per_epoch();
let head = beacon_chain.head()?;
let current_epoch = beacon_chain.epoch()?;
let head_epoch = head.beacon_state.current_epoch();
if head_epoch == current_epoch && RelativeEpoch::from_epoch(current_epoch, epoch).is_ok() {
Ok(head.beacon_state)
} else {
// If epoch is ahead of current epoch, then it should be a "next epoch" request for
// attestation duties. So, go to the start slot of the epoch prior to that,
// which should be just the next wall-clock epoch.
let slot = if epoch > current_epoch {
(epoch - 1).start_slot(slots_per_epoch)
}
// Otherwise, go to the start of the request epoch.
else {
epoch.start_slot(slots_per_epoch)
};
beacon_chain.state_at_slot(slot, config).map_err(|e| {
ApiError::ServerError(format!("Unable to load state for epoch {}: {:?}", epoch, e))
})
}
}
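// Illustrative sketch only: the slot-selection rule above as a pure function. A
// request for a future epoch resolves to the start of the epoch before it (the next
// wall-clock epoch); anything else resolves to the start of the requested epoch.
fn duty_state_slot(request_epoch: u64, current_epoch: u64, slots_per_epoch: u64) -> u64 {
    if request_epoch > current_epoch {
        (request_epoch - 1) * slots_per_epoch
    } else {
        request_epoch * slots_per_epoch
    }
}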
/// Helper function to get the duties for some `validator_pubkeys` in some `epoch`.
fn return_validator_duties<T: BeaconChainTypes>(
beacon_chain: &BeaconChain<T>,
epoch: Epoch,
validator_pubkeys: Vec<PublicKeyBytes>,
) -> Result<Vec<ValidatorDutyBytes>, ApiError> {
let mut state = get_state_for_epoch(&beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?;
let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), epoch)
.map_err(|_| ApiError::ServerError(String::from("Loaded state is in the wrong epoch")))?;
state
.build_committee_cache(relative_epoch, &beacon_chain.spec)
.map_err(|e| ApiError::ServerError(format!("Unable to build committee cache: {:?}", e)))?;
// Get a list of all validators for this epoch.
//
// Used for quickly determining the slot for a proposer.
let validator_proposers = if epoch == state.current_epoch() {
Some(
epoch
.slot_iter(T::EthSpec::slots_per_epoch())
.map(|slot| {
state
.get_beacon_proposer_index(slot, &beacon_chain.spec)
.map(|i| (i, slot))
.map_err(|e| {
ApiError::ServerError(format!(
"Unable to get proposer index for validator: {:?}",
e
))
})
})
.collect::<Result<Vec<_>, _>>()?,
)
} else {
None
};
validator_pubkeys
.into_iter()
.map(|validator_pubkey| {
// The `beacon_chain` can return a validator index that does not exist in all states.
// Therefore, we must check to ensure that the validator index is valid for our
// `state`.
let validator_index = beacon_chain
.validator_index(&validator_pubkey)
.map_err(|e| {
ApiError::ServerError(format!("Unable to get validator index: {:?}", e))
})?
.filter(|i| *i < state.validators.len());
if let Some(validator_index) = validator_index {
let duties = state
.get_attestation_duties(validator_index, relative_epoch)
.map_err(|e| {
ApiError::ServerError(format!(
"Unable to obtain attestation duties: {:?}",
e
))
})?;
let committee_count_at_slot = duties
.map(|d| state.get_committee_count_at_slot(d.slot))
.transpose()
.map_err(|e| {
ApiError::ServerError(format!(
"Unable to find committee count at slot: {:?}",
e
))
})?;
let aggregator_modulo = duties
.map(|duties| SelectionProof::modulo(duties.committee_len, &beacon_chain.spec))
.transpose()
.map_err(|e| {
ApiError::ServerError(format!("Unable to find modulo: {:?}", e))
})?;
let block_proposal_slots = validator_proposers.as_ref().map(|proposers| {
proposers
.iter()
.filter(|(i, _slot)| validator_index == *i)
.map(|(_i, slot)| *slot)
.collect()
});
Ok(ValidatorDutyBytes {
validator_pubkey,
validator_index: Some(validator_index as u64),
attestation_slot: duties.map(|d| d.slot),
attestation_committee_index: duties.map(|d| d.index),
committee_count_at_slot,
attestation_committee_position: duties.map(|d| d.committee_position),
block_proposal_slots,
aggregator_modulo,
})
} else {
Ok(ValidatorDutyBytes {
validator_pubkey,
validator_index: None,
attestation_slot: None,
attestation_committee_index: None,
attestation_committee_position: None,
block_proposal_slots: None,
committee_count_at_slot: None,
aggregator_modulo: None,
})
}
})
.collect::<Result<Vec<_>, ApiError>>()
}
/// HTTP Handler to produce a new BeaconBlock from the current state, ready to be signed by a validator.
pub fn get_new_beacon_block<T: BeaconChainTypes>(
req: Request<Vec<u8>>,
ctx: Arc<Context<T>>,
) -> Result<BeaconBlock<T::EthSpec>, ApiError> {
let query = UrlQuery::from_request(&req)?;
let slot = query.slot()?;
let randao_reveal = query.randao_reveal()?;
let validator_graffiti = if let Some((_key, value)) = query.first_of_opt(&["graffiti"]) {
Some(parse_hex_ssz_bytes(&value)?)
} else {
None
};
let (new_block, _state) = ctx
.beacon_chain
.produce_block(randao_reveal, slot, validator_graffiti)
.map_err(|e| {
error!(
ctx.log,
"Error whilst producing block";
"error" => format!("{:?}", e)
);
ApiError::ServerError(format!(
"Beacon node is not able to produce a block: {:?}",
e
))
})?;
Ok(new_block)
}
/// HTTP Handler to publish a SignedBeaconBlock, which has been signed by a validator.
pub fn publish_beacon_block<T: BeaconChainTypes>(
req: Request<Vec<u8>>,
ctx: Arc<Context<T>>,
) -> Result<(), ApiError> {
let body = req.into_body();
serde_json::from_slice(&body).map_err(|e| {
ApiError::BadRequest(format!("Unable to parse JSON into SignedBeaconBlock: {:?}", e))
})
.and_then(move |block: SignedBeaconBlock<T::EthSpec>| {
let slot = block.slot();
match ctx.beacon_chain.process_block(block.clone()) {
Ok(block_root) => {
// Block was processed, publish via gossipsub
info!(
ctx.log,
"Block from local validator";
"block_root" => format!("{}", block_root),
"block_slot" => slot,
);
publish_beacon_block_to_network::<T>(&ctx.network_chan, block)?;
// Run the fork choice algorithm and enshrine a new canonical head, if
// found.
//
// The new head may or may not be the block we just received.
if let Err(e) = ctx.beacon_chain.fork_choice() {
error!(
ctx.log,
"Failed to find beacon chain head";
"error" => format!("{:?}", e)
);
} else {
// In the best case, validators should produce blocks that become the
// head.
//
// Potential reasons this may not be the case:
//
// - A quick re-org between block produce and publish.
// - Excessive time between block produce and publish.
// - A validator is using another beacon node to produce blocks and
// submitting them here.
if ctx.beacon_chain.head()?.beacon_block_root != block_root {
warn!(
ctx.log,
"Block from validator is not head";
"desc" => "potential re-org",
);
}
}
Ok(())
}
Err(BlockError::BeaconChainError(e)) => {
error!(
ctx.log,
"Error whilst processing block";
"error" => format!("{:?}", e)
);
Err(ApiError::ServerError(format!(
"Error while processing block: {:?}",
e
)))
}
Err(other) => {
warn!(
ctx.log,
"Invalid block from local validator";
"outcome" => format!("{:?}", other)
);
Err(ApiError::ProcessingError(format!(
"The SignedBeaconBlock could not be processed and has not been published: {:?}",
other
)))
}
}
})
}
/// HTTP Handler to produce a new Attestation from the current state, ready to be signed by a validator.
pub fn get_new_attestation<T: BeaconChainTypes>(
req: Request<Vec<u8>>,
ctx: Arc<Context<T>>,
) -> Result<Attestation<T::EthSpec>, ApiError> {
let query = UrlQuery::from_request(&req)?;
let slot = query.slot()?;
let index = query.committee_index()?;
ctx.beacon_chain
.produce_unaggregated_attestation(slot, index)
.map_err(|e| ApiError::BadRequest(format!("Unable to produce attestation: {:?}", e)))
}
/// HTTP Handler to retrieve the aggregate attestation for a slot
pub fn get_aggregate_attestation<T: BeaconChainTypes>(
req: Request<Vec<u8>>,
ctx: Arc<Context<T>>,
) -> Result<Attestation<T::EthSpec>, ApiError> {
let query = UrlQuery::from_request(&req)?;
let attestation_data = query.attestation_data()?;
match ctx
.beacon_chain
.get_aggregated_attestation(&attestation_data)
{
Ok(Some(attestation)) => Ok(attestation),
Ok(None) => Err(ApiError::NotFound(format!(
"No matching aggregate attestation for slot {:?} is known in slot {:?}",
attestation_data.slot,
ctx.beacon_chain.slot()
))),
Err(e) => Err(ApiError::ServerError(format!(
"Unable to obtain attestation: {:?}",
e
))),
}
}
/// HTTP Handler to publish a list of Attestations, which have been signed by a number of validators.
pub fn publish_attestations<T: BeaconChainTypes>(
req: Request<Vec<u8>>,
ctx: Arc<Context<T>>,
) -> Result<(), ApiError> {
let bytes = req.into_body();
serde_json::from_slice(&bytes)
.map_err(|e| {
ApiError::BadRequest(format!(
"Unable to deserialize JSON into a list of attestations: {:?}",
e
))
})
// Process all of the attestations _without_ exiting early if one fails.
.map(
move |attestations: Vec<(Attestation<T::EthSpec>, SubnetId)>| {
attestations
.into_iter()
.enumerate()
.map(|(i, (attestation, subnet_id))| {
process_unaggregated_attestation(
&ctx.beacon_chain,
ctx.network_chan.clone(),
attestation,
subnet_id,
i,
&ctx.log,
)
})
.collect::<Vec<Result<_, _>>>()
},
)
// Iterate through all the results and return on the first `Err`.
//
// Note: this will only provide info about the _first_ failure, not all failures.
.and_then(|processing_results| processing_results.into_iter().try_for_each(|result| result))
.map(|_| ())
}
/// Processes an unaggregated attestation that was included in a list of attestations with the
/// index `i`.
#[allow(clippy::redundant_clone)] // false positives in this function.
fn process_unaggregated_attestation<T: BeaconChainTypes>(
beacon_chain: &BeaconChain<T>,
network_chan: NetworkChannel<T::EthSpec>,
attestation: Attestation<T::EthSpec>,
subnet_id: SubnetId,
i: usize,
log: &Logger,
) -> Result<(), ApiError> {
let data = &attestation.data.clone();
// Verify that the attestation is valid to be included on the gossip network.
let verified_attestation = beacon_chain
.verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id)
.map_err(|e| {
handle_attestation_error(
e,
&format!("unaggregated attestation {} failed gossip verification", i),
data,
log,
)
})?;
// Publish the attestation to the network
if let Err(e) = network_chan.send(NetworkMessage::Publish {
messages: vec![PubsubMessage::Attestation(Box::new((
subnet_id,
attestation,
)))],
}) {
return Err(ApiError::ServerError(format!(
"Unable to send unaggregated attestation {} to network: {:?}",
i, e
)));
}
beacon_chain
.apply_attestation_to_fork_choice(&verified_attestation)
.map_err(|e| {
handle_fork_choice_error(
e,
&format!(
"unaggregated attestation {} was unable to be added to fork choice",
i
),
data,
log,
)
})?;
beacon_chain
.add_to_naive_aggregation_pool(verified_attestation)
.map_err(|e| {
handle_attestation_error(
e,
&format!(
"unaggregated attestation {} was unable to be added to aggregation pool",
i
),
data,
log,
)
})?;
Ok(())
}
/// HTTP Handler to publish an Attestation, which has been signed by a validator.
pub fn publish_aggregate_and_proofs<T: BeaconChainTypes>(
req: Request<Vec<u8>>,
ctx: Arc<Context<T>>,
) -> Result<(), ApiError> {
let body = req.into_body();
serde_json::from_slice(&body)
.map_err(|e| {
ApiError::BadRequest(format!(
"Unable to deserialize JSON into a list of SignedAggregateAndProof: {:?}",
e
))
})
// Process all of the aggregates _without_ exiting early if one fails.
.map(
move |signed_aggregates: Vec<SignedAggregateAndProof<T::EthSpec>>| {
signed_aggregates
.into_iter()
.enumerate()
.map(|(i, signed_aggregate)| {
process_aggregated_attestation(
&ctx.beacon_chain,
ctx.network_chan.clone(),
signed_aggregate,
i,
&ctx.log,
)
})
.collect::<Vec<Result<_, _>>>()
},
)
// Iterate through all the results and return on the first `Err`.
//
// Note: this will only provide info about the _first_ failure, not all failures.
.and_then(|processing_results| processing_results.into_iter().try_for_each(|result| result))
}
/// Processes an aggregated attestation that was included in a list of attestations with the index
/// `i`.
#[allow(clippy::redundant_clone)] // false positives in this function.
fn process_aggregated_attestation<T: BeaconChainTypes>(
beacon_chain: &BeaconChain<T>,
network_chan: NetworkChannel<T::EthSpec>,
signed_aggregate: SignedAggregateAndProof<T::EthSpec>,
i: usize,
log: &Logger,
) -> Result<(), ApiError> {
let data = &signed_aggregate.message.aggregate.data.clone();
// Verify that the attestation is valid to be included on the gossip network.
//
// Using this gossip check for local validators is not necessarily ideal; there will be some
// attestations that we reject that could possibly be included in a block (e.g., attestations
// that are late by more than 1 epoch but less than 2). We can revisit this if we notice
// that it's materially affecting validator profits. Until then, I'm hesitant to introduce yet
// _another_ attestation verification path.
let verified_attestation =
match beacon_chain.verify_aggregated_attestation_for_gossip(signed_aggregate.clone()) {
Ok(verified_attestation) => verified_attestation,
Err(AttnError::AttestationAlreadyKnown(attestation_root)) => {
trace!(
log,
"Ignored known attn from local validator";
"attn_root" => format!("{}", attestation_root)
);
// Exit early with success for a known attestation; there's no need to re-process
// an aggregate we already know.
return Ok(());
}
/*
* It's worth noting that we don't check for `Error::AggregatorAlreadyKnown` since (at
* the time of writing) we check for `AttestationAlreadyKnown` first.
*
* Given this, it's impossible to hit `Error::AggregatorAlreadyKnown` without that
* aggregator having already produced a conflicting aggregation. This is not slashable
* but I think it's still the sort of condition we should error on, at least for now.
*/
Err(e) => {
return Err(handle_attestation_error(
e,
&format!("aggregated attestation {} failed gossip verification", i),
data,
log,
))
}
};
// Publish the attestation to the network
if let Err(e) = network_chan.send(NetworkMessage::Publish {
messages: vec![PubsubMessage::AggregateAndProofAttestation(Box::new(
signed_aggregate,
))],
}) {
return Err(ApiError::ServerError(format!(
"Unable to send aggregated attestation {} to network: {:?}",
i, e
)));
}
beacon_chain
.apply_attestation_to_fork_choice(&verified_attestation)
.map_err(|e| {
handle_fork_choice_error(
e,
&format!(
"aggregated attestation {} was unable to be added to fork choice",
i
),
data,
log,
)
})?;
beacon_chain
.add_to_block_inclusion_pool(verified_attestation)
.map_err(|e| {
handle_attestation_error(
e,
&format!(
"aggregated attestation {} was unable to be added to op pool",
i
),
data,
log,
)
})?;
Ok(())
}
/// Common handler for `AttnError` during attestation verification.
fn handle_attestation_error(
e: AttnError,
detail: &str,
data: &AttestationData,
log: &Logger,
) -> ApiError {
match e {
AttnError::BeaconChainError(e) => {
error!(
log,
"Internal error verifying local attestation";
"detail" => detail,
"error" => format!("{:?}", e),
"target" => data.target.epoch,
"source" => data.source.epoch,
"index" => data.index,
"slot" => data.slot,
);
ApiError::ServerError(format!(
"Internal error verifying local attestation. Error: {:?}. Detail: {}",
e, detail
))
}
e => {
error!(
log,
"Invalid local attestation";
"detail" => detail,
"reason" => format!("{:?}", e),
"target" => data.target.epoch,
"source" => data.source.epoch,
"index" => data.index,
"slot" => data.slot,
);
ApiError::ProcessingError(format!(
"Invalid local attestation. Error: {:?} Detail: {}",
e, detail
))
}
}
}
/// Common handler for `ForkChoiceError` during attestation verification.
fn handle_fork_choice_error(
e: BeaconChainError,
detail: &str,
data: &AttestationData,
log: &Logger,
) -> ApiError {
match e {
BeaconChainError::ForkChoiceError(ForkChoiceError::InvalidAttestation(e)) => {
error!(
log,
"Local attestation invalid for fork choice";
"detail" => detail,
"reason" => format!("{:?}", e),
"target" => data.target.epoch,
"source" => data.source.epoch,
"index" => data.index,
"slot" => data.slot,
);
ApiError::ProcessingError(format!(
"Invalid local attestation. Error: {:?} Detail: {}",
e, detail
))
}
e => {
error!(
log,
"Internal error applying attn to fork choice";
"detail" => detail,
"error" => format!("{:?}", e),
"target" => data.target.epoch,
"source" => data.source.epoch,
"index" => data.index,
"slot" => data.slot,
);
ApiError::ServerError(format!(
"Internal error verifying local attestation. Error: {:?}. Detail: {}",
e, detail
))
}
}
}

File diff suppressed because it is too large.


@ -142,7 +142,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
         .arg(
             Arg::with_name("http")
                 .long("http")
-                .help("Enable RESTful HTTP API server. Disabled by default.")
+                .help("Enable the RESTful HTTP API server. Disabled by default.")
                 .takes_value(false),
         )
         .arg(
@ -169,6 +169,38 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 .default_value("")
                 .takes_value(true),
         )
+        /* Prometheus metrics HTTP server related arguments */
+        .arg(
+            Arg::with_name("metrics")
+                .long("metrics")
+                .help("Enable the Prometheus metrics HTTP server. Disabled by default.")
+                .takes_value(false),
+        )
+        .arg(
+            Arg::with_name("metrics-address")
+                .long("metrics-address")
+                .value_name("ADDRESS")
+                .help("Set the listen address for the Prometheus metrics HTTP server.")
+                .default_value("127.0.0.1")
+                .takes_value(true),
+        )
+        .arg(
+            Arg::with_name("metrics-port")
+                .long("metrics-port")
+                .value_name("PORT")
+                .help("Set the listen TCP port for the Prometheus metrics HTTP server.")
+                .default_value("5054")
+                .takes_value(true),
+        )
+        .arg(
+            Arg::with_name("metrics-allow-origin")
+                .long("metrics-allow-origin")
+                .value_name("ORIGIN")
+                .help("Set the value of the Access-Control-Allow-Origin response HTTP header for the Prometheus metrics HTTP server. \
+                    Use * to allow any origin (not recommended in production)")
+                .default_value("")
+                .takes_value(true),
+        )
         /* Websocket related arguments */
         .arg(
             Arg::with_name("ws")


@ -87,26 +87,26 @@ pub fn get_config<E: EthSpec>(
      */
     if cli_args.is_present("staking") {
-        client_config.rest_api.enabled = true;
+        client_config.http_api.enabled = true;
         client_config.sync_eth1_chain = true;
     }

     /*
-     * Http server
+     * Http API server
      */

     if cli_args.is_present("http") {
-        client_config.rest_api.enabled = true;
+        client_config.http_api.enabled = true;
     }

     if let Some(address) = cli_args.value_of("http-address") {
-        client_config.rest_api.listen_address = address
+        client_config.http_api.listen_addr = address
             .parse::<Ipv4Addr>()
             .map_err(|_| "http-address is not a valid IPv4 address.")?;
     }

     if let Some(port) = cli_args.value_of("http-port") {
-        client_config.rest_api.port = port
+        client_config.http_api.listen_port = port
             .parse::<u16>()
             .map_err(|_| "http-port is not a valid u16.")?;
     }
@ -117,7 +117,36 @@ pub fn get_config<E: EthSpec>(
         hyper::header::HeaderValue::from_str(allow_origin)
             .map_err(|_| "Invalid allow-origin value")?;

-        client_config.rest_api.allow_origin = allow_origin.to_string();
+        client_config.http_api.allow_origin = Some(allow_origin.to_string());
+    }
+
+    /*
+     * Prometheus metrics HTTP server
+     */
+
+    if cli_args.is_present("metrics") {
+        client_config.http_metrics.enabled = true;
+    }
+
+    if let Some(address) = cli_args.value_of("metrics-address") {
+        client_config.http_metrics.listen_addr = address
+            .parse::<Ipv4Addr>()
+            .map_err(|_| "metrics-address is not a valid IPv4 address.")?;
+    }
+
+    if let Some(port) = cli_args.value_of("metrics-port") {
+        client_config.http_metrics.listen_port = port
+            .parse::<u16>()
+            .map_err(|_| "metrics-port is not a valid u16.")?;
+    }
+
+    if let Some(allow_origin) = cli_args.value_of("metrics-allow-origin") {
+        // Pre-validate the config value to give feedback to the user on node startup, instead of
+        // as late as when the first API response is produced.
+        hyper::header::HeaderValue::from_str(allow_origin)
+            .map_err(|_| "Invalid allow-origin value")?;
+
+        client_config.http_metrics.allow_origin = Some(allow_origin.to_string());
     }

     // Log a warning indicating an open HTTP server if it wasn't specified explicitly
@ -125,7 +154,7 @@ pub fn get_config<E: EthSpec>(
     if cli_args.is_present("staking") {
         warn!(
             log,
-            "Running HTTP server on port {}", client_config.rest_api.port
+            "Running HTTP server on port {}", client_config.http_api.listen_port
         );
     }
@ -219,7 +248,8 @@ pub fn get_config<E: EthSpec>(
             unused_port("tcp").map_err(|e| format!("Failed to get port for libp2p: {}", e))?;
         client_config.network.discovery_port =
             unused_port("udp").map_err(|e| format!("Failed to get port for discovery: {}", e))?;
-        client_config.rest_api.port = 0;
+        client_config.http_api.listen_port = 0;
+        client_config.http_metrics.listen_port = 0;
         client_config.websocket_server.port = 0;
     }
@ -230,6 +260,11 @@ pub fn get_config<E: EthSpec>(
         client_config.eth1.deposit_contract_address =
             format!("{:?}", eth2_testnet_config.deposit_contract_address()?);

+        let spec_contract_address = format!("{:?}", spec.deposit_contract_address);
+        if client_config.eth1.deposit_contract_address != spec_contract_address {
+            return Err("Testnet contract address does not match spec".into());
+        }
+
         client_config.eth1.deposit_contract_deploy_block =
             eth2_testnet_config.deposit_contract_deploy_block;
         client_config.eth1.lowest_cached_block_number =
@ -265,7 +300,7 @@ pub fn get_config<E: EthSpec>(
     };

     let trimmed_graffiti_len = cmp::min(raw_graffiti.len(), GRAFFITI_BYTES_LEN);
-    client_config.graffiti[..trimmed_graffiti_len]
+    client_config.graffiti.0[..trimmed_graffiti_len]
         .copy_from_slice(&raw_graffiti[..trimmed_graffiti_len]);

     if let Some(max_skip_slots) = cli_args.value_of("max-skip-slots") {


@ -71,7 +71,6 @@ impl<E: EthSpec> ProductionBeaconNode<E> {
         context: RuntimeContext<E>,
         mut client_config: ClientConfig,
     ) -> Result<Self, String> {
-        let http_eth2_config = context.eth2_config().clone();
         let spec = context.eth2_config().spec.clone();
         let client_config_1 = client_config.clone();
         let client_genesis = client_config.genesis.clone();
@ -118,26 +117,22 @@ impl<E: EthSpec> ProductionBeaconNode<E> {
             builder.no_eth1_backend()?
         };

-        let (builder, events) = builder
+        let (builder, _events) = builder
             .system_time_slot_clock()?
             .tee_event_handler(client_config.websocket_server.clone())?;

         // Inject the executor into the discv5 network config.
         client_config.network.discv5_config.executor = Some(Box::new(executor));

-        let builder = builder
+        builder
             .build_beacon_chain()?
             .network(&client_config.network)
             .await?
-            .notifier()?;
-
-        let builder = if client_config.rest_api.enabled {
-            builder.http_server(&client_config, &http_eth2_config, events)?
-        } else {
-            builder
-        };
-
-        Ok(Self(builder.build()))
+            .notifier()?
+            .http_api_config(client_config.http_api.clone())
+            .http_metrics_config(client_config.http_metrics.clone())
+            .build()
+            .map(Self)
     }

     pub fn into_inner(self) -> ProductionClient<E> {


@ -3,6 +3,7 @@
 use beacon_chain::StateSkipConfig;
 use node_test_rig::{
     environment::{Environment, EnvironmentBuilder},
+    eth2::types::StateId,
     testing_client_config, LocalBeaconNode,
 };
 use types::{EthSpec, MinimalEthSpec, Slot};
@ -34,10 +35,12 @@ fn http_server_genesis_state() {
     let node = build_node(&mut env);
     let remote_node = node.remote_node().expect("should produce remote node");

-    let (api_state, _root) = env
+    let api_state = env
         .runtime()
-        .block_on(remote_node.http.beacon().get_state_by_slot(Slot::new(0)))
-        .expect("should fetch state from http api");
+        .block_on(remote_node.get_debug_beacon_states(StateId::Slot(Slot::new(0))))
+        .expect("should fetch state from http api")
+        .unwrap()
+        .data;

     let mut db_state = node
         .client


@ -14,20 +14,15 @@
 * [Key recovery](./key-recovery.md)
 * [Validator Management](./validator-management.md)
 * [Importing from the Eth2 Launchpad](./validator-import-launchpad.md)
-* [Local Testnets](./local-testnets.md)
-* [API](./api.md)
-* [HTTP (RESTful JSON)](./http.md)
-* [/node](./http/node.md)
-* [/beacon](./http/beacon.md)
-* [/validator](./http/validator.md)
-* [/consensus](./http/consensus.md)
-* [/network](./http/network.md)
-* [/spec](./http/spec.md)
-* [/advanced](./http/advanced.md)
-* [/lighthouse](./http/lighthouse.md)
-* [WebSocket](./websockets.md)
+* [APIs](./api.md)
+* [Beacon Node API](./api-bn.md)
+* [/lighthouse](./api-lighthouse.md)
+* [Validator Inclusion APIs](./validator-inclusion.md)
+* [Validator Client API](./api-vc.md)
+* [Prometheus Metrics](./advanced_metrics.md)
 * [Advanced Usage](./advanced.md)
 * [Database Configuration](./advanced_database.md)
+* [Local Testnets](./local-testnets.md)
 * [Contributing](./contributing.md)
 * [Development Environment](./setup.md)
 * [FAQs](./faq.md)


@ -0,0 +1,34 @@
# Prometheus Metrics
Lighthouse provides an extensive suite of metrics and monitoring in the
[Prometheus](https://prometheus.io/docs/introduction/overview/) export format
via a HTTP server built into Lighthouse.
These metrics are generally consumed by a Prometheus server and displayed via a
Grafana dashboard. These components are available in a docker-compose format at
[sigp/lighthouse-metrics](https://github.com/sigp/lighthouse-metrics).
## Beacon Node Metrics
By default, these metrics are disabled but can be enabled with the `--metrics`
flag. Use the `--metrics-address`, `--metrics-port` and
`--metrics-allow-origin` flags to customize the metrics server.
### Example
Start a beacon node with the metrics server enabled:
```bash
lighthouse bn --metrics
```
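The listen address and port can also be changed. For example, the following sketch exposes the metrics server on all interfaces using a non-default port (the address and port values are illustrative):
```bash
# Serve metrics on all interfaces, port 5154 (illustrative values).
lighthouse bn --metrics --metrics-address 0.0.0.0 --metrics-port 5154
```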
Check to ensure that the metrics are available on the default port:
```bash
curl localhost:5054/metrics
```
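The full metrics dump is fairly large. When inspecting a single metric family it can help to filter the output, for example with `grep` (the `beacon_` prefix below is only an illustrative pattern, not a guaranteed metric name):
```bash
# Show only metrics whose names start with "beacon_" (illustrative filter).
curl -s localhost:5054/metrics | grep "^beacon_"
```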
## Validator Client Metrics
The validator client does not *yet* expose metrics; however, this functionality
is expected to be implemented in late September 2020.

book/src/api-bn.md (new file, 130 lines)

@ -0,0 +1,130 @@
# Beacon Node API
Lighthouse implements the standard [Eth2 Beacon Node API
specification][OpenAPI]. Please follow that link for a full description of each API endpoint.
> **Warning:** the standard API specification is still in flux and the Lighthouse implementation is partially incomplete. You can track the status of each endpoint at [#1434](https://github.com/sigp/lighthouse/issues/1434).
## Starting the server
A Lighthouse beacon node can be configured to expose a HTTP server by supplying the `--http` flag. The default listen address is `127.0.0.1:5052`.
The following CLI flags control the HTTP server:
- `--http`: enable the HTTP server (required even if the following flags are
provided).
- `--http-port`: specify the listen port of the server.
- `--http-address`: specify the listen address of the server.
- `--http-allow-origin`: specify the value of the `Access-Control-Allow-Origin`
header. The default is to not supply a header.
The schema of the API aligns with the standard Eth2 Beacon Node API as defined
at [github.com/ethereum/eth2.0-APIs](https://github.com/ethereum/eth2.0-APIs).
An interactive specification is available [here][OpenAPI].
### CLI Example
Start the beacon node with the HTTP server listening on [http://localhost:5052](http://localhost:5052):
```bash
lighthouse bn --http
```
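The flags listed above can be combined. For example, the following sketch binds the API server to all interfaces on a non-default port (the address and port values are illustrative):
```bash
# Listen on all interfaces, port 5152 (illustrative values).
lighthouse bn --http --http-address 0.0.0.0 --http-port 5152
```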
## HTTP Request/Response Examples
This section contains some simple examples of using the HTTP API via `curl`.
All endpoints are documented in the [Eth2 Beacon Node API
specification][OpenAPI].
### View the head of the beacon chain
Returns the block header at the head of the canonical chain.
```bash
curl -X GET "http://localhost:5052/eth/v1/beacon/headers/head" -H "accept:
application/json"
```
```json
{
"data": {
"root": "0x4381454174fc28c7095077e959dcab407ae5717b5dca447e74c340c1b743d7b2",
"canonical": true,
"header": {
"message": {
"slot": 3199,
"proposer_index": "19077",
"parent_root": "0xf1934973041c5896d0d608e52847c3cd9a5f809c59c64e76f6020e3d7cd0c7cd",
"state_root": "0xe8e468f9f5961655dde91968f66480868dab8d4147de9498111df2b7e4e6fe60",
"body_root": "0x6f183abc6c4e97f832900b00d4e08d4373bfdc819055d76b0f4ff850f559b883"
},
"signature": "0x988064a2f9cf13fe3aae051a3d85f6a4bca5a8ff6196f2f504e32f1203b549d5f86a39c6509f7113678880701b1881b50925a0417c1c88a750c8da7cd302dda5aabae4b941e3104d0cf19f5043c4f22a7d75d0d50dad5dbdaf6991381dc159ab"
}
}
}
```
### View the status of a validator
Shows the status of the validator at index `1` in the `head` state.
```bash
curl -X GET "http://localhost:5052/eth/v1/beacon/states/head/validators/1" -H "accept: application/json"
```
```json
{
"data": {
"index": "1",
"balance": "63985937939",
"status": "Active",
"validator": {
"pubkey": "0x873e73ee8b3e4fcf1d2fb0f1036ba996ac9910b5b348f6438b5f8ef50857d4da9075d0218a9d1b99a9eae235a39703e1",
"withdrawal_credentials": "0x00b8cdcf79ba7e74300a07e9d8f8121dd0d8dd11dcfd6d3f2807c45b426ac968",
"effective_balance": 32000000000,
"slashed": false,
"activation_eligibility_epoch": 0,
"activation_epoch": 0,
"exit_epoch": 18446744073709552000,
"withdrawable_epoch": 18446744073709552000
}
}
}
```
## Troubleshooting
### HTTP API is unavailable or refusing connections
Ensure the `--http` flag has been supplied at the CLI.
You can quickly check that the HTTP endpoint is up using `curl`:
```bash
curl -X GET "http://localhost:5052/eth/v1/node/version" -H "accept: application/json"
```
The beacon node should respond with its version:
```json
{"data":{"version":"Lighthouse/v0.2.9-6f7b4768a/x86_64-linux"}}
```
If this doesn't work, the server might not be started or there might be a
network connection error.
### I cannot query my node from a web browser (e.g., Swagger)
By default, the API does not provide an `Access-Control-Allow-Origin` header,
which causes browsers to reject responses with a CORS error.
The `--http-allow-origin` flag can be used to add a wild-card CORS header:
```bash
lighthouse bn --http --http-allow-origin "*"
```
> **Warning:** Adding the wild-card allow-origin flag can pose a security risk.
> Only use it in production if you understand the risks of a loose CORS policy.
[OpenAPI]: https://ethereum.github.io/eth2.0-APIs/#/

book/src/api-lighthouse.md (new file, 179 lines)

@ -0,0 +1,179 @@
# Lighthouse Non-Standard APIs
Lighthouse fully supports the standardization efforts at
[github.com/ethereum/eth2.0-APIs](https://github.com/ethereum/eth2.0-APIs);
however, sometimes development requires additional endpoints that shouldn't
necessarily be defined as a broad-reaching standard. Such endpoints are placed
behind the `/lighthouse` path.
The endpoints behind the `/lighthouse` path are:
- Not intended to be stable.
- Not guaranteed to be safe.
- For testing and debugging purposes only.
Although we don't recommend that users rely on these endpoints, we
document them briefly so they can be utilized by developers and
researchers.
### `/lighthouse/health`
*Presently only available on Linux.*
```bash
curl -X GET "http://localhost:5052/lighthouse/health" -H "accept: application/json" | jq
```
```json
{
"data": {
"pid": 1728254,
"pid_num_threads": 47,
"pid_mem_resident_set_size": 510054400,
"pid_mem_virtual_memory_size": 3963158528,
"sys_virt_mem_total": 16715530240,
"sys_virt_mem_available": 4065374208,
"sys_virt_mem_used": 11383402496,
"sys_virt_mem_free": 1368662016,
"sys_virt_mem_percent": 75.67906,
"sys_loadavg_1": 4.92,
"sys_loadavg_5": 5.53,
"sys_loadavg_15": 5.58
}
}
```
### `/lighthouse/syncing`
```bash
curl -X GET "http://localhost:5052/lighthouse/syncing" -H "accept: application/json" | jq
```
```json
{
"data": {
"SyncingFinalized": {
"start_slot": 3104,
"head_slot": 343744,
"head_root": "0x1b434b5ed702338df53eb5e3e24336a90373bb51f74b83af42840be7421dd2bf"
}
}
}
```
### `/lighthouse/peers`
```bash
curl -X GET "http://localhost:5052/lighthouse/peers" -H "accept: application/json" | jq
```
```json
[
{
"peer_id": "16Uiu2HAmA9xa11dtNv2z5fFbgF9hER3yq35qYNTPvN7TdAmvjqqv",
"peer_info": {
"_status": "Healthy",
"score": {
"score": 0
},
"client": {
"kind": "Lighthouse",
"version": "v0.2.9-1c9a055c",
"os_version": "aarch64-linux",
"protocol_version": "lighthouse/libp2p",
"agent_string": "Lighthouse/v0.2.9-1c9a055c/aarch64-linux"
},
"connection_status": {
"status": "disconnected",
"connections_in": 0,
"connections_out": 0,
"last_seen": 1082,
"banned_ips": []
},
"listening_addresses": [
"/ip4/80.109.35.174/tcp/9000",
"/ip4/127.0.0.1/tcp/9000",
"/ip4/192.168.0.73/tcp/9000",
"/ip4/172.17.0.1/tcp/9000",
"/ip6/::1/tcp/9000"
],
"sync_status": {
"Advanced": {
"info": {
"status_head_slot": 343829,
"status_head_root": "0xe34e43efc2bb462d9f364bc90e1f7f0094e74310fd172af698b5a94193498871",
"status_finalized_epoch": 10742,
"status_finalized_root": "0x1b434b5ed702338df53eb5e3e24336a90373bb51f74b83af42840be7421dd2bf"
}
}
},
"meta_data": {
"seq_number": 160,
"attnets": "0x0000000800000080"
}
}
}
]
```
### `/lighthouse/peers/connected`
```bash
curl -X GET "http://localhost:5052/lighthouse/peers/connected" -H "accept: application/json" | jq
```
```json
[
{
"peer_id": "16Uiu2HAkzJC5TqDSKuLgVUsV4dWat9Hr8EjNZUb6nzFb61mrfqBv",
"peer_info": {
"_status": "Healthy",
"score": {
"score": 0
},
"client": {
"kind": "Lighthouse",
"version": "v0.2.8-87181204+",
"os_version": "x86_64-linux",
"protocol_version": "lighthouse/libp2p",
"agent_string": "Lighthouse/v0.2.8-87181204+/x86_64-linux"
},
"connection_status": {
"status": "connected",
"connections_in": 1,
"connections_out": 0,
"last_seen": 0,
"banned_ips": []
},
"listening_addresses": [
"/ip4/34.204.178.218/tcp/9000",
"/ip4/127.0.0.1/tcp/9000",
"/ip4/172.31.67.58/tcp/9000",
"/ip4/172.17.0.1/tcp/9000",
"/ip6/::1/tcp/9000"
],
"sync_status": "Unknown",
"meta_data": {
"seq_number": 1819,
"attnets": "0xffffffffffffffff"
}
}
}
]
```
### `/lighthouse/proto_array`
```bash
curl -X GET "http://localhost:5052/lighthouse/proto_array" -H "accept: application/json" | jq
```
*Example omitted for brevity.*
### `/lighthouse/validator_inclusion/{epoch}/{validator_id}`
See [Validator Inclusion APIs](./validator-inclusion.md).
### `/lighthouse/validator_inclusion/{epoch}/global`
See [Validator Inclusion APIs](./validator-inclusion.md).

book/src/api-vc.md (new file, 3 lines)

@ -0,0 +1,3 @@
# Validator Client API
The validator client API is planned for release in late September 2020.


@ -1,13 +1,9 @@
 # APIs

-The Lighthouse `beacon_node` provides two APIs for local consumption:
-
-- A [RESTful JSON HTTP API](http.html) which provides beacon chain, node and network
-  information.
-- A read-only [WebSocket API](websockets.html) providing beacon chain events, as they occur.
-
-## Security
-
-These endpoints are not designed to be exposed to the public Internet or
-untrusted users. They may pose a considerable DoS attack vector when used improperly.
+Lighthouse allows users to query the state of Eth2.0 using web-standard,
+RESTful HTTP/JSON APIs.
+
+There are two APIs served by Lighthouse:
+
+- [Beacon Node API](./api-bn.md)
+- [Validator Client API](./api-vc.md) (not yet released).


@ -1,5 +1,9 @@
 # HTTP API

+[OpenAPI Specification](https://ethereum.github.io/eth2.0-APIs/#/)
+
+## Beacon Node
+
 A Lighthouse beacon node can be configured to expose a HTTP server by supplying the `--http` flag. The default listen address is `localhost:5052`.

 The following CLI flags control the HTTP server:
@ -9,24 +13,10 @@ The following CLI flags control the HTTP server:
 - `--http-port`: specify the listen port of the server.
 - `--http-address`: specify the listen address of the server.

-The API is logically divided into several core endpoints, each documented in
-detail:
-
-Endpoint | Description |
-| --- | -- |
-[`/node`](./http/node.md) | General information about the beacon node.
-[`/beacon`](./http/beacon.md) | General information about the beacon chain.
-[`/validator`](./http/validator.md) | Provides functionality to validator clients.
-[`/consensus`](./http/consensus.md) | Proof-of-stake voting statistics.
-[`/network`](./http/network.md) | Information about the p2p network.
-[`/spec`](./http/spec.md) | Information about the specs that the client is running.
-[`/advanced`](./http/advanced.md) | Provides endpoints for advanced inspection of Lighthouse specific objects.
-[`/lighthouse`](./http/lighthouse.md) | Provides lighthouse specific endpoints.
-
-_Please note: The OpenAPI format at
-[SwaggerHub: Lighthouse REST
-API](https://app.swaggerhub.com/apis-docs/spble/lighthouse_rest_api/0.2.0) has
-been **deprecated**. This documentation is now the source of truth for the REST API._
+The schema of the API aligns with the standard Eth2 Beacon Node API as defined
+at [github.com/ethereum/eth2.0-APIs](https://github.com/ethereum/eth2.0-APIs).
+It is an easy-to-use RESTful HTTP/JSON API. An interactive specification is
+available [here](https://ethereum.github.io/eth2.0-APIs/#/).

 ## Troubleshooting


@ -1,115 +0,0 @@
# Lighthouse REST API: `/advanced`
The `/advanced` endpoints provide information about Lighthouse-specific data structures for advanced debugging.
## Endpoints
HTTP Path | Description |
| --- | -- |
[`/advanced/fork_choice`](#advancedfork_choice) | Get the `proto_array` fork choice object.
[`/advanced/operation_pool`](#advancedoperation_pool) | Get the Lighthouse `PersistedOperationPool` object.
## `/advanced/fork_choice`
Requests the `proto_array` fork choice object as represented in Lighthouse.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/advanced/fork_choice`
Method | GET
JSON Encoding | Object
Query Parameters | None
Typical Responses | 200
### Example Response
```json
{
"prune_threshold": 256,
"justified_epoch": 25,
"finalized_epoch": 24,
"nodes": [
{
"slot": 544,
"root": "0x27103c56d4427cb4309dd202920ead6381d54d43277c29cf0572ddf0d528e6ea",
"parent": null,
"justified_epoch": 16,
"finalized_epoch": 15,
"weight": 256000000000,
"best_child": 1,
"best_descendant": 296
},
{
"slot": 545,
"root": "0x09af0e8d4e781ea4280c9c969d168839c564fab3a03942e7db0bfbede7d4c745",
"parent": 0,
"justified_epoch": 16,
"finalized_epoch": 15,
"weight": 256000000000,
"best_child": 2,
"best_descendant": 296
},
],
"indices": {
"0xb935bb3651eeddcb2d2961bf307156850de982021087062033f02576d5df00a3": 59,
"0x8f4ec47a34c6c1d69ede64d27165d195f7e2a97c711808ce51f1071a6e12d5b9": 189,
"0xf675eba701ef77ee2803a130dda89c3c5673a604d2782c9e25ea2be300d7d2da": 173,
"0x488a483c8d5083faaf5f9535c051b9f373ba60d5a16e77ddb1775f248245b281": 37
}
}
```
_Truncated for brevity._
## `/advanced/operation_pool`
Requests the `PersistedOperationPool` object as represented in Lighthouse.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/advanced/operation_pool`
Method | GET
JSON Encoding | Object
Query Parameters | None
Typical Responses | 200
### Example Response
```json
{
"attestations": [
[
{
"v": [39, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 112, 118, 215, 252, 51, 186, 76, 156, 157, 99, 91, 4, 137, 195, 209, 224, 26, 233, 233, 184, 38, 89, 215, 177, 247, 97, 243, 119, 229, 69, 50, 90, 24, 0, 0, 0, 0, 0, 0, 0, 79, 37, 38, 210, 96, 235, 121, 142, 129, 136, 206, 214, 179, 132, 22, 19, 222, 213, 203, 46, 112, 192, 26, 5, 254, 26, 103, 170, 158, 205, 72, 3, 25, 0, 0, 0, 0, 0, 0, 0, 164, 50, 214, 67, 98, 13, 50, 180, 108, 232, 248, 109, 128, 45, 177, 23, 221, 24, 218, 211, 8, 152, 172, 120, 24, 86, 198, 103, 68, 164, 67, 202, 1, 0, 0, 0, 0, 0, 0, 0]
},
[
{
"aggregation_bits": "0x03",
"data": {
"slot": 807,
"index": 0,
"beacon_block_root": "0x7076d7fc33ba4c9c9d635b0489c3d1e01ae9e9b82659d7b1f761f377e545325a",
"source": {
"epoch": 24,
"root": "0x4f2526d260eb798e8188ced6b3841613ded5cb2e70c01a05fe1a67aa9ecd4803"
},
"target": {
"epoch": 25,
"root": "0xa432d643620d32b46ce8f86d802db117dd18dad30898ac781856c66744a443ca"
}
},
"signature": "0x8b1d624b0cd5a7a0e13944e90826878a230e3901db34ea87dbef5b145ade2fedbc830b6752a38a0937a1594211ab85b615d65f9eef0baccd270acca945786036695f4db969d9ff1693c505c0fe568b2fe9831ea78a74cbf7c945122231f04026"
}
]
]
],
"attester_slashings": [],
"proposer_slashings": [],
"voluntary_exits": []
}
```
_Truncated for brevity._


@ -1,784 +0,0 @@
# Lighthouse REST API: `/beacon`
The `/beacon` endpoints provide information about the canonical head of the
beacon chain and also historical information about beacon blocks and states.
## Endpoints
HTTP Path | Description |
| --- | -- |
[`/beacon/head`](#beaconhead) | Info about the block at the head of the chain.
[`/beacon/heads`](#beaconheads) | Returns a list of all known chain heads.
[`/beacon/block`](#beaconblock) | Get a `BeaconBlock` by slot or root.
[`/beacon/block_root`](#beaconblock_root) | Resolve a slot to a block root.
[`/beacon/fork`](#beaconfork) | Get the fork of the head of the chain.
[`/beacon/genesis_time`](#beacongenesis_time) | Get the genesis time from the beacon state.
[`/beacon/genesis_validators_root`](#beacongenesis_validators_root) | Get the genesis validators root.
[`/beacon/validators`](#beaconvalidators) | Query for one or more validators.
[`/beacon/validators/all`](#beaconvalidatorsall) | Get all validators.
[`/beacon/validators/active`](#beaconvalidatorsactive) | Get all active validators.
[`/beacon/state`](#beaconstate) | Get a `BeaconState` by slot or root.
[`/beacon/state_root`](#beaconstate_root) | Resolve a slot to a state root.
[`/beacon/state/genesis`](#beaconstategenesis) | Get a `BeaconState` at genesis.
[`/beacon/committees`](#beaconcommittees) | Get the shuffling for an epoch.
[`/beacon/proposer_slashing`](#beaconproposer_slashing) | Insert a proposer slashing
[`/beacon/attester_slashing`](#beaconattester_slashing) | Insert an attester slashing
## `/beacon/head`
Requests information about the head of the beacon chain, from the node's
perspective.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/beacon/head`
Method | GET
JSON Encoding | Object
Query Parameters | None
Typical Responses | 200
### Example Response
```json
{
"slot": 37923,
"block_root": "0xe865d4805395a0776b8abe46d714a9e64914ab8dc5ff66624e5a1776bcc1684b",
"state_root": "0xe500e3567ab273c9a6f8a057440deff476ab236f0983da27f201ee9494a879f0",
"finalized_slot": 37856,
"finalized_block_root": "0xbdae152b62acef1e5c332697567d2b89e358628790b8273729096da670b23e86",
"justified_slot": 37888,
"justified_block_root": "0x01c2f516a407d8fdda23cad4ed4381e4ab8913d638f935a2fe9bd00d6ced5ec4",
"previous_justified_slot": 37856,
"previous_justified_block_root": "0xbdae152b62acef1e5c332697567d2b89e358628790b8273729096da670b23e86"
}
```
## `/beacon/heads`
Returns the roots of all known head blocks. Only one of these roots is the
canonical head and that is decided by the fork choice algorithm. See [`/beacon/head`](#beaconhead) for the canonical head.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/beacon/heads`
Method | GET
JSON Encoding | Object
Query Parameters | None
Typical Responses | 200
### Example Response
```json
[
{
"beacon_block_root": "0x226b2fd7c5f3d31dbb21444b96dfafe715f0017cd16545ecc4ffa87229496a69",
"beacon_block_slot": 38373
},
{
"beacon_block_root": "0x41ed5b253c4fc841cba8a6d44acbe101866bc674c3cfa3c4e9f7388f465aa15b",
"beacon_block_slot": 38375
}
]
```
## `/beacon/block`
Request that the node return a beacon chain block that matches the provided
criteria (a block `root` or beacon chain `slot`). Only one of the parameters
should be provided as a criteria.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/beacon/block`
Method | GET
JSON Encoding | Object
Query Parameters | `slot`, `root`
Typical Responses | 200, 404
### Parameters
Accepts **only one** of the following parameters:
- `slot` (`Slot`): Query by slot number. Any block returned must be in the canonical chain (i.e.,
either the head or an ancestor of the head).
- `root` (`Bytes32`): Query by tree hash root. A returned block is not required to be in the
canonical chain.
### Returns
Returns an object containing a single [`SignedBeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#signedbeaconblock) and the block root of the inner [`BeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#beaconblock).
### Example Response
```json
{
"root": "0xc35ddf4e71c31774e0594bd7eb32dfe50b54dbc40abd594944254b4ec8895196",
"beacon_block": {
"message": {
"slot": 0,
"proposer_index": 14,
"parent_root": "0x0000000000000000000000000000000000000000000000000000000000000000",
"state_root": "0xf15690b6be4ed42ea1ee0741eb4bfd4619d37be8229b84b4ddd480fb028dcc8f",
"body": {
"randao_reveal": "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"eth1_data": {
"deposit_root": "0x0000000000000000000000000000000000000000000000000000000000000000",
"deposit_count": 0,
"block_hash": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"graffiti": "0x0000000000000000000000000000000000000000000000000000000000000000",
"proposer_slashings": [],
"attester_slashings": [],
"attestations": [],
"deposits": [],
"voluntary_exits": []
}
},
"signature": "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
}
}
```
## `/beacon/block_root`
Returns the block root for the given slot in the canonical chain. If there
is a re-org, the same slot may return a different root.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/beacon/block_root`
Method | GET
JSON Encoding | Object
Query Parameters | `slot`
Typical Responses | 200, 404
## Parameters
- `slot` (`Slot`): the slot to be resolved to a root.
### Example Response
```json
"0xc35ddf4e71c31774e0594bd7eb32dfe50b54dbc40abd594944254b4ec8895196"
```
## `/beacon/committees`
Request the committees (a.k.a. "shuffling") for all slots and committee indices
in a given `epoch`.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/beacon/committees`
Method | GET
JSON Encoding | Object
Query Parameters | `epoch`
Typical Responses | 200/500
### Parameters
The `epoch` (`Epoch`) query parameter is required and defines the epoch for
which the committees will be returned. All slots contained within the response will
be inside this epoch.
### Returns
A list of beacon committees.
### Example Response
```json
[
{
"slot": 4768,
"index": 0,
"committee": [
1154,
492,
9667,
3089,
8987,
1421,
224,
11243,
2127,
2329,
188,
482,
486
]
},
{
"slot": 4768,
"index": 1,
"committee": [
5929,
8482,
5528,
6130,
14343,
9777,
10808,
12739,
15234,
12819,
5423,
6320,
9991
]
}
]
```
_Truncated for brevity._
## `/beacon/fork`
Request that the node return the `fork` of the current head.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/beacon/fork`
Method | GET
JSON Encoding | Object
Query Parameters | None
Typical Responses | 200
### Returns
Returns an object containing the [`Fork`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#fork) of the current head.
### Example Response
```json
{
"previous_version": "0x00000000",
"current_version": "0x00000000",
"epoch": 0
}
```
## `/beacon/genesis_time`
Request that the node return the genesis time from the beacon state.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/beacon/genesis_time`
Method | GET
JSON Encoding | Object
Query Parameters | None
Typical Responses | 200
### Returns
Returns an object containing the genesis time.
### Example Response
```json
1581576353
```
## `/beacon/genesis_validators_root`
Request that the node return the genesis validators root from the beacon state.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/beacon/genesis_validators_root`
Method | GET
JSON Encoding | Object
Query Parameters | None
Typical Responses | 200
### Returns
Returns an object containing the genesis validators root.
### Example Response
```json
0x4fbf23439a7a9b9dd91650e64e8124012dde5e2ea2940c552b86f04eb47f95de
```
## `/beacon/validators`
Request that the node returns information about one or more validator public
keys. This request takes the form of a `POST` request to allow sending a large
number of pubkeys in the request.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/beacon/validators`
Method | POST
JSON Encoding | Object
Query Parameters | None
Typical Responses | 200
### Request Body
Expects the following object in the POST request body:
```
{
state_root: Bytes32,
pubkeys: [PublicKey]
}
```
The `state_root` field indicates which `BeaconState` should be used to collect
the information. The `state_root` is optional and omitting it will result in
the canonical head state being used.
### Returns
Returns an object describing several aspects of the given validator.
### Example
### Request Body
```json
{
"pubkeys": [
"0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16",
"0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42"
]
}
```
_Note: for demonstration purposes the second pubkey is some unknown pubkey._
### Response Body
```json
[
{
"pubkey": "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16",
"validator_index": 14935,
"balance": 3228885987,
"validator": {
"pubkey": "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16",
"withdrawal_credentials": "0x00b7bec22d5bda6b2cca1343d4f640d0e9ccc204a06a73703605c590d4c0d28e",
"effective_balance": 3200000000,
"slashed": false,
"activation_eligibility_epoch": 0,
"activation_epoch": 0,
"exit_epoch": 18446744073709551615,
"withdrawable_epoch": 18446744073709551615
}
},
{
"pubkey": "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42",
"validator_index": null,
"balance": null,
"validator": null
}
]
```
## `/beacon/validators/all`
Returns all validators.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/beacon/validators/all`
Method | GET
JSON Encoding | Object
Query Parameters | `state_root` (optional)
Typical Responses | 200
### Parameters
The optional `state_root` (`Bytes32`) query parameter indicates which
`BeaconState` should be used to collect the information. When omitted, the
canonical head state will be used.
### Returns
The return format is identical to the [`/beacon/validators`](#beaconvalidators) response body.
## `/beacon/validators/active`
Returns all validators that are active in the state defined by `state_root`.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/beacon/validators/active`
Method | GET
JSON Encoding | Object
Query Parameters | `state_root` (optional)
Typical Responses | 200
### Parameters
The optional `state_root` (`Bytes32`) query parameter indicates which
`BeaconState` should be used to collect the information. When omitted, the
canonical head state will be used.
### Returns
The return format is identical to the [`/beacon/validators`](#beaconvalidators) response body.
## `/beacon/state`
Request that the node return a beacon chain state that matches the provided
criteria (a state `root` or beacon chain `slot`). Only one of the parameters
should be provided as a criteria.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/beacon/state`
Method | GET
JSON Encoding | Object
Query Parameters | `slot`, `root`
Typical Responses | 200, 404
### Parameters
Accepts **only one** of the following parameters:
- `slot` (`Slot`): Query by slot number. Any state returned must be in the canonical chain (i.e.,
either the head or an ancestor of the head).
- `root` (`Bytes32`): Query by tree hash root. A returned state is not required to be in the
canonical chain.
### Returns
Returns an object containing a single
[`BeaconState`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#beaconstate)
and its tree hash root.
### Example Response
```json
{
"root": "0x528e54ca5d4c957729a73f40fc513ae312e054c7295775c4a2b21f423416a72b",
"beacon_state": {
"genesis_time": 1575652800,
"genesis_validators_root": "0xa8a9226edee1b2627fb4117d7dea4996e64dec2998f37f6e824f74f2ce39a538",
"slot": 18478
}
}
```
_Truncated for brevity._
## `/beacon/state_root`
Returns the state root for the given slot in the canonical chain. If there
is a re-org, the same slot may return a different root.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/beacon/state_root`
Method | GET
JSON Encoding | Object
Query Parameters | `slot`
Typical Responses | 200, 404
## Parameters
- `slot` (`Slot`): the slot to be resolved to a root.
### Example Response
```json
"0xf15690b6be4ed42ea1ee0741eb4bfd4619d37be8229b84b4ddd480fb028dcc8f"
```
## `/beacon/state/genesis`
Request that the node return a beacon chain state at genesis (slot 0).
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/beacon/state/genesis`
Method | GET
JSON Encoding | Object
Query Parameters | None
Typical Responses | 200
### Returns
Returns an object containing the genesis
[`BeaconState`](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#beaconstate).
### Example Response
```json
{
"genesis_time": 1581576353,
"slot": 0,
"fork": {
"previous_version": "0x00000000",
"current_version": "0x00000000",
"epoch": 0
},
}
```
_Truncated for brevity._
## `/beacon/state/committees`
Request the beacon committees for a given `epoch`.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/beacon/state/genesis`
Method | GET
JSON Encoding | Object
Query Parameters | `epoch`
Typical Responses | 200
### Returns
Returns an object containing the committees for a given epoch.
### Example Response
```json
[
{"slot":64,"index":0,"committee":[]},
{"slot":65,"index":0,"committee":[3]},
{"slot":66,"index":0,"committee":[]},
{"slot":67,"index":0,"committee":[14]},
{"slot":68,"index":0,"committee":[]},
{"slot":69,"index":0,"committee":[9]},
{"slot":70,"index":0,"committee":[]},
{"slot":71,"index":0,"committee":[11]},
{"slot":72,"index":0,"committee":[]},
{"slot":73,"index":0,"committee":[5]},
{"slot":74,"index":0,"committee":[]},
{"slot":75,"index":0,"committee":[15]},
{"slot":76,"index":0,"committee":[]},
{"slot":77,"index":0,"committee":[0]}
]
```
_Truncated for brevity._
## `/beacon/attester_slashing`
Accepts an `attester_slashing` and verifies it. If it is valid, it is added to the operations pool for potential inclusion in a future block. Returns a 400 error if the `attester_slashing` is invalid.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/beacon/attester_slashing`
Method | POST
JSON Encoding | Object
Query Parameters | None
Typical Responses | 200/400
### Parameters
Expects the following object in the POST request body:
```
{
attestation_1: {
attesting_indices: [u64],
data: {
slot: Slot,
index: u64,
beacon_block_root: Bytes32,
source: {
epoch: Epoch,
root: Bytes32
},
target: {
epoch: Epoch,
root: Bytes32
}
}
signature: Bytes32
},
attestation_2: {
attesting_indices: [u64],
data: {
slot: Slot,
index: u64,
beacon_block_root: Bytes32,
source: {
epoch: Epoch,
root: Bytes32
},
target: {
epoch: Epoch,
root: Bytes32
}
}
signature: Bytes32
}
}
```
### Returns
Returns `true` if the attester slashing was inserted successfully, or the corresponding error if it failed.
### Example
### Request Body
```json
{
"attestation_1": {
"attesting_indices": [0],
"data": {
"slot": 1,
"index": 0,
"beacon_block_root": "0x0000000000000000000000000000000000000000000000000100000000000000",
"source": {
"epoch": 1,
"root": "0x0000000000000000000000000000000000000000000000000100000000000000"
},
"target": {
"epoch": 1,
"root": "0x0000000000000000000000000000000000000000000000000100000000000000"
}
},
"signature": "0xb47f7397cd944b8d5856a13352166bbe74c85625a45b14b7347fc2c9f6f6f82acee674c65bc9ceb576fcf78387a6731c0b0eb3f8371c70db2da4e7f5dfbc451730c159d67263d3db56b6d0e009e4287a8ba3efcacac30b3ae3447e89dc71b5b9"
},
"attestation_2": {
"attesting_indices": [0],
"data": {
"slot": 1,
"index": 0,
"beacon_block_root": "0x0000000000000000000000000000000000000000000000000100000000000000",
"source": {
"epoch": 1,
"root": "0x0000000000000000000000000000000000000000000000000100000000000000"
},
"target": {
"epoch": 1,
"root": "0x0000000000000000000000000000000000000000000000000200000000000000"
}
},
"signature": "0x93fef587a63acf72aaf8df627718fd43cb268035764071f802ffb4370a2969d226595cc650f4c0bf2291ae0c0a41fcac1700f318603d75d34bcb4b9f4a8368f61eeea0e1f5d969d92d5073ba5fbadec102b45ec87d418d25168d2e3c74b9fcbb"
}
}
```
_Note: data sent here is for demonstration purposes only_
## `/beacon/proposer_slashing`
Accepts a `proposer_slashing` and verifies it. If it is valid, it is added to the operations pool for potential inclusion in a future block. Returns a 400 error if the `proposer_slashing` is invalid.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/beacon/proposer_slashing`
Method | POST
JSON Encoding | Object
Query Parameters | None
Typical Responses | 200/400
### Request Body
Expects the following object in the POST request body:
```
{
proposer_index: u64,
header_1: {
slot: Slot,
parent_root: Bytes32,
state_root: Bytes32,
body_root: Bytes32,
signature: Bytes32
},
header_2: {
slot: Slot,
parent_root: Bytes32,
state_root: Bytes32,
body_root: Bytes32,
signature: Bytes32
}
}
```
### Returns
Returns `true` if the proposer slashing was inserted successfully, or the corresponding error if it failed.
### Example
### Request Body
```json
{
"proposer_index": 0,
"header_1": {
"slot": 0,
"parent_root": "0x0101010101010101010101010101010101010101010101010101010101010101",
"state_root": "0x0101010101010101010101010101010101010101010101010101010101010101",
"body_root": "0x0101010101010101010101010101010101010101010101010101010101010101",
"signature": "0xb8970d1342c6d5779c700ec366efd0ca819937ca330960db3ca5a55eb370a3edd83f4cbb2f74d06e82f934fcbd4bb80609a19c2254cc8b3532a4efff9e80edf312ac735757c059d77126851e377f875593e64ba50d1dffe69a809a409202dd12"
},
"header_2": {
"slot": 0,
"parent_root": "0x0202020202020202020202020202020202020202020202020202020202020202",
"state_root": "0x0101010101010101010101010101010101010101010101010101010101010101",
"body_root": "0x0101010101010101010101010101010101010101010101010101010101010101",
"signature": "0xb60e6b348698a34e59b22e0af96f8809f977f00f95d52375383ade8d22e9102270a66c6d52b0434214897e11ca4896871510c01b3fd74d62108a855658d5705fcfc4ced5136264a1c6496f05918576926aa191b1ad311b7e27f5aa2167aba294"
}
}
```
_Note: data sent here is for demonstration purposes only_


@ -1,182 +0,0 @@
# Lighthouse REST API: `/lighthouse`
The `/lighthouse` endpoints provide lighthouse-specific information about the beacon node.
## Endpoints
HTTP Path | Description |
| --- | -- |
[`/lighthouse/syncing`](#lighthousesyncing) | Get the node's syncing status
[`/lighthouse/peers`](#lighthousepeers) | Get the peers info known by the beacon node
[`/lighthouse/connected_peers`](#lighthousepeers) | Get the connected_peers known by the beacon node
## `/lighthouse/syncing`
Requests the syncing state of a Lighthouse beacon node. Lighthouse has a
custom sync protocol; this request gets Lighthouse-specific sync information.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/lighthouse/syncing`
Method | GET
JSON Encoding | Object
Query Parameters | None
Typical Responses | 200
### Example Response
If the node is undergoing a finalization sync:
```json
{
"SyncingFinalized": {
"start_slot": 10,
"head_slot": 20,
"head_root":"0x74020d0e3c3c02d2ea6279d5760f7d0dd376c4924beaaec4d5c0cefd1c0c4465"
}
}
```
If the node is undergoing a head chain sync:
```json
{
"SyncingHead": {
"start_slot":0,
"head_slot":1195
}
}
```
If the node is synced:
```json
{
"Synced"
}
```
## `/lighthouse/peers`
Get all known peers info from the beacon node.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/lighthouse/peers`
Method | GET
JSON Encoding | Object
Query Parameters | None
Typical Responses | 200
### Example Response
```json
[
{
"peer_id" : "16Uiu2HAmTEinipUS3haxqucrn7d7SmCKx5XzAVbAZCiNW54ncynG",
"peer_info" : {
"_status" : "Healthy",
"client" : {
"agent_string" : "github.com/libp2p/go-libp2p",
"kind" : "Prysm",
"os_version" : "unknown",
"protocol_version" : "ipfs/0.1.0",
"version" : "unknown"
},
"connection_status" : {
"Disconnected" : {
"since" : 3
}
},
"listening_addresses" : [
"/ip4/10.3.58.241/tcp/9001",
"/ip4/35.172.14.146/tcp/9001",
"/ip4/35.172.14.146/tcp/9001"
],
"meta_data" : {
"attnets" : "0x0000000000000000",
"seq_number" : 0
},
"reputation" : 20,
"sync_status" : {
"Synced" : {
"status_head_slot" : 18146
}
}
}
},
{
"peer_id" : "16Uiu2HAm8XZfPv3YjktCjitSRtfS7UfHfEvpiUyHrdiX6uAD55xZ",
"peer_info" : {
"_status" : "Healthy",
"client" : {
"agent_string" : null,
"kind" : "Unknown",
"os_version" : "unknown",
"protocol_version" : "unknown",
"version" : "unknown"
},
"connection_status" : {
"Disconnected" : {
"since" : 5
}
},
"listening_addresses" : [],
"meta_data" : {
"attnets" : "0x0900000000000000",
"seq_number" : 0
},
"reputation" : 20,
"sync_status" : "Unknown"
}
},
]
```
## `/lighthouse/connected_peers`
Get info for all peers currently connected to the beacon node.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/lighthouse/connected_peers`
Method | GET
JSON Encoding | Object
Query Parameters | None
Typical Responses | 200
### Example Response
```json
[
{
"peer_id" : "16Uiu2HAm8XZfPv3YjktCjitSRtfS7UfHfEvpiUyHrdiX6uAD55xZ",
"peer_info" : {
"_status" : "Healthy",
"client" : {
"agent_string" : null,
"kind" : "Unknown",
"os_version" : "unknown",
"protocol_version" : "unknown",
"version" : "unknown"
},
"connection_status" : {
"Connected" : {
"in" : 5,
"out" : 2
}
},
"listening_addresses" : [],
"meta_data" : {
"attnets" : "0x0900000000000000",
"seq_number" : 0
},
"reputation" : 20,
"sync_status" : "Unknown"
}
},
]
```


@ -1,148 +0,0 @@
# Lighthouse REST API: `/network`
The `/network` endpoints provide information about the p2p network that
Lighthouse uses to communicate with other beacon nodes.
## Endpoints
HTTP Path | Description |
| --- | -- |
[`/network/enr`](#networkenr) | Get the local node's `ENR` as base64.
[`/network/peer_count`](#networkpeer_count) | Get the count of connected peers.
[`/network/peer_id`](#networkpeer_id) | Get a node's libp2p `PeerId`.
[`/network/peers`](#networkpeers) | List a node's connected peers (as `PeerIds`).
[`/network/listen_port`](#networklisten_port) | Get a node's libp2p listening port.
[`/network/listen_addresses`](#networklisten_addresses) | Get a list of libp2p multiaddr the node is listening on.
## `/network/enr`
Requests the beacon node's local `ENR` (Ethereum Node Record), base64 encoded.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/network/enr`
Method | GET
JSON Encoding | String (base64)
Query Parameters | None
Typical Responses | 200
### Example Response
```json
"-IW4QPYyGkXJSuJ2Eji8b-m4PTNrW4YMdBsNOBrYAdCk8NLMJcddAiQlpcv6G_hdNjiLACOPTkqTBhUjnC0wtIIhyQkEgmlwhKwqAPqDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhA1sBKo0yCfw4Z_jbggwflNfftjwKACu-a-CoFAQHJnrm"
```
## `/network/peer_count`
Requests the count of peers connected to the client.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/network/peer_count`
Method | GET
JSON Encoding | Number
Query Parameters | None
Typical Responses | 200
### Example Response
```json
5
```
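Because the response is a bare JSON number, it can be deserialized straight into an integer. A minimal `reqwest` sketch (the port and crates are assumptions, as in the sketches above):
```rust
#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    // The endpoint returns a bare JSON number, so deserialize directly to u64.
    let peer_count: u64 = reqwest::get("http://localhost:5052/network/peer_count")
        .await?
        .json()
        .await?;
    println!("connected peers: {}", peer_count);
    Ok(())
}
```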
## `/network/peer_id`
Requests the beacon node's local `PeerId`.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/network/peer_id`
Method | GET
JSON Encoding | String (base58)
Query Parameters | None
Typical Responses | 200
### Example Response
```json
"QmVFcULBYZecPdCKgGmpEYDqJLqvMecfhJadVBtB371Avd"
```
## `/network/peers`
Requests one `MultiAddr` for each peer connected to the beacon node.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/network/peers`
Method | GET
JSON Encoding | [String] (base58)
Query Parameters | None
Typical Responses | 200
### Example Response
```json
[
"QmaPGeXcfKFMU13d8VgbnnpeTxcvoFoD9bUpnRGMUJ1L9w",
"QmZt47cP8V96MgiS35WzHKpPbKVBMqr1eoBNTLhQPqpP3m"
]
```
## `/network/listen_port`
Requests the TCP port that the client's libp2p service is listening on.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/network/listen_port`
Method | GET
JSON Encoding | Number
Query Parameters | None
Typical Responses | 200
### Example Response
```json
9000
```
## `/network/listen_addresses`
Requests the list of multiaddr that the client's libp2p service is listening on.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/network/listen_addresses`
Method | GET
JSON Encoding | Array
Query Parameters | None
Typical Responses | 200
### Example Response
```json
[
"/ip4/127.0.0.1/tcp/9000",
"/ip4/192.168.31.115/tcp/9000",
"/ip4/172.24.0.1/tcp/9000",
"/ip4/172.21.0.1/tcp/9000",
"/ip4/172.17.0.1/tcp/9000",
"/ip4/172.18.0.1/tcp/9000",
"/ip4/172.19.0.1/tcp/9000",
"/ip4/172.42.0.1/tcp/9000",
"/ip6/::1/tcp/9000"
]
```
@ -1,91 +0,0 @@
# Lighthouse REST API: `/node`
The `/node` endpoints provide information about the Lighthouse beacon node.
## Endpoints
HTTP Path | Description |
| --- | -- |
[`/node/version`](#nodeversion) | Get the node's version.
[`/node/syncing`](#nodesyncing) | Get the node's syncing status.
[`/node/health`](#nodehealth) | Get the node's health.
## `/node/version`
Requests the beacon node's version.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/node/version`
Method | GET
JSON Encoding | String
Query Parameters | None
Typical Responses | 200
### Example Response
```json
"Lighthouse-0.2.0-unstable"
```
## `/node/syncing`
Requests the syncing status of the beacon node.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/node/syncing`
Method | GET
JSON Encoding | Object
Query Parameters | None
Typical Responses | 200
### Example Response
```json
{
  "is_syncing": true,
  "sync_status": {
    "starting_slot": 0,
    "current_slot": 100,
    "highest_slot": 200
  }
}
```
## `/node/health`
Requests information about the health of the beacon node.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/node/health`
Method | GET
JSON Encoding | Object
Query Parameters | None
Typical Responses | 200
### Example Response
```json
{
"pid": 96160,
"pid_num_threads": 30,
"pid_mem_resident_set_size": 55476224,
"pid_mem_virtual_memory_size": 2081382400,
"sys_virt_mem_total": 16721076224,
"sys_virt_mem_available": 7423197184,
"sys_virt_mem_used": 8450183168,
"sys_virt_mem_free": 3496345600,
"sys_virt_mem_percent": 55.605743,
"sys_loadavg_1": 1.56,
"sys_loadavg_5": 2.61,
"sys_loadavg_15": 2.43
}
```
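A minimal sketch of reading this response into a typed struct; the struct below is illustrative (any subset of the fields works, since `serde` ignores unknown fields by default):
```rust
use serde::Deserialize;

// Illustrative subset of the health fields shown above.
#[derive(Debug, Deserialize)]
struct NodeHealth {
    pid: u32,
    pid_num_threads: i32,
    sys_loadavg_1: f64,
}

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let health: NodeHealth = reqwest::get("http://localhost:5052/node/health")
        .await?
        .json()
        .await?;
    println!("{:?}", health);
    Ok(())
}
```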
@ -1,154 +0,0 @@
# Lighthouse REST API: `/spec`
The `/spec` endpoints provide information about the Eth2.0 specification that the node is running.
## Endpoints
HTTP Path | Description |
| --- | -- |
[`/spec`](#spec) | Get the full spec object that the node is running.
[`/spec/slots_per_epoch`](#specslots_per_epoch) | Get the number of slots per epoch.
[`/spec/eth2_config`](#speceth2_config) | Get the full Eth2 config object.
## `/spec`
Requests the full spec object that the node is running.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/spec`
Method | GET
JSON Encoding | Object
Query Parameters | None
Typical Responses | 200
### Example Response
```json
{
"genesis_slot": 0,
"base_rewards_per_epoch": 4,
"deposit_contract_tree_depth": 32,
"max_committees_per_slot": 64,
"target_committee_size": 128,
"min_per_epoch_churn_limit": 4,
"churn_limit_quotient": 65536,
"shuffle_round_count": 90,
"min_genesis_active_validator_count": 16384,
"min_genesis_time": 1578009600,
"min_deposit_amount": 1000000000,
"max_effective_balance": 32000000000,
"ejection_balance": 16000000000,
"effective_balance_increment": 1000000000,
"genesis_fork_version": "0x00000000",
"bls_withdrawal_prefix_byte": "0x00",
"genesis_delay": 172800,
"milliseconds_per_slot": 12000,
"min_attestation_inclusion_delay": 1,
"min_seed_lookahead": 1,
"max_seed_lookahead": 4,
"min_epochs_to_inactivity_penalty": 4,
"min_validator_withdrawability_delay": 256,
"shard_committee_period": 2048,
"base_reward_factor": 64,
"whistleblower_reward_quotient": 512,
"proposer_reward_quotient": 8,
"inactivity_penalty_quotient": 33554432,
"min_slashing_penalty_quotient": 32,
"domain_beacon_proposer": 0,
"domain_beacon_attester": 1,
"domain_randao": 2,
"domain_deposit": 3,
"domain_voluntary_exit": 4,
"safe_slots_to_update_justified": 8,
"eth1_follow_distance": 1024,
"seconds_per_eth1_block": 14,
"boot_nodes": [],
"network_id": 1
}
```
## `/spec/eth2_config`
Requests the full `Eth2Config` object.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/spec/eth2_config`
Method | GET
JSON Encoding | Object
Query Parameters | None
Typical Responses | 200
### Example Response
```json
{
"spec_constants": "mainnet",
"spec": {
"genesis_slot": 0,
"base_rewards_per_epoch": 4,
"deposit_contract_tree_depth": 32,
"max_committees_per_slot": 64,
"target_committee_size": 128,
"min_per_epoch_churn_limit": 4,
"churn_limit_quotient": 65536,
"shuffle_round_count": 90,
"min_genesis_active_validator_count": 16384,
"min_genesis_time": 1578009600,
"min_deposit_amount": 1000000000,
"max_effective_balance": 32000000000,
"ejection_balance": 16000000000,
"effective_balance_increment": 1000000000,
"genesis_fork_version": "0x00000000",
"bls_withdrawal_prefix_byte": "0x00",
"genesis_delay": 172800,
"milliseconds_per_slot": 12000,
"min_attestation_inclusion_delay": 1,
"min_seed_lookahead": 1,
"max_seed_lookahead": 4,
"min_epochs_to_inactivity_penalty": 4,
"min_validator_withdrawability_delay": 256,
"shard_committee_period": 2048,
"base_reward_factor": 64,
"whistleblower_reward_quotient": 512,
"proposer_reward_quotient": 8,
"inactivity_penalty_quotient": 33554432,
"min_slashing_penalty_quotient": 32,
"domain_beacon_proposer": 0,
"domain_beacon_attester": 1,
"domain_randao": 2,
"domain_deposit": 3,
"domain_voluntary_exit": 4,
"safe_slots_to_update_justified": 8,
"eth1_follow_distance": 1024,
"seconds_per_eth1_block": 14,
"boot_nodes": [],
"network_id": 1
}
}
```
## `/spec/slots_per_epoch`
Requests the `SLOTS_PER_EPOCH` parameter from the specs that the node is running.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/spec/slots_per_epoch`
Method | GET
JSON Encoding | Number
Query Parameters | None
Typical Responses | 200
### Example Response
```json
32
```
@ -1,545 +0,0 @@
# Lighthouse REST API: `/validator`
The `/validator` endpoints provide the minimum functionality required for a validator
client to connect to the beacon node and produce blocks and attestations.
## Endpoints
HTTP Path | HTTP Method | Description |
| - | - | ---- |
[`/validator/duties`](#validatorduties) | POST | Provides block and attestation production information for validators.
[`/validator/subscribe`](#validatorsubscribe) | POST | Subscribes a list of validators to the beacon node for a particular duty/slot.
[`/validator/duties/all`](#validatordutiesall) | GET | Provides block and attestation production information for all validators.
[`/validator/duties/active`](#validatordutiesactive) | GET | Provides block and attestation production information for all active validators.
[`/validator/block`](#validatorblock-get) | GET | Retrieves the current beacon block for the validator to publish.
[`/validator/block`](#validatorblock-post) | POST | Publishes a signed block to the network.
[`/validator/attestation`](#validatorattestation) | GET | Retrieves the current best attestation for a validator to publish.
[`/validator/aggregate_attestation`](#validatoraggregate_attestation) | GET | Gets an aggregate attestation for validators to sign and publish.
[`/validator/attestations`](#validatorattestations) | POST | Publishes a list of raw unaggregated attestations to their appropriate subnets.
[`/validator/aggregate_and_proofs`](#validatoraggregate_and_proofs) | POST | Publishes a list of `SignedAggregateAndProof` objects for validators who are aggregators.
## `/validator/duties`
Request information about when a validator must produce blocks and attestations
at some given `epoch`. The information returned always refers to the canonical
chain and the same input parameters may yield different results after a re-org.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/validator/duties`
Method | POST
JSON Encoding | Object
Query Parameters | None
Typical Responses | 200
### Request Body
Expects the following object in the POST request body:
```
{
epoch: Epoch,
pubkeys: [PublicKey]
}
```
Duties are assigned on a per-epoch basis; all duties returned will contain
slots that are inside the given `epoch`. A set of duties will be returned for
each of the `pubkeys`.
Validators who are not known to the beacon chain (e.g., have not yet deposited)
will have `null` values for most fields.
### Returns
A set of duties for each given pubkey.
### Example
#### Request Body
```json
{
"epoch": 1203,
"pubkeys": [
"0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16",
"0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42"
]
}
```
_Note: for demonstration purposes the second pubkey is some unknown pubkey._
#### Response Body
```json
[
{
"validator_pubkey": "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16",
"validator_index": 14935,
"attestation_slot": 38511,
"attestation_committee_index": 3,
"attestation_committee_position": 39,
"block_proposal_slots": [],
"aggregator_modulo": 5,
},
{
"validator_pubkey": "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42",
"validator_index": null,
"attestation_slot": null,
"attestation_committee_index": null,
"attestation_committee_position": null,
"block_proposal_slots": []
"aggregator_modulo": null,
}
]
```
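For illustration, the same request can be issued from Rust by POSTing the body above; a minimal sketch using `reqwest` (with its `json` feature) and `serde_json`, where the port and pubkey are placeholders taken from the example:
```rust
use serde_json::{json, Value};

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let body = json!({
        "epoch": 1203,
        "pubkeys": [
            "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16"
        ]
    });

    // POST the duties request and print the per-validator duties that come back.
    let duties: Value = reqwest::Client::new()
        .post("http://localhost:5052/validator/duties")
        .json(&body)
        .send()
        .await?
        .json()
        .await?;
    println!("{}", duties);
    Ok(())
}
```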
## `/validator/duties/all`
Returns the duties for all validators, equivalent to calling [Validator
Duties](#validatorduties) while providing all known validator public keys.
Considering that duties for non-active validators will just be `null`, it is
generally more efficient to query using [Active Validator
Duties](#validatordutiesactive).
This endpoint will only return validators that were in the beacon state
in the given epoch. For example, if the query epoch is 10 and some validator
deposit was included in epoch 11, that validator will not be included in the
result.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/validator/duties/all`
Method | GET
JSON Encoding | Object
Query Parameters | `epoch`
Typical Responses | 200
### Parameters
The duties returned will all be inside the given `epoch` (`Epoch`) query
parameter. This parameter is required.
### Returns
The return format is identical to the [Validator Duties](#validatorduties) response body.
## `/validator/duties/active`
Returns the duties for all active validators, equivalent to calling [Validator
Duties](#validatorduties) while providing all known validator public keys that
are active in the given epoch.
This endpoint will only return validators that were in the beacon state
in the given epoch. For example, if the query epoch is 10 and some validator
deposit was included in epoch 11, that validator will not be included in the
result.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/validator/duties/active`
Method | GET
JSON Encoding | Object
Query Parameters | `epoch`
Typical Responses | 200
### Parameters
The duties returned will all be inside the given `epoch` (`Epoch`) query
parameter. This parameter is required.
### Returns
The return format is identical to the [Validator Duties](#validatorduties) response body.
## `/validator/subscribe`
Posts a list of `ValidatorSubscription` objects to subscribe validators to
particular slots to perform attestation duties.
This informs the beacon node to search for peers and subscribe to the
attestation subnets required to perform those duties.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/validator/subscribe`
Method | POST
JSON Encoding | Object
Query Parameters | None
Typical Responses | 200
### Request Body
Expects the following object in the POST request body:
```
[
{
validator_index: 10,
attestation_committee_index: 12,
slot: 3,
is_aggregator: true
}
]
```
The `is_aggregator` field informs the beacon node whether the validator is an
aggregator for this slot/committee.
### Returns
Returns a null object on success, or an error describing any failure.
## `/validator/block` GET
Produces and returns an unsigned `BeaconBlock` object.
The block will be produced with the given `slot` and the parent block will be the
highest block in the canonical chain that has a slot less than `slot`. The
block will still be produced if some other block is also known to be at `slot`
(i.e., it may produce a block that would be slashable if signed).
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/validator/block`
Method | GET
JSON Encoding | Object
Query Parameters | `slot`, `randao_reveal`
Typical Responses | 200
### Parameters
- `slot` (`Slot`): The slot number for which the block is to be produced.
- `randao_reveal` (`Signature`): A 96-byte `Signature` for the randomness (the RANDAO reveal).
### Returns
Returns a `BeaconBlock` object.
#### Response Body
```json
{
"slot": 33,
"parent_root": "0xf54de54bd33e33aee4706cffff4bd991bcbf522f2551ab007180479c63f4fe912",
"state_root": "0x615c887bad27bc05754d627d941e1730e1b4c77b2eb4378c195ac8a8203bbf26",
"body": {
"randao_reveal": "0x8d7b2a32b026e9c79aae6ec6b83eabae89d60cacd65ac41ed7d2f4be9dd8c89c1bf7cd3d700374e18d03d12f6a054c23006f64f0e4e8b7cf37d6ac9a4c7d815c858120c54673b7d3cb2bb1550a4d659eaf46e34515677c678b70d6f62dbf89f",
"eth1_data": {
"deposit_root": "0x66687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925",
"deposit_count": 8,
"block_hash": "0x2b32db6c2c0a6235fb1397e8225ea85e0f0e6e8c7b126d0016ccbde0e667151e"
},
"graffiti": "0x736967702f6c69676874686f7573652d302e312e312d7076572656c65617365",
"proposer_slashings": [],
"attester_slashings": [],
"attestations": [],
"deposits": [],
"voluntary_exits": []
}
}
```
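A minimal sketch of requesting a block to sign, passing `slot` and `randao_reveal` as query parameters (the values are placeholders; a real caller supplies the duty slot and a valid 96-byte RANDAO reveal signature):
```rust
use serde_json::Value;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    // Placeholder values for illustration only.
    let slot = "33";
    let randao_reveal = "0x8d7b...";

    let block: Value = reqwest::Client::new()
        .get("http://localhost:5052/validator/block")
        .query(&[("slot", slot), ("randao_reveal", randao_reveal)])
        .send()
        .await?
        .json()
        .await?;
    println!("{}", block);
    Ok(())
}
```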
## `/validator/block` POST
Accepts a `SignedBeaconBlock` for verification. If it is valid, it will be
imported into the local database and published on the network. Invalid blocks
will not be published to the network.
A block may be considered invalid because it is fundamentally incorrect, or its
parent has not yet been imported.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/validator/block`
Method | POST
JSON Encoding | Object
Query Parameters | None
Typical Responses | 200/202
### Request Body
Expects a JSON encoded `SignedBeaconBlock` in the POST request body:
### Returns
Returns a null object if the block passed all block validation and is published to the network.
Else, returns a processing error description.
### Example
### Request Body
```json
{
"message": {
"slot": 33,
"parent_root": "0xf54de54bd33e33aee4706cffff4bd991bcbf522f2551ab007180479c63f4fe912",
"state_root": "0x615c887bad27bc05754d627d941e1730e1b4c77b2eb4378c195ac8a8203bbf26",
"body": {
"randao_reveal": "0x8d7b2a32b026e9c79aae6ec6b83eabae89d60cacd65ac41ed7d2f4be9dd8c89c1bf7cd3d700374e18d03d12f6a054c23006f64f0e4e8b7cf37d6ac9a4c7d815c858120c54673b7d3cb2bb1550a4d659eaf46e34515677c678b70d6f62dbf89f",
"eth1_data": {
"deposit_root": "0x66687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925",
"deposit_count": 8,
"block_hash": "0x2b32db6c2c0a6235fb1397e8225ea85e0f0e6e8c7b126d0016ccbde0e667151e"
},
"graffiti": "0x736967702f6c69676874686f7573652d302e312e312d7076572656c65617365",
"proposer_slashings": [
],
"attester_slashings": [
],
"attestations": [
],
"deposits": [
],
"voluntary_exits": [
]
}
},
"signature": "0x965ced900dbabd0a78b81a0abb5d03407be0d38762104316416347f2ea6f82652b5759396f402e85df8ee18ba2c60145037c73b1c335f4272f1751a1cd89862b7b4937c035e350d0108554bd4a8930437ec3311c801a65fe8e5ba022689b5c24"
}
```
## `/validator/attestation`
Produces and returns an unsigned `Attestation` from the current state.
The attestation will reference the `beacon_block_root` of the highest block in
the canonical chain with a slot equal to or less than the given `slot`.
An error will be returned if the given slot is more than
`SLOTS_PER_HISTORICAL_VECTOR` slots behind the current head block.
This endpoint is not protected against slashing. Signing the returned
attestation may result in a slashable offence.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/validator/attestation`
Method | GET
JSON Encoding | Object
Query Parameters | `slot`, `committee_index`
Typical Responses | 200
### Parameters
- `slot` (`Slot`): The slot number for which the attestation is to be produced.
- `committee_index` (`CommitteeIndex`): The index of the committee that makes the attestation.
### Returns
Returns an `Attestation` object with a default signature. The `signature` field should be replaced by a valid signature.
#### Response Body
```json
{
"aggregation_bits": "0x01",
"data": {
"slot": 100,
"index": 0,
"beacon_block_root": "0xf22e4ec281136d119eabcd4d9d248aeacd042eb63d8d7642f73ad3e71f1c9283",
"source": {
"epoch": 2,
"root": "0x34c1244535c923f08e7f83170d41a076e4f1ec61013846b3a615a1d109d3c329"
},
"target": {
"epoch": 3,
"root": "0xaefd23b384994dc0c1a6b77836bdb2f24f209ebfe6c4819324d9685f4a43b4e1"
}
},
"signature": "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
}
```
## `/validator/aggregate_attestation`
Requests an `AggregateAttestation` from the beacon node that has a
specific `attestation.data`. If no aggregate attestation is known this will
return a null object.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/validator/aggregate_attestation`
Method | GET
JSON Encoding | Object
Query Parameters | `attestation_data`
Typical Responses | 200
### Returns
Returns a null object if the attestation data passed is not known to the beacon
node.
### Example
### Request Body
```json
{
"aggregation_bits": "0x03",
"data": {
"slot": 3,
"index": 0,
"beacon_block_root": "0x0b6a1f7a9baa38d00ef079ba861b7587662565ca2502fb9901741c1feb8bb3c9",
"source": {
"epoch": 0,
"root": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"target": {
"epoch": 0,
"root": "0xad2c360ab8c8523db278a7d7ced22f3810800f2fdc282defb6db216689d376bd"
}
},
"signature": "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03$649bd3c6c7e8a39cf2ffb99e07b4964d52854559f"
}
```
## `/validator/attestations`
Accepts a list of `Attestation` for verification. If they are valid, they will be imported
into the local database and published to the network. Invalid attestations will
not be published to the network.
An attestation may be considered invalid because it is fundamentally incorrect
or because the beacon node has not imported the relevant blocks required to
verify it.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/validator/attestations`
Method | POST
JSON Encoding | Object
Query Parameters | None
Typical Responses | 200/202
### Request Body
Expects a JSON encoded list of signed `Attestation` objects in the POST request body. In
accordance with the naive aggregation scheme, each attestation _must_ have
exactly one bit set in its `aggregation_bits` field.
### Returns
Returns a null object if the attestation passed all validation and is published to the network.
Else, returns a processing error description.
### Example
### Request Body
```json
{
"aggregation_bits": "0x03",
"data": {
"slot": 3,
"index": 0,
"beacon_block_root": "0x0b6a1f7a9baa38d00ef079ba861b7587662565ca2502fb9901741c1feb8bb3c9",
"source": {
"epoch": 0,
"root": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"target": {
"epoch": 0,
"root": "0xad2c360ab8c8523db278a7d7ced22f3810800f2fdc282defb6db216689d376bd"
}
},
"signature": "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03$649bd3c6c7e8a39cf2ffb99e07b4964d52854559f"
}
```
## `/validator/aggregate_and_proofs`
Accepts a list of `SignedAggregateAndProof` for publication. If they are valid
(the validator is an aggregator and the signatures can be verified) these
are published to the network on the global aggregate gossip topic.
### HTTP Specification
| Property | Specification |
| --- |--- |
Path | `/validator/aggregate_and_proofs`
Method | POST
JSON Encoding | Object
Query Parameters | None
Typical Responses | 200/202
### Request Body
Expects a JSON encoded list of `SignedAggregateAndProof` objects in the POST request body.
### Returns
Returns a null object if the attestation passed all validation and is published to the network.
Else, returns a processing error description.
### Example
### Request Body
```json
[
{
"message": {
"aggregator_index": 12,
"aggregate": {
"aggregation_bits": "0x03",
"data": {
"slot": 3,
"index": 0,
"beacon_block_root": "0x0b6a1f7a9baa38d00ef079ba861b7587662565ca2502fb9901741c1feb8bb3c9",
"source": {
"epoch": 0,
"root": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"target": {
"epoch": 0,
"root": "0xad2c360ab8c8523db278a7d7ced22f3810800f2fdc282defb6db216689d376bd"
}
},
"signature": "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03649bd3c6c7e8a39cf2ffb99e07b4964d52854559f"
},
"selection_proof": "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03649bd3c6c7e8a39cf2ffb99e07b4964d52854559f"
},
"signature": "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03649bd3c6c7e8a39cf2ffb99e07b4964d52854559f"
}
]
```
_Note: The data in this request is for demonstrating types and does not
contain real data_
@ -1,16 +1,21 @@
-# Lighthouse REST API: `/consensus`
+# Validator Inclusion APIs
-The `/consensus` endpoints provide information on results of the proof-of-stake
-voting process used for finality/justification under Casper FFG.
+The `/lighthouse/validator_inclusion` API endpoints provide information on
+results of the proof-of-stake voting process used for finality/justification
+under Casper FFG.
+These endpoints are not stable or included in the Eth2 standard API. As such,
+they are subject to change or removal without a change in major release
+version.
## Endpoints
HTTP Path | Description |
| --- | -- |
-[`/consensus/global_votes`](#consensusglobal_votes) | A global vote count for a given epoch.
-[`/consensus/individual_votes`](#consensusindividual_votes) | A per-validator breakdown of votes in a given epoch.
+[`/lighthouse/validator_inclusion/{epoch}/global`](#global) | A global vote count for a given epoch.
+[`/lighthouse/validator_inclusion/{epoch}/{validator_id}`](#individual) | A per-validator breakdown of votes in a given epoch.
-## `/consensus/global_votes`
+## Global
Returns a global count of votes for some given `epoch`. The results are included
both for the current and previous (`epoch - 1`) epochs since both are required
@ -75,40 +80,27 @@ voting upon the previous epoch included in a block.
When this value is greater than or equal to `2/3` it is possible that the
beacon chain may justify and/or finalize the epoch.
-### HTTP Specification
-| Property | Specification |
-| --- |--- |
-Path | `/consensus/global_votes`
-Method | GET
-JSON Encoding | Object
-Query Parameters | `epoch`
-Typical Responses | 200
-### Parameters
-Requires the `epoch` (`Epoch`) query parameter to determine which epoch will be
-considered the current epoch.
-### Returns
-A report on global validator voting participation.
-### Example
-```json
-{
-  "current_epoch_active_gwei": 52377600000000,
-  "previous_epoch_active_gwei": 52377600000000,
-  "current_epoch_attesting_gwei": 50740900000000,
-  "current_epoch_target_attesting_gwei": 49526000000000,
-  "previous_epoch_attesting_gwei": 52377600000000,
-  "previous_epoch_target_attesting_gwei": 51063400000000,
-  "previous_epoch_head_attesting_gwei": 9248600000000
-}
-```
+### HTTP Example
+```bash
+curl -X GET "http://localhost:5052/lighthouse/validator_inclusion/0/global" -H "accept: application/json" | jq
+```
+```json
+{
+  "data": {
+    "current_epoch_active_gwei": 642688000000000,
+    "previous_epoch_active_gwei": 642688000000000,
+    "current_epoch_attesting_gwei": 366208000000000,
+    "current_epoch_target_attesting_gwei": 366208000000000,
+    "previous_epoch_attesting_gwei": 1000000000,
+    "previous_epoch_target_attesting_gwei": 1000000000,
+    "previous_epoch_head_attesting_gwei": 1000000000
+  }
+}
+```
-## `/consensus/individual_votes`
+## Individual
Returns a per-validator summary of how that validator performed during the
current epoch.
@ -117,73 +109,26 @@ The [Global Votes](#consensusglobal_votes) endpoint is the summation of all of t
individual values, please see it for definitions of terms like "current_epoch",
"previous_epoch" and "target_attester".
-### HTTP Specification
-| Property | Specification |
-| --- |--- |
-Path | `/consensus/individual_votes`
-Method | POST
-JSON Encoding | Object
-Query Parameters | None
-Typical Responses | 200
-### Request Body
-Expects the following object in the POST request body:
-```
-{
-  epoch: Epoch,
-  pubkeys: [PublicKey]
-}
-```
-### Returns
-A report on the validators voting participation.
-### Example
-#### Request Body
-```json
-{
-  "epoch": 1203,
-  "pubkeys": [
-    "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16",
-    "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42"
-  ]
-}
-```
-_Note: for demonstration purposes the second pubkey is some unknown pubkey._
-#### Response Body
-```json
-[
-  {
-    "epoch": 1203,
-    "pubkey": "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16",
-    "validator_index": 14935,
-    "vote": {
-      "is_slashed": false,
-      "is_withdrawable_in_current_epoch": false,
-      "is_active_in_current_epoch": true,
-      "is_active_in_previous_epoch": true,
-      "current_epoch_effective_balance_gwei": 3200000000,
-      "is_current_epoch_attester": true,
-      "is_current_epoch_target_attester": true,
-      "is_previous_epoch_attester": true,
-      "is_previous_epoch_target_attester": true,
-      "is_previous_epoch_head_attester": false
-    }
-  },
-  {
-    "epoch": 1203,
-    "pubkey": "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42",
-    "validator_index": null,
-    "vote": null
-  }
-]
-```
+### HTTP Example
+```bash
+curl -X GET "http://localhost:5052/lighthouse/validator_inclusion/0/42" -H "accept: application/json" | jq
+```
+```json
+{
+  "data": {
+    "is_slashed": false,
+    "is_withdrawable_in_current_epoch": false,
+    "is_active_in_current_epoch": true,
+    "is_active_in_previous_epoch": true,
+    "current_epoch_effective_balance_gwei": 32000000000,
+    "is_current_epoch_attester": false,
+    "is_current_epoch_target_attester": false,
+    "is_previous_epoch_attester": false,
+    "is_previous_epoch_target_attester": false,
+    "is_previous_epoch_head_attester": false
+  }
+}
+```
@ -1,111 +0,0 @@
# Websocket API
**Note: the WebSocket server _only_ emits events. It does not accept any
requests. Use the [HTTP API](./http.md) for requests.**
By default, a Lighthouse `beacon_node` exposes a websocket server on `localhost:5053`.
The following CLI flags control the websocket server:
- `--no-ws`: disable the websocket server.
- `--ws-port`: specify the listen port of the server.
- `--ws-address`: specify the listen address of the server.
All clients connected to the websocket server will receive the same stream of events, all triggered
by the `BeaconChain`. Each event is a JSON object with the following schema:
```json
{
"event": "string",
"data": "object"
}
```
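A minimal sketch of a consumer that prints each event as it arrives. The `tokio-tungstenite`, `futures` and `url` crates are assumed here purely for illustration; any websocket client works:
```rust
use futures::StreamExt;
use tokio_tungstenite::connect_async;
use url::Url;

#[tokio::main]
async fn main() {
    // Connect to the beacon node's websocket server (default port 5053).
    let url = Url::parse("ws://localhost:5053").expect("valid URL");
    let (mut socket, _response) = connect_async(url).await.expect("websocket connection");

    // Each event arrives as a text frame containing the JSON object described above.
    while let Some(message) = socket.next().await {
        match message {
            Ok(frame) if frame.is_text() => println!("{}", frame.into_text().unwrap()),
            Ok(_) => {}
            Err(e) => {
                eprintln!("websocket error: {}", e);
                break;
            }
        }
    }
}
```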
## Events
The following events may be emitted:
### Beacon Head Changed
Occurs whenever the canonical head of the beacon chain changes.
```json
{
"event": "beacon_head_changed",
"data": {
"reorg": "boolean",
"current_head_beacon_block_root": "string",
"previous_head_beacon_block_root": "string"
}
}
```
### Beacon Finalization
Occurs whenever the finalized checkpoint of the canonical head changes.
```json
{
"event": "beacon_finalization",
"data": {
"epoch": "number",
"root": "string"
}
}
```
### Beacon Block Imported
Occurs whenever the beacon node imports a valid block.
```json
{
"event": "beacon_block_imported",
"data": {
"block": "object"
}
}
```
### Beacon Block Rejected
Occurs whenever the beacon node rejects a block because it is invalid or an
error occurred during validation.
```json
{
"event": "beacon_block_rejected",
"data": {
"reason": "string",
"block": "object"
}
}
```
### Beacon Attestation Imported
Occurs whenever the beacon node imports a valid attestation.
```json
{
"event": "beacon_attestation_imported",
"data": {
"attestation": "object"
}
}
```
### Beacon Attestation Rejected
Occurs whenever the beacon node rejects an attestation because it is invalid or
an error occurred during validation.
```json
{
"event": "beacon_attestation_rejected",
"data": {
"reason": "string",
"attestation": "object"
}
}
```
common/eth2/Cargo.toml Normal file
@ -0,0 +1,25 @@
[package]
name = "eth2"
version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
serde = { version = "1.0.110", features = ["derive"] }
serde_json = "1.0.52"
types = { path = "../../consensus/types" }
hex = "0.4.2"
reqwest = { version = "0.10.8", features = ["json"] }
eth2_libp2p = { path = "../../beacon_node/eth2_libp2p" }
proto_array = { path = "../../consensus/proto_array", optional = true }
serde_utils = { path = "../../consensus/serde_utils" }
[target.'cfg(target_os = "linux")'.dependencies]
psutil = { version = "3.1.0", optional = true }
procinfo = { version = "0.4.2", optional = true }
[features]
default = ["lighthouse"]
lighthouse = ["proto_array", "psutil", "procinfo"]
common/eth2/src/lib.rs Normal file
@ -0,0 +1,784 @@
//! This crate provides two major things:
//!
//! 1. The types served by the `http_api` crate.
//! 2. A wrapper around `reqwest` that forms a HTTP client, capable of consuming the endpoints served
//! by the `http_api` crate.
//!
//! Eventually it would be ideal to publish this crate on crates.io, however we have some local
//! dependencies preventing this presently.
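//!
//! ## Example
//!
//! An illustrative sketch of typical usage (assuming a beacon node serving the
//! HTTP API on `localhost:5052`):
//!
//! ```no_run
//! use eth2::{BeaconNodeHttpClient, Url};
//!
//! async fn check_genesis() -> Result<(), eth2::Error> {
//!     let client = BeaconNodeHttpClient::new(Url::parse("http://localhost:5052").unwrap());
//!     // `GET /eth/v1/beacon/genesis`; the response wraps a `GenesisData`.
//!     let _genesis = client.get_beacon_genesis().await?;
//!     Ok(())
//! }
//! ```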
#[cfg(feature = "lighthouse")]
pub mod lighthouse;
pub mod types;
use self::types::*;
use reqwest::{IntoUrl, Response};
use serde::{de::DeserializeOwned, Serialize};
use std::convert::TryFrom;
use std::fmt;
pub use reqwest;
pub use reqwest::{StatusCode, Url};
#[derive(Debug)]
pub enum Error {
/// The `reqwest` client raised an error.
Reqwest(reqwest::Error),
/// The server returned an error message where the body was able to be parsed.
ServerMessage(ErrorMessage),
/// The server returned an error message where the body was unable to be parsed.
StatusCode(StatusCode),
/// The supplied URL is badly formatted. It should look something like `http://127.0.0.1:5052`.
InvalidUrl(Url),
}
impl Error {
/// If the error has a HTTP status code, return it.
pub fn status(&self) -> Option<StatusCode> {
match self {
Error::Reqwest(error) => error.status(),
Error::ServerMessage(msg) => StatusCode::try_from(msg.code).ok(),
Error::StatusCode(status) => Some(*status),
Error::InvalidUrl(_) => None,
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", self)
}
}
/// A wrapper around `reqwest::Client` which provides convenience methods for interfacing with a
/// Lighthouse Beacon Node HTTP server (`http_api`).
#[derive(Clone)]
pub struct BeaconNodeHttpClient {
client: reqwest::Client,
server: Url,
}
impl BeaconNodeHttpClient {
pub fn new(server: Url) -> Self {
Self {
client: reqwest::Client::new(),
server,
}
}
pub fn from_components(server: Url, client: reqwest::Client) -> Self {
Self { client, server }
}
/// Return the path with the standard `/eth/v1` prefix applied.
fn eth_path(&self) -> Result<Url, Error> {
let mut path = self.server.clone();
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("eth")
.push("v1");
Ok(path)
}
/// Perform a HTTP GET request.
async fn get<T: DeserializeOwned, U: IntoUrl>(&self, url: U) -> Result<T, Error> {
let response = self.client.get(url).send().await.map_err(Error::Reqwest)?;
ok_or_error(response)
.await?
.json()
.await
.map_err(Error::Reqwest)
}
/// Perform a HTTP GET request, returning `None` on a 404 error.
async fn get_opt<T: DeserializeOwned, U: IntoUrl>(&self, url: U) -> Result<Option<T>, Error> {
let response = self.client.get(url).send().await.map_err(Error::Reqwest)?;
match ok_or_error(response).await {
Ok(resp) => resp.json().await.map(Option::Some).map_err(Error::Reqwest),
Err(err) => {
if err.status() == Some(StatusCode::NOT_FOUND) {
Ok(None)
} else {
Err(err)
}
}
}
}
/// Perform a HTTP POST request.
async fn post<T: Serialize, U: IntoUrl>(&self, url: U, body: &T) -> Result<(), Error> {
let response = self
.client
.post(url)
.json(body)
.send()
.await
.map_err(Error::Reqwest)?;
ok_or_error(response).await?;
Ok(())
}
/// `GET beacon/genesis`
///
/// ## Errors
///
/// May return a `404` if beacon chain genesis has not yet occurred.
pub async fn get_beacon_genesis(&self) -> Result<GenericResponse<GenesisData>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("genesis");
self.get(path).await
}
/// `GET beacon/states/{state_id}/root`
///
/// Returns `Ok(None)` on a 404 error.
pub async fn get_beacon_states_root(
&self,
state_id: StateId,
) -> Result<Option<GenericResponse<RootData>>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("states")
.push(&state_id.to_string())
.push("root");
self.get_opt(path).await
}
/// `GET beacon/states/{state_id}/fork`
///
/// Returns `Ok(None)` on a 404 error.
pub async fn get_beacon_states_fork(
&self,
state_id: StateId,
) -> Result<Option<GenericResponse<Fork>>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("states")
.push(&state_id.to_string())
.push("fork");
self.get_opt(path).await
}
/// `GET beacon/states/{state_id}/finality_checkpoints`
///
/// Returns `Ok(None)` on a 404 error.
pub async fn get_beacon_states_finality_checkpoints(
&self,
state_id: StateId,
) -> Result<Option<GenericResponse<FinalityCheckpointsData>>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("states")
.push(&state_id.to_string())
.push("finality_checkpoints");
self.get_opt(path).await
}
/// `GET beacon/states/{state_id}/validators`
///
/// Returns `Ok(None)` on a 404 error.
pub async fn get_beacon_states_validators(
&self,
state_id: StateId,
) -> Result<Option<GenericResponse<Vec<ValidatorData>>>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("states")
.push(&state_id.to_string())
.push("validators");
self.get_opt(path).await
}
/// `GET beacon/states/{state_id}/committees?slot,index`
///
/// Returns `Ok(None)` on a 404 error.
pub async fn get_beacon_states_committees(
&self,
state_id: StateId,
epoch: Epoch,
slot: Option<Slot>,
index: Option<u64>,
) -> Result<Option<GenericResponse<Vec<CommitteeData>>>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("states")
.push(&state_id.to_string())
.push("committees")
.push(&epoch.to_string());
if let Some(slot) = slot {
path.query_pairs_mut()
.append_pair("slot", &slot.to_string());
}
if let Some(index) = index {
path.query_pairs_mut()
.append_pair("index", &index.to_string());
}
self.get_opt(path).await
}
/// `GET beacon/states/{state_id}/validators/{validator_id}`
///
/// Returns `Ok(None)` on a 404 error.
pub async fn get_beacon_states_validator_id(
&self,
state_id: StateId,
validator_id: &ValidatorId,
) -> Result<Option<GenericResponse<ValidatorData>>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("states")
.push(&state_id.to_string())
.push("validators")
.push(&validator_id.to_string());
self.get_opt(path).await
}
/// `GET beacon/headers?slot,parent_root`
///
/// Returns `Ok(None)` on a 404 error.
pub async fn get_beacon_headers(
&self,
slot: Option<Slot>,
parent_root: Option<Hash256>,
) -> Result<Option<GenericResponse<Vec<BlockHeaderData>>>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("headers");
if let Some(slot) = slot {
path.query_pairs_mut()
.append_pair("slot", &slot.to_string());
}
if let Some(root) = parent_root {
path.query_pairs_mut()
.append_pair("parent_root", &format!("{:?}", root));
}
self.get_opt(path).await
}
/// `GET beacon/headers/{block_id}`
///
/// Returns `Ok(None)` on a 404 error.
pub async fn get_beacon_headers_block_id(
&self,
block_id: BlockId,
) -> Result<Option<GenericResponse<BlockHeaderData>>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("headers")
.push(&block_id.to_string());
self.get_opt(path).await
}
/// `POST beacon/blocks`
pub async fn post_beacon_blocks<T: EthSpec>(
&self,
block: &SignedBeaconBlock<T>,
) -> Result<(), Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("blocks");
self.post(path, block).await?;
Ok(())
}
/// `GET beacon/blocks/{block_id}`
///
/// Returns `Ok(None)` on a 404 error.
pub async fn get_beacon_blocks<T: EthSpec>(
&self,
block_id: BlockId,
) -> Result<Option<GenericResponse<SignedBeaconBlock<T>>>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("blocks")
.push(&block_id.to_string());
self.get_opt(path).await
}
/// `GET beacon/blocks/{block_id}/root`
///
/// Returns `Ok(None)` on a 404 error.
pub async fn get_beacon_blocks_root(
&self,
block_id: BlockId,
) -> Result<Option<GenericResponse<RootData>>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("blocks")
.push(&block_id.to_string())
.push("root");
self.get_opt(path).await
}
/// `GET beacon/blocks/{block_id}/attestations`
///
/// Returns `Ok(None)` on a 404 error.
pub async fn get_beacon_blocks_attestations<T: EthSpec>(
&self,
block_id: BlockId,
) -> Result<Option<GenericResponse<Vec<Attestation<T>>>>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("blocks")
.push(&block_id.to_string())
.push("attestations");
self.get_opt(path).await
}
/// `POST beacon/pool/attestations`
pub async fn post_beacon_pool_attestations<T: EthSpec>(
&self,
attestation: &Attestation<T>,
) -> Result<(), Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("pool")
.push("attestations");
self.post(path, attestation).await?;
Ok(())
}
/// `GET beacon/pool/attestations`
pub async fn get_beacon_pool_attestations<T: EthSpec>(
&self,
) -> Result<GenericResponse<Vec<Attestation<T>>>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("pool")
.push("attestations");
self.get(path).await
}
/// `POST beacon/pool/attester_slashings`
pub async fn post_beacon_pool_attester_slashings<T: EthSpec>(
&self,
slashing: &AttesterSlashing<T>,
) -> Result<(), Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("pool")
.push("attester_slashings");
self.post(path, slashing).await?;
Ok(())
}
/// `GET beacon/pool/attester_slashings`
pub async fn get_beacon_pool_attester_slashings<T: EthSpec>(
&self,
) -> Result<GenericResponse<Vec<AttesterSlashing<T>>>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("pool")
.push("attester_slashings");
self.get(path).await
}
/// `POST beacon/pool/proposer_slashings`
pub async fn post_beacon_pool_proposer_slashings(
&self,
slashing: &ProposerSlashing,
) -> Result<(), Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("pool")
.push("proposer_slashings");
self.post(path, slashing).await?;
Ok(())
}
/// `GET beacon/pool/proposer_slashings`
pub async fn get_beacon_pool_proposer_slashings(
&self,
) -> Result<GenericResponse<Vec<ProposerSlashing>>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("pool")
.push("proposer_slashings");
self.get(path).await
}
/// `POST beacon/pool/voluntary_exits`
pub async fn post_beacon_pool_voluntary_exits(
&self,
exit: &SignedVoluntaryExit,
) -> Result<(), Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("pool")
.push("voluntary_exits");
self.post(path, exit).await?;
Ok(())
}
/// `GET beacon/pool/voluntary_exits`
pub async fn get_beacon_pool_voluntary_exits(
&self,
) -> Result<GenericResponse<Vec<SignedVoluntaryExit>>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("pool")
.push("voluntary_exits");
self.get(path).await
}
/// `GET config/fork_schedule`
pub async fn get_config_fork_schedule(&self) -> Result<GenericResponse<Vec<Fork>>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("config")
.push("fork_schedule");
self.get(path).await
}
/// `GET config/spec`
pub async fn get_config_spec(&self) -> Result<GenericResponse<YamlConfig>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("config")
.push("spec");
self.get(path).await
}
/// `GET config/deposit_contract`
pub async fn get_config_deposit_contract(
&self,
) -> Result<GenericResponse<DepositContractData>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("config")
.push("deposit_contract");
self.get(path).await
}
/// `GET node/version`
pub async fn get_node_version(&self) -> Result<GenericResponse<VersionData>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("node")
.push("version");
self.get(path).await
}
/// `GET node/syncing`
pub async fn get_node_syncing(&self) -> Result<GenericResponse<SyncingData>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("node")
.push("syncing");
self.get(path).await
}
/// `GET debug/beacon/states/{state_id}`
pub async fn get_debug_beacon_states<T: EthSpec>(
&self,
state_id: StateId,
) -> Result<Option<GenericResponse<BeaconState<T>>>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("debug")
.push("beacon")
.push("states")
.push(&state_id.to_string());
self.get_opt(path).await
}
/// `GET debug/beacon/heads`
pub async fn get_debug_beacon_heads(
&self,
) -> Result<GenericResponse<Vec<ChainHeadData>>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("debug")
.push("beacon")
.push("heads");
self.get(path).await
}
/// `GET validator/duties/attester/{epoch}?index`
///
/// ## Note
///
/// The `index` query parameter accepts a list of validator indices.
pub async fn get_validator_duties_attester(
&self,
epoch: Epoch,
index: Option<&[u64]>,
) -> Result<GenericResponse<Vec<AttesterData>>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("validator")
.push("duties")
.push("attester")
.push(&epoch.to_string());
if let Some(index) = index {
let string = index
.iter()
.map(|i| i.to_string())
.collect::<Vec<_>>()
.join(",");
path.query_pairs_mut().append_pair("index", &string);
}
self.get(path).await
}
/// `GET validator/duties/proposer/{epoch}`
pub async fn get_validator_duties_proposer(
&self,
epoch: Epoch,
) -> Result<GenericResponse<Vec<ProposerData>>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("validator")
.push("duties")
.push("proposer")
.push(&epoch.to_string());
self.get(path).await
}
/// `GET validator/blocks/{slot}?randao_reveal,graffiti`
pub async fn get_validator_blocks<T: EthSpec>(
&self,
slot: Slot,
randao_reveal: SignatureBytes,
graffiti: Option<&Graffiti>,
) -> Result<GenericResponse<BeaconBlock<T>>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("validator")
.push("blocks")
.push(&slot.to_string());
path.query_pairs_mut()
.append_pair("randao_reveal", &randao_reveal.to_string());
if let Some(graffiti) = graffiti {
path.query_pairs_mut()
.append_pair("graffiti", &graffiti.to_string());
}
self.get(path).await
}
/// `GET validator/attestation_data?slot,committee_index`
pub async fn get_validator_attestation_data(
&self,
slot: Slot,
committee_index: CommitteeIndex,
) -> Result<GenericResponse<AttestationData>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("validator")
.push("attestation_data");
path.query_pairs_mut()
.append_pair("slot", &slot.to_string())
.append_pair("committee_index", &committee_index.to_string());
self.get(path).await
}
/// `GET validator/aggregate_attestation?slot,attestation_data_root`
pub async fn get_validator_aggregate_attestation<T: EthSpec>(
&self,
slot: Slot,
attestation_data_root: Hash256,
) -> Result<Option<GenericResponse<Attestation<T>>>, Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("validator")
.push("aggregate_attestation");
path.query_pairs_mut()
.append_pair("slot", &slot.to_string())
.append_pair(
"attestation_data_root",
&format!("{:?}", attestation_data_root),
);
self.get_opt(path).await
}
/// `POST validator/aggregate_and_proofs`
pub async fn post_validator_aggregate_and_proof<T: EthSpec>(
&self,
aggregate: &SignedAggregateAndProof<T>,
) -> Result<(), Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("validator")
.push("aggregate_and_proofs");
self.post(path, aggregate).await?;
Ok(())
}
/// `POST validator/beacon_committee_subscriptions`
pub async fn post_validator_beacon_committee_subscriptions(
&self,
subscriptions: &[BeaconCommitteeSubscription],
) -> Result<(), Error> {
let mut path = self.eth_path()?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("validator")
.push("beacon_committee_subscriptions");
self.post(path, &subscriptions).await?;
Ok(())
}
}
/// Returns `Ok(response)` if the response is a `200 OK` response. Otherwise, creates an
/// appropriate error message.
async fn ok_or_error(response: Response) -> Result<Response, Error> {
let status = response.status();
if status == StatusCode::OK {
Ok(response)
} else if let Ok(message) = response.json().await {
Err(Error::ServerMessage(message))
} else {
Err(Error::StatusCode(status))
}
}
@ -0,0 +1,224 @@
//! This module contains endpoints that are non-standard and only available on Lighthouse servers.
use crate::{
types::{Epoch, EthSpec, GenericResponse, ValidatorId},
BeaconNodeHttpClient, Error,
};
use proto_array::core::ProtoArray;
use serde::{Deserialize, Serialize};
pub use eth2_libp2p::{types::SyncState, PeerInfo};
/// Information returned by `peers` and `connected_peers`.
// TODO: this should be deserializable..
#[derive(Debug, Clone, Serialize)]
#[serde(bound = "T: EthSpec")]
pub struct Peer<T: EthSpec> {
/// The Peer's ID
pub peer_id: String,
/// The PeerInfo associated with the peer.
pub peer_info: PeerInfo<T>,
}
/// The results of validators voting during an epoch.
///
/// Provides information about the current and previous epochs.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct GlobalValidatorInclusionData {
/// The total effective balance of all active validators during the _current_ epoch.
pub current_epoch_active_gwei: u64,
/// The total effective balance of all active validators during the _previous_ epoch.
pub previous_epoch_active_gwei: u64,
/// The total effective balance of all validators who attested during the _current_ epoch.
pub current_epoch_attesting_gwei: u64,
/// The total effective balance of all validators who attested during the _current_ epoch and
/// agreed with the state about the beacon block at the first slot of the _current_ epoch.
pub current_epoch_target_attesting_gwei: u64,
/// The total effective balance of all validators who attested during the _previous_ epoch.
pub previous_epoch_attesting_gwei: u64,
/// The total effective balance of all validators who attested during the _previous_ epoch and
/// agreed with the state about the beacon block at the first slot of the _previous_ epoch.
pub previous_epoch_target_attesting_gwei: u64,
/// The total effective balance of all validators who attested during the _previous_ epoch and
/// agreed with the state about the beacon block at the time of attestation.
pub previous_epoch_head_attesting_gwei: u64,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ValidatorInclusionData {
/// True if the validator has been slashed, ever.
pub is_slashed: bool,
/// True if the validator can withdraw in the current epoch.
pub is_withdrawable_in_current_epoch: bool,
/// True if the validator was active in the state's _current_ epoch.
pub is_active_in_current_epoch: bool,
/// True if the validator was active in the state's _previous_ epoch.
pub is_active_in_previous_epoch: bool,
/// The validator's effective balance in the _current_ epoch.
pub current_epoch_effective_balance_gwei: u64,
/// True if the validator had an attestation included in the _current_ epoch.
pub is_current_epoch_attester: bool,
/// True if the validator's beacon block root attestation for the first slot of the _current_
/// epoch matches the block root known to the state.
pub is_current_epoch_target_attester: bool,
/// True if the validator had an attestation included in the _previous_ epoch.
pub is_previous_epoch_attester: bool,
/// True if the validator's beacon block root attestation for the first slot of the _previous_
/// epoch matches the block root known to the state.
pub is_previous_epoch_target_attester: bool,
/// True if the validator's beacon block root attestation in the _previous_ epoch at the
/// attestation's slot (`attestation_data.slot`) matches the block root known to the state.
pub is_previous_epoch_head_attester: bool,
}
#[cfg(target_os = "linux")]
use {procinfo::pid, psutil::process::Process};
/// Reports on the health of the Lighthouse instance.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Health {
/// The pid of this process.
pub pid: u32,
/// The number of threads used by this pid.
pub pid_num_threads: i32,
/// The total resident memory used by this pid.
pub pid_mem_resident_set_size: u64,
/// The total virtual memory used by this pid.
pub pid_mem_virtual_memory_size: u64,
/// Total virtual memory on the system
pub sys_virt_mem_total: u64,
/// Total virtual memory available for new processes.
pub sys_virt_mem_available: u64,
/// Total virtual memory used on the system
pub sys_virt_mem_used: u64,
/// Total virtual memory not used on the system
pub sys_virt_mem_free: u64,
/// Percentage of virtual memory used on the system
pub sys_virt_mem_percent: f32,
/// System load average over 1 minute.
pub sys_loadavg_1: f64,
/// System load average over 5 minutes.
pub sys_loadavg_5: f64,
/// System load average over 15 minutes.
pub sys_loadavg_15: f64,
}
impl Health {
#[cfg(not(target_os = "linux"))]
pub fn observe() -> Result<Self, String> {
Err("Health is only available on Linux".into())
}
#[cfg(target_os = "linux")]
pub fn observe() -> Result<Self, String> {
let process =
Process::current().map_err(|e| format!("Unable to get current process: {:?}", e))?;
let process_mem = process
.memory_info()
.map_err(|e| format!("Unable to get process memory info: {:?}", e))?;
let stat = pid::stat_self().map_err(|e| format!("Unable to get stat: {:?}", e))?;
let vm = psutil::memory::virtual_memory()
.map_err(|e| format!("Unable to get virtual memory: {:?}", e))?;
let loadavg =
psutil::host::loadavg().map_err(|e| format!("Unable to get loadavg: {:?}", e))?;
Ok(Self {
pid: process.pid(),
pid_num_threads: stat.num_threads,
pid_mem_resident_set_size: process_mem.rss(),
pid_mem_virtual_memory_size: process_mem.vms(),
sys_virt_mem_total: vm.total(),
sys_virt_mem_available: vm.available(),
sys_virt_mem_used: vm.used(),
sys_virt_mem_free: vm.free(),
sys_virt_mem_percent: vm.percent(),
sys_loadavg_1: loadavg.one,
sys_loadavg_5: loadavg.five,
sys_loadavg_15: loadavg.fifteen,
})
}
}
impl BeaconNodeHttpClient {
/// `GET lighthouse/health`
pub async fn get_lighthouse_health(&self) -> Result<GenericResponse<Health>, Error> {
let mut path = self.server.clone();
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("lighthouse")
.push("health");
self.get(path).await
}
/// `GET lighthouse/syncing`
pub async fn get_lighthouse_syncing(&self) -> Result<GenericResponse<SyncState>, Error> {
let mut path = self.server.clone();
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("lighthouse")
.push("syncing");
self.get(path).await
}
/*
* Note:
*
* The `lighthouse/peers` endpoints do not have functions here. We have yet to implement
* `Deserialize` on the `PeerInfo` struct since it contains an `Instant`. This could be
* achieved fairly simply, if desired.
*/
/// `GET lighthouse/proto_array`
pub async fn get_lighthouse_proto_array(&self) -> Result<GenericResponse<ProtoArray>, Error> {
let mut path = self.server.clone();
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("lighthouse")
.push("proto_array");
self.get(path).await
}
/// `GET lighthouse/validator_inclusion/{epoch}/global`
pub async fn get_lighthouse_validator_inclusion_global(
&self,
epoch: Epoch,
) -> Result<GenericResponse<GlobalValidatorInclusionData>, Error> {
let mut path = self.server.clone();
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("lighthouse")
.push("validator_inclusion")
.push(&epoch.to_string())
.push("global");
self.get(path).await
}
/// `GET lighthouse/validator_inclusion/{epoch}/{validator_id}`
pub async fn get_lighthouse_validator_inclusion(
&self,
epoch: Epoch,
validator_id: ValidatorId,
) -> Result<GenericResponse<Option<ValidatorInclusionData>>, Error> {
let mut path = self.server.clone();
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("lighthouse")
.push("validator_inclusion")
.push(&epoch.to_string())
.push(&validator_id.to_string());
self.get(path).await
}
}
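// Hypothetical usage from an async context (the constructor shown here is an assumption
// for illustration, not part of this hunk):
//
// let client = BeaconNodeHttpClient::new(Url::parse("http://localhost:5052")?);
// let health = client.get_lighthouse_health().await?.data;
// let proto_array = client.get_lighthouse_proto_array().await?.data;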

432
common/eth2/src/types.rs Normal file
View File

@ -0,0 +1,432 @@
//! This module exposes a superset of the `types` crate. It adds additional types that are only
//! required for the HTTP API.
use eth2_libp2p::{Enr, Multiaddr};
use serde::{Deserialize, Serialize};
use std::convert::TryFrom;
use std::fmt;
use std::str::FromStr;
pub use types::*;
/// An API error serializable to JSON.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ErrorMessage {
pub code: u16,
pub message: String,
#[serde(default)]
pub stacktraces: Vec<String>,
}
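// A hedged sketch (hypothetical test; assumes `serde_json` is available) of the wire
// format: `stacktraces` defaults to an empty list when the field is absent.
#[cfg(test)]
mod error_message_tests {
    use super::*;

    #[test]
    fn stacktraces_default() {
        let error: ErrorMessage =
            serde_json::from_str(r#"{"code":404,"message":"not found"}"#).unwrap();
        assert_eq!(error.code, 404);
        assert!(error.stacktraces.is_empty());
    }
}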
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct GenesisData {
#[serde(with = "serde_utils::quoted_u64")]
pub genesis_time: u64,
pub genesis_validators_root: Hash256,
#[serde(with = "serde_utils::bytes_4_hex")]
pub genesis_fork_version: [u8; 4],
}
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum BlockId {
Head,
Genesis,
Finalized,
Justified,
Slot(Slot),
Root(Hash256),
}
impl FromStr for BlockId {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"head" => Ok(BlockId::Head),
"genesis" => Ok(BlockId::Genesis),
"finalized" => Ok(BlockId::Finalized),
"justified" => Ok(BlockId::Justified),
other => {
if other.starts_with("0x") {
Hash256::from_str(&s[2..])
.map(BlockId::Root)
.map_err(|e| format!("{} cannot be parsed as a root", e))
} else {
u64::from_str(s)
.map(Slot::new)
.map(BlockId::Slot)
.map_err(|_| format!("{} cannot be parsed as a slot", s))
}
}
}
}
}
impl fmt::Display for BlockId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
BlockId::Head => write!(f, "head"),
BlockId::Genesis => write!(f, "genesis"),
BlockId::Finalized => write!(f, "finalized"),
BlockId::Justified => write!(f, "justified"),
BlockId::Slot(slot) => write!(f, "{}", slot),
BlockId::Root(root) => write!(f, "{:?}", root),
}
}
}
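// A hedged example (hypothetical test) of the `BlockId` parsing rules above: named
// identifiers, decimal slots and 0x-prefixed roots are accepted, everything else is not.
#[cfg(test)]
mod block_id_tests {
    use super::*;

    #[test]
    fn parse_block_id() {
        assert_eq!(BlockId::from_str("head").unwrap(), BlockId::Head);
        assert_eq!(BlockId::from_str("42").unwrap(), BlockId::Slot(Slot::new(42)));
        assert_eq!(
            BlockId::from_str(&format!("0x{}", "00".repeat(32))).unwrap(),
            BlockId::Root(Hash256::zero())
        );
        assert!(BlockId::from_str("not-a-block").is_err());
    }
}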
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum StateId {
Head,
Genesis,
Finalized,
Justified,
Slot(Slot),
Root(Hash256),
}
impl FromStr for StateId {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"head" => Ok(StateId::Head),
"genesis" => Ok(StateId::Genesis),
"finalized" => Ok(StateId::Finalized),
"justified" => Ok(StateId::Justified),
other => {
if other.starts_with("0x") {
Hash256::from_str(&s[2..])
.map(StateId::Root)
.map_err(|e| format!("{} cannot be parsed as a root", e))
} else {
u64::from_str(s)
.map(Slot::new)
.map(StateId::Slot)
.map_err(|_| format!("{} cannot be parsed as a slot", s))
}
}
}
}
}
impl fmt::Display for StateId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
StateId::Head => write!(f, "head"),
StateId::Genesis => write!(f, "genesis"),
StateId::Finalized => write!(f, "finalized"),
StateId::Justified => write!(f, "justified"),
StateId::Slot(slot) => write!(f, "{}", slot),
StateId::Root(root) => write!(f, "{:?}", root),
}
}
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
#[serde(bound = "T: Serialize + serde::de::DeserializeOwned")]
pub struct GenericResponse<T: Serialize + serde::de::DeserializeOwned> {
pub data: T,
}
impl<T: Serialize + serde::de::DeserializeOwned> From<T> for GenericResponse<T> {
fn from(data: T) -> Self {
Self { data }
}
}
#[derive(Debug, PartialEq, Clone, Serialize)]
#[serde(bound = "T: Serialize")]
pub struct GenericResponseRef<'a, T: Serialize> {
pub data: &'a T,
}
impl<'a, T: Serialize> From<&'a T> for GenericResponseRef<'a, T> {
fn from(data: &'a T) -> Self {
Self { data }
}
}
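// A hedged sketch (hypothetical test; assumes `serde_json` is available) of the standard
// `{ "data": ... }` envelope produced by `GenericResponse`.
#[cfg(test)]
mod generic_response_tests {
    use super::*;

    #[test]
    fn data_envelope() {
        let response = GenericResponse::from(42_u64);
        assert_eq!(serde_json::to_string(&response).unwrap(), r#"{"data":42}"#);
    }
}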
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub struct RootData {
pub root: Hash256,
}
impl From<Hash256> for RootData {
fn from(root: Hash256) -> Self {
Self { root }
}
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct FinalityCheckpointsData {
pub previous_justified: Checkpoint,
pub current_justified: Checkpoint,
pub finalized: Checkpoint,
}
#[derive(Debug, Clone, PartialEq)]
pub enum ValidatorId {
PublicKey(PublicKeyBytes),
Index(u64),
}
impl FromStr for ValidatorId {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s.starts_with("0x") {
PublicKeyBytes::from_str(s)
.map(ValidatorId::PublicKey)
.map_err(|e| format!("{} cannot be parsed as a public key: {}", s, e))
} else {
u64::from_str(s)
.map(ValidatorId::Index)
.map_err(|e| format!("{} cannot be parsed as a validator index: {}", s, e))
}
}
}
impl fmt::Display for ValidatorId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ValidatorId::PublicKey(pubkey) => write!(f, "{:?}", pubkey),
ValidatorId::Index(index) => write!(f, "{}", index),
}
}
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ValidatorData {
#[serde(with = "serde_utils::quoted_u64")]
pub index: u64,
#[serde(with = "serde_utils::quoted_u64")]
pub balance: u64,
pub status: ValidatorStatus,
pub validator: Validator,
}
// TODO: This does not currently match the spec, but I'm going to try and change the spec using
// this proposal:
//
// https://hackmd.io/bQxMDRt1RbS1TLno8K4NPg?view
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
pub enum ValidatorStatus {
Unknown,
WaitingForEligibility,
WaitingForFinality,
WaitingInQueue,
StandbyForActive(Epoch),
Active,
ActiveAwaitingVoluntaryExit(Epoch),
ActiveAwaitingSlashedExit(Epoch),
ExitedVoluntarily(Epoch),
ExitedSlashed(Epoch),
Withdrawable,
Withdrawn,
}
impl ValidatorStatus {
pub fn from_validator(
validator_opt: Option<&Validator>,
epoch: Epoch,
finalized_epoch: Epoch,
far_future_epoch: Epoch,
) -> Self {
if let Some(validator) = validator_opt {
if validator.is_withdrawable_at(epoch) {
ValidatorStatus::Withdrawable
} else if validator.is_exited_at(epoch) {
if validator.slashed {
ValidatorStatus::ExitedSlashed(validator.withdrawable_epoch)
} else {
ValidatorStatus::ExitedVoluntarily(validator.withdrawable_epoch)
}
} else if validator.is_active_at(epoch) {
if validator.exit_epoch < far_future_epoch {
if validator.slashed {
ValidatorStatus::ActiveAwaitingSlashedExit(validator.exit_epoch)
} else {
ValidatorStatus::ActiveAwaitingVoluntaryExit(validator.exit_epoch)
}
} else {
ValidatorStatus::Active
}
} else if validator.activation_epoch < far_future_epoch {
ValidatorStatus::StandbyForActive(validator.activation_epoch)
} else if validator.activation_eligibility_epoch < far_future_epoch {
if finalized_epoch < validator.activation_eligibility_epoch {
ValidatorStatus::WaitingForFinality
} else {
ValidatorStatus::WaitingInQueue
}
} else {
ValidatorStatus::WaitingForEligibility
}
} else {
ValidatorStatus::Unknown
}
}
}
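// A hedged sketch (hypothetical test) of the fallback case above: a validator that is
// not in the state always maps to `ValidatorStatus::Unknown`, regardless of epoch.
#[cfg(test)]
mod validator_status_tests {
    use super::*;

    #[test]
    fn unknown_validator() {
        let far_future_epoch = Epoch::new(u64::max_value());
        assert_eq!(
            ValidatorStatus::from_validator(None, Epoch::new(0), Epoch::new(0), far_future_epoch),
            ValidatorStatus::Unknown
        );
    }
}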
#[derive(Serialize, Deserialize)]
pub struct CommitteesQuery {
pub slot: Option<Slot>,
pub index: Option<u64>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct CommitteeData {
#[serde(with = "serde_utils::quoted_u64")]
pub index: u64,
pub slot: Slot,
#[serde(with = "serde_utils::quoted_u64_vec")]
pub validators: Vec<u64>,
}
#[derive(Serialize, Deserialize)]
pub struct HeadersQuery {
pub slot: Option<Slot>,
pub parent_root: Option<Hash256>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct BlockHeaderAndSignature {
pub message: BeaconBlockHeader,
pub signature: SignatureBytes,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct BlockHeaderData {
pub root: Hash256,
pub canonical: bool,
pub header: BlockHeaderAndSignature,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct DepositContractData {
#[serde(with = "serde_utils::quoted_u64")]
pub chain_id: u64,
pub address: Address,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ChainHeadData {
pub slot: Slot,
pub root: Hash256,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct IdentityData {
pub peer_id: String,
pub enr: Enr,
pub p2p_addresses: Vec<Multiaddr>,
// TODO: missing the following fields:
//
// - discovery_addresses
// - metadata
//
// Tracked here: https://github.com/sigp/lighthouse/issues/1434
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct VersionData {
pub version: String,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct SyncingData {
pub is_syncing: bool,
pub head_slot: Slot,
pub sync_distance: Slot,
}
#[derive(Clone, PartialEq, Debug, Deserialize)]
#[serde(try_from = "String", bound = "T: FromStr")]
pub struct QueryVec<T: FromStr>(pub Vec<T>);
impl<T: FromStr> TryFrom<String> for QueryVec<T> {
type Error = String;
fn try_from(string: String) -> Result<Self, Self::Error> {
if string == "" {
return Ok(Self(vec![]));
}
string
.split(',')
.map(|s| s.parse().map_err(|_| "unable to parse".to_string()))
.collect::<Result<Vec<T>, String>>()
.map(Self)
}
}
#[derive(Clone, Deserialize)]
pub struct ValidatorDutiesQuery {
pub index: Option<QueryVec<u64>>,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct AttesterData {
pub pubkey: PublicKeyBytes,
#[serde(with = "serde_utils::quoted_u64")]
pub validator_index: u64,
#[serde(with = "serde_utils::quoted_u64")]
pub committees_at_slot: u64,
#[serde(with = "serde_utils::quoted_u64")]
pub committee_index: CommitteeIndex,
#[serde(with = "serde_utils::quoted_u64")]
pub committee_length: u64,
#[serde(with = "serde_utils::quoted_u64")]
pub validator_committee_index: u64,
pub slot: Slot,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ProposerData {
pub pubkey: PublicKeyBytes,
pub slot: Slot,
}
#[derive(Clone, Serialize, Deserialize)]
pub struct ValidatorBlocksQuery {
pub randao_reveal: SignatureBytes,
pub graffiti: Option<Graffiti>,
}
#[derive(Clone, Serialize, Deserialize)]
pub struct ValidatorAttestationDataQuery {
pub slot: Slot,
pub committee_index: CommitteeIndex,
}
#[derive(Clone, Serialize, Deserialize)]
pub struct ValidatorAggregateAttestationQuery {
pub attestation_data_root: Hash256,
pub slot: Slot,
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct BeaconCommitteeSubscription {
#[serde(with = "serde_utils::quoted_u64")]
pub validator_index: u64,
#[serde(with = "serde_utils::quoted_u64")]
pub committee_index: u64,
#[serde(with = "serde_utils::quoted_u64")]
pub committees_at_slot: u64,
pub slot: Slot,
pub is_aggregator: bool,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn query_vec() {
assert_eq!(
QueryVec::try_from("0,1,2".to_string()).unwrap(),
QueryVec(vec![0_u64, 1, 2])
);
}
}

View File

@ -55,6 +55,7 @@
//! ```
use prometheus::{HistogramOpts, HistogramTimer, Opts};
use std::time::Duration;
pub use prometheus::{
Encoder, Gauge, GaugeVec, Histogram, HistogramVec, IntCounter, IntCounterVec, IntGauge,
@ -221,6 +222,19 @@ pub fn start_timer(histogram: &Result<Histogram>) -> Option<HistogramTimer> {
}
}
/// Observes `duration` on the histogram in `vec` with the given `name`.
pub fn observe_timer_vec(vec: &Result<HistogramVec>, name: &[&str], duration: Duration) {
// This conversion was taken from here:
//
// https://docs.rs/prometheus/0.5.0/src/prometheus/histogram.rs.html#550-555
let nanos = f64::from(duration.subsec_nanos()) / 1e9;
let secs = duration.as_secs() as f64 + nanos;
if let Some(h) = get_histogram(vec, name) {
h.observe(secs)
}
}
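// Hypothetical usage sketch (the `try_create_histogram_vec` helper, metric name and label
// are illustrative assumptions, not part of this diff):
//
// lazy_static! {
//     pub static ref API_TIMES: Result<HistogramVec> =
//         try_create_histogram_vec("http_api_times", "Duration of HTTP API calls", &["endpoint"]);
// }
// // ...after measuring `duration` manually (e.g. across an `await` point):
// observe_timer_vec(&API_TIMES, &["get_block"], duration);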
/// Stops a timer created with `start_timer(..)`.
pub fn stop_timer(timer: Option<HistogramTimer>) {
if let Some(t) = timer {

View File

@ -1,21 +0,0 @@
[package]
name = "remote_beacon_node"
version = "0.2.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
reqwest = { version = "0.10.4", features = ["json", "native-tls-vendored"] }
url = "2.1.1"
serde = "1.0.110"
futures = "0.3.5"
types = { path = "../../consensus/types" }
rest_types = { path = "../rest_types" }
hex = "0.4.2"
eth2_ssz = "0.1.2"
serde_json = "1.0.52"
eth2_config = { path = "../eth2_config" }
proto_array = { path = "../../consensus/proto_array" }
operation_pool = { path = "../../beacon_node/operation_pool" }

View File

@ -1,732 +0,0 @@
//! Provides a `RemoteBeaconNode` which interacts with a HTTP API on another Lighthouse (or
//! compatible) instance.
//!
//! Presently, this is only used for testing but it _could_ become a user-facing library.
use eth2_config::Eth2Config;
use reqwest::{Client, ClientBuilder, Response, StatusCode};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use ssz::Encode;
use std::marker::PhantomData;
use std::time::Duration;
use types::{
Attestation, AttestationData, AttesterSlashing, BeaconBlock, BeaconState, CommitteeIndex,
Epoch, EthSpec, Fork, Graffiti, Hash256, ProposerSlashing, PublicKey, PublicKeyBytes,
Signature, SignedAggregateAndProof, SignedBeaconBlock, Slot, SubnetId,
};
use url::Url;
pub use operation_pool::PersistedOperationPool;
pub use proto_array::core::ProtoArray;
pub use rest_types::{
CanonicalHeadResponse, Committee, HeadBeaconBlock, Health, IndividualVotesRequest,
IndividualVotesResponse, SyncingResponse, ValidatorDutiesRequest, ValidatorDutyBytes,
ValidatorRequest, ValidatorResponse, ValidatorSubscription,
};
// Setting a long timeout for debug ensures that crypto-heavy operations can still succeed.
#[cfg(debug_assertions)]
pub const REQUEST_TIMEOUT_SECONDS: u64 = 15;
#[cfg(not(debug_assertions))]
pub const REQUEST_TIMEOUT_SECONDS: u64 = 5;
#[derive(Clone)]
/// Connects to a remote Lighthouse (or compatible) node via HTTP.
pub struct RemoteBeaconNode<E: EthSpec> {
pub http: HttpClient<E>,
}
impl<E: EthSpec> RemoteBeaconNode<E> {
/// Uses the default HTTP timeout.
pub fn new(http_endpoint: String) -> Result<Self, String> {
Self::new_with_timeout(http_endpoint, Duration::from_secs(REQUEST_TIMEOUT_SECONDS))
}
pub fn new_with_timeout(http_endpoint: String, timeout: Duration) -> Result<Self, String> {
Ok(Self {
http: HttpClient::new(http_endpoint, timeout)
.map_err(|e| format!("Unable to create http client: {:?}", e))?,
})
}
}
#[derive(Debug)]
pub enum Error {
/// Unable to parse a URL. Check the server URL.
UrlParseError(url::ParseError),
/// The `reqwest` library returned an error.
ReqwestError(reqwest::Error),
/// There was an error when encoding/decoding an object using serde.
SerdeJsonError(serde_json::Error),
/// The server responded to the request, however it did not return a 200-type success code.
DidNotSucceed { status: StatusCode, body: String },
/// The request input was invalid.
InvalidInput,
}
#[derive(Clone)]
pub struct HttpClient<E> {
client: Client,
url: Url,
timeout: Duration,
_phantom: PhantomData<E>,
}
impl<E: EthSpec> HttpClient<E> {
/// Creates a new instance (without connecting to the node).
pub fn new(server_url: String, timeout: Duration) -> Result<Self, Error> {
Ok(Self {
client: ClientBuilder::new()
.timeout(timeout)
.build()
.expect("should build from static configuration"),
url: Url::parse(&server_url)?,
timeout,
_phantom: PhantomData,
})
}
pub fn beacon(&self) -> Beacon<E> {
Beacon(self.clone())
}
pub fn validator(&self) -> Validator<E> {
Validator(self.clone())
}
pub fn spec(&self) -> Spec<E> {
Spec(self.clone())
}
pub fn node(&self) -> Node<E> {
Node(self.clone())
}
pub fn advanced(&self) -> Advanced<E> {
Advanced(self.clone())
}
pub fn consensus(&self) -> Consensus<E> {
Consensus(self.clone())
}
fn url(&self, path: &str) -> Result<Url, Error> {
self.url.join(path).map_err(|e| e.into())
}
pub async fn json_post<T: Serialize>(&self, url: Url, body: T) -> Result<Response, Error> {
self.client
.post(&url.to_string())
.json(&body)
.send()
.await
.map_err(Error::from)
}
pub async fn json_get<T: DeserializeOwned>(
&self,
mut url: Url,
query_pairs: Vec<(String, String)>,
) -> Result<T, Error> {
query_pairs.into_iter().for_each(|(key, param)| {
url.query_pairs_mut().append_pair(&key, &param);
});
let response = self
.client
.get(&url.to_string())
.send()
.await
.map_err(Error::from)?;
let success = error_for_status(response).await.map_err(Error::from)?;
success.json::<T>().await.map_err(Error::from)
}
}
/// Returns an `Error` (with a description) if the `response` was not a 200-type success response.
///
/// Distinct from `Response::error_for_status` because it includes the body of the response as
/// text. This ensures the error message from the server is not discarded.
async fn error_for_status(response: Response) -> Result<Response, Error> {
let status = response.status();
if status.is_success() {
Ok(response)
} else {
let text_result = response.text().await;
match text_result {
Err(e) => Err(Error::ReqwestError(e)),
Ok(body) => Err(Error::DidNotSucceed { status, body }),
}
}
}
#[derive(Debug, PartialEq, Clone)]
pub enum PublishStatus {
/// The object was valid and has been published to the network.
Valid,
/// The object was not valid and may or may not have been published to the network.
Invalid(String),
/// The server responded with an unknown status code. The object may or may not have been
/// published to the network.
Unknown,
}
impl PublishStatus {
/// Returns `true` if `*self == PublishStatus::Valid`.
pub fn is_valid(&self) -> bool {
*self == PublishStatus::Valid
}
}
/// Provides the functions on the `/validator` endpoint of the node.
#[derive(Clone)]
pub struct Validator<E>(HttpClient<E>);
impl<E: EthSpec> Validator<E> {
fn url(&self, path: &str) -> Result<Url, Error> {
self.0
.url("validator/")
.and_then(move |url| url.join(path).map_err(Error::from))
.map_err(Into::into)
}
/// Produces an unsigned attestation.
pub async fn produce_attestation(
&self,
slot: Slot,
committee_index: CommitteeIndex,
) -> Result<Attestation<E>, Error> {
let query_params = vec![
("slot".into(), format!("{}", slot)),
("committee_index".into(), format!("{}", committee_index)),
];
let client = self.0.clone();
let url = self.url("attestation")?;
client.json_get(url, query_params).await
}
/// Produces an aggregate attestation.
pub async fn produce_aggregate_attestation(
&self,
attestation_data: &AttestationData,
) -> Result<Attestation<E>, Error> {
let query_params = vec![(
"attestation_data".into(),
as_ssz_hex_string(attestation_data),
)];
let client = self.0.clone();
let url = self.url("aggregate_attestation")?;
client.json_get(url, query_params).await
}
/// Posts a list of attestations to the beacon node, expecting it to verify them and publish them to the network.
pub async fn publish_attestations(
&self,
attestation: Vec<(Attestation<E>, SubnetId)>,
) -> Result<PublishStatus, Error> {
let client = self.0.clone();
let url = self.url("attestations")?;
let response = client.json_post::<_>(url, attestation).await?;
match response.status() {
StatusCode::OK => Ok(PublishStatus::Valid),
StatusCode::ACCEPTED => Ok(PublishStatus::Invalid(
response.text().await.map_err(Error::from)?,
)),
_ => response
.error_for_status()
.map_err(Error::from)
.map(|_| PublishStatus::Unknown),
}
}
/// Posts a list of signed aggregates and proofs to the beacon node, expecting it to verify them and publish them to the network.
pub async fn publish_aggregate_and_proof(
&self,
signed_aggregate_and_proofs: Vec<SignedAggregateAndProof<E>>,
) -> Result<PublishStatus, Error> {
let client = self.0.clone();
let url = self.url("aggregate_and_proofs")?;
let response = client
.json_post::<_>(url, signed_aggregate_and_proofs)
.await?;
match response.status() {
StatusCode::OK => Ok(PublishStatus::Valid),
StatusCode::ACCEPTED => Ok(PublishStatus::Invalid(
response.text().await.map_err(Error::from)?,
)),
_ => response
.error_for_status()
.map_err(Error::from)
.map(|_| PublishStatus::Unknown),
}
}
/// Returns the duties required of the given validator pubkeys in the given epoch.
pub async fn get_duties(
&self,
epoch: Epoch,
validator_pubkeys: &[PublicKey],
) -> Result<Vec<ValidatorDutyBytes>, Error> {
let client = self.0.clone();
let bulk_request = ValidatorDutiesRequest {
epoch,
pubkeys: validator_pubkeys
.iter()
.map(|pubkey| pubkey.clone().into())
.collect(),
};
let url = self.url("duties")?;
let response = client.json_post::<_>(url, bulk_request).await?;
let success = error_for_status(response).await.map_err(Error::from)?;
success.json().await.map_err(Error::from)
}
/// Posts a block to the beacon node, expecting it to verify it and publish it to the network.
pub async fn publish_block(&self, block: SignedBeaconBlock<E>) -> Result<PublishStatus, Error> {
let client = self.0.clone();
let url = self.url("block")?;
let response = client.json_post::<_>(url, block).await?;
match response.status() {
StatusCode::OK => Ok(PublishStatus::Valid),
StatusCode::ACCEPTED => Ok(PublishStatus::Invalid(
response.text().await.map_err(Error::from)?,
)),
_ => response
.error_for_status()
.map_err(Error::from)
.map(|_| PublishStatus::Unknown),
}
}
/// Requests a new (unsigned) block from the beacon node.
pub async fn produce_block(
&self,
slot: Slot,
randao_reveal: Signature,
graffiti: Option<Graffiti>,
) -> Result<BeaconBlock<E>, Error> {
let client = self.0.clone();
let url = self.url("block")?;
let mut query_pairs = vec![
("slot".into(), format!("{}", slot.as_u64())),
("randao_reveal".into(), as_ssz_hex_string(&randao_reveal)),
];
if let Some(graffiti_bytes) = graffiti {
query_pairs.push(("graffiti".into(), as_ssz_hex_string(&graffiti_bytes)));
}
client.json_get::<BeaconBlock<E>>(url, query_pairs).await
}
/// Subscribes a list of validators to particular slots for attestation production/publication.
pub async fn subscribe(
&self,
subscriptions: Vec<ValidatorSubscription>,
) -> Result<PublishStatus, Error> {
let client = self.0.clone();
let url = self.url("subscribe")?;
let response = client.json_post::<_>(url, subscriptions).await?;
match response.status() {
StatusCode::OK => Ok(PublishStatus::Valid),
StatusCode::ACCEPTED => Ok(PublishStatus::Invalid(
response.text().await.map_err(Error::from)?,
)),
_ => response
.error_for_status()
.map_err(Error::from)
.map(|_| PublishStatus::Unknown),
}
}
}
/// Provides the functions on the `/beacon` endpoint of the node.
#[derive(Clone)]
pub struct Beacon<E>(HttpClient<E>);
impl<E: EthSpec> Beacon<E> {
fn url(&self, path: &str) -> Result<Url, Error> {
self.0
.url("beacon/")
.and_then(move |url| url.join(path).map_err(Error::from))
.map_err(Into::into)
}
/// Returns the genesis time.
pub async fn get_genesis_time(&self) -> Result<u64, Error> {
let client = self.0.clone();
let url = self.url("genesis_time")?;
client.json_get(url, vec![]).await
}
/// Returns the genesis validators root.
pub async fn get_genesis_validators_root(&self) -> Result<Hash256, Error> {
let client = self.0.clone();
let url = self.url("genesis_validators_root")?;
client.json_get(url, vec![]).await
}
/// Returns the fork at the head of the beacon chain.
pub async fn get_fork(&self) -> Result<Fork, Error> {
let client = self.0.clone();
let url = self.url("fork")?;
client.json_get(url, vec![]).await
}
/// Returns info about the head of the canonical beacon chain.
pub async fn get_head(&self) -> Result<CanonicalHeadResponse, Error> {
let client = self.0.clone();
let url = self.url("head")?;
client.json_get::<CanonicalHeadResponse>(url, vec![]).await
}
/// Returns the set of known beacon chain head blocks. One of these will be the canonical head.
pub async fn get_heads(&self) -> Result<Vec<HeadBeaconBlock>, Error> {
let client = self.0.clone();
let url = self.url("heads")?;
client.json_get(url, vec![]).await
}
/// Returns the block and block root at the given slot.
pub async fn get_block_by_slot(
&self,
slot: Slot,
) -> Result<(SignedBeaconBlock<E>, Hash256), Error> {
self.get_block("slot".to_string(), format!("{}", slot.as_u64()))
.await
}
/// Returns the block and block root at the given root.
pub async fn get_block_by_root(
&self,
root: Hash256,
) -> Result<(SignedBeaconBlock<E>, Hash256), Error> {
self.get_block("root".to_string(), root_as_string(root))
.await
}
/// Returns the block and block root at the given slot.
async fn get_block(
&self,
query_key: String,
query_param: String,
) -> Result<(SignedBeaconBlock<E>, Hash256), Error> {
let client = self.0.clone();
let url = self.url("block")?;
client
.json_get::<BlockResponse<E>>(url, vec![(query_key, query_param)])
.await
.map(|response| (response.beacon_block, response.root))
}
/// Returns the state and state root at the given slot.
pub async fn get_state_by_slot(&self, slot: Slot) -> Result<(BeaconState<E>, Hash256), Error> {
self.get_state("slot".to_string(), format!("{}", slot.as_u64()))
.await
}
/// Returns the state and state root at the given root.
pub async fn get_state_by_root(
&self,
root: Hash256,
) -> Result<(BeaconState<E>, Hash256), Error> {
self.get_state("root".to_string(), root_as_string(root))
.await
}
/// Returns the root of the state at the given slot.
pub async fn get_state_root(&self, slot: Slot) -> Result<Hash256, Error> {
let client = self.0.clone();
let url = self.url("state_root")?;
client
.json_get(url, vec![("slot".into(), format!("{}", slot.as_u64()))])
.await
}
/// Returns the root of the block at the given slot.
pub async fn get_block_root(&self, slot: Slot) -> Result<Hash256, Error> {
let client = self.0.clone();
let url = self.url("block_root")?;
client
.json_get(url, vec![("slot".into(), format!("{}", slot.as_u64()))])
.await
}
/// Returns the state and state root at the given slot.
async fn get_state(
&self,
query_key: String,
query_param: String,
) -> Result<(BeaconState<E>, Hash256), Error> {
let client = self.0.clone();
let url = self.url("state")?;
client
.json_get::<StateResponse<E>>(url, vec![(query_key, query_param)])
.await
.map(|response| (response.beacon_state, response.root))
}
/// Returns information about the given validator public keys.
///
/// If `state_root` is `Some`, the query will use the given state instead of the default
/// canonical head state.
pub async fn get_validators(
&self,
validator_pubkeys: Vec<PublicKey>,
state_root: Option<Hash256>,
) -> Result<Vec<ValidatorResponse>, Error> {
let client = self.0.clone();
let bulk_request = ValidatorRequest {
state_root,
pubkeys: validator_pubkeys
.iter()
.map(|pubkey| pubkey.clone().into())
.collect(),
};
let url = self.url("validators")?;
let response = client.json_post::<_>(url, bulk_request).await?;
let success = error_for_status(response).await.map_err(Error::from)?;
success.json().await.map_err(Error::from)
}
/// Returns all validators.
///
/// If `state_root` is `Some`, the query will use the given state instead of the default
/// canonical head state.
pub async fn get_all_validators(
&self,
state_root: Option<Hash256>,
) -> Result<Vec<ValidatorResponse>, Error> {
let client = self.0.clone();
let query_params = if let Some(state_root) = state_root {
vec![("state_root".into(), root_as_string(state_root))]
} else {
vec![]
};
let url = self.url("validators/all")?;
client.json_get(url, query_params).await
}
/// Returns the active validators.
///
/// If `state_root` is `Some`, the query will use the given state instead of the default
/// canonical head state.
pub async fn get_active_validators(
&self,
state_root: Option<Hash256>,
) -> Result<Vec<ValidatorResponse>, Error> {
let client = self.0.clone();
let query_params = if let Some(state_root) = state_root {
vec![("state_root".into(), root_as_string(state_root))]
} else {
vec![]
};
let url = self.url("validators/active")?;
client.json_get(url, query_params).await
}
/// Returns committees at the given epoch.
pub async fn get_committees(&self, epoch: Epoch) -> Result<Vec<Committee>, Error> {
let client = self.0.clone();
let url = self.url("committees")?;
client
.json_get(url, vec![("epoch".into(), format!("{}", epoch.as_u64()))])
.await
}
pub async fn proposer_slashing(
&self,
proposer_slashing: ProposerSlashing,
) -> Result<bool, Error> {
let client = self.0.clone();
let url = self.url("proposer_slashing")?;
let response = client.json_post::<_>(url, proposer_slashing).await?;
let success = error_for_status(response).await.map_err(Error::from)?;
success.json().await.map_err(Error::from)
}
pub async fn attester_slashing(
&self,
attester_slashing: AttesterSlashing<E>,
) -> Result<bool, Error> {
let client = self.0.clone();
let url = self.url("attester_slashing")?;
let response = client.json_post::<_>(url, attester_slashing).await?;
let success = error_for_status(response).await.map_err(Error::from)?;
success.json().await.map_err(Error::from)
}
}
/// Provides the functions on the `/spec` endpoint of the node.
#[derive(Clone)]
pub struct Spec<E>(HttpClient<E>);
impl<E: EthSpec> Spec<E> {
fn url(&self, path: &str) -> Result<Url, Error> {
self.0
.url("spec/")
.and_then(move |url| url.join(path).map_err(Error::from))
.map_err(Into::into)
}
pub async fn get_eth2_config(&self) -> Result<Eth2Config, Error> {
let client = self.0.clone();
let url = self.url("eth2_config")?;
client.json_get(url, vec![]).await
}
}
/// Provides the functions on the `/node` endpoint of the node.
#[derive(Clone)]
pub struct Node<E>(HttpClient<E>);
impl<E: EthSpec> Node<E> {
fn url(&self, path: &str) -> Result<Url, Error> {
self.0
.url("node/")
.and_then(move |url| url.join(path).map_err(Error::from))
.map_err(Into::into)
}
pub async fn get_version(&self) -> Result<String, Error> {
let client = self.0.clone();
let url = self.url("version")?;
client.json_get(url, vec![]).await
}
pub async fn get_health(&self) -> Result<Health, Error> {
let client = self.0.clone();
let url = self.url("health")?;
client.json_get(url, vec![]).await
}
pub async fn syncing_status(&self) -> Result<SyncingResponse, Error> {
let client = self.0.clone();
let url = self.url("syncing")?;
client.json_get(url, vec![]).await
}
}
/// Provides the functions on the `/advanced` endpoint of the node.
#[derive(Clone)]
pub struct Advanced<E>(HttpClient<E>);
impl<E: EthSpec> Advanced<E> {
fn url(&self, path: &str) -> Result<Url, Error> {
self.0
.url("advanced/")
.and_then(move |url| url.join(path).map_err(Error::from))
.map_err(Into::into)
}
/// Gets the core `ProtoArray` struct from the node.
pub async fn get_fork_choice(&self) -> Result<ProtoArray, Error> {
let client = self.0.clone();
let url = self.url("fork_choice")?;
client.json_get(url, vec![]).await
}
/// Gets the core `PersistedOperationPool` struct from the node.
pub async fn get_operation_pool(&self) -> Result<PersistedOperationPool<E>, Error> {
let client = self.0.clone();
let url = self.url("operation_pool")?;
client.json_get(url, vec![]).await
}
}
/// Provides the functions on the `/consensus` endpoint of the node.
#[derive(Clone)]
pub struct Consensus<E>(HttpClient<E>);
impl<E: EthSpec> Consensus<E> {
fn url(&self, path: &str) -> Result<Url, Error> {
self.0
.url("consensus/")
.and_then(move |url| url.join(path).map_err(Error::from))
.map_err(Into::into)
}
/// Gets an `IndividualVote` for each of the given `pubkeys`.
pub async fn get_individual_votes(
&self,
epoch: Epoch,
pubkeys: Vec<PublicKeyBytes>,
) -> Result<IndividualVotesResponse, Error> {
let client = self.0.clone();
let req_body = IndividualVotesRequest { epoch, pubkeys };
let url = self.url("individual_votes")?;
let response = client.json_post::<_>(url, req_body).await?;
let success = error_for_status(response).await.map_err(Error::from)?;
success.json().await.map_err(Error::from)
}
/// Gets a `VoteCount` for the given `epoch`.
pub async fn get_vote_count(&self, epoch: Epoch) -> Result<IndividualVotesResponse, Error> {
let client = self.0.clone();
let query_params = vec![("epoch".into(), format!("{}", epoch.as_u64()))];
let url = self.url("vote_count")?;
client.json_get(url, query_params).await
}
}
#[derive(Deserialize)]
#[serde(bound = "T: EthSpec")]
pub struct BlockResponse<T: EthSpec> {
pub beacon_block: SignedBeaconBlock<T>,
pub root: Hash256,
}
#[derive(Deserialize)]
#[serde(bound = "T: EthSpec")]
pub struct StateResponse<T: EthSpec> {
pub beacon_state: BeaconState<T>,
pub root: Hash256,
}
fn root_as_string(root: Hash256) -> String {
format!("0x{:?}", root)
}
fn as_ssz_hex_string<T: Encode>(item: &T) -> String {
format!("0x{}", hex::encode(item.as_ssz_bytes()))
}
impl From<reqwest::Error> for Error {
fn from(e: reqwest::Error) -> Error {
Error::ReqwestError(e)
}
}
impl From<url::ParseError> for Error {
fn from(e: url::ParseError) -> Error {
Error::UrlParseError(e)
}
}
impl From<serde_json::Error> for Error {
fn from(e: serde_json::Error) -> Error {
Error::SerdeJsonError(e)
}
}

View File

@ -1,27 +0,0 @@
[package]
name = "rest_types"
version = "0.2.0"
authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2018"
[dependencies]
types = { path = "../../consensus/types" }
eth2_ssz_derive = "0.1.0"
eth2_ssz = "0.1.2"
eth2_hashing = "0.1.0"
tree_hash = "0.1.0"
state_processing = { path = "../../consensus/state_processing" }
bls = { path = "../../crypto/bls" }
serde = { version = "1.0.110", features = ["derive"] }
rayon = "1.3.0"
hyper = "0.13.5"
tokio = { version = "0.2.21", features = ["sync"] }
environment = { path = "../../lighthouse/environment" }
store = { path = "../../beacon_node/store" }
beacon_chain = { path = "../../beacon_node/beacon_chain" }
serde_json = "1.0.52"
serde_yaml = "0.8.11"
[target.'cfg(target_os = "linux")'.dependencies]
psutil = "3.1.0"
procinfo = "0.4.2"

View File

@ -1,99 +0,0 @@
use hyper::{Body, Response, StatusCode};
use std::error::Error as StdError;
#[derive(PartialEq, Debug, Clone)]
pub enum ApiError {
MethodNotAllowed(String),
ServerError(String),
NotImplemented(String),
BadRequest(String),
NotFound(String),
UnsupportedType(String),
ImATeapot(String), // Just in case.
ProcessingError(String), // A 202 response, for when a block/attestation cannot be processed but has still been transmitted to the network.
InvalidHeaderValue(String),
}
pub type ApiResult = Result<Response<Body>, ApiError>;
impl ApiError {
pub fn status_code(self) -> (StatusCode, String) {
match self {
ApiError::MethodNotAllowed(desc) => (StatusCode::METHOD_NOT_ALLOWED, desc),
ApiError::ServerError(desc) => (StatusCode::INTERNAL_SERVER_ERROR, desc),
ApiError::NotImplemented(desc) => (StatusCode::NOT_IMPLEMENTED, desc),
ApiError::BadRequest(desc) => (StatusCode::BAD_REQUEST, desc),
ApiError::NotFound(desc) => (StatusCode::NOT_FOUND, desc),
ApiError::UnsupportedType(desc) => (StatusCode::UNSUPPORTED_MEDIA_TYPE, desc),
ApiError::ImATeapot(desc) => (StatusCode::IM_A_TEAPOT, desc),
ApiError::ProcessingError(desc) => (StatusCode::ACCEPTED, desc),
ApiError::InvalidHeaderValue(desc) => (StatusCode::INTERNAL_SERVER_ERROR, desc),
}
}
}
impl Into<Response<Body>> for ApiError {
fn into(self) -> Response<Body> {
let (status_code, desc) = self.status_code();
Response::builder()
.status(status_code)
.header("content-type", "text/plain; charset=utf-8")
.body(Body::from(desc))
.expect("Response should always be created.")
}
}
impl From<store::Error> for ApiError {
fn from(e: store::Error) -> ApiError {
ApiError::ServerError(format!("Database error: {:?}", e))
}
}
impl From<types::BeaconStateError> for ApiError {
fn from(e: types::BeaconStateError) -> ApiError {
ApiError::ServerError(format!("BeaconState error: {:?}", e))
}
}
impl From<beacon_chain::BeaconChainError> for ApiError {
fn from(e: beacon_chain::BeaconChainError) -> ApiError {
ApiError::ServerError(format!("BeaconChainError error: {:?}", e))
}
}
impl From<state_processing::per_slot_processing::Error> for ApiError {
fn from(e: state_processing::per_slot_processing::Error) -> ApiError {
ApiError::ServerError(format!("PerSlotProcessing error: {:?}", e))
}
}
impl From<hyper::error::Error> for ApiError {
fn from(e: hyper::error::Error) -> ApiError {
ApiError::ServerError(format!("Networking error: {:?}", e))
}
}
impl From<std::io::Error> for ApiError {
fn from(e: std::io::Error) -> ApiError {
ApiError::ServerError(format!("IO error: {:?}", e))
}
}
impl From<hyper::header::InvalidHeaderValue> for ApiError {
fn from(e: hyper::header::InvalidHeaderValue) -> ApiError {
ApiError::InvalidHeaderValue(format!("Invalid CORS header value: {:?}", e))
}
}
impl StdError for ApiError {
fn cause(&self) -> Option<&dyn StdError> {
None
}
}
impl std::fmt::Display for ApiError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
let status = self.clone().status_code();
write!(f, "{:?}: {:?}", status.0, status.1)
}
}

View File

@ -1,65 +0,0 @@
//! A collection of REST API types for interaction with the beacon node.
use bls::PublicKeyBytes;
use serde::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
use types::beacon_state::EthSpec;
use types::{BeaconState, CommitteeIndex, Hash256, SignedBeaconBlock, Slot, Validator};
/// Information about a block that is at the head of a chain. May or may not represent the
/// canonical head.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)]
pub struct HeadBeaconBlock {
pub beacon_block_root: Hash256,
pub beacon_block_slot: Slot,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)]
#[serde(bound = "T: EthSpec")]
pub struct BlockResponse<T: EthSpec> {
pub root: Hash256,
pub beacon_block: SignedBeaconBlock<T>,
}
/// Information about the block and state that are at head of the beacon chain.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)]
pub struct CanonicalHeadResponse {
pub slot: Slot,
pub block_root: Hash256,
pub state_root: Hash256,
pub finalized_slot: Slot,
pub finalized_block_root: Hash256,
pub justified_slot: Slot,
pub justified_block_root: Hash256,
pub previous_justified_slot: Slot,
pub previous_justified_block_root: Hash256,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)]
pub struct ValidatorResponse {
pub pubkey: PublicKeyBytes,
pub validator_index: Option<usize>,
pub balance: Option<u64>,
pub validator: Option<Validator>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)]
pub struct ValidatorRequest {
/// If set to `None`, uses the canonical head state.
pub state_root: Option<Hash256>,
pub pubkeys: Vec<PublicKeyBytes>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)]
pub struct Committee {
pub slot: Slot,
pub index: CommitteeIndex,
pub committee: Vec<usize>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)]
#[serde(bound = "T: EthSpec")]
pub struct StateResponse<T: EthSpec> {
pub root: Hash256,
pub beacon_state: BeaconState<T>,
}

View File

@ -1,66 +0,0 @@
use serde::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
use state_processing::per_epoch_processing::ValidatorStatus;
use types::{Epoch, PublicKeyBytes};
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)]
pub struct IndividualVotesRequest {
pub epoch: Epoch,
pub pubkeys: Vec<PublicKeyBytes>,
}
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)]
pub struct IndividualVote {
/// True if the validator has been slashed, ever.
pub is_slashed: bool,
/// True if the validator can withdraw in the current epoch.
pub is_withdrawable_in_current_epoch: bool,
/// True if the validator was active in the state's _current_ epoch.
pub is_active_in_current_epoch: bool,
/// True if the validator was active in the state's _previous_ epoch.
pub is_active_in_previous_epoch: bool,
/// The validator's effective balance in the _current_ epoch.
pub current_epoch_effective_balance_gwei: u64,
/// True if the validator had an attestation included in the _current_ epoch.
pub is_current_epoch_attester: bool,
/// True if the validator's beacon block root attestation for the first slot of the _current_
/// epoch matches the block root known to the state.
pub is_current_epoch_target_attester: bool,
/// True if the validator had an attestation included in the _previous_ epoch.
pub is_previous_epoch_attester: bool,
/// True if the validator's beacon block root attestation for the first slot of the _previous_
/// epoch matches the block root known to the state.
pub is_previous_epoch_target_attester: bool,
/// True if the validator's beacon block root attestation in the _previous_ epoch at the
/// attestation's slot (`attestation_data.slot`) matches the block root known to the state.
pub is_previous_epoch_head_attester: bool,
}
impl Into<IndividualVote> for ValidatorStatus {
fn into(self) -> IndividualVote {
IndividualVote {
is_slashed: self.is_slashed,
is_withdrawable_in_current_epoch: self.is_withdrawable_in_current_epoch,
is_active_in_current_epoch: self.is_active_in_current_epoch,
is_active_in_previous_epoch: self.is_active_in_previous_epoch,
current_epoch_effective_balance_gwei: self.current_epoch_effective_balance,
is_current_epoch_attester: self.is_current_epoch_attester,
is_current_epoch_target_attester: self.is_current_epoch_target_attester,
is_previous_epoch_attester: self.is_previous_epoch_attester,
is_previous_epoch_target_attester: self.is_previous_epoch_target_attester,
is_previous_epoch_head_attester: self.is_previous_epoch_head_attester,
}
}
}
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)]
pub struct IndividualVotesResponse {
/// The epoch which is considered the "current" epoch.
pub epoch: Epoch,
/// The validator's public key.
pub pubkey: PublicKeyBytes,
/// The index of the validator in state.validators.
pub validator_index: Option<usize>,
/// Voting statistics for the validator, if they voted in the given epoch.
pub vote: Option<IndividualVote>,
}

View File

@ -1,247 +0,0 @@
use crate::{ApiError, ApiResult};
use environment::TaskExecutor;
use hyper::header;
use hyper::{Body, Request, Response, StatusCode};
use serde::Deserialize;
use serde::Serialize;
use ssz::Encode;
/// Defines the encoding for the API.
#[derive(Clone, Serialize, Deserialize, Copy)]
pub enum ApiEncodingFormat {
JSON,
YAML,
SSZ,
}
impl ApiEncodingFormat {
pub fn get_content_type(&self) -> &str {
match self {
ApiEncodingFormat::JSON => "application/json",
ApiEncodingFormat::YAML => "application/yaml",
ApiEncodingFormat::SSZ => "application/ssz",
}
}
}
impl From<&str> for ApiEncodingFormat {
fn from(f: &str) -> ApiEncodingFormat {
match f {
"application/yaml" => ApiEncodingFormat::YAML,
"application/ssz" => ApiEncodingFormat::SSZ,
_ => ApiEncodingFormat::JSON,
}
}
}
/// Provides an HTTP request handler with Lighthouse-specific functionality.
pub struct Handler<T> {
executor: TaskExecutor,
req: Request<()>,
body: Body,
ctx: T,
encoding: ApiEncodingFormat,
allow_body: bool,
}
impl<T: Clone + Send + Sync + 'static> Handler<T> {
/// Start handling a new request.
pub fn new(req: Request<Body>, ctx: T, executor: TaskExecutor) -> Result<Self, ApiError> {
let (req_parts, body) = req.into_parts();
let req = Request::from_parts(req_parts, ());
let accept_header: String = req
.headers()
.get(header::ACCEPT)
.map_or(Ok(""), |h| h.to_str())
.map_err(|e| {
ApiError::BadRequest(format!(
"The Accept header contains invalid characters: {:?}",
e
))
})
.map(String::from)?;
Ok(Self {
executor,
req,
body,
ctx,
allow_body: false,
encoding: ApiEncodingFormat::from(accept_header.as_str()),
})
}
/// The default behaviour is to return an error if any body is supplied in the request. Calling
/// this function disables that error.
pub fn allow_body(mut self) -> Self {
self.allow_body = true;
self
}
/// Return a simple static value.
///
/// Does not use the blocking executor.
pub async fn static_value<V>(self, value: V) -> Result<HandledRequest<V>, ApiError> {
// Always check and disallow a body for a static value.
let _ = Self::get_body(self.body, false).await?;
Ok(HandledRequest {
value,
encoding: self.encoding,
})
}
/// Calls `func` in-line, on the core executor.
///
/// This should only be used for very fast tasks.
pub async fn in_core_task<F, V>(self, func: F) -> Result<HandledRequest<V>, ApiError>
where
V: Send + Sync + 'static,
F: Fn(Request<Vec<u8>>, T) -> Result<V, ApiError> + Send + Sync + 'static,
{
let body = Self::get_body(self.body, self.allow_body).await?;
let (req_parts, _) = self.req.into_parts();
let req = Request::from_parts(req_parts, body);
let value = func(req, self.ctx)?;
Ok(HandledRequest {
value,
encoding: self.encoding,
})
}
/// Spawns `func` on the blocking executor.
///
/// This method is suitable for handling long-running or intensive tasks.
pub async fn in_blocking_task<F, V>(self, func: F) -> Result<HandledRequest<V>, ApiError>
where
V: Send + Sync + 'static,
F: Fn(Request<Vec<u8>>, T) -> Result<V, ApiError> + Send + Sync + 'static,
{
let ctx = self.ctx;
let body = Self::get_body(self.body, self.allow_body).await?;
let (req_parts, _) = self.req.into_parts();
let req = Request::from_parts(req_parts, body);
let value = self
.executor
.clone()
.handle
.spawn_blocking(move || func(req, ctx))
.await
.map_err(|e| {
ApiError::ServerError(format!(
"Failed to get blocking join handle: {}",
e.to_string()
))
})??;
Ok(HandledRequest {
value,
encoding: self.encoding,
})
}
/// Call `func`, then return a response that is suitable for an SSE stream.
pub async fn sse_stream<F>(self, func: F) -> ApiResult
where
F: Fn(Request<()>, T) -> Result<Body, ApiError>,
{
let body = func(self.req, self.ctx)?;
Response::builder()
.status(200)
.header("Content-Type", "text/event-stream")
.header("Connection", "Keep-Alive")
.header("Cache-Control", "no-cache")
.header("Access-Control-Allow-Origin", "*")
.body(body)
.map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e)))
}
/// Downloads the bytes for `body`.
async fn get_body(body: Body, allow_body: bool) -> Result<Vec<u8>, ApiError> {
let bytes = hyper::body::to_bytes(body)
.await
.map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e)))?;
if !allow_body && !bytes[..].is_empty() {
Err(ApiError::BadRequest(
"The request body must be empty".to_string(),
))
} else {
Ok(bytes.into_iter().collect())
}
}
}
/// A request that has been "handled" and now has a result (`value`) that needs to be
/// serialized and returned.
pub struct HandledRequest<V> {
encoding: ApiEncodingFormat,
value: V,
}
impl HandledRequest<String> {
/// Simply encodes a string as UTF-8.
pub fn text_encoding(self) -> ApiResult {
Response::builder()
.status(StatusCode::OK)
.header("content-type", "text/plain; charset=utf-8")
.body(Body::from(self.value))
.map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e)))
}
}
impl<V: Serialize + Encode> HandledRequest<V> {
/// Suitable for all items which implement `serde` and `ssz`.
pub fn all_encodings(self) -> ApiResult {
match self.encoding {
ApiEncodingFormat::SSZ => Response::builder()
.status(StatusCode::OK)
.header("content-type", "application/ssz")
.body(Body::from(self.value.as_ssz_bytes()))
.map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))),
_ => self.serde_encodings(),
}
}
}
impl<V: Serialize> HandledRequest<V> {
/// Suitable for items which only implement `serde`.
pub fn serde_encodings(self) -> ApiResult {
let (body, content_type) = match self.encoding {
ApiEncodingFormat::JSON => (
Body::from(serde_json::to_string(&self.value).map_err(|e| {
ApiError::ServerError(format!(
"Unable to serialize response body as JSON: {:?}",
e
))
})?),
"application/json",
),
ApiEncodingFormat::SSZ => {
return Err(ApiError::UnsupportedType(
"Response cannot be encoded as SSZ.".into(),
));
}
ApiEncodingFormat::YAML => (
Body::from(serde_yaml::to_string(&self.value).map_err(|e| {
ApiError::ServerError(format!(
"Unable to serialize response body as YAML: {:?}",
e
))
})?),
"application/yaml",
),
};
Response::builder()
.status(StatusCode::OK)
.header("content-type", content_type)
.body(body)
.map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e)))
}
}

View File

@ -1,22 +0,0 @@
//! A collection of types used to pass data across the rest HTTP API.
//!
//! This is primarily used by the validator client and the beacon node rest API.
mod api_error;
mod beacon;
mod consensus;
mod handler;
mod node;
mod validator;
pub use api_error::{ApiError, ApiResult};
pub use beacon::{
BlockResponse, CanonicalHeadResponse, Committee, HeadBeaconBlock, StateResponse,
ValidatorRequest, ValidatorResponse,
};
pub use consensus::{IndividualVote, IndividualVotesRequest, IndividualVotesResponse};
pub use handler::{ApiEncodingFormat, Handler};
pub use node::{Health, SyncingResponse, SyncingStatus};
pub use validator::{
ValidatorDutiesRequest, ValidatorDuty, ValidatorDutyBytes, ValidatorSubscription,
};

View File

@ -1,103 +0,0 @@
//! Collection of types for the /node HTTP API.
use serde::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
use types::Slot;
#[cfg(target_os = "linux")]
use {procinfo::pid, psutil::process::Process};
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)]
/// The current syncing status of the node.
pub struct SyncingStatus {
/// The starting slot of sync.
///
/// For a finalized sync, this is the start slot of the current finalized syncing
/// chain.
///
/// For head sync this is the last finalized slot.
pub starting_slot: Slot,
/// The current slot.
pub current_slot: Slot,
/// The highest known slot for the current syncing chain.
///
/// For a finalized sync, the target finalized slot.
/// For head sync, this is the highest known slot of all head chains.
pub highest_slot: Slot,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)]
/// The response for the /node/syncing HTTP GET.
pub struct SyncingResponse {
/// Is the node syncing.
pub is_syncing: bool,
/// The current sync status.
pub sync_status: SyncingStatus,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
/// Reports on the health of the Lighthouse instance.
pub struct Health {
/// The pid of this process.
pub pid: u32,
/// The number of threads used by this pid.
pub pid_num_threads: i32,
/// The total resident memory used by this pid.
pub pid_mem_resident_set_size: u64,
/// The total virtual memory used by this pid.
pub pid_mem_virtual_memory_size: u64,
/// Total virtual memory on the system
pub sys_virt_mem_total: u64,
/// Total virtual memory available for new processes.
pub sys_virt_mem_available: u64,
/// Total virtual memory used on the system
pub sys_virt_mem_used: u64,
/// Total virtual memory not used on the system
pub sys_virt_mem_free: u64,
/// Percentage of virtual memory used on the system
pub sys_virt_mem_percent: f32,
/// System load average over 1 minute.
pub sys_loadavg_1: f64,
/// System load average over 5 minutes.
pub sys_loadavg_5: f64,
/// System load average over 15 minutes.
pub sys_loadavg_15: f64,
}
impl Health {
#[cfg(not(target_os = "linux"))]
pub fn observe() -> Result<Self, String> {
Err("Health is only available on Linux".into())
}
#[cfg(target_os = "linux")]
pub fn observe() -> Result<Self, String> {
let process =
Process::current().map_err(|e| format!("Unable to get current process: {:?}", e))?;
let process_mem = process
.memory_info()
.map_err(|e| format!("Unable to get process memory info: {:?}", e))?;
let stat = pid::stat_self().map_err(|e| format!("Unable to get stat: {:?}", e))?;
let vm = psutil::memory::virtual_memory()
.map_err(|e| format!("Unable to get virtual memory: {:?}", e))?;
let loadavg =
psutil::host::loadavg().map_err(|e| format!("Unable to get loadavg: {:?}", e))?;
Ok(Self {
pid: process.pid(),
pid_num_threads: stat.num_threads,
pid_mem_resident_set_size: process_mem.rss(),
pid_mem_virtual_memory_size: process_mem.vms(),
sys_virt_mem_total: vm.total(),
sys_virt_mem_available: vm.available(),
sys_virt_mem_used: vm.used(),
sys_virt_mem_free: vm.free(),
sys_virt_mem_percent: vm.percent(),
sys_loadavg_1: loadavg.one,
sys_loadavg_5: loadavg.five,
sys_loadavg_15: loadavg.fifteen,
})
}
}

View File

@ -1,103 +0,0 @@
use bls::{PublicKey, PublicKeyBytes};
use serde::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
use types::{CommitteeIndex, Epoch, Slot};
/// A validator duty with the validator public key represented as a `PublicKeyBytes`.
pub type ValidatorDutyBytes = ValidatorDutyBase<PublicKeyBytes>;
/// A validator duty with the pubkey represented as a `PublicKey`.
pub type ValidatorDuty = ValidatorDutyBase<PublicKey>;
// NOTE: if you add or remove fields, please adjust `eq_ignoring_proposal_slots`
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)]
pub struct ValidatorDutyBase<T> {
/// The validator's BLS public key, uniquely identifying them.
pub validator_pubkey: T,
/// The validator's index in `state.validators`
pub validator_index: Option<u64>,
/// The slot at which the validator must attest.
pub attestation_slot: Option<Slot>,
/// The index of the committee within `slot` of which the validator is a member.
pub attestation_committee_index: Option<CommitteeIndex>,
/// The position of the validator in the committee.
pub attestation_committee_position: Option<usize>,
/// The committee count at `attestation_slot`.
pub committee_count_at_slot: Option<u64>,
/// The slots in which a validator must propose a block (can be empty).
///
/// Should be set to `None` when duties are not yet known (before the current epoch).
pub block_proposal_slots: Option<Vec<Slot>>,
/// This provides the modulo: `max(1, len(committee) // TARGET_AGGREGATORS_PER_COMMITTEE)`
/// which allows the validator client to determine if this duty requires the validator to
/// aggregate attestations.
pub aggregator_modulo: Option<u64>,
}
impl<T> ValidatorDutyBase<T> {
/// Return `true` if these validator duties are equal, ignoring their `block_proposal_slots`.
pub fn eq_ignoring_proposal_slots(&self, other: &Self) -> bool
where
T: PartialEq,
{
self.validator_pubkey == other.validator_pubkey
&& self.validator_index == other.validator_index
&& self.attestation_slot == other.attestation_slot
&& self.attestation_committee_index == other.attestation_committee_index
&& self.attestation_committee_position == other.attestation_committee_position
&& self.committee_count_at_slot == other.committee_count_at_slot
&& self.aggregator_modulo == other.aggregator_modulo
}
}
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)]
pub struct ValidatorDutiesRequest {
pub epoch: Epoch,
pub pubkeys: Vec<PublicKeyBytes>,
}
/// A validator subscription, created when a validator subscribes to a slot to perform optional aggregation
/// duties.
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)]
pub struct ValidatorSubscription {
/// The validator's index.
pub validator_index: u64,
/// The index of the committee within `slot` of which the validator is a member. Used by the
/// beacon node to quickly evaluate the associated `SubnetId`.
pub attestation_committee_index: CommitteeIndex,
/// The slot in which to subscribe.
pub slot: Slot,
/// Committee count at slot to subscribe.
pub committee_count_at_slot: u64,
/// If true, the validator is an aggregator and the beacon node should aggregate attestations
/// for this slot.
pub is_aggregator: bool,
}
#[cfg(test)]
mod test {
use super::*;
use bls::SecretKey;
#[test]
fn eq_ignoring_proposal_slots() {
let validator_pubkey = SecretKey::deserialize(&[1; 32]).unwrap().public_key();
let duty1 = ValidatorDuty {
validator_pubkey,
validator_index: Some(10),
attestation_slot: Some(Slot::new(50)),
attestation_committee_index: Some(2),
attestation_committee_position: Some(6),
committee_count_at_slot: Some(4),
block_proposal_slots: None,
aggregator_modulo: Some(99),
};
let duty2 = ValidatorDuty {
block_proposal_slots: Some(vec![Slot::new(42), Slot::new(45)]),
..duty1.clone()
};
assert_ne!(duty1, duty2);
assert!(duty1.eq_ignoring_proposal_slots(&duty2));
assert!(duty2.eq_ignoring_proposal_slots(&duty1));
}
}

View File

@ -24,6 +24,16 @@ pub trait SlotClock: Send + Sync + Sized {
/// Returns the slot at this present time.
fn now(&self) -> Option<Slot>;
/// Returns the slot at this present time if genesis has happened. Otherwise, returns the
/// genesis slot. Returns `None` if there is an error reading the clock.
fn now_or_genesis(&self) -> Option<Slot> {
if self.is_prior_to_genesis()? {
Some(self.genesis_slot())
} else {
self.now()
}
}
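// Illustrative sketch, not part of this commit: a caller that wants "the genesis slot
// until genesis, the wall-clock slot afterwards", while still surfacing clock-read errors:
//
//     fn current_slot<T: SlotClock>(clock: &T) -> Result<Slot, &'static str> {
//         clock.now_or_genesis().ok_or("failed to read the system clock")
//     }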
/// Indicates if the current time is prior to genesis time.
///
/// Returns `None` if the system clock cannot be read.

View File

@ -0,0 +1,15 @@
[package]
name = "warp_utils"
version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
warp = "0.2.5"
eth2 = { path = "../eth2" }
types = { path = "../../consensus/types" }
beacon_chain = { path = "../../beacon_node/beacon_chain" }
state_processing = { path = "../../consensus/state_processing" }
safe_arith = { path = "../../consensus/safe_arith" }

View File

@ -0,0 +1,5 @@
//! This crate contains functions that are common across multiple `warp` HTTP servers in the
//! Lighthouse project. E.g., the `http_api` and `http_metrics` crates.
pub mod reject;
pub mod reply;

View File

@ -0,0 +1,168 @@
use eth2::types::ErrorMessage;
use std::convert::Infallible;
use warp::{http::StatusCode, reject::Reject};
#[derive(Debug)]
pub struct BeaconChainError(pub beacon_chain::BeaconChainError);
impl Reject for BeaconChainError {}
pub fn beacon_chain_error(e: beacon_chain::BeaconChainError) -> warp::reject::Rejection {
warp::reject::custom(BeaconChainError(e))
}
#[derive(Debug)]
pub struct BeaconStateError(pub types::BeaconStateError);
impl Reject for BeaconStateError {}
pub fn beacon_state_error(e: types::BeaconStateError) -> warp::reject::Rejection {
warp::reject::custom(BeaconStateError(e))
}
#[derive(Debug)]
pub struct ArithError(pub safe_arith::ArithError);
impl Reject for ArithError {}
pub fn arith_error(e: safe_arith::ArithError) -> warp::reject::Rejection {
warp::reject::custom(ArithError(e))
}
#[derive(Debug)]
pub struct SlotProcessingError(pub state_processing::SlotProcessingError);
impl Reject for SlotProcessingError {}
pub fn slot_processing_error(e: state_processing::SlotProcessingError) -> warp::reject::Rejection {
warp::reject::custom(SlotProcessingError(e))
}
#[derive(Debug)]
pub struct BlockProductionError(pub beacon_chain::BlockProductionError);
impl Reject for BlockProductionError {}
pub fn block_production_error(e: beacon_chain::BlockProductionError) -> warp::reject::Rejection {
warp::reject::custom(BlockProductionError(e))
}
#[derive(Debug)]
pub struct CustomNotFound(pub String);
impl Reject for CustomNotFound {}
pub fn custom_not_found(msg: String) -> warp::reject::Rejection {
warp::reject::custom(CustomNotFound(msg))
}
#[derive(Debug)]
pub struct CustomBadRequest(pub String);
impl Reject for CustomBadRequest {}
pub fn custom_bad_request(msg: String) -> warp::reject::Rejection {
warp::reject::custom(CustomBadRequest(msg))
}
#[derive(Debug)]
pub struct CustomServerError(pub String);
impl Reject for CustomServerError {}
pub fn custom_server_error(msg: String) -> warp::reject::Rejection {
warp::reject::custom(CustomServerError(msg))
}
#[derive(Debug)]
pub struct BroadcastWithoutImport(pub String);
impl Reject for BroadcastWithoutImport {}
pub fn broadcast_without_import(msg: String) -> warp::reject::Rejection {
warp::reject::custom(BroadcastWithoutImport(msg))
}
#[derive(Debug)]
pub struct ObjectInvalid(pub String);
impl Reject for ObjectInvalid {}
pub fn object_invalid(msg: String) -> warp::reject::Rejection {
warp::reject::custom(ObjectInvalid(msg))
}
#[derive(Debug)]
pub struct NotSynced(pub String);
impl Reject for NotSynced {}
pub fn not_synced(msg: String) -> warp::reject::Rejection {
warp::reject::custom(NotSynced(msg))
}
/// This function receives a `Rejection` and tries to map it to a meaningful HTTP status
/// code and JSON `ErrorMessage`, falling back to a generic `UNHANDLED_REJECTION` response
/// for rejections it does not recognize.
pub async fn handle_rejection(err: warp::Rejection) -> Result<impl warp::Reply, Infallible> {
let code;
let message;
if err.is_not_found() {
code = StatusCode::NOT_FOUND;
message = "NOT_FOUND".to_string();
} else if let Some(e) = err.find::<warp::filters::body::BodyDeserializeError>() {
message = format!("BAD_REQUEST: body deserialize error: {}", e);
code = StatusCode::BAD_REQUEST;
} else if let Some(e) = err.find::<warp::reject::InvalidQuery>() {
code = StatusCode::BAD_REQUEST;
message = format!("BAD_REQUEST: invalid query: {}", e);
} else if let Some(e) = err.find::<crate::reject::BeaconChainError>() {
code = StatusCode::INTERNAL_SERVER_ERROR;
message = format!("UNHANDLED_ERROR: {:?}", e.0);
} else if let Some(e) = err.find::<crate::reject::BeaconStateError>() {
code = StatusCode::INTERNAL_SERVER_ERROR;
message = format!("UNHANDLED_ERROR: {:?}", e.0);
} else if let Some(e) = err.find::<crate::reject::SlotProcessingError>() {
code = StatusCode::INTERNAL_SERVER_ERROR;
message = format!("UNHANDLED_ERROR: {:?}", e.0);
} else if let Some(e) = err.find::<crate::reject::BlockProductionError>() {
code = StatusCode::INTERNAL_SERVER_ERROR;
message = format!("UNHANDLED_ERROR: {:?}", e.0);
} else if let Some(e) = err.find::<crate::reject::CustomNotFound>() {
code = StatusCode::NOT_FOUND;
message = format!("NOT_FOUND: {}", e.0);
} else if let Some(e) = err.find::<crate::reject::CustomBadRequest>() {
code = StatusCode::BAD_REQUEST;
message = format!("BAD_REQUEST: {}", e.0);
} else if let Some(e) = err.find::<crate::reject::CustomServerError>() {
code = StatusCode::INTERNAL_SERVER_ERROR;
message = format!("INTERNAL_SERVER_ERROR: {}", e.0);
} else if let Some(e) = err.find::<crate::reject::BroadcastWithoutImport>() {
code = StatusCode::ACCEPTED;
message = format!(
"ACCEPTED: the object was broadcast to the network without being \
fully imported to the local database: {}",
e.0
);
} else if let Some(e) = err.find::<crate::reject::ObjectInvalid>() {
code = StatusCode::BAD_REQUEST;
message = format!("BAD_REQUEST: Invalid object: {}", e.0);
} else if let Some(e) = err.find::<crate::reject::NotSynced>() {
code = StatusCode::SERVICE_UNAVAILABLE;
message = format!("SERVICE_UNAVAILABLE: beacon node is syncing: {}", e.0);
} else if err.find::<warp::reject::MethodNotAllowed>().is_some() {
code = StatusCode::METHOD_NOT_ALLOWED;
message = "METHOD_NOT_ALLOWED".to_string();
} else {
code = StatusCode::INTERNAL_SERVER_ERROR;
message = "UNHANDLED_REJECTION".to_string();
}
let json = warp::reply::json(&ErrorMessage {
code: code.as_u16(),
message,
stacktraces: vec![],
});
Ok(warp::reply::with_status(json, code))
}
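// Illustrative sketch, not part of this commit: how a downstream server (e.g. the new
// `http_api` crate) might combine the rejection constructors above with
// `handle_rejection`. The route, port and error text are assumptions for this example.
#[allow(dead_code)]
async fn example_server() {
    use warp::Filter;
    let route = warp::path!("eth" / "v1" / "example")
        .and_then(|| async {
            // A handler bails out with a custom rejection; `handle_rejection` then maps
            // it to a JSON `ErrorMessage` with a 400 status.
            Err::<String, _>(custom_bad_request("example error".to_string()))
        })
        .recover(handle_rejection);
    warp::serve(route).run(([127, 0, 0, 1], 5052)).await;
}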

View File

@ -0,0 +1,15 @@
/// Add CORS headers to `reply` only if `allow_origin.is_some()`.
pub fn maybe_cors<T: warp::Reply + 'static>(
reply: T,
allow_origin: Option<&String>,
) -> Box<dyn warp::Reply> {
if let Some(allow_origin) = allow_origin {
Box::new(warp::reply::with_header(
reply,
"Access-Control-Allow-Origin",
allow_origin,
))
} else {
Box::new(reply)
}
}
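// Illustrative sketch, not part of this commit: wrapping a reply in a CORS header only
// when an origin has been configured. The reply body and origin value are assumptions.
#[allow(dead_code)]
fn example(allow_origin: Option<&String>) -> Box<dyn warp::Reply> {
    maybe_cors(warp::reply::html("OK"), allow_origin)
}
// With `Some(&"http://localhost:3000".to_string())` the response gains an
// `Access-Control-Allow-Origin` header; with `None` it is returned unchanged.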

View File

@ -4,7 +4,7 @@ use proto_array::{Block as ProtoBlock, ProtoArrayForkChoice};
use ssz_derive::{Decode, Encode};
use types::{
BeaconBlock, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, Hash256,
IndexedAttestation, RelativeEpoch, ShufflingId, Slot,
};
use crate::ForkChoiceStore;
@ -240,10 +240,18 @@ where
/// Instantiates `Self` from the genesis parameters.
pub fn from_genesis(
fc_store: T,
genesis_block_root: Hash256,
genesis_block: &BeaconBlock<E>,
genesis_state: &BeaconState<E>,
) -> Result<Self, Error<T::Error>> {
let finalized_block_slot = genesis_block.slot;
let finalized_block_state_root = genesis_block.state_root;
let current_epoch_shuffling_id =
ShufflingId::new(genesis_block_root, genesis_state, RelativeEpoch::Current)
.map_err(Error::BeaconStateError)?;
let next_epoch_shuffling_id =
ShufflingId::new(genesis_block_root, genesis_state, RelativeEpoch::Next)
.map_err(Error::BeaconStateError)?;
let proto_array = ProtoArrayForkChoice::new(
finalized_block_slot,
@ -251,6 +259,8 @@ where
fc_store.justified_checkpoint().epoch,
fc_store.finalized_checkpoint().epoch,
fc_store.finalized_checkpoint().root,
current_epoch_shuffling_id,
next_epoch_shuffling_id,
)?;
Ok(Self {
@ -534,6 +544,10 @@ where
root: block_root,
parent_root: Some(block.parent_root),
target_root,
current_epoch_shuffling_id: ShufflingId::new(block_root, state, RelativeEpoch::Current)
.map_err(Error::BeaconStateError)?,
next_epoch_shuffling_id: ShufflingId::new(block_root, state, RelativeEpoch::Next)
.map_err(Error::BeaconStateError)?,
state_root: block.state_root,
justified_epoch: state.current_justified_checkpoint.epoch,
finalized_epoch: state.finalized_checkpoint.epoch,

View File

@ -6,3 +6,4 @@ pub use crate::fork_choice::{
SAFE_SLOTS_TO_UPDATE_JUSTIFIED,
};
pub use fork_choice_store::ForkChoiceStore;
pub use proto_array::Block as ProtoBlock;

View File

@ -351,7 +351,7 @@ impl ForkChoiceTest {
let mut verified_attestation = self
.harness
.chain
.verify_unaggregated_attestation_for_gossip(attestation, Some(subnet_id))
.expect("precondition: should gossip verify attestation");
if let MutationDelay::Blocks(slots) = delay {

View File

@ -4,7 +4,7 @@ mod votes;
use crate::proto_array_fork_choice::{Block, ProtoArrayForkChoice};
use serde_derive::{Deserialize, Serialize};
use types::{Epoch, Hash256, ShufflingId, Slot};
pub use ffg_updates::*;
pub use no_votes::*;
@ -55,12 +55,15 @@ pub struct ForkChoiceTestDefinition {
impl ForkChoiceTestDefinition {
pub fn run(self) {
let junk_shuffling_id = ShufflingId::from_components(Epoch::new(0), Hash256::zero());
let mut fork_choice = ProtoArrayForkChoice::new(
self.finalized_block_slot,
Hash256::zero(),
self.justified_epoch,
self.finalized_epoch,
self.finalized_root,
junk_shuffling_id.clone(),
junk_shuffling_id,
)
.expect("should create fork choice struct");
@ -125,6 +128,14 @@ impl ForkChoiceTestDefinition {
parent_root: Some(parent_root),
state_root: Hash256::zero(),
target_root: Hash256::zero(),
current_epoch_shuffling_id: ShufflingId::from_components(
Epoch::new(0),
Hash256::zero(),
),
next_epoch_shuffling_id: ShufflingId::from_components(
Epoch::new(0),
Hash256::zero(),
),
justified_epoch,
finalized_epoch,
};

View File

@ -2,7 +2,7 @@ use crate::{error::Error, Block};
use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
use std::collections::HashMap;
use types::{Epoch, Hash256, ShufflingId, Slot};
#[derive(Clone, PartialEq, Debug, Encode, Decode, Serialize, Deserialize)]
pub struct ProtoNode {
@ -18,6 +18,8 @@ pub struct ProtoNode {
/// The `target_root` is not necessary for `ProtoArray` either, it also just exists for upstream
/// components (namely fork choice attestation verification).
pub target_root: Hash256,
pub current_epoch_shuffling_id: ShufflingId,
pub next_epoch_shuffling_id: ShufflingId,
pub root: Hash256,
pub parent: Option<usize>,
pub justified_epoch: Epoch,
@ -142,6 +144,8 @@ impl ProtoArray {
slot: block.slot,
root: block.root,
target_root: block.target_root,
current_epoch_shuffling_id: block.current_epoch_shuffling_id,
next_epoch_shuffling_id: block.next_epoch_shuffling_id,
state_root: block.state_root,
parent: block
.parent_root

View File

@ -4,7 +4,7 @@ use crate::ssz_container::SszContainer;
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use std::collections::HashMap;
use types::{Epoch, Hash256, ShufflingId, Slot};
pub const DEFAULT_PRUNE_THRESHOLD: usize = 256;
@ -25,6 +25,8 @@ pub struct Block {
pub parent_root: Option<Hash256>,
pub state_root: Hash256,
pub target_root: Hash256,
pub current_epoch_shuffling_id: ShufflingId,
pub next_epoch_shuffling_id: ShufflingId,
pub justified_epoch: Epoch,
pub finalized_epoch: Epoch,
}
@ -70,6 +72,8 @@ impl ProtoArrayForkChoice {
justified_epoch: Epoch,
finalized_epoch: Epoch,
finalized_root: Hash256,
current_epoch_shuffling_id: ShufflingId,
next_epoch_shuffling_id: ShufflingId,
) -> Result<Self, String> {
let mut proto_array = ProtoArray {
prune_threshold: DEFAULT_PRUNE_THRESHOLD,
@ -87,6 +91,8 @@ impl ProtoArrayForkChoice {
// We are using the finalized_root as the target_root, since it always lies on an
// epoch boundary.
target_root: finalized_root,
current_epoch_shuffling_id,
next_epoch_shuffling_id,
justified_epoch,
finalized_epoch,
};
@ -194,6 +200,8 @@ impl ProtoArrayForkChoice {
parent_root,
state_root: block.state_root,
target_root: block.target_root,
current_epoch_shuffling_id: block.current_epoch_shuffling_id.clone(),
next_epoch_shuffling_id: block.next_epoch_shuffling_id.clone(),
justified_epoch: block.justified_epoch,
finalized_epoch: block.finalized_epoch,
})
@ -341,6 +349,7 @@ mod test_compute_deltas {
let finalized_desc = Hash256::from_low_u64_be(2);
let not_finalized_desc = Hash256::from_low_u64_be(3);
let unknown = Hash256::from_low_u64_be(4);
let junk_shuffling_id = ShufflingId::from_components(Epoch::new(0), Hash256::zero());
let mut fc = ProtoArrayForkChoice::new(
genesis_slot,
@ -348,6 +357,8 @@ mod test_compute_deltas {
genesis_epoch,
genesis_epoch,
finalized_root,
junk_shuffling_id.clone(),
junk_shuffling_id.clone(),
)
.unwrap();
@ -359,6 +370,8 @@ mod test_compute_deltas {
parent_root: Some(finalized_root),
state_root,
target_root: finalized_root,
current_epoch_shuffling_id: junk_shuffling_id.clone(),
next_epoch_shuffling_id: junk_shuffling_id.clone(),
justified_epoch: genesis_epoch,
finalized_epoch: genesis_epoch,
})
@ -372,6 +385,8 @@ mod test_compute_deltas {
parent_root: None,
state_root,
target_root: finalized_root,
current_epoch_shuffling_id: junk_shuffling_id.clone(),
next_epoch_shuffling_id: junk_shuffling_id.clone(),
justified_epoch: genesis_epoch,
finalized_epoch: genesis_epoch,
})

View File

@ -1,9 +0,0 @@
[package]
name = "serde_hex"
version = "0.2.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
[dependencies]
serde = "1.0.110"
hex = "0.4.2"

View File

@ -7,6 +7,7 @@ edition = "2018"
[dependencies]
serde = { version = "1.0.110", features = ["derive"] }
serde_derive = "1.0.110"
hex = "0.4.2"
[dev-dependencies]
serde_json = "1.0.52"

View File

@ -0,0 +1,38 @@
//! Formats `[u8; 4]` as a 0x-prefixed hex string.
//!
//! E.g., `[0, 1, 2, 3]` serializes as `"0x00010203"`.
use crate::hex::PrefixedHexVisitor;
use serde::de::Error;
use serde::{Deserializer, Serializer};
const BYTES_LEN: usize = 4;
pub fn serialize<S>(bytes: &[u8; BYTES_LEN], serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut hex_string: String = "0x".to_string();
hex_string.push_str(&hex::encode(&bytes));
serializer.serialize_str(&hex_string)
}
pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; BYTES_LEN], D::Error>
where
D: Deserializer<'de>,
{
let decoded = deserializer.deserialize_str(PrefixedHexVisitor)?;
if decoded.len() != BYTES_LEN {
return Err(D::Error::custom(format!(
"expected {} bytes for array, got {}",
BYTES_LEN,
decoded.len()
)));
}
let mut array = [0; BYTES_LEN];
array.copy_from_slice(&decoded);
Ok(array)
}
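// Illustrative sketch, not part of this commit: using this module as a serde field
// attribute. The struct and field names are assumptions for this example.
#[derive(serde::Serialize, serde::Deserialize)]
struct Example {
    #[serde(with = "crate::bytes_4_hex")]
    domain_type: [u8; 4],
}
// `Example { domain_type: [0, 1, 2, 3] }` serializes to `{"domain_type":"0x00010203"}`,
// and that JSON deserializes back to the same array.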

View File

@ -1,6 +1,9 @@
//! Provides utilities for parsing 0x-prefixed hex strings.
use serde::de::{self, Visitor};
use std::fmt;
/// Encode `data` as a 0x-prefixed hex string.
pub fn encode<T: AsRef<[u8]>>(data: T) -> String {
let hex = hex::encode(data);
let mut s = "0x".to_string();
@ -8,6 +11,15 @@ pub fn encode<T: AsRef<[u8]>>(data: T) -> String {
s
}
/// Decode `data` from a 0x-prefixed hex string.
pub fn decode(s: &str) -> Result<Vec<u8>, String> {
if s.starts_with("0x") {
hex::decode(&s[2..]).map_err(|e| format!("invalid hex: {:?}", e))
} else {
Err("hex must have 0x prefix".to_string())
}
}
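// Illustrative sketch, not part of this commit: `encode`/`decode` round-trip behaviour,
// including rejection of inputs without the 0x prefix.
#[cfg(test)]
mod encode_decode_sketch {
    use super::*;
    #[test]
    fn round_trip() {
        assert_eq!(encode(&[1u8, 2]), "0x0102");
        assert_eq!(decode("0x0102"), Ok(vec![1, 2]));
        assert!(decode("0102").is_err());
    }
}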
pub struct PrefixedHexVisitor;
impl<'de> Visitor<'de> for PrefixedHexVisitor {

View File

@ -1,2 +1,9 @@
mod quoted_int;
pub mod bytes_4_hex;
pub mod hex;
pub mod quoted_u64_vec;
pub mod u32_hex;
pub mod u8_hex;
pub use quoted_int::{quoted_u32, quoted_u64, quoted_u8};

View File

@ -0,0 +1,144 @@
//! Formats some integer types using quotes.
//!
//! E.g., `1` serializes as `"1"`.
//!
//! Quotes can be optional during decoding.
use serde::{Deserializer, Serializer};
use serde_derive::{Deserialize, Serialize};
use std::convert::TryFrom;
use std::marker::PhantomData;
macro_rules! define_mod {
($int: ty, $visit_fn: ident) => {
/// Serde support for deserializing quoted integers.
///
/// Configurable so that quotes are either required or optional.
pub struct QuotedIntVisitor<T> {
require_quotes: bool,
_phantom: PhantomData<T>,
}
impl<'a, T> serde::de::Visitor<'a> for QuotedIntVisitor<T>
where
T: From<$int> + Into<$int> + Copy + TryFrom<u64>,
{
type Value = T;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
if self.require_quotes {
write!(formatter, "a quoted integer")
} else {
write!(formatter, "a quoted or unquoted integer")
}
}
fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
s.parse::<$int>()
.map(T::from)
.map_err(serde::de::Error::custom)
}
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
if self.require_quotes {
Err(serde::de::Error::custom(
"received unquoted integer when quotes are required",
))
} else {
T::try_from(v).map_err(|_| serde::de::Error::custom("invalid integer"))
}
}
}
/// Wrapper type for requiring quotes on a `$int`-like type.
///
/// Unlike using `serde(with = "quoted_$int::require_quotes")` this is composable, and can be nested
/// inside types like `Option`, `Result` and `Vec`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize)]
#[serde(transparent)]
pub struct Quoted<T>
where
T: From<$int> + Into<$int> + Copy + TryFrom<u64>,
{
#[serde(with = "require_quotes")]
pub value: T,
}
/// Serialize with quotes.
pub fn serialize<S, T>(value: &T, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
T: From<$int> + Into<$int> + Copy,
{
let v: $int = (*value).into();
serializer.serialize_str(&format!("{}", v))
}
/// Deserialize with or without quotes.
pub fn deserialize<'de, D, T>(deserializer: D) -> Result<T, D::Error>
where
D: Deserializer<'de>,
T: From<$int> + Into<$int> + Copy + TryFrom<u64>,
{
deserializer.deserialize_any(QuotedIntVisitor {
require_quotes: false,
_phantom: PhantomData,
})
}
/// Requires quotes when deserializing.
///
/// Usage: `#[serde(with = "quoted_u64::require_quotes")]`.
pub mod require_quotes {
pub use super::serialize;
use super::*;
pub fn deserialize<'de, D, T>(deserializer: D) -> Result<T, D::Error>
where
D: Deserializer<'de>,
T: From<$int> + Into<$int> + Copy + TryFrom<u64>,
{
deserializer.deserialize_any(QuotedIntVisitor {
require_quotes: true,
_phantom: PhantomData,
})
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn require_quotes() {
let x = serde_json::from_str::<Quoted<$int>>("\"8\"").unwrap();
assert_eq!(x.value, 8);
serde_json::from_str::<Quoted<$int>>("8").unwrap_err();
}
}
};
}
pub mod quoted_u8 {
use super::*;
define_mod!(u8, visit_u8);
}
pub mod quoted_u32 {
use super::*;
define_mod!(u32, visit_u32);
}
pub mod quoted_u64 {
use super::*;
define_mod!(u64, visit_u64);
}
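// Illustrative sketch, not part of this commit: the two ways these helpers are used.
// The struct and field names are assumptions for this example.
#[derive(serde_derive::Serialize, serde_derive::Deserialize)]
struct Example {
    // Field-attribute form: serializes as "42" and accepts quoted or unquoted input.
    #[serde(with = "quoted_u64")]
    slot: u64,
    // Wrapper form: composes with `Option` (which `with = "..."` cannot) and requires
    // quotes when deserializing.
    index: Option<quoted_u64::Quoted<u64>>,
}
// `{"slot":"42","index":"7"}` gives `slot == 42` and `index == Some(Quoted { value: 7 })`;
// `{"slot":42,"index":null}` is also accepted because quotes are optional for `slot`.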

View File

@ -1,115 +0,0 @@
use serde::{Deserializer, Serializer};
use serde_derive::{Deserialize, Serialize};
use std::marker::PhantomData;
/// Serde support for deserializing quoted integers.
///
/// Configurable so that quotes are either required or optional.
pub struct QuotedIntVisitor<T> {
require_quotes: bool,
_phantom: PhantomData<T>,
}
impl<'a, T> serde::de::Visitor<'a> for QuotedIntVisitor<T>
where
T: From<u64> + Into<u64> + Copy,
{
type Value = T;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
if self.require_quotes {
write!(formatter, "a quoted integer")
} else {
write!(formatter, "a quoted or unquoted integer")
}
}
fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
s.parse::<u64>()
.map(T::from)
.map_err(serde::de::Error::custom)
}
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
if self.require_quotes {
Err(serde::de::Error::custom(
"received unquoted integer when quotes are required",
))
} else {
Ok(T::from(v))
}
}
}
/// Wrapper type for requiring quotes on a `u64`-like type.
///
/// Unlike using `serde(with = "quoted_u64::require_quotes")` this is composable, and can be nested
/// inside types like `Option`, `Result` and `Vec`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize)]
#[serde(transparent)]
pub struct Quoted<T>
where
T: From<u64> + Into<u64> + Copy,
{
#[serde(with = "require_quotes")]
pub value: T,
}
/// Serialize with quotes.
pub fn serialize<S, T>(value: &T, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
T: From<u64> + Into<u64> + Copy,
{
let v: u64 = (*value).into();
serializer.serialize_str(&format!("{}", v))
}
/// Deserialize with or without quotes.
pub fn deserialize<'de, D, T>(deserializer: D) -> Result<T, D::Error>
where
D: Deserializer<'de>,
T: From<u64> + Into<u64> + Copy,
{
deserializer.deserialize_any(QuotedIntVisitor {
require_quotes: false,
_phantom: PhantomData,
})
}
/// Requires quotes when deserializing.
///
/// Usage: `#[serde(with = "quoted_u64::require_quotes")]`.
pub mod require_quotes {
pub use super::serialize;
use super::*;
pub fn deserialize<'de, D, T>(deserializer: D) -> Result<T, D::Error>
where
D: Deserializer<'de>,
T: From<u64> + Into<u64> + Copy,
{
deserializer.deserialize_any(QuotedIntVisitor {
require_quotes: true,
_phantom: PhantomData,
})
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn require_quotes() {
let x = serde_json::from_str::<Quoted<u64>>("\"8\"").unwrap();
assert_eq!(x.value, 8);
serde_json::from_str::<Quoted<u64>>("8").unwrap_err();
}
}

Some files were not shown because too many files have changed in this diff.