Merge remote-tracking branch 'origin/master' into spec-v0.12

This commit is contained in:
Michael Sproul 2020-06-09 18:34:44 +10:00
commit 7ce9a252a4
No known key found for this signature in database
GPG Key ID: 77B1309D2E54E914
82 changed files with 2913 additions and 1847 deletions

261
Cargo.lock generated
View File

@ -178,6 +178,12 @@ dependencies = [
"syn", "syn",
] ]
[[package]]
name = "assert_approx_eq"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c07dab4369547dbe5114677b33fbbf724971019f3818172d59a97a61c774ffd"
[[package]] [[package]]
name = "assert_matches" name = "assert_matches"
version = "1.3.0" version = "1.3.0"
@ -196,6 +202,12 @@ dependencies = [
"webpki-roots 0.19.0", "webpki-roots 0.19.0",
] ]
[[package]]
name = "atomic-option"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0db678acb667b525ac40a324fc5f7d3390e29239b31c7327bb8157f5b4fff593"
[[package]] [[package]]
name = "atty" name = "atty"
version = "0.2.14" version = "0.2.14"
@ -266,6 +278,7 @@ version = "0.1.2"
dependencies = [ dependencies = [
"bitvec", "bitvec",
"bls", "bls",
"bus",
"environment", "environment",
"eth1", "eth1",
"eth2_config", "eth2_config",
@ -279,7 +292,7 @@ dependencies = [
"lazy_static", "lazy_static",
"lighthouse_metrics", "lighthouse_metrics",
"log 0.4.8", "log 0.4.8",
"lru", "lru 0.5.1",
"merkle_proof", "merkle_proof",
"operation_pool", "operation_pool",
"parking_lot 0.10.2", "parking_lot 0.10.2",
@ -468,9 +481,21 @@ dependencies = [
[[package]] [[package]]
name = "bumpalo" name = "bumpalo"
version = "3.3.0" version = "3.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5356f1d23ee24a1f785a56d1d1a5f0fd5b0f6a0c0fb2412ce11da71649ab78f6" checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820"
[[package]]
name = "bus"
version = "2.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1e66e1779f5b1440f1a58220ba3b3ded4427175f0a9fb8d7066521f8b4e8f2b"
dependencies = [
"atomic-option",
"crossbeam-channel",
"num_cpus",
"parking_lot_core 0.7.2",
]
[[package]] [[package]]
name = "byte-slice-cast" name = "byte-slice-cast"
@ -598,9 +623,9 @@ dependencies = [
[[package]] [[package]]
name = "clear_on_drop" name = "clear_on_drop"
version = "0.2.3" version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97276801e127ffb46b66ce23f35cc96bd454fa311294bced4bbace7baa8b1d17" checksum = "c9cc5db465b294c3fa986d5bbb0f3017cd850bff6dd6c52f9ccff8b4d21b7b08"
dependencies = [ dependencies = [
"cc", "cc",
] ]
@ -610,6 +635,7 @@ name = "client"
version = "0.1.2" version = "0.1.2"
dependencies = [ dependencies = [
"beacon_chain", "beacon_chain",
"bus",
"dirs", "dirs",
"environment", "environment",
"error-chain", "error-chain",
@ -653,6 +679,15 @@ dependencies = [
"bitflags 1.2.1", "bitflags 1.2.1",
] ]
[[package]]
name = "cmake"
version = "0.1.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e56268c17a6248366d66d4a47a3381369d068cce8409bb1716ed77ea32163bb"
dependencies = [
"cc",
]
[[package]] [[package]]
name = "colored" name = "colored"
version = "1.9.3" version = "1.9.3"
@ -827,9 +862,9 @@ dependencies = [
[[package]] [[package]]
name = "crossbeam-queue" name = "crossbeam-queue"
version = "0.2.1" version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c695eeca1e7173472a32221542ae469b3e9aac3a4fc81f7696bcad82029493db" checksum = "ab6bffe714b6bb07e42f201352c34f51fefd355ace793f9e638ebd52d23f98d2"
dependencies = [ dependencies = [
"cfg-if", "cfg-if",
"crossbeam-utils", "crossbeam-utils",
@ -906,14 +941,14 @@ dependencies = [
[[package]] [[package]]
name = "curve25519-dalek" name = "curve25519-dalek"
version = "2.0.0" version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26778518a7f6cffa1d25a44b602b62b979bd88adb9e99ffec546998cf3404839" checksum = "5d85653f070353a16313d0046f173f70d1aadd5b42600a14de626f0dfb3473a5"
dependencies = [ dependencies = [
"byteorder", "byteorder",
"digest", "digest",
"rand_core 0.5.1", "rand_core 0.5.1",
"subtle 2.2.2", "subtle 2.2.3",
"zeroize", "zeroize",
] ]
@ -1161,7 +1196,10 @@ dependencies = [
"env_logger", "env_logger",
"eth2_config", "eth2_config",
"eth2_testnet_config", "eth2_testnet_config",
"exit-future",
"futures 0.3.5", "futures 0.3.5",
"lazy_static",
"lighthouse_metrics",
"logging", "logging",
"parking_lot 0.10.2", "parking_lot 0.10.2",
"slog", "slog",
@ -1231,10 +1269,12 @@ dependencies = [
"base64 0.12.1", "base64 0.12.1",
"dirs", "dirs",
"discv5", "discv5",
"environment",
"error-chain", "error-chain",
"eth2_ssz", "eth2_ssz",
"eth2_ssz_derive", "eth2_ssz_derive",
"eth2_ssz_types", "eth2_ssz_types",
"exit-future",
"fnv", "fnv",
"futures 0.3.5", "futures 0.3.5",
"hashset_delay", "hashset_delay",
@ -1243,7 +1283,7 @@ dependencies = [
"libp2p", "libp2p",
"libp2p-tcp", "libp2p-tcp",
"lighthouse_metrics", "lighthouse_metrics",
"lru", "lru 0.5.1",
"parking_lot 0.10.2", "parking_lot 0.10.2",
"serde", "serde",
"serde_derive", "serde_derive",
@ -1367,7 +1407,7 @@ dependencies = [
name = "eth2_testnet_config" name = "eth2_testnet_config"
version = "0.2.0" version = "0.2.0"
dependencies = [ dependencies = [
"eth2-libp2p", "enr",
"eth2_ssz", "eth2_ssz",
"reqwest", "reqwest",
"serde", "serde",
@ -2007,9 +2047,9 @@ dependencies = [
[[package]] [[package]]
name = "hyper" name = "hyper"
version = "0.13.5" version = "0.13.6"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96816e1d921eca64d208a85aab4f7798455a8e34229ee5a88c935bdee1b78b14" checksum = "a6e7655b9594024ad0ee439f3b5a7299369dc2a3f459b47c696f9ff676f9aa1f"
dependencies = [ dependencies = [
"bytes 0.5.4", "bytes 0.5.4",
"futures-channel", "futures-channel",
@ -2021,8 +2061,8 @@ dependencies = [
"httparse", "httparse",
"itoa", "itoa",
"log 0.4.8", "log 0.4.8",
"net2",
"pin-project", "pin-project",
"socket2",
"time 0.1.43", "time 0.1.43",
"tokio 0.2.21", "tokio 0.2.21",
"tower-service", "tower-service",
@ -2049,7 +2089,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3adcd308402b9553630734e9c36b77a7e48b3821251ca2493e8cd596763aafaa" checksum = "3adcd308402b9553630734e9c36b77a7e48b3821251ca2493e8cd596763aafaa"
dependencies = [ dependencies = [
"bytes 0.5.4", "bytes 0.5.4",
"hyper 0.13.5", "hyper 0.13.6",
"native-tls", "native-tls",
"tokio 0.2.21", "tokio 0.2.21",
"tokio-tls 0.3.1", "tokio-tls 0.3.1",
@ -2106,9 +2146,9 @@ dependencies = [
[[package]] [[package]]
name = "indexmap" name = "indexmap"
version = "1.3.2" version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "076f042c5b7b98f31d205f1249267e12a6518c1481e9dae9764af19b707d2292" checksum = "c398b2b113b55809ceb9ee3e753fcbac793f1956663f3c36549c1346015c2afe"
dependencies = [ dependencies = [
"autocfg 1.0.0", "autocfg 1.0.0",
] ]
@ -2169,18 +2209,18 @@ checksum = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e"
[[package]] [[package]]
name = "js-sys" name = "js-sys"
version = "0.3.39" version = "0.3.40"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa5a448de267e7358beaf4a5d849518fe9a0c13fce7afd44b06e68550e5562a7" checksum = "ce10c23ad2ea25ceca0093bd3192229da4c5b3c0f2de499c1ecac0d98d452177"
dependencies = [ dependencies = [
"wasm-bindgen", "wasm-bindgen",
] ]
[[package]] [[package]]
name = "jsonrpc-core" name = "jsonrpc-core"
version = "14.1.0" version = "14.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "25525f6002338fb4debb5167a89a0b47f727a5a48418417545ad3429758b7fec" checksum = "a0747307121ffb9703afd93afbd0fb4f854c38fb873f2c8b90e0e902f27c7b62"
dependencies = [ dependencies = [
"futures 0.1.29", "futures 0.1.29",
"log 0.4.8", "log 0.4.8",
@ -2259,9 +2299,9 @@ dependencies = [
[[package]] [[package]]
name = "leveldb" name = "leveldb"
version = "0.8.4" version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8438a36a31c982ac399c4477d7e3c62cc7a6bf91bb6f42837b7e1033359fcbad" checksum = "a5a2fa830c44ac4762564389a7efe22688a469c8d7b71dd11da2e35c33ae96c2"
dependencies = [ dependencies = [
"db-key", "db-key",
"leveldb-sys", "leveldb-sys",
@ -2270,24 +2310,26 @@ dependencies = [
[[package]] [[package]]
name = "leveldb-sys" name = "leveldb-sys"
version = "2.0.5" version = "2.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71f46429bb70612c3e939aaeed27ffd31a24a773d21728a1a426e4089d6778d2" checksum = "2f4abc211bb716f076618bca48aaae128f6feb326195608f40f41a9cdbedc502"
dependencies = [ dependencies = [
"cmake",
"libc", "libc",
"num_cpus",
] ]
[[package]] [[package]]
name = "libc" name = "libc"
version = "0.2.70" version = "0.2.71"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3baa92041a6fec78c687fa0cc2b3fae8884f743d672cf551bed1d6dac6988d0f" checksum = "9457b06509d27052635f90d6466700c65095fdf75409b3fbdd903e988b886f49"
[[package]] [[package]]
name = "libflate" name = "libflate"
version = "1.0.0" version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1fbe6b967a94346446d37ace319ae85be7eca261bb8149325811ac435d35d64" checksum = "784f4ec5908a9d7f4e53658906386667e8b02e9389a47cfebf45d324ba9e8d25"
dependencies = [ dependencies = [
"adler32", "adler32",
"crc32fast", "crc32fast",
@ -2405,7 +2447,7 @@ dependencies = [
"libp2p-core", "libp2p-core",
"libp2p-swarm", "libp2p-swarm",
"log 0.4.8", "log 0.4.8",
"lru", "lru 0.4.3",
"prost", "prost",
"prost-build", "prost-build",
"rand 0.7.3", "rand 0.7.3",
@ -2575,7 +2617,7 @@ dependencies = [
"hmac-drbg", "hmac-drbg",
"rand 0.7.3", "rand 0.7.3",
"sha2", "sha2",
"subtle 2.2.2", "subtle 2.2.3",
"typenum", "typenum",
] ]
@ -2679,9 +2721,18 @@ dependencies = [
[[package]] [[package]]
name = "lru" name = "lru"
version = "0.4.5" version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e488db3a9e108382265a30764f43cfc87517322e5d04ae0603b32a33461dca3" checksum = "0609345ddee5badacf857d4f547e0e5a2e987db77085c24cd887f73573a04237"
dependencies = [
"hashbrown",
]
[[package]]
name = "lru"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28e0c685219cd60e49a2796bba7e4fe6523e10daca4fd721e84e7f905093d60c"
dependencies = [ dependencies = [
"hashbrown", "hashbrown",
] ]
@ -2933,10 +2984,13 @@ dependencies = [
name = "network" name = "network"
version = "0.1.2" version = "0.1.2"
dependencies = [ dependencies = [
"assert_approx_eq",
"beacon_chain", "beacon_chain",
"environment",
"error-chain", "error-chain",
"eth2-libp2p", "eth2-libp2p",
"eth2_ssz", "eth2_ssz",
"exit-future",
"fnv", "fnv",
"futures 0.3.5", "futures 0.3.5",
"genesis", "genesis",
@ -3276,18 +3330,18 @@ dependencies = [
[[package]] [[package]]
name = "pin-project" name = "pin-project"
version = "0.4.17" version = "0.4.19"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "edc93aeee735e60ecb40cf740eb319ff23eab1c5748abfdb5c180e4ce49f7791" checksum = "ba3a1acf4a3e70849f8a673497ef984f043f95d2d8252dcdf74d54e6a1e47e8a"
dependencies = [ dependencies = [
"pin-project-internal", "pin-project-internal",
] ]
[[package]] [[package]]
name = "pin-project-internal" name = "pin-project-internal"
version = "0.4.17" version = "0.4.19"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e58db2081ba5b4c93bd6be09c40fd36cb9193a8336c384f3b40012e531aa7e40" checksum = "194e88048b71a3e02eb4ee36a6995fed9b8236c11a7bb9f7247a9d9835b3f265"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
@ -3296,9 +3350,9 @@ dependencies = [
[[package]] [[package]]
name = "pin-project-lite" name = "pin-project-lite"
version = "0.1.5" version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7505eeebd78492e0f6108f7171c4948dbb120ee8119d9d77d0afa5469bef67f" checksum = "282adbf10f2698a7a77f8e983a74b2d18176c19a7fd32a45446139ae7b02b715"
[[package]] [[package]]
name = "pin-utils" name = "pin-utils"
@ -3320,9 +3374,9 @@ checksum = "feb3b2b1033b8a60b4da6ee470325f887758c95d5320f52f9ce0df055a55940e"
[[package]] [[package]]
name = "plotters" name = "plotters"
version = "0.2.14" version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9b1d9ca091d370ea3a78d5619145d1b59426ab0c9eedbad2514a4cee08bf389" checksum = "0d1685fbe7beba33de0330629da9d955ac75bd54f33d7b79f9a895590124f6bb"
dependencies = [ dependencies = [
"js-sys", "js-sys",
"num-traits", "num-traits",
@ -3363,9 +3417,9 @@ checksum = "8e946095f9d3ed29ec38de908c22f95d9ac008e424c7bcae54c75a79c527c694"
[[package]] [[package]]
name = "proc-macro2" name = "proc-macro2"
version = "1.0.17" version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1502d12e458c49a4c9cbff560d0fe0060c252bc29799ed94ca2ed4bb665a0101" checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa"
dependencies = [ dependencies = [
"unicode-xid", "unicode-xid",
] ]
@ -3715,9 +3769,9 @@ dependencies = [
[[package]] [[package]]
name = "regex" name = "regex"
version = "1.3.7" version = "1.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a6020f034922e3194c711b82a627453881bc4682166cabb07134a10c26ba7692" checksum = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6"
dependencies = [ dependencies = [
"aho-corasick", "aho-corasick",
"memchr", "memchr",
@ -3736,9 +3790,9 @@ dependencies = [
[[package]] [[package]]
name = "regex-syntax" name = "regex-syntax"
version = "0.6.17" version = "0.6.18"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae" checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8"
[[package]] [[package]]
name = "remote_beacon_node" name = "remote_beacon_node"
@ -3769,18 +3823,18 @@ dependencies = [
[[package]] [[package]]
name = "reqwest" name = "reqwest"
version = "0.10.4" version = "0.10.6"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02b81e49ddec5109a9dcfc5f2a317ff53377c915e9ae9d4f2fb50914b85614e2" checksum = "3b82c9238b305f26f53443e3a4bc8528d64b8d0bee408ec949eb7bf5635ec680"
dependencies = [ dependencies = [
"base64 0.11.0", "base64 0.12.1",
"bytes 0.5.4", "bytes 0.5.4",
"encoding_rs", "encoding_rs",
"futures-core", "futures-core",
"futures-util", "futures-util",
"http 0.2.1", "http 0.2.1",
"http-body 0.3.1", "http-body 0.3.1",
"hyper 0.13.5", "hyper 0.13.6",
"hyper-tls 0.4.1", "hyper-tls 0.4.1",
"js-sys", "js-sys",
"lazy_static", "lazy_static",
@ -3793,7 +3847,6 @@ dependencies = [
"serde", "serde",
"serde_json", "serde_json",
"serde_urlencoded", "serde_urlencoded",
"time 0.1.43",
"tokio 0.2.21", "tokio 0.2.21",
"tokio-tls 0.3.1", "tokio-tls 0.3.1",
"url 2.1.1", "url 2.1.1",
@ -3810,6 +3863,8 @@ dependencies = [
"assert_matches", "assert_matches",
"beacon_chain", "beacon_chain",
"bls", "bls",
"bus",
"environment",
"eth2-libp2p", "eth2-libp2p",
"eth2_config", "eth2_config",
"eth2_ssz", "eth2_ssz",
@ -3817,7 +3872,7 @@ dependencies = [
"futures 0.3.5", "futures 0.3.5",
"hex 0.4.2", "hex 0.4.2",
"http 0.2.1", "http 0.2.1",
"hyper 0.13.5", "hyper 0.13.6",
"lazy_static", "lazy_static",
"lighthouse_metrics", "lighthouse_metrics",
"network", "network",
@ -3839,6 +3894,7 @@ dependencies = [
"tokio 0.2.21", "tokio 0.2.21",
"tree_hash", "tree_hash",
"types", "types",
"uhttp_sse",
"url 2.1.1", "url 2.1.1",
"version", "version",
] ]
@ -3990,9 +4046,9 @@ dependencies = [
[[package]] [[package]]
name = "ryu" name = "ryu"
version = "1.0.4" version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed3d612bc64430efeb3f7ee6ef26d590dce0c43249217bddc62112540c7941e1" checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"
[[package]] [[package]]
name = "safe_arith" name = "safe_arith"
@ -4124,18 +4180,18 @@ checksum = "a0eddf2e8f50ced781f288c19f18621fa72a3779e3cb58dbf23b07469b0abeb4"
[[package]] [[package]]
name = "serde" name = "serde"
version = "1.0.110" version = "1.0.111"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "99e7b308464d16b56eba9964e4972a3eee817760ab60d88c3f86e1fecb08204c" checksum = "c9124df5b40cbd380080b2cc6ab894c040a3070d995f5c9dc77e18c34a8ae37d"
dependencies = [ dependencies = [
"serde_derive", "serde_derive",
] ]
[[package]] [[package]]
name = "serde_derive" name = "serde_derive"
version = "1.0.110" version = "1.0.111"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "818fbf6bfa9a42d3bfcaca148547aa00c7b915bec71d1757aa2d44ca68771984" checksum = "3f2c3ac8e6ca1e9c80b8be1023940162bf81ae3cffbb1809474152f2ce1eb250"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
@ -4369,9 +4425,9 @@ dependencies = [
[[package]] [[package]]
name = "slog-term" name = "slog-term"
version = "2.5.0" version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "124501187c410b6a46fe8a47a48435ae462fae4e02d03c558d358f40b17308cb" checksum = "bab1d807cf71129b05ce36914e1dbb6fbfbdecaf686301cb457f4fa967f9f5b6"
dependencies = [ dependencies = [
"atty", "atty",
"chrono", "chrono",
@ -4465,7 +4521,7 @@ dependencies = [
"ring", "ring",
"rustc_version", "rustc_version",
"sha2", "sha2",
"subtle 2.2.2", "subtle 2.2.3",
"x25519-dalek", "x25519-dalek",
] ]
@ -4509,9 +4565,12 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"
[[package]] [[package]]
name = "standback" name = "standback"
version = "0.2.8" version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "47e4b8c631c998468961a9ea159f064c5c8499b95b5e4a34b77849d45949d540" checksum = "b0437cfb83762844799a60e1e3b489d5ceb6a650fbacb86437badc1b6d87b246"
dependencies = [
"version_check 0.9.2",
]
[[package]] [[package]]
name = "state_processing" name = "state_processing"
@ -4618,7 +4677,7 @@ dependencies = [
"lazy_static", "lazy_static",
"leveldb", "leveldb",
"lighthouse_metrics", "lighthouse_metrics",
"lru", "lru 0.5.1",
"parking_lot 0.10.2", "parking_lot 0.10.2",
"rayon", "rayon",
"serde", "serde",
@ -4663,9 +4722,9 @@ checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee"
[[package]] [[package]]
name = "subtle" name = "subtle"
version = "2.2.2" version = "2.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c65d530b10ccaeac294f349038a597e435b18fb456aadd0840a623f83b9e941" checksum = "502d53007c02d7605a05df1c1a73ee436952781653da5d0bf57ad608f66932c1"
[[package]] [[package]]
name = "swap_or_not_shuffle" name = "swap_or_not_shuffle"
@ -4680,9 +4739,9 @@ dependencies = [
[[package]] [[package]]
name = "syn" name = "syn"
version = "1.0.25" version = "1.0.30"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f14a640819f79b72a710c0be059dce779f9339ae046c8bef12c361d56702146f" checksum = "93a56fabc59dce20fe48b6c832cc249c713e7ed88fa28b0ee0a3bfcaae5fe4e2"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
@ -4855,6 +4914,7 @@ name = "timer"
version = "0.1.2" version = "0.1.2"
dependencies = [ dependencies = [
"beacon_chain", "beacon_chain",
"environment",
"futures 0.3.5", "futures 0.3.5",
"parking_lot 0.10.2", "parking_lot 0.10.2",
"slog", "slog",
@ -4899,9 +4959,9 @@ dependencies = [
[[package]] [[package]]
name = "tinytemplate" name = "tinytemplate"
version = "1.0.4" version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "45e4bc5ac99433e0dcb8b9f309dd271a165ae37dde129b9e0ce1bfdd8bfe4891" checksum = "6d3dc76004a03cec1c5932bca4cdc2e39aaa798e3f82363dd94f9adf6098c12f"
dependencies = [ dependencies = [
"serde", "serde",
"serde_json", "serde_json",
@ -5358,6 +5418,12 @@ dependencies = [
"tree_hash_derive", "tree_hash_derive",
] ]
[[package]]
name = "uhttp_sse"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c6ff93345ba2206230b1bb1aa3ece1a63dd9443b7531024575d16a0680a59444"
[[package]] [[package]]
name = "uint" name = "uint"
version = "0.8.3" version = "0.8.3"
@ -5548,9 +5614,9 @@ dependencies = [
[[package]] [[package]]
name = "vcpkg" name = "vcpkg"
version = "0.2.8" version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fc439f2794e98976c88a2a2dafce96b930fe8010b0a256b3c2199a773933168" checksum = "55d1e41d56121e07f1e223db0a4def204e45c85425f6a16d462fd07c8d10d74c"
[[package]] [[package]]
name = "vec_map" name = "vec_map"
@ -5623,9 +5689,9 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
[[package]] [[package]]
name = "wasm-bindgen" name = "wasm-bindgen"
version = "0.2.62" version = "0.2.63"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3c7d40d09cdbf0f4895ae58cf57d92e1e57a9dd8ed2e8390514b54a47cc5551" checksum = "4c2dc4aa152834bc334f506c1a06b866416a8b6697d5c9f75b9a689c8486def0"
dependencies = [ dependencies = [
"cfg-if", "cfg-if",
"serde", "serde",
@ -5635,9 +5701,9 @@ dependencies = [
[[package]] [[package]]
name = "wasm-bindgen-backend" name = "wasm-bindgen-backend"
version = "0.2.62" version = "0.2.63"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3972e137ebf830900db522d6c8fd74d1900dcfc733462e9a12e942b00b4ac94" checksum = "ded84f06e0ed21499f6184df0e0cb3494727b0c5da89534e0fcc55c51d812101"
dependencies = [ dependencies = [
"bumpalo", "bumpalo",
"lazy_static", "lazy_static",
@ -5650,9 +5716,9 @@ dependencies = [
[[package]] [[package]]
name = "wasm-bindgen-futures" name = "wasm-bindgen-futures"
version = "0.4.12" version = "0.4.13"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a369c5e1dfb7569e14d62af4da642a3cbc2f9a3652fe586e26ac22222aa4b04" checksum = "64487204d863f109eb77e8462189d111f27cb5712cc9fdb3461297a76963a2f6"
dependencies = [ dependencies = [
"cfg-if", "cfg-if",
"js-sys", "js-sys",
@ -5662,9 +5728,9 @@ dependencies = [
[[package]] [[package]]
name = "wasm-bindgen-macro" name = "wasm-bindgen-macro"
version = "0.2.62" version = "0.2.63"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2cd85aa2c579e8892442954685f0d801f9129de24fa2136b2c6a539c76b65776" checksum = "838e423688dac18d73e31edce74ddfac468e37b1506ad163ffaf0a46f703ffe3"
dependencies = [ dependencies = [
"quote", "quote",
"wasm-bindgen-macro-support", "wasm-bindgen-macro-support",
@ -5672,9 +5738,9 @@ dependencies = [
[[package]] [[package]]
name = "wasm-bindgen-macro-support" name = "wasm-bindgen-macro-support"
version = "0.2.62" version = "0.2.63"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8eb197bd3a47553334907ffd2f16507b4f4f01bbec3ac921a7719e0decdfe72a" checksum = "3156052d8ec77142051a533cdd686cba889537b213f948cd1d20869926e68e92"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
@ -5685,15 +5751,15 @@ dependencies = [
[[package]] [[package]]
name = "wasm-bindgen-shared" name = "wasm-bindgen-shared"
version = "0.2.62" version = "0.2.63"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a91c2916119c17a8e316507afaaa2dd94b47646048014bbdf6bef098c1bb58ad" checksum = "c9ba19973a58daf4db6f352eda73dc0e289493cd29fb2632eb172085b6521acd"
[[package]] [[package]]
name = "wasm-bindgen-test" name = "wasm-bindgen-test"
version = "0.3.12" version = "0.3.13"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd8e9dad8040e378f0696b017570c6bc929aac373180e06b3d67ac5059c52da3" checksum = "0f0dfda4d3b3f8acbc3c291b09208081c203af457fb14a229783b06e2f128aa7"
dependencies = [ dependencies = [
"console_error_panic_hook", "console_error_panic_hook",
"js-sys", "js-sys",
@ -5705,9 +5771,9 @@ dependencies = [
[[package]] [[package]]
name = "wasm-bindgen-test-macro" name = "wasm-bindgen-test-macro"
version = "0.3.12" version = "0.3.13"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c358c8d2507c1bae25efa069e62ea907aa28700b25c8c33dafb0b15ba4603627" checksum = "2c2e18093f11c19ca4e188c177fecc7c372304c311189f12c2f9bea5b7324ac7"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
@ -5731,9 +5797,9 @@ dependencies = [
[[package]] [[package]]
name = "web-sys" name = "web-sys"
version = "0.3.39" version = "0.3.40"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8bc359e5dd3b46cb9687a051d50a2fdd228e4ba7cf6fcf861a5365c3d671a642" checksum = "7b72fe77fd39e4bd3eaa4412fd299a0be6b3dfe9d2597e2f1c20beb968f41d17"
dependencies = [ dependencies = [
"js-sys", "js-sys",
"wasm-bindgen", "wasm-bindgen",
@ -5742,7 +5808,7 @@ dependencies = [
[[package]] [[package]]
name = "web3" name = "web3"
version = "0.11.0" version = "0.11.0"
source = "git+https://github.com/tomusdrw/rust-web3#a3e5a5315f0a6bf907183322844f7d6650fa8da7" source = "git+https://github.com/tomusdrw/rust-web3#69d5746f124033dee922d7d36acef9321c1df0b0"
dependencies = [ dependencies = [
"arrayvec 0.5.1", "arrayvec 0.5.1",
"base64 0.12.1", "base64 0.12.1",
@ -5825,6 +5891,7 @@ dependencies = [
name = "websocket_server" name = "websocket_server"
version = "0.1.2" version = "0.1.2"
dependencies = [ dependencies = [
"environment",
"futures 0.3.5", "futures 0.3.5",
"serde", "serde",
"serde_derive", "serde_derive",
@ -5889,9 +5956,9 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]] [[package]]
name = "winreg" name = "winreg"
version = "0.6.2" version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69"
dependencies = [ dependencies = [
"winapi 0.3.8", "winapi 0.3.8",
] ]
@ -5937,9 +6004,9 @@ dependencies = [
[[package]] [[package]]
name = "yaml-rust" name = "yaml-rust"
version = "0.4.3" version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "65923dd1784f44da1d2c3dbbc5e822045628c590ba72123e1c73d3c230c4434d" checksum = "39f0c922f1a334134dc2f7a8b67dc5d25f0735263feec974345ff706bcf20b0d"
dependencies = [ dependencies = [
"linked-hash-map", "linked-hash-map",
] ]

View File

@ -1,4 +1,5 @@
FROM rust:1.43.1 AS builder FROM rust:1.43.1 AS builder
RUN apt-get update && apt-get install -y cmake
COPY . lighthouse COPY . lighthouse
RUN cd lighthouse && make RUN cd lighthouse && make
RUN cd lighthouse && cargo install --path lcli --locked RUN cd lighthouse && cargo install --path lcli --locked

View File

@ -31,5 +31,5 @@ eth2_wallet = { path = "../crypto/eth2_wallet" }
eth2_wallet_manager = { path = "../common/eth2_wallet_manager" } eth2_wallet_manager = { path = "../common/eth2_wallet_manager" }
rand = "0.7.2" rand = "0.7.2"
validator_dir = { path = "../common/validator_dir", features = ["unencrypted_keys"] } validator_dir = { path = "../common/validator_dir", features = ["unencrypted_keys"] }
tokio = {version = "0.2.20", features = ["full"]} tokio = { version = "0.2.21", features = ["full"] }
eth2_keystore = { path = "../crypto/eth2_keystore" } eth2_keystore = { path = "../crypto/eth2_keystore" }

View File

@ -8,8 +8,9 @@ use slog::{info, Logger};
use std::path::PathBuf; use std::path::PathBuf;
use tokio::time::{delay_until, Duration, Instant}; use tokio::time::{delay_until, Duration, Instant};
use types::EthSpec; use types::EthSpec;
use validator_dir::Manager as ValidatorManager; use validator_dir::{Eth1DepositData, Manager as ValidatorManager, ValidatorDir};
use web3::{ use web3::{
transports::Http,
transports::Ipc, transports::Ipc,
types::{Address, SyncInfo, SyncState, TransactionRequest, U256}, types::{Address, SyncInfo, SyncState, TransactionRequest, U256},
Transport, Web3, Transport, Web3,
@ -18,6 +19,7 @@ use web3::{
pub const CMD: &str = "deposit"; pub const CMD: &str = "deposit";
pub const VALIDATOR_FLAG: &str = "validator"; pub const VALIDATOR_FLAG: &str = "validator";
pub const ETH1_IPC_FLAG: &str = "eth1-ipc"; pub const ETH1_IPC_FLAG: &str = "eth1-ipc";
pub const ETH1_HTTP_FLAG: &str = "eth1-http";
pub const FROM_ADDRESS_FLAG: &str = "from-address"; pub const FROM_ADDRESS_FLAG: &str = "from-address";
const GWEI: u64 = 1_000_000_000; const GWEI: u64 = 1_000_000_000;
@ -64,7 +66,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.value_name("ETH1_IPC_PATH") .value_name("ETH1_IPC_PATH")
.help("Path to an Eth1 JSON-RPC IPC endpoint") .help("Path to an Eth1 JSON-RPC IPC endpoint")
.takes_value(true) .takes_value(true)
.required(true), .required(false),
)
.arg(
Arg::with_name(ETH1_HTTP_FLAG)
.long(ETH1_HTTP_FLAG)
.value_name("ETH1_HTTP_URL")
.help("URL to an Eth1 JSON-RPC endpoint")
.takes_value(true)
.required(false),
) )
.arg( .arg(
Arg::with_name(FROM_ADDRESS_FLAG) Arg::with_name(FROM_ADDRESS_FLAG)
@ -79,11 +89,65 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
) )
} }
fn send_deposit_transactions<T1, T2: 'static>(
mut env: Environment<T1>,
log: Logger,
eth1_deposit_datas: Vec<(ValidatorDir, Eth1DepositData)>,
from_address: Address,
deposit_contract: Address,
transport: T2,
) -> Result<(), String>
where
T1: EthSpec,
T2: Transport + std::marker::Send,
<T2 as web3::Transport>::Out: std::marker::Send,
{
let web3 = Web3::new(transport);
let deposits_fut = async {
poll_until_synced(web3.clone(), log.clone()).await?;
for (mut validator_dir, eth1_deposit_data) in eth1_deposit_datas {
let tx_hash = web3
.eth()
.send_transaction(TransactionRequest {
from: from_address,
to: Some(deposit_contract),
gas: Some(DEPOSIT_GAS.into()),
gas_price: None,
value: Some(from_gwei(eth1_deposit_data.deposit_data.amount)),
data: Some(eth1_deposit_data.rlp.into()),
nonce: None,
condition: None,
})
.compat()
.await
.map_err(|e| format!("Failed to send transaction: {:?}", e))?;
info!(
log,
"Submitted deposit";
"tx_hash" => format!("{:?}", tx_hash),
);
validator_dir
.save_eth1_deposit_tx_hash(&format!("{:?}", tx_hash))
.map_err(|e| format!("Failed to save tx hash {:?} to disk: {:?}", tx_hash, e))?;
}
Ok::<(), String>(())
};
env.runtime().block_on(deposits_fut)?;
Ok(())
}
pub fn cli_run<T: EthSpec>( pub fn cli_run<T: EthSpec>(
matches: &ArgMatches<'_>, matches: &ArgMatches<'_>,
mut env: Environment<T>, mut env: Environment<T>,
) -> Result<(), String> { ) -> Result<(), String> {
let log = env.core_context().log; let log = env.core_context().log().clone();
let data_dir = clap_utils::parse_path_with_default_in_home_dir( let data_dir = clap_utils::parse_path_with_default_in_home_dir(
matches, matches,
@ -91,7 +155,8 @@ pub fn cli_run<T: EthSpec>(
PathBuf::new().join(".lighthouse").join("validators"), PathBuf::new().join(".lighthouse").join("validators"),
)?; )?;
let validator: String = clap_utils::parse_required(matches, VALIDATOR_FLAG)?; let validator: String = clap_utils::parse_required(matches, VALIDATOR_FLAG)?;
let eth1_ipc_path: PathBuf = clap_utils::parse_required(matches, ETH1_IPC_FLAG)?; let eth1_ipc_path: Option<PathBuf> = clap_utils::parse_optional(matches, ETH1_IPC_FLAG)?;
let eth1_http_url: Option<String> = clap_utils::parse_optional(matches, ETH1_HTTP_FLAG)?;
let from_address: Address = clap_utils::parse_required(matches, FROM_ADDRESS_FLAG)?; let from_address: Address = clap_utils::parse_required(matches, FROM_ADDRESS_FLAG)?;
let manager = ValidatorManager::open(&data_dir) let manager = ValidatorManager::open(&data_dir)
@ -167,41 +232,40 @@ pub fn cli_run<T: EthSpec>(
return Err("Refusing to deposit to the zero address. Check testnet configuration.".into()); return Err("Refusing to deposit to the zero address. Check testnet configuration.".into());
} }
let (_event_loop_handle, transport) = match (eth1_ipc_path, eth1_http_url) {
Ipc::new(eth1_ipc_path).map_err(|e| format!("Unable to connect to eth1 IPC: {:?}", e))?; (Some(_), Some(_)) => Err(format!(
let web3 = Web3::new(transport); "error: Cannot supply both --{} and --{}",
ETH1_IPC_FLAG, ETH1_HTTP_FLAG
let deposits_fut = async { )),
poll_until_synced(web3.clone(), log.clone()).await?; (None, None) => Err(format!(
"error: Must supply one of --{} or --{}",
for (mut validator_dir, eth1_deposit_data) in eth1_deposit_datas { ETH1_IPC_FLAG, ETH1_HTTP_FLAG
let tx_hash = web3 )),
.eth() (Some(ipc_path), None) => {
.send_transaction(TransactionRequest { let (_event_loop_handle, ipc_transport) = Ipc::new(ipc_path)
from: from_address, .map_err(|e| format!("Unable to connect to eth1 IPC: {:?}", e))?;
to: Some(deposit_contract), send_deposit_transactions(
gas: Some(DEPOSIT_GAS.into()), env,
gas_price: None, log,
value: Some(from_gwei(eth1_deposit_data.deposit_data.amount)), eth1_deposit_datas,
data: Some(eth1_deposit_data.rlp.into()), from_address,
nonce: None, deposit_contract,
condition: None, ipc_transport,
}) )
.compat()
.await
.map_err(|e| format!("Failed to send transaction: {:?}", e))?;
validator_dir
.save_eth1_deposit_tx_hash(&format!("{:?}", tx_hash))
.map_err(|e| format!("Failed to save tx hash {:?} to disk: {:?}", tx_hash, e))?;
} }
(None, Some(http_url)) => {
Ok::<(), String>(()) let (_event_loop_handle, http_transport) = Http::new(http_url.as_str())
}; .map_err(|e| format!("Unable to connect to eth1 http RPC: {:?}", e))?;
send_deposit_transactions(
env.runtime().block_on(deposits_fut)?; env,
log,
Ok(()) eth1_deposit_datas,
from_address,
deposit_contract,
http_transport,
)
}
}
} }
/// Converts gwei to wei. /// Converts gwei to wei.

View File

@ -27,7 +27,7 @@ slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_tr
slog-term = "2.5.0" slog-term = "2.5.0"
slog-async = "2.5.0" slog-async = "2.5.0"
ctrlc = { version = "3.1.4", features = ["termination"] } ctrlc = { version = "3.1.4", features = ["termination"] }
tokio = {version = "0.2.20", features = ["time"] } tokio = { version = "0.2.21", features = ["time"] }
exit-future = "0.2.0" exit-future = "0.2.0"
env_logger = "0.7.1" env_logger = "0.7.1"
dirs = "2.0.2" dirs = "2.0.2"

View File

@ -33,7 +33,7 @@ eth2_ssz_derive = "0.1.0"
state_processing = { path = "../../consensus/state_processing" } state_processing = { path = "../../consensus/state_processing" }
tree_hash = "0.1.0" tree_hash = "0.1.0"
types = { path = "../../consensus/types" } types = { path = "../../consensus/types" }
tokio = "0.2.20" tokio = "0.2.21"
eth1 = { path = "../eth1" } eth1 = { path = "../eth1" }
websocket_server = { path = "../websocket_server" } websocket_server = { path = "../websocket_server" }
futures = "0.3.5" futures = "0.3.5"
@ -41,12 +41,14 @@ genesis = { path = "../genesis" }
integer-sqrt = "0.1.3" integer-sqrt = "0.1.3"
rand = "0.7.3" rand = "0.7.3"
proto_array_fork_choice = { path = "../../consensus/proto_array_fork_choice" } proto_array_fork_choice = { path = "../../consensus/proto_array_fork_choice" }
lru = "0.4.3" lru = "0.5.1"
tempfile = "3.1.0" tempfile = "3.1.0"
bitvec = "0.17.4" bitvec = "0.17.4"
bls = { path = "../../crypto/bls" } bls = { path = "../../crypto/bls" }
safe_arith = { path = "../../consensus/safe_arith" } safe_arith = { path = "../../consensus/safe_arith" }
environment = { path = "../../lighthouse/environment" }
bus = "2.2.3"
[dev-dependencies] [dev-dependencies]
lazy_static = "1.4.0" lazy_static = "1.4.0"
environment = { path = "../../lighthouse/environment" }

View File

@ -1,4 +1,5 @@
use crate::metrics; use crate::metrics;
use environment::TaskExecutor;
use eth1::{Config as Eth1Config, Eth1Block, Service as HttpService}; use eth1::{Config as Eth1Config, Eth1Block, Service as HttpService};
use eth2_hashing::hash; use eth2_hashing::hash;
use slog::{debug, error, trace, Logger}; use slog::{debug, error, trace, Logger};
@ -285,10 +286,8 @@ impl<T: EthSpec, S: Store<T>> CachingEth1Backend<T, S> {
} }
/// Starts the routine which connects to the external eth1 node and updates the caches. /// Starts the routine which connects to the external eth1 node and updates the caches.
pub fn start(&self, exit: tokio::sync::oneshot::Receiver<()>) { pub fn start(&self, handle: TaskExecutor) {
// don't need to spawn as a task is being spawned in auto_update HttpService::auto_update(self.core.clone(), handle);
// TODO: check if this is correct
HttpService::auto_update(self.core.clone(), exit);
} }
/// Instantiates `self` from an existing service. /// Instantiates `self` from an existing service.

View File

@ -1,6 +1,10 @@
use bus::Bus;
use parking_lot::Mutex;
use serde_derive::{Deserialize, Serialize}; use serde_derive::{Deserialize, Serialize};
use slog::{error, Logger};
use std::marker::PhantomData; use std::marker::PhantomData;
use types::{Attestation, Epoch, EthSpec, Hash256, SignedBeaconBlock}; use std::sync::Arc;
use types::{Attestation, Epoch, EthSpec, Hash256, SignedBeaconBlock, SignedBeaconBlockHash};
pub use websocket_server::WebSocketSender; pub use websocket_server::WebSocketSender;
pub trait EventHandler<T: EthSpec>: Sized + Send + Sync { pub trait EventHandler<T: EthSpec>: Sized + Send + Sync {
@ -18,6 +22,80 @@ impl<T: EthSpec> EventHandler<T> for WebSocketSender<T> {
} }
} }
pub struct ServerSentEvents<T: EthSpec> {
// Bus<> is itself Sync + Send. We use Mutex<> here only because of the surrounding code does
// not enforce mutability statically (i.e. relies on interior mutability).
head_changed_queue: Arc<Mutex<Bus<SignedBeaconBlockHash>>>,
log: Logger,
_phantom: PhantomData<T>,
}
impl<T: EthSpec> ServerSentEvents<T> {
pub fn new(log: Logger) -> (Self, Arc<Mutex<Bus<SignedBeaconBlockHash>>>) {
let bus = Bus::new(T::slots_per_epoch() as usize);
let mutex = Mutex::new(bus);
let arc = Arc::new(mutex);
let this = Self {
head_changed_queue: arc.clone(),
log: log,
_phantom: PhantomData,
};
(this, arc)
}
}
impl<T: EthSpec> EventHandler<T> for ServerSentEvents<T> {
fn register(&self, kind: EventKind<T>) -> Result<(), String> {
match kind {
EventKind::BeaconHeadChanged {
current_head_beacon_block_root,
..
} => {
let mut guard = self.head_changed_queue.lock();
if let Err(_) = guard.try_broadcast(current_head_beacon_block_root.into()) {
error!(
self.log,
"Head change streaming queue full";
"dropped_change" => format!("{}", current_head_beacon_block_root),
);
}
Ok(())
}
_ => Ok(()),
}
}
}
// An event handler that pushes events to both the websockets handler and the SSE handler.
// Named after the unix `tee` command. Meant as a temporary solution before ditching WebSockets
// completely once SSE functions well enough.
pub struct TeeEventHandler<E: EthSpec> {
websockets_handler: WebSocketSender<E>,
sse_handler: ServerSentEvents<E>,
}
impl<E: EthSpec> TeeEventHandler<E> {
pub fn new(
log: Logger,
websockets_handler: WebSocketSender<E>,
) -> Result<(Self, Arc<Mutex<Bus<SignedBeaconBlockHash>>>), String> {
let (sse_handler, bus) = ServerSentEvents::new(log);
let result = Self {
websockets_handler: websockets_handler,
sse_handler: sse_handler,
};
Ok((result, bus))
}
}
impl<E: EthSpec> EventHandler<E> for TeeEventHandler<E> {
fn register(&self, kind: EventKind<E>) -> Result<(), String> {
self.websockets_handler.register(kind.clone())?;
self.sse_handler.register(kind)?;
Ok(())
}
}
impl<T: EthSpec> EventHandler<T> for NullEventHandler<T> { impl<T: EthSpec> EventHandler<T> for NullEventHandler<T> {
fn register(&self, _kind: EventKind<T>) -> Result<(), String> { fn register(&self, _kind: EventKind<T>) -> Result<(), String> {
Ok(()) Ok(())
@ -30,7 +108,7 @@ impl<T: EthSpec> Default for NullEventHandler<T> {
} }
} }
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize, Clone)]
#[serde( #[serde(
bound = "T: EthSpec", bound = "T: EthSpec",
rename_all = "snake_case", rename_all = "snake_case",

View File

@ -28,7 +28,7 @@ error-chain = "0.12.2"
serde_yaml = "0.8.11" serde_yaml = "0.8.11"
slog = { version = "2.5.2", features = ["max_level_trace"] } slog = { version = "2.5.2", features = ["max_level_trace"] }
slog-async = "2.5.0" slog-async = "2.5.0"
tokio = "0.2.20" tokio = "0.2.21"
dirs = "2.0.2" dirs = "2.0.2"
futures = "0.3.5" futures = "0.3.5"
reqwest = "0.10.4" reqwest = "0.10.4"
@ -40,3 +40,4 @@ eth2_ssz = "0.1.2"
lazy_static = "1.4.0" lazy_static = "1.4.0"
lighthouse_metrics = { path = "../../common/lighthouse_metrics" } lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
time = "0.2.16" time = "0.2.16"
bus = "2.2.3"

View File

@ -1,6 +1,7 @@
use crate::config::{ClientGenesis, Config as ClientConfig}; use crate::config::{ClientGenesis, Config as ClientConfig};
use crate::notifier::spawn_notifier; use crate::notifier::spawn_notifier;
use crate::Client; use crate::Client;
use beacon_chain::events::TeeEventHandler;
use beacon_chain::{ use beacon_chain::{
builder::{BeaconChainBuilder, Witness}, builder::{BeaconChainBuilder, Witness},
eth1_chain::{CachingEth1Backend, Eth1Chain}, eth1_chain::{CachingEth1Backend, Eth1Chain},
@ -9,20 +10,26 @@ use beacon_chain::{
store::{HotColdDB, MemoryStore, Store, StoreConfig}, store::{HotColdDB, MemoryStore, Store, StoreConfig},
BeaconChain, BeaconChainTypes, Eth1ChainBackend, EventHandler, BeaconChain, BeaconChainTypes, Eth1ChainBackend, EventHandler,
}; };
use bus::Bus;
use environment::RuntimeContext; use environment::RuntimeContext;
use eth1::{Config as Eth1Config, Service as Eth1Service}; use eth1::{Config as Eth1Config, Service as Eth1Service};
use eth2_config::Eth2Config; use eth2_config::Eth2Config;
use eth2_libp2p::NetworkGlobals; use eth2_libp2p::NetworkGlobals;
use genesis::{interop_genesis_state, Eth1GenesisService}; use genesis::{interop_genesis_state, Eth1GenesisService};
use network::{NetworkConfig, NetworkMessage, NetworkService}; use network::{NetworkConfig, NetworkMessage, NetworkService};
use parking_lot::Mutex;
use slog::info; use slog::info;
use ssz::Decode; use ssz::Decode;
use std::net::SocketAddr; use std::net::SocketAddr;
use std::path::Path; use std::path::Path;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use timer::spawn_timer;
use tokio::sync::mpsc::UnboundedSender; use tokio::sync::mpsc::UnboundedSender;
use types::{test_utils::generate_deterministic_keypairs, BeaconState, ChainSpec, EthSpec}; use types::{
test_utils::generate_deterministic_keypairs, BeaconState, ChainSpec, EthSpec,
SignedBeaconBlockHash,
};
use websocket_server::{Config as WebSocketConfig, WebSocketSender}; use websocket_server::{Config as WebSocketConfig, WebSocketSender};
/// Interval between polling the eth1 node for genesis information. /// Interval between polling the eth1 node for genesis information.
@ -50,7 +57,6 @@ pub struct ClientBuilder<T: BeaconChainTypes> {
beacon_chain_builder: Option<BeaconChainBuilder<T>>, beacon_chain_builder: Option<BeaconChainBuilder<T>>,
beacon_chain: Option<Arc<BeaconChain<T>>>, beacon_chain: Option<Arc<BeaconChain<T>>>,
eth1_service: Option<Eth1Service>, eth1_service: Option<Eth1Service>,
exit_channels: Vec<tokio::sync::oneshot::Sender<()>>,
event_handler: Option<T::EventHandler>, event_handler: Option<T::EventHandler>,
network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>, network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>,
network_send: Option<UnboundedSender<NetworkMessage<T::EthSpec>>>, network_send: Option<UnboundedSender<NetworkMessage<T::EthSpec>>>,
@ -84,7 +90,6 @@ where
beacon_chain_builder: None, beacon_chain_builder: None,
beacon_chain: None, beacon_chain: None,
eth1_service: None, eth1_service: None,
exit_channels: vec![],
event_handler: None, event_handler: None,
network_globals: None, network_globals: None,
network_send: None, network_send: None,
@ -132,7 +137,7 @@ where
.ok_or_else(|| "beacon_chain_start_method requires a chain spec".to_string())?; .ok_or_else(|| "beacon_chain_start_method requires a chain spec".to_string())?;
let builder = BeaconChainBuilder::new(eth_spec_instance) let builder = BeaconChainBuilder::new(eth_spec_instance)
.logger(context.log.clone()) .logger(context.log().clone())
.store(store) .store(store)
.store_migrator(store_migrator) .store_migrator(store_migrator)
.data_dir(data_dir) .data_dir(data_dir)
@ -150,7 +155,7 @@ where
// Alternatively, if there's a beacon chain in the database then always resume // Alternatively, if there's a beacon chain in the database then always resume
// using it. // using it.
let client_genesis = if client_genesis == ClientGenesis::FromStore && !chain_exists { let client_genesis = if client_genesis == ClientGenesis::FromStore && !chain_exists {
info!(context.log, "Defaulting to deposit contract genesis"); info!(context.log(), "Defaulting to deposit contract genesis");
ClientGenesis::DepositContract ClientGenesis::DepositContract
} else if chain_exists { } else if chain_exists {
@ -172,7 +177,7 @@ where
genesis_state_bytes, genesis_state_bytes,
} => { } => {
info!( info!(
context.log, context.log(),
"Starting from known genesis state"; "Starting from known genesis state";
); );
@ -183,14 +188,14 @@ where
} }
ClientGenesis::DepositContract => { ClientGenesis::DepositContract => {
info!( info!(
context.log, context.log(),
"Waiting for eth2 genesis from eth1"; "Waiting for eth2 genesis from eth1";
"eth1_endpoint" => &config.eth1.endpoint, "eth1_endpoint" => &config.eth1.endpoint,
"contract_deploy_block" => config.eth1.deposit_contract_deploy_block, "contract_deploy_block" => config.eth1.deposit_contract_deploy_block,
"deposit_contract" => &config.eth1.deposit_contract_address "deposit_contract" => &config.eth1.deposit_contract_address
); );
let genesis_service = Eth1GenesisService::new(config.eth1, context.log.clone()); let genesis_service = Eth1GenesisService::new(config.eth1, context.log().clone());
let genesis_state = genesis_service let genesis_state = genesis_service
.wait_for_genesis_state( .wait_for_genesis_state(
@ -223,19 +228,18 @@ where
.ok_or_else(|| "network requires a runtime_context")? .ok_or_else(|| "network requires a runtime_context")?
.clone(); .clone();
let (network_globals, network_send, network_exit) = let (network_globals, network_send) =
NetworkService::start(beacon_chain, config, &context.runtime_handle, context.log) NetworkService::start(beacon_chain, config, context.executor)
.map_err(|e| format!("Failed to start network: {:?}", e))?; .map_err(|e| format!("Failed to start network: {:?}", e))?;
self.network_globals = Some(network_globals); self.network_globals = Some(network_globals);
self.network_send = Some(network_send); self.network_send = Some(network_send);
self.exit_channels.push(network_exit);
Ok(self) Ok(self)
} }
/// Immediately starts the timer service. /// Immediately starts the timer service.
fn timer(mut self) -> Result<Self, String> { fn timer(self) -> Result<Self, String> {
let context = self let context = self
.runtime_context .runtime_context
.as_ref() .as_ref()
@ -251,13 +255,9 @@ where
.ok_or_else(|| "node timer requires a chain spec".to_string())? .ok_or_else(|| "node timer requires a chain spec".to_string())?
.milliseconds_per_slot; .milliseconds_per_slot;
let timer_exit = context spawn_timer(context.executor, beacon_chain, milliseconds_per_slot)
.runtime_handle
.enter(|| timer::spawn(beacon_chain, milliseconds_per_slot))
.map_err(|e| format!("Unable to start node timer: {}", e))?; .map_err(|e| format!("Unable to start node timer: {}", e))?;
self.exit_channels.push(timer_exit);
Ok(self) Ok(self)
} }
@ -266,6 +266,7 @@ where
mut self, mut self,
client_config: &ClientConfig, client_config: &ClientConfig,
eth2_config: &Eth2Config, eth2_config: &Eth2Config,
events: Arc<Mutex<Bus<SignedBeaconBlockHash>>>,
) -> Result<Self, String> { ) -> Result<Self, String> {
let beacon_chain = self let beacon_chain = self
.beacon_chain .beacon_chain
@ -290,32 +291,29 @@ where
network_chan: network_send, network_chan: network_send,
}; };
let log = context.log.clone(); let listening_addr = rest_api::start_server(
let (exit_channel, listening_addr) = context.runtime_handle.enter(|| { context.executor,
rest_api::start_server( &client_config.rest_api,
&client_config.rest_api, beacon_chain,
beacon_chain, network_info,
network_info, client_config
client_config .create_db_path()
.create_db_path() .map_err(|_| "unable to read data dir")?,
.map_err(|_| "unable to read data dir")?, client_config
client_config .create_freezer_db_path()
.create_freezer_db_path() .map_err(|_| "unable to read freezer DB dir")?,
.map_err(|_| "unable to read freezer DB dir")?, eth2_config.clone(),
eth2_config.clone(), events,
log, )
) .map_err(|e| format!("Failed to start HTTP API: {:?}", e))?;
.map_err(|e| format!("Failed to start HTTP API: {:?}", e))
})?;
self.exit_channels.push(exit_channel);
self.http_listen_addr = Some(listening_addr); self.http_listen_addr = Some(listening_addr);
Ok(self) Ok(self)
} }
/// Immediately starts the service that periodically logs information each slot. /// Immediately starts the service that periodically logs information each slot.
pub fn notifier(mut self) -> Result<Self, String> { pub fn notifier(self) -> Result<Self, String> {
let context = self let context = self
.runtime_context .runtime_context
.as_ref() .as_ref()
@ -335,19 +333,13 @@ where
.ok_or_else(|| "slot_notifier requires a chain spec".to_string())? .ok_or_else(|| "slot_notifier requires a chain spec".to_string())?
.milliseconds_per_slot; .milliseconds_per_slot;
let exit_channel = context spawn_notifier(
.runtime_handle context.executor,
.enter(|| { beacon_chain,
spawn_notifier( network_globals,
beacon_chain, milliseconds_per_slot,
network_globals, )
milliseconds_per_slot, .map_err(|e| format!("Unable to start slot notifier: {}", e))?;
context.log.clone(),
)
})
.map_err(|e| format!("Unable to start slot notifier: {}", e))?;
self.exit_channels.push(exit_channel);
Ok(self) Ok(self)
} }
@ -365,7 +357,6 @@ where
network_globals: self.network_globals, network_globals: self.network_globals,
http_listen_addr: self.http_listen_addr, http_listen_addr: self.http_listen_addr,
websocket_listen_addr: self.websocket_listen_addr, websocket_listen_addr: self.websocket_listen_addr,
_exit_channels: self.exit_channels,
} }
} }
} }
@ -436,22 +427,14 @@ where
.ok_or_else(|| "websocket_event_handler requires a runtime_context")? .ok_or_else(|| "websocket_event_handler requires a runtime_context")?
.service_context("ws".into()); .service_context("ws".into());
let (sender, exit_channel, listening_addr): ( let (sender, listening_addr): (WebSocketSender<TEthSpec>, Option<_>) = if config.enabled {
WebSocketSender<TEthSpec>, let (sender, listening_addr) =
Option<_>, websocket_server::start_server(context.executor, &config)?;
Option<_>, (sender, Some(listening_addr))
) = if config.enabled {
let (sender, exit, listening_addr) = context
.runtime_handle
.enter(|| websocket_server::start_server(&config, &context.log))?;
(sender, Some(exit), Some(listening_addr))
} else { } else {
(WebSocketSender::dummy(), None, None) (WebSocketSender::dummy(), None)
}; };
if let Some(channel) = exit_channel {
self.exit_channels.push(channel);
}
self.event_handler = Some(sender); self.event_handler = Some(sender);
self.websocket_listen_addr = listening_addr; self.websocket_listen_addr = listening_addr;
@ -459,6 +442,51 @@ where
} }
} }
impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec>
ClientBuilder<
Witness<
TStore,
TStoreMigrator,
TSlotClock,
TEth1Backend,
TEthSpec,
TeeEventHandler<TEthSpec>,
>,
>
where
TStore: Store<TEthSpec> + 'static,
TStoreMigrator: Migrate<TEthSpec>,
TSlotClock: SlotClock + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
TEthSpec: EthSpec + 'static,
{
/// Specifies that the `BeaconChain` should publish events using the WebSocket server.
pub fn tee_event_handler(
mut self,
config: WebSocketConfig,
) -> Result<(Self, Arc<Mutex<Bus<SignedBeaconBlockHash>>>), String> {
let context = self
.runtime_context
.as_ref()
.ok_or_else(|| "websocket_event_handler requires a runtime_context")?
.service_context("ws".into());
let log = context.log().clone();
let (sender, listening_addr): (WebSocketSender<TEthSpec>, Option<_>) = if config.enabled {
let (sender, listening_addr) =
websocket_server::start_server(context.executor, &config)?;
(sender, Some(listening_addr))
} else {
(WebSocketSender::dummy(), None)
};
self.websocket_listen_addr = listening_addr;
let (tee_event_handler, bus) = TeeEventHandler::new(log, sender)?;
self.event_handler = Some(tee_event_handler);
Ok((self, bus))
}
}
impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler> impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
ClientBuilder< ClientBuilder<
Witness< Witness<
@ -494,7 +522,7 @@ where
.clone() .clone()
.ok_or_else(|| "disk_store requires a chain spec".to_string())?; .ok_or_else(|| "disk_store requires a chain spec".to_string())?;
let store = HotColdDB::open(hot_path, cold_path, config, spec, context.log) let store = HotColdDB::open(hot_path, cold_path, config, spec, context.log().clone())
.map_err(|e| format!("Unable to open database: {:?}", e))?; .map_err(|e| format!("Unable to open database: {:?}", e))?;
self.store = Some(Arc::new(store)); self.store = Some(Arc::new(store));
Ok(self) Ok(self)
@ -555,7 +583,7 @@ where
let store = self.store.clone().ok_or_else(|| { let store = self.store.clone().ok_or_else(|| {
"background_migrator requires the store to be initialized".to_string() "background_migrator requires the store to be initialized".to_string()
})?; })?;
self.store_migrator = Some(BackgroundMigrator::new(store, context.log.clone())); self.store_migrator = Some(BackgroundMigrator::new(store, context.log().clone()));
Ok(self) Ok(self)
} }
} }
@ -617,25 +645,23 @@ where
&persisted, &persisted,
config.clone(), config.clone(),
store.clone(), store.clone(),
&context.log, &context.log().clone(),
) )
.map(|chain| chain.into_backend()) .map(|chain| chain.into_backend())
}) })
.unwrap_or_else(|| { .unwrap_or_else(|| {
Ok(CachingEth1Backend::new(config, context.log.clone(), store)) Ok(CachingEth1Backend::new(
config,
context.log().clone(),
store,
))
})? })?
}; };
self.eth1_service = None; self.eth1_service = None;
let exit = {
let (tx, rx) = tokio::sync::oneshot::channel();
self.exit_channels.push(tx);
rx
};
// Starts the service that connects to an eth1 node and periodically updates caches. // Starts the service that connects to an eth1 node and periodically updates caches.
context.runtime_handle.enter(|| backend.start(exit)); backend.start(context.executor);
self.beacon_chain_builder = Some(beacon_chain_builder.eth1_backend(Some(backend))); self.beacon_chain_builder = Some(beacon_chain_builder.eth1_backend(Some(backend)));

View File

@ -25,8 +25,6 @@ pub struct Client<T: BeaconChainTypes> {
network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>, network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>,
http_listen_addr: Option<SocketAddr>, http_listen_addr: Option<SocketAddr>,
websocket_listen_addr: Option<SocketAddr>, websocket_listen_addr: Option<SocketAddr>,
/// Exit channels will complete/error when dropped, causing each service to exit gracefully.
_exit_channels: Vec<tokio::sync::oneshot::Sender<()>>,
} }
impl<T: BeaconChainTypes> Client<T> { impl<T: BeaconChainTypes> Client<T> {

View File

@ -23,11 +23,11 @@ const SPEEDO_OBSERVATIONS: usize = 4;
/// Spawns a notifier service which periodically logs information about the node. /// Spawns a notifier service which periodically logs information about the node.
pub fn spawn_notifier<T: BeaconChainTypes>( pub fn spawn_notifier<T: BeaconChainTypes>(
executor: environment::TaskExecutor,
beacon_chain: Arc<BeaconChain<T>>, beacon_chain: Arc<BeaconChain<T>>,
network: Arc<NetworkGlobals<T::EthSpec>>, network: Arc<NetworkGlobals<T::EthSpec>>,
milliseconds_per_slot: u64, milliseconds_per_slot: u64,
log: slog::Logger, ) -> Result<(), String> {
) -> Result<tokio::sync::oneshot::Sender<()>, String> {
let slot_duration = Duration::from_millis(milliseconds_per_slot); let slot_duration = Duration::from_millis(milliseconds_per_slot);
let duration_to_next_slot = beacon_chain let duration_to_next_slot = beacon_chain
.slot_clock .slot_clock
@ -41,6 +41,7 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
let interval_duration = slot_duration; let interval_duration = slot_duration;
let speedo = Mutex::new(Speedo::default()); let speedo = Mutex::new(Speedo::default());
let log = executor.log().clone();
let mut interval = tokio::time::interval_at(start_instant, interval_duration); let mut interval = tokio::time::interval_at(start_instant, interval_duration);
let interval_future = async move { let interval_future = async move {
@ -163,12 +164,10 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
Ok::<(), ()>(()) Ok::<(), ()>(())
}; };
let (exit_signal, exit) = tokio::sync::oneshot::channel();
// run the notifier on the current executor // run the notifier on the current executor
tokio::spawn(futures::future::select(Box::pin(interval_future), exit)); executor.spawn(interval_future.unwrap_or_else(|_| ()), "notifier");
Ok(exit_signal) Ok(())
} }
/// Returns the peer count, returning something helpful if it's `usize::max_value` (effectively a /// Returns the peer count, returning something helpful if it's `usize::max_value` (effectively a

View File

@ -6,7 +6,6 @@ edition = "2018"
[dev-dependencies] [dev-dependencies]
eth1_test_rig = { path = "../../testing/eth1_test_rig" } eth1_test_rig = { path = "../../testing/eth1_test_rig" }
environment = { path = "../../lighthouse/environment" }
toml = "0.5.6" toml = "0.5.6"
web3 = "0.11.0" web3 = "0.11.0"
sloggers = "1.0.0" sloggers = "1.0.0"
@ -25,8 +24,9 @@ tree_hash = "0.1.0"
eth2_hashing = "0.1.0" eth2_hashing = "0.1.0"
parking_lot = "0.10.2" parking_lot = "0.10.2"
slog = "2.5.2" slog = "2.5.2"
tokio = { version = "0.2.20", features = ["full"] } tokio = { version = "0.2.21", features = ["full"] }
state_processing = { path = "../../consensus/state_processing" } state_processing = { path = "../../consensus/state_processing" }
libflate = "1.0.0" libflate = "1.0.0"
lighthouse_metrics = { path = "../../common/lighthouse_metrics"} lighthouse_metrics = { path = "../../common/lighthouse_metrics"}
lazy_static = "1.4.0" lazy_static = "1.4.0"
environment = { path = "../../lighthouse/environment" }

View File

@ -290,7 +290,7 @@ impl Service {
/// - Err(_) if there is an error. /// - Err(_) if there is an error.
/// ///
/// Emits logs for debugging and errors. /// Emits logs for debugging and errors.
pub fn auto_update(service: Self, exit: tokio::sync::oneshot::Receiver<()>) { pub fn auto_update(service: Self, handle: environment::TaskExecutor) {
let update_interval = Duration::from_millis(service.config().auto_update_interval_millis); let update_interval = Duration::from_millis(service.config().auto_update_interval_millis);
let mut interval = interval_at(Instant::now(), update_interval); let mut interval = interval_at(Instant::now(), update_interval);
@ -303,9 +303,7 @@ impl Service {
} }
}; };
let future = futures::future::select(Box::pin(update_future), exit); handle.spawn(update_future, "eth1");
tokio::task::spawn(future);
} }
async fn do_update(service: Self, update_interval: Duration) -> Result<(), ()> { async fn do_update(service: Self, update_interval: Duration) -> Result<(), ()> {

View File

@ -24,7 +24,7 @@ unsigned-varint = { git = "https://github.com/sigp/unsigned-varint", branch = "l
lazy_static = "1.4.0" lazy_static = "1.4.0"
lighthouse_metrics = { path = "../../common/lighthouse_metrics" } lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
smallvec = "1.4.0" smallvec = "1.4.0"
lru = "0.4.5" lru = "0.5.1"
parking_lot = "0.10.2" parking_lot = "0.10.2"
sha2 = "0.8.2" sha2 = "0.8.2"
base64 = "0.12.1" base64 = "0.12.1"
@ -35,6 +35,7 @@ tokio-util = { version = "0.3.1", features = ["codec", "compat"] }
# Patched for quick updates # Patched for quick updates
discv5 = { git = "https://github.com/sigp/discv5", rev = "7b3bd40591b62b8c002ffdb85de008aa9f82e2e5" } discv5 = { git = "https://github.com/sigp/discv5", rev = "7b3bd40591b62b8c002ffdb85de008aa9f82e2e5" }
tiny-keccak = "2.0.2" tiny-keccak = "2.0.2"
environment = { path = "../../lighthouse/environment" }
libp2p-tcp = { version = "0.19.1", default-features = false, features = ["tokio"] } libp2p-tcp = { version = "0.19.1", default-features = false, features = ["tokio"] }
[dependencies.libp2p] [dependencies.libp2p]
@ -49,3 +50,4 @@ slog-stdlog = "4.0.0"
slog-term = "2.5.0" slog-term = "2.5.0"
slog-async = "2.5.0" slog-async = "2.5.0"
tempdir = "0.3.7" tempdir = "0.3.7"
exit-future = "0.2.0"

View File

@ -25,8 +25,9 @@ use std::{
marker::PhantomData, marker::PhantomData,
sync::Arc, sync::Arc,
task::{Context, Poll}, task::{Context, Poll},
time::Instant,
}; };
use types::{EnrForkId, EthSpec, SubnetId}; use types::{EnrForkId, EthSpec, SignedBeaconBlock, SubnetId};
mod handler; mod handler;
@ -393,8 +394,36 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
/* Eth2 RPC behaviour functions */ /* Eth2 RPC behaviour functions */
/// Send a request to a peer over RPC.
pub fn send_request(&mut self, peer_id: PeerId, request_id: RequestId, request: Request) {
self.send_rpc(peer_id, RPCSend::Request(request_id, request.into()))
}
/// Send a successful response to a peer over RPC.
pub fn send_successful_response(
&mut self,
peer_id: PeerId,
stream_id: SubstreamId,
response: Response<TSpec>,
) {
self.send_rpc(peer_id, RPCSend::Response(stream_id, response.into()))
}
/// Inform the peer that their request produced an error.
pub fn _send_error_reponse(
&mut self,
peer_id: PeerId,
stream_id: SubstreamId,
error: RPCResponseErrorCode,
reason: String,
) {
self.send_rpc(
peer_id,
RPCSend::Response(stream_id, RPCCodedResponse::from_error_code(error, reason)),
)
}
/// Sends an RPC Request/Response via the RPC protocol. /// Sends an RPC Request/Response via the RPC protocol.
pub fn send_rpc(&mut self, peer_id: PeerId, rpc_event: RPCEvent<TSpec>) { fn send_rpc(&mut self, peer_id: PeerId, rpc_event: RPCSend<TSpec>) {
self.eth2_rpc.send_rpc(peer_id, rpc_event); self.eth2_rpc.send_rpc(peer_id, rpc_event);
} }
@ -431,9 +460,10 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
self.update_metadata(); self.update_metadata();
} }
/// A request to search for peers connected to a long-lived subnet. /// Attempts to discover new peers for a given subnet. The `min_ttl` gives the time at which we
pub fn peers_request(&mut self, subnet_id: SubnetId) { /// would like to retain the peers for.
self.discovery.peers_request(subnet_id); pub fn discover_subnet_peers(&mut self, subnet_id: SubnetId, min_ttl: Option<Instant>) {
self.discovery.discover_subnet_peers(subnet_id, min_ttl)
} }
/// Updates the local ENR's "eth2" field with the latest EnrForkId. /// Updates the local ENR's "eth2" field with the latest EnrForkId.
@ -476,32 +506,38 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
.expect("Local discovery must have bitfield"); .expect("Local discovery must have bitfield");
} }
/// Sends a PING/PONG request/response to a peer. /// Sends a Ping request to the peer.
fn send_ping(&mut self, id: RequestId, peer_id: PeerId, is_request: bool) { fn ping(&mut self, id: RequestId, peer_id: PeerId) {
let ping = crate::rpc::methods::Ping { let ping = crate::rpc::Ping {
data: self.meta_data.seq_number, data: self.meta_data.seq_number,
}; };
debug!(self.log, "Sending Ping"; "request_id" => id, "peer_id" => peer_id.to_string());
let event = RPCSend::Request(id, RPCRequest::Ping(ping));
let event = if is_request { self.send_rpc(peer_id, event);
debug!(self.log, "Sending Ping"; "request_id" => id, "peer_id" => peer_id.to_string()); }
RPCEvent::Request(id, RPCRequest::Ping(ping))
} else { /// Sends a Pong response to the peer.
debug!(self.log, "Sending Pong"; "request_id" => id, "peer_id" => peer_id.to_string()); fn pong(&mut self, id: SubstreamId, peer_id: PeerId) {
RPCEvent::Response(id, RPCCodedResponse::Success(RPCResponse::Pong(ping))) let ping = crate::rpc::Ping {
data: self.meta_data.seq_number,
}; };
debug!(self.log, "Sending Pong"; "request_id" => id, "peer_id" => peer_id.to_string());
let event = RPCSend::Response(id, RPCCodedResponse::Success(RPCResponse::Pong(ping)));
self.send_rpc(peer_id, event); self.send_rpc(peer_id, event);
} }
/// Sends a METADATA request to a peer. /// Sends a METADATA request to a peer.
fn send_meta_data_request(&mut self, peer_id: PeerId) { fn send_meta_data_request(&mut self, peer_id: PeerId) {
let metadata_request = let metadata_request =
RPCEvent::Request(RequestId::from(0usize), RPCRequest::MetaData(PhantomData)); RPCSend::Request(RequestId::Behaviour, RPCRequest::MetaData(PhantomData));
self.send_rpc(peer_id, metadata_request); self.send_rpc(peer_id, metadata_request);
} }
/// Sends a METADATA response to a peer. /// Sends a METADATA response to a peer.
fn send_meta_data_response(&mut self, id: RequestId, peer_id: PeerId) { fn send_meta_data_response(&mut self, id: SubstreamId, peer_id: PeerId) {
let metadata_response = RPCEvent::Response( let metadata_response = RPCSend::Response(
id, id,
RPCCodedResponse::Success(RPCResponse::MetaData(self.meta_data.clone())), RPCCodedResponse::Success(RPCResponse::MetaData(self.meta_data.clone())),
); );
@ -587,45 +623,112 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
} }
} }
/// Queues the response to be sent upwards as long at it was requested outside the Behaviour.
fn propagate_response(&mut self, id: RequestId, peer_id: PeerId, response: Response<TSpec>) {
if !matches!(id, RequestId::Behaviour) {
self.events.push(BehaviourEvent::ResponseReceived {
peer_id,
id,
response,
});
}
}
/// Convenience function to propagate a request.
fn propagate_request(&mut self, id: SubstreamId, peer_id: PeerId, request: Request) {
self.events.push(BehaviourEvent::RequestReceived {
peer_id,
id,
request,
});
}
fn on_rpc_event(&mut self, message: RPCMessage<TSpec>) { fn on_rpc_event(&mut self, message: RPCMessage<TSpec>) {
let peer_id = message.peer_id; let peer_id = message.peer_id;
// The METADATA and PING RPC responses are handled within the behaviour and not // The METADATA and PING RPC responses are handled within the behaviour and not propagated
// propagated
// TODO: Improve the RPC types to better handle this logic discrepancy
match message.event { match message.event {
RPCEvent::Request(id, RPCRequest::Ping(ping)) => { Err(handler_err) => {
// inform the peer manager and send the response match handler_err {
self.peer_manager.ping_request(&peer_id, ping.data); HandlerErr::Inbound {
// send a ping response id: _,
self.send_ping(id, peer_id, false); proto,
error,
} => {
// Inform the peer manager of the error.
// An inbound error here means we sent an error to the peer, or the stream
// timed out.
self.peer_manager.handle_rpc_error(&peer_id, proto, &error);
}
HandlerErr::Outbound { id, proto, error } => {
// Inform the peer manager that a request we sent to the peer failed
self.peer_manager.handle_rpc_error(&peer_id, proto, &error);
// inform failures of requests comming outside the behaviour
if !matches!(id, RequestId::Behaviour) {
self.events
.push(BehaviourEvent::RPCFailed { peer_id, id, error });
}
}
}
} }
RPCEvent::Request(id, RPCRequest::MetaData(_)) => { Ok(RPCReceived::Request(id, request)) => match request {
// send the requested meta-data /* Behaviour managed protocols: Ping and Metadata */
self.send_meta_data_response(id, peer_id); RPCRequest::Ping(ping) => {
// inform the peer manager and send the response
self.peer_manager.ping_request(&peer_id, ping.data);
// send a ping response
self.pong(id, peer_id);
}
RPCRequest::MetaData(_) => {
// send the requested meta-data
self.send_meta_data_response(id, peer_id);
// TODO: inform the peer manager?
}
/* Protocols propagated to the Network */
RPCRequest::Status(msg) => {
// inform the peer manager that we have received a status from a peer
self.peer_manager.peer_statusd(&peer_id);
// propagate the STATUS message upwards
self.propagate_request(id, peer_id, Request::Status(msg))
}
RPCRequest::BlocksByRange(req) => {
self.propagate_request(id, peer_id, Request::BlocksByRange(req))
}
RPCRequest::BlocksByRoot(req) => {
self.propagate_request(id, peer_id, Request::BlocksByRoot(req))
}
RPCRequest::Goodbye(reason) => {
// TODO: do not propagate
self.propagate_request(id, peer_id, Request::Goodbye(reason));
}
},
Ok(RPCReceived::Response(id, resp)) => {
match resp {
/* Behaviour managed protocols */
RPCResponse::Pong(ping) => self.peer_manager.pong_response(&peer_id, ping.data),
RPCResponse::MetaData(meta_data) => {
self.peer_manager.meta_data_response(&peer_id, meta_data)
}
/* Network propagated protocols */
RPCResponse::Status(msg) => {
// inform the peer manager that we have received a status from a peer
self.peer_manager.peer_statusd(&peer_id);
// propagate the STATUS message upwards
self.propagate_response(id, peer_id, Response::Status(msg));
}
RPCResponse::BlocksByRange(resp) => {
self.propagate_response(id, peer_id, Response::BlocksByRange(Some(resp)))
}
RPCResponse::BlocksByRoot(resp) => {
self.propagate_response(id, peer_id, Response::BlocksByRoot(Some(resp)))
}
}
} }
RPCEvent::Response(_, RPCCodedResponse::Success(RPCResponse::Pong(ping))) => { Ok(RPCReceived::EndOfStream(id, termination)) => {
self.peer_manager.pong_response(&peer_id, ping.data); let response = match termination {
} ResponseTermination::BlocksByRange => Response::BlocksByRange(None),
RPCEvent::Response(_, RPCCodedResponse::Success(RPCResponse::MetaData(meta_data))) => { ResponseTermination::BlocksByRoot => Response::BlocksByRoot(None),
self.peer_manager.meta_data_response(&peer_id, meta_data); };
} self.propagate_response(id, peer_id, response);
RPCEvent::Request(_, RPCRequest::Status(_))
| RPCEvent::Response(_, RPCCodedResponse::Success(RPCResponse::Status(_))) => {
// inform the peer manager that we have received a status from a peer
self.peer_manager.peer_statusd(&peer_id);
// propagate the STATUS message upwards
self.events
.push(BehaviourEvent::RPC(peer_id, message.event));
}
RPCEvent::Error(_, protocol, ref err) => {
self.peer_manager.handle_rpc_error(&peer_id, protocol, err);
self.events
.push(BehaviourEvent::RPC(peer_id, message.event));
}
_ => {
// propagate all other RPC messages upwards
self.events
.push(BehaviourEvent::RPC(peer_id, message.event))
} }
} }
} }
@ -648,7 +751,7 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
} }
PeerManagerEvent::Ping(peer_id) => { PeerManagerEvent::Ping(peer_id) => {
// send a ping request to this peer // send a ping request to this peer
self.send_ping(RequestId::from(0usize), peer_id, true); self.ping(RequestId::Behaviour, peer_id);
} }
PeerManagerEvent::MetaData(peer_id) => { PeerManagerEvent::MetaData(peer_id) => {
self.send_meta_data_request(peer_id); self.send_meta_data_request(peer_id);
@ -707,11 +810,96 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
} }
} }
/* Public API types */
/// The type of RPC requests the Behaviour informs it has received and allows for sending.
///
// NOTE: This is an application-level wrapper over the lower network leve requests that can be
// sent. The main difference is the absense of the Ping and Metadata protocols, which don't
// leave the Behaviour. For all protocols managed by RPC see `RPCRequest`.
#[derive(Debug, Clone, PartialEq)]
pub enum Request {
/// A Status message.
Status(StatusMessage),
/// A Goobye message.
Goodbye(GoodbyeReason),
/// A blocks by range request.
BlocksByRange(BlocksByRangeRequest),
/// A request blocks root request.
BlocksByRoot(BlocksByRootRequest),
}
impl<TSpec: EthSpec> std::convert::From<Request> for RPCRequest<TSpec> {
fn from(req: Request) -> RPCRequest<TSpec> {
match req {
Request::BlocksByRoot(r) => RPCRequest::BlocksByRoot(r),
Request::BlocksByRange(r) => RPCRequest::BlocksByRange(r),
Request::Goodbye(r) => RPCRequest::Goodbye(r),
Request::Status(s) => RPCRequest::Status(s),
}
}
}
/// The type of RPC responses the Behaviour informs it has received, and allows for sending.
///
// NOTE: This is an application-level wrapper over the lower network level responses that can be
// sent. The main difference is the absense of Pong and Metadata, which don't leave the
// Behaviour. For all protocol reponses managed by RPC see `RPCResponse` and
// `RPCCodedResponse`.
#[derive(Debug, Clone, PartialEq)]
pub enum Response<TSpec: EthSpec> {
/// A Status message.
Status(StatusMessage),
/// A response to a get BLOCKS_BY_RANGE request. A None response signals the end of the batch.
BlocksByRange(Option<Box<SignedBeaconBlock<TSpec>>>),
/// A response to a get BLOCKS_BY_ROOT request.
BlocksByRoot(Option<Box<SignedBeaconBlock<TSpec>>>),
}
impl<TSpec: EthSpec> std::convert::From<Response<TSpec>> for RPCCodedResponse<TSpec> {
fn from(resp: Response<TSpec>) -> RPCCodedResponse<TSpec> {
match resp {
Response::BlocksByRoot(r) => match r {
Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRoot(b)),
None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRoot),
},
Response::BlocksByRange(r) => match r {
Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRange(b)),
None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange),
},
Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)),
}
}
}
/// The types of events than can be obtained from polling the behaviour. /// The types of events than can be obtained from polling the behaviour.
#[derive(Debug)] #[derive(Debug)]
pub enum BehaviourEvent<TSpec: EthSpec> { pub enum BehaviourEvent<TSpec: EthSpec> {
/// A received RPC event and the peer that it was received from. /// An RPC Request that was sent failed.
RPC(PeerId, RPCEvent<TSpec>), RPCFailed {
/// The id of the failed request.
id: RequestId,
/// The peer to which this request was sent.
peer_id: PeerId,
/// The error that occurred.
error: RPCError,
},
RequestReceived {
/// The peer that sent the request.
peer_id: PeerId,
/// Identifier of the request. All responses to this request must use this id.
id: SubstreamId,
/// Request the peer sent.
request: Request,
},
ResponseReceived {
/// Peer that sent the response.
peer_id: PeerId,
/// Id of the request to which the peer is responding.
id: RequestId,
/// Response the peer sent.
response: Response<TSpec>,
},
PubsubMessage { PubsubMessage {
/// The gossipsub message id. Used when propagating blocks after validation. /// The gossipsub message id. Used when propagating blocks after validation.
id: MessageId, id: MessageId,

View File

@ -8,7 +8,7 @@ pub use enr_ext::{CombinedKeyExt, EnrExt};
use crate::metrics; use crate::metrics;
use crate::{error, Enr, NetworkConfig, NetworkGlobals}; use crate::{error, Enr, NetworkConfig, NetworkGlobals};
use discv5::{enr::NodeId, Discv5, Discv5Event}; use discv5::{enr::NodeId, Discv5, Discv5Event, QueryId};
use enr::{Eth2Enr, BITFIELD_ENR_KEY, ETH2_ENR_KEY}; use enr::{Eth2Enr, BITFIELD_ENR_KEY, ETH2_ENR_KEY};
use futures::prelude::*; use futures::prelude::*;
use libp2p::core::{connection::ConnectionId, Multiaddr, PeerId}; use libp2p::core::{connection::ConnectionId, Multiaddr, PeerId};
@ -18,20 +18,24 @@ use libp2p::swarm::{
NetworkBehaviourAction, PollParameters, ProtocolsHandler, NetworkBehaviourAction, PollParameters, ProtocolsHandler,
}; };
use lru::LruCache; use lru::LruCache;
use slog::{crit, debug, info, warn}; use slog::{crit, debug, info, trace, warn};
use ssz::{Decode, Encode}; use ssz::{Decode, Encode};
use ssz_types::BitVector; use ssz_types::BitVector;
use std::{ use std::{
collections::{HashSet, VecDeque}, collections::{HashMap, HashSet, VecDeque},
net::SocketAddr, net::SocketAddr,
path::Path, path::Path,
sync::Arc, sync::Arc,
task::{Context, Poll}, task::{Context, Poll},
time::Duration, time::{Duration, Instant},
}; };
use tokio::time::{delay_until, Delay, Instant}; use tokio::time::{delay_until, Delay};
use types::{EnrForkId, EthSpec, SubnetId}; use types::{EnrForkId, EthSpec, SubnetId};
mod subnet_predicate;
use subnet_predicate::subnet_predicate;
/// Maximum seconds before searching for extra peers. /// Maximum seconds before searching for extra peers.
const MAX_TIME_BETWEEN_PEER_SEARCHES: u64 = 120; const MAX_TIME_BETWEEN_PEER_SEARCHES: u64 = 120;
/// Initial delay between peer searches. /// Initial delay between peer searches.
@ -41,7 +45,18 @@ const MINIMUM_PEERS_BEFORE_DELAY_INCREASE: usize = 5;
/// Local ENR storage filename. /// Local ENR storage filename.
pub const ENR_FILENAME: &str = "enr.dat"; pub const ENR_FILENAME: &str = "enr.dat";
/// Number of peers we'd like to have connected to a given long-lived subnet. /// Number of peers we'd like to have connected to a given long-lived subnet.
const TARGET_SUBNET_PEERS: u64 = 3; const TARGET_SUBNET_PEERS: usize = 3;
/// Number of times to attempt a discovery request
const MAX_DISCOVERY_RETRY: u64 = 3;
/// A struct representing the information associated with a single discovery request,
/// which can be retried with multiple queries
#[derive(Clone, Debug)]
pub struct Request {
pub query_id: Option<QueryId>,
pub min_ttl: Option<Instant>,
pub retries: u64,
}
/// Lighthouse discovery behaviour. This provides peer management and discovery using the Discv5 /// Lighthouse discovery behaviour. This provides peer management and discovery using the Discv5
/// libp2p protocol. /// libp2p protocol.
@ -79,6 +94,9 @@ pub struct Discovery<TSpec: EthSpec> {
/// A collection of network constants that can be read from other threads. /// A collection of network constants that can be read from other threads.
network_globals: Arc<NetworkGlobals<TSpec>>, network_globals: Arc<NetworkGlobals<TSpec>>,
/// A mapping of SubnetId that we are currently searching for to all information associated with each request.
subnet_queries: HashMap<SubnetId, Request>,
/// Logger for the discovery behaviour. /// Logger for the discovery behaviour.
log: slog::Logger, log: slog::Logger,
} }
@ -139,11 +157,12 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
cached_enrs: LruCache::new(50), cached_enrs: LruCache::new(50),
banned_peers: HashSet::new(), banned_peers: HashSet::new(),
max_peers: config.max_peers, max_peers: config.max_peers,
peer_discovery_delay: delay_until(Instant::now()), peer_discovery_delay: delay_until(tokio::time::Instant::now()),
past_discovery_delay: INITIAL_SEARCH_DELAY, past_discovery_delay: INITIAL_SEARCH_DELAY,
tcp_port: config.libp2p_port, tcp_port: config.libp2p_port,
discovery, discovery,
network_globals, network_globals,
subnet_queries: HashMap::new(),
log, log,
enr_dir, enr_dir,
}) })
@ -280,57 +299,93 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
} }
/// A request to find peers on a given subnet. /// A request to find peers on a given subnet.
// TODO: This logic should be improved with added sophistication in peer management pub fn discover_subnet_peers(&mut self, subnet_id: SubnetId, min_ttl: Option<Instant>) {
// This currently checks for currently connected peers and if we don't have // TODO: Extend this to an event once discovery becomes a thread managed by the peer
// PEERS_WANTED_BEFORE_DISCOVERY connected to a given subnet we search for more. // manager
pub fn peers_request(&mut self, subnet_id: SubnetId) { if let Some(min_ttl) = min_ttl {
self.network_globals
.peers
.write()
.extend_peers_on_subnet(subnet_id, min_ttl);
}
// If there is already a discovery request in process for this subnet, ignore this request,
// but update the min_ttl.
if let Some(request) = self.subnet_queries.get_mut(&subnet_id) {
// update the min_ttl if required
if let Some(min_ttl) = min_ttl {
if request.min_ttl < Some(min_ttl) {
request.min_ttl = Some(min_ttl);
}
}
return;
}
// Insert a request and start a query for the subnet
self.subnet_queries.insert(
subnet_id.clone(),
Request {
query_id: None,
min_ttl,
retries: 0,
},
);
self.run_subnet_query(subnet_id);
}
/// Runs a discovery request for a given subnet_id if one already exists.
fn run_subnet_query(&mut self, subnet_id: SubnetId) {
let mut request = match self.subnet_queries.remove(&subnet_id) {
Some(v) => v,
None => return, // request doesn't exist
};
// increment the retry count
request.retries += 1;
let peers_on_subnet = self let peers_on_subnet = self
.network_globals .network_globals
.peers .peers
.read() .read()
.peers_on_subnet(subnet_id) .peers_on_subnet(subnet_id)
.count() as u64; .count();
if peers_on_subnet < TARGET_SUBNET_PEERS { if peers_on_subnet > TARGET_SUBNET_PEERS {
let target_peers = TARGET_SUBNET_PEERS - peers_on_subnet; trace!(self.log, "Discovery ignored";
debug!(self.log, "Searching for peers for subnet";
"subnet_id" => *subnet_id,
"connected_peers_on_subnet" => peers_on_subnet,
"target_subnet_peers" => TARGET_SUBNET_PEERS,
"peers_to_find" => target_peers
);
let log_clone = self.log.clone();
let subnet_predicate = move |enr: &Enr| {
if let Some(bitfield_bytes) = enr.get(BITFIELD_ENR_KEY) {
let bitfield = match BitVector::<TSpec::SubnetBitfieldLength>::from_ssz_bytes(
bitfield_bytes,
) {
Ok(v) => v,
Err(e) => {
warn!(log_clone, "Could not decode ENR bitfield for peer"; "peer_id" => format!("{}", enr.peer_id()), "error" => format!("{:?}", e));
return false;
}
};
return bitfield.get(*subnet_id as usize).unwrap_or_else(|_| {
debug!(log_clone, "Peer found but not on desired subnet"; "peer_id" => format!("{}", enr.peer_id()));
false
});
}
false
};
// start the query
self.start_query(subnet_predicate, target_peers as usize);
} else {
debug!(self.log, "Discovery ignored";
"reason" => "Already connected to desired peers", "reason" => "Already connected to desired peers",
"connected_peers_on_subnet" => peers_on_subnet, "connected_peers_on_subnet" => peers_on_subnet,
"target_subnet_peers" => TARGET_SUBNET_PEERS, "target_subnet_peers" => TARGET_SUBNET_PEERS,
); );
return;
} }
// remove the entry and complete the query if greater than the maximum search count
if request.retries >= MAX_DISCOVERY_RETRY {
debug!(
self.log,
"Subnet peer discovery did not find sufficient peers. Reached max retry limit"
);
return;
}
let target_peers = TARGET_SUBNET_PEERS - peers_on_subnet;
debug!(self.log, "Searching for peers for subnet";
"subnet_id" => *subnet_id,
"connected_peers_on_subnet" => peers_on_subnet,
"target_subnet_peers" => TARGET_SUBNET_PEERS,
"peers_to_find" => target_peers,
"attempt" => request.retries,
);
// start the query, and update the queries map if necessary
let subnet_predicate = subnet_predicate::<TSpec>(subnet_id, &self.log);
if let Some(query_id) = self.start_query(subnet_predicate, target_peers) {
request.query_id = Some(query_id);
} else {
// ENR is not present remove the query
return;
}
self.subnet_queries.insert(subnet_id, request);
} }
/* Internal Functions */ /* Internal Functions */
@ -348,7 +403,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
/// This can optionally search for peers for a given predicate. Regardless of the predicate /// This can optionally search for peers for a given predicate. Regardless of the predicate
/// given, this will only search for peers on the same enr_fork_id as specified in the local /// given, this will only search for peers on the same enr_fork_id as specified in the local
/// ENR. /// ENR.
fn start_query<F>(&mut self, enr_predicate: F, num_nodes: usize) fn start_query<F>(&mut self, enr_predicate: F, num_nodes: usize) -> Option<QueryId>
where where
F: Fn(&Enr) -> bool + Send + 'static + Clone, F: Fn(&Enr) -> bool + Send + 'static + Clone,
{ {
@ -359,18 +414,54 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
Ok(v) => v, Ok(v) => v,
Err(e) => { Err(e) => {
crit!(self.log, "Local ENR has no fork id"; "error" => e); crit!(self.log, "Local ENR has no fork id"; "error" => e);
return; return None;
} }
}; };
// predicate for finding nodes with a matching fork // predicate for finding nodes with a matching fork
let eth2_fork_predicate = move |enr: &Enr| { let eth2_fork_predicate = move |enr: &Enr| enr.eth2() == Ok(enr_fork_id.clone());
enr.eth2().map(|enr| enr.fork_digest) == Ok(enr_fork_id.fork_digest.clone())
};
let predicate = move |enr: &Enr| eth2_fork_predicate(enr) && enr_predicate(enr); let predicate = move |enr: &Enr| eth2_fork_predicate(enr) && enr_predicate(enr);
// general predicate // general predicate
self.discovery Some(
.find_enr_predicate(random_node, predicate, num_nodes); self.discovery
.find_enr_predicate(random_node, predicate, num_nodes),
)
}
/// Peers that are found during discovery are optionally dialed.
// TODO: Shift to peer manager. As its own service, discovery should spit out discovered nodes
// and the peer manager should decide about who to connect to.
fn dial_discovered_peers(&mut self, peers: Vec<Enr>, min_ttl: Option<Instant>) {
for enr in peers {
// cache known peers
let peer_id = enr.peer_id();
self.cached_enrs.put(enr.peer_id(), enr);
// if we need more peers, attempt a connection
if self.network_globals.connected_or_dialing_peers() < self.max_peers
&& !self
.network_globals
.peers
.read()
.is_connected_or_dialing(&peer_id)
&& !self.banned_peers.contains(&peer_id)
{
debug!(self.log, "Connecting to discovered peer"; "peer_id"=> peer_id.to_string());
// TODO: Update output
// This should be updated with the peer dialing. In fact created once the peer is
// dialed
if let Some(min_ttl) = min_ttl {
self.network_globals
.peers
.write()
.update_min_ttl(&peer_id, min_ttl);
}
self.events.push_back(NetworkBehaviourAction::DialPeer {
peer_id,
condition: DialPeerCondition::Disconnected,
});
}
}
} }
} }
@ -440,7 +531,8 @@ impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> {
} }
// Set to maximum, and update to earlier, once we get our results back. // Set to maximum, and update to earlier, once we get our results back.
self.peer_discovery_delay.reset( self.peer_discovery_delay.reset(
Instant::now() + Duration::from_secs(MAX_TIME_BETWEEN_PEER_SEARCHES), tokio::time::Instant::now()
+ Duration::from_secs(MAX_TIME_BETWEEN_PEER_SEARCHES),
); );
} }
Poll::Pending => break, Poll::Pending => break,
@ -477,7 +569,11 @@ impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> {
address, address,
}); });
} }
Discv5Event::FindNodeResult { closer_peers, .. } => { Discv5Event::FindNodeResult {
closer_peers,
query_id,
..
} => {
debug!(self.log, "Discovery query completed"; "peers_found" => closer_peers.len()); debug!(self.log, "Discovery query completed"; "peers_found" => closer_peers.len());
// update the time to the next query // update the time to the next query
if self.past_discovery_delay < MAX_TIME_BETWEEN_PEER_SEARCHES if self.past_discovery_delay < MAX_TIME_BETWEEN_PEER_SEARCHES
@ -486,40 +582,30 @@ impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> {
{ {
self.past_discovery_delay *= 2; self.past_discovery_delay *= 2;
} }
let delay = std::cmp::min( let delay = std::cmp::max(
self.past_discovery_delay, self.past_discovery_delay,
MAX_TIME_BETWEEN_PEER_SEARCHES, MAX_TIME_BETWEEN_PEER_SEARCHES,
); );
self.peer_discovery_delay self.peer_discovery_delay
.reset(Instant::now() + Duration::from_secs(delay)); .reset(tokio::time::Instant::now() + Duration::from_secs(delay));
for enr in closer_peers { // if this is a subnet query, run it to completion
// cache known peers if let Some((subnet_id, min_ttl)) = self
let peer_id = enr.peer_id(); .subnet_queries
self.cached_enrs.put(enr.peer_id(), enr); .iter()
.find(|(_, request)| request.query_id == Some(query_id))
// if we need more peers, attempt a connection .map(|(subnet_id, request)| {
if self.network_globals.connected_or_dialing_peers() (subnet_id.clone(), request.min_ttl.clone())
< self.max_peers })
&& !self {
.network_globals debug!(self.log, "Peer subnet discovery request completed"; "peers_found" => closer_peers.len(), "subnet_id" => *subnet_id);
.peers self.dial_discovered_peers(closer_peers, min_ttl);
.read() self.run_subnet_query(subnet_id);
.is_connected_or_dialing(&peer_id) } else {
&& !self.banned_peers.contains(&peer_id) if closer_peers.is_empty() {
{ debug!(self.log, "Peer Discovery request yielded no results.");
// TODO: Debugging only } else {
// NOTE: The peer manager will get updated by the global swarm. self.dial_discovered_peers(closer_peers, None);
let connection_status = self
.network_globals
.peers
.read()
.connection_status(&peer_id);
debug!(self.log, "Connecting to discovered peer"; "peer_id"=> peer_id.to_string(), "status" => format!("{:?}", connection_status));
self.events.push_back(NetworkBehaviourAction::DialPeer {
peer_id,
condition: DialPeerCondition::Disconnected,
});
} }
} }
} }

View File

@ -0,0 +1,33 @@
///! The subnet predicate used for searching for a particular subnet.
use super::*;
/// Returns the predicate for a given subnet.
pub fn subnet_predicate<TSpec>(
subnet_id: SubnetId,
log: &slog::Logger,
) -> impl Fn(&Enr) -> bool + Send + 'static + Clone
where
TSpec: EthSpec,
{
let log_clone = log.clone();
move |enr: &Enr| {
if let Some(bitfield_bytes) = enr.get(BITFIELD_ENR_KEY) {
let bitfield = match BitVector::<TSpec::SubnetBitfieldLength>::from_ssz_bytes(
bitfield_bytes,
) {
Ok(v) => v,
Err(e) => {
warn!(log_clone, "Could not decode ENR bitfield for peer"; "peer_id" => format!("{}", enr.peer_id()), "error" => format!("{:?}", e));
return false;
}
};
return bitfield.get(*subnet_id as usize).unwrap_or_else(|_| {
debug!(log_clone, "Peer found but not on desired subnet"; "peer_id" => format!("{}", enr.peer_id()));
false
});
}
false
}
}

View File

@ -15,12 +15,11 @@ mod service;
pub mod types; pub mod types;
pub use crate::types::{error, Enr, GossipTopic, NetworkGlobals, PubsubMessage}; pub use crate::types::{error, Enr, GossipTopic, NetworkGlobals, PubsubMessage};
pub use behaviour::BehaviourEvent; pub use behaviour::{BehaviourEvent, Request, Response};
pub use config::Config as NetworkConfig; pub use config::Config as NetworkConfig;
pub use discovery::enr_ext::{CombinedKeyExt, EnrExt}; pub use discovery::enr_ext::{CombinedKeyExt, EnrExt};
pub use libp2p::gossipsub::{MessageId, Topic, TopicHash}; pub use libp2p::gossipsub::{MessageId, Topic, TopicHash};
pub use libp2p::{core::ConnectedPoint, PeerId, Swarm}; pub use libp2p::{core::ConnectedPoint, PeerId, Swarm};
pub use libp2p::{multiaddr, Multiaddr}; pub use libp2p::{multiaddr, Multiaddr};
pub use peer_manager::{client::Client, PeerDB, PeerInfo, PeerSyncStatus, SyncInfo}; pub use peer_manager::{client::Client, PeerDB, PeerInfo, PeerSyncStatus, SyncInfo};
pub use rpc::RPCEvent;
pub use service::{Libp2pEvent, Service, NETWORK_KEY_FILENAME}; pub use service::{Libp2pEvent, Service, NETWORK_KEY_FILENAME};

View File

@ -279,7 +279,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
// this could their fault or ours, so we tolerate this // this could their fault or ours, so we tolerate this
PeerAction::HighToleranceError PeerAction::HighToleranceError
} }
RPCError::ErrorResponse(code) => match code { RPCError::ErrorResponse(code, _) => match code {
RPCResponseErrorCode::Unknown => PeerAction::HighToleranceError, RPCResponseErrorCode::Unknown => PeerAction::HighToleranceError,
RPCResponseErrorCode::ServerError => PeerAction::MidToleranceError, RPCResponseErrorCode::ServerError => PeerAction::MidToleranceError,
RPCResponseErrorCode::InvalidRequest => PeerAction::LowToleranceError, RPCResponseErrorCode::InvalidRequest => PeerAction::LowToleranceError,

View File

@ -31,6 +31,10 @@ pub struct PeerInfo<T: EthSpec> {
/// The ENR subnet bitfield of the peer. This may be determined after it's initial /// The ENR subnet bitfield of the peer. This may be determined after it's initial
/// connection. /// connection.
pub meta_data: Option<MetaData<T>>, pub meta_data: Option<MetaData<T>>,
/// The time we would like to retain this peer. After this time, the peer is no longer
/// necessary.
#[serde(skip)]
pub min_ttl: Option<Instant>,
} }
impl<TSpec: EthSpec> Default for PeerInfo<TSpec> { impl<TSpec: EthSpec> Default for PeerInfo<TSpec> {
@ -43,6 +47,7 @@ impl<TSpec: EthSpec> Default for PeerInfo<TSpec> {
listening_addresses: vec![], listening_addresses: vec![],
sync_status: PeerSyncStatus::Unknown, sync_status: PeerSyncStatus::Unknown,
meta_data: None, meta_data: None,
min_ttl: None,
} }
} }
} }

View File

@ -2,7 +2,7 @@ use super::peer_info::{PeerConnectionStatus, PeerInfo};
use super::peer_sync_status::PeerSyncStatus; use super::peer_sync_status::PeerSyncStatus;
use crate::rpc::methods::MetaData; use crate::rpc::methods::MetaData;
use crate::PeerId; use crate::PeerId;
use slog::{crit, debug, warn}; use slog::{crit, debug, trace, warn};
use std::collections::{hash_map::Entry, HashMap}; use std::collections::{hash_map::Entry, HashMap};
use std::time::Instant; use std::time::Instant;
use types::{EthSpec, SubnetId}; use types::{EthSpec, SubnetId};
@ -233,7 +233,42 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
info.connection_status = PeerConnectionStatus::Dialing { info.connection_status = PeerConnectionStatus::Dialing {
since: Instant::now(), since: Instant::now(),
}; };
debug!(self.log, "Peer dialing in db"; "peer_id" => peer_id.to_string(), "n_dc" => self.n_dc); }
/// Update min ttl of a peer.
pub fn update_min_ttl(&mut self, peer_id: &PeerId, min_ttl: Instant) {
let info = self.peers.entry(peer_id.clone()).or_default();
// only update if the ttl is longer
if info.min_ttl.is_none() || Some(min_ttl) > info.min_ttl {
info.min_ttl = Some(min_ttl);
let min_ttl_secs = min_ttl
.checked_duration_since(Instant::now())
.map(|duration| duration.as_secs())
.unwrap_or_else(|| 0);
debug!(self.log, "Updating the time a peer is required for"; "peer_id" => peer_id.to_string(), "future_min_ttl_secs" => min_ttl_secs);
}
}
/// Extends the ttl of all peers on the given subnet that have a shorter
/// min_ttl than what's given.
pub fn extend_peers_on_subnet(&mut self, subnet_id: SubnetId, min_ttl: Instant) {
let log = &self.log;
self.peers.iter_mut()
.filter(move |(_, info)| {
info.connection_status.is_connected() && info.on_subnet(subnet_id)
})
.for_each(|(peer_id,info)| {
if info.min_ttl.is_none() || Some(min_ttl) > info.min_ttl {
info.min_ttl = Some(min_ttl);
}
let min_ttl_secs = min_ttl
.checked_duration_since(Instant::now())
.map(|duration| duration.as_secs())
.unwrap_or_else(|| 0);
trace!(log, "Updating minimum duration a peer is required for"; "peer_id" => peer_id.to_string(), "min_ttl" => min_ttl_secs);
});
} }
/// Sets a peer as connected with an ingoing connection. /// Sets a peer as connected with an ingoing connection.
@ -244,7 +279,6 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
self.n_dc = self.n_dc.saturating_sub(1); self.n_dc = self.n_dc.saturating_sub(1);
} }
info.connection_status.connect_ingoing(); info.connection_status.connect_ingoing();
debug!(self.log, "Peer connected to db"; "peer_id" => peer_id.to_string(), "n_dc" => self.n_dc);
} }
/// Sets a peer as connected with an outgoing connection. /// Sets a peer as connected with an outgoing connection.
@ -255,7 +289,6 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
self.n_dc = self.n_dc.saturating_sub(1); self.n_dc = self.n_dc.saturating_sub(1);
} }
info.connection_status.connect_outgoing(); info.connection_status.connect_outgoing();
debug!(self.log, "Peer connected to db"; "peer_id" => peer_id.to_string(), "n_dc" => self.n_dc);
} }
/// Sets the peer as disconnected. A banned peer remains banned /// Sets the peer as disconnected. A banned peer remains banned
@ -270,7 +303,6 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
info.connection_status.disconnect(); info.connection_status.disconnect();
self.n_dc += 1; self.n_dc += 1;
} }
debug!(self.log, "Peer disconnected from db"; "peer_id" => peer_id.to_string(), "n_dc" => self.n_dc);
self.shrink_to_fit(); self.shrink_to_fit();
} }
@ -302,7 +334,6 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
if info.connection_status.is_disconnected() { if info.connection_status.is_disconnected() {
self.n_dc = self.n_dc.saturating_sub(1); self.n_dc = self.n_dc.saturating_sub(1);
} }
debug!(self.log, "Peer banned"; "peer_id" => peer_id.to_string(), "n_dc" => self.n_dc);
info.connection_status.ban(); info.connection_status.ban();
} }
@ -375,7 +406,7 @@ mod tests {
} }
fn get_db() -> PeerDB<M> { fn get_db() -> PeerDB<M> {
let log = build_log(slog::Level::Debug, true); let log = build_log(slog::Level::Debug, false);
PeerDB::new(&log) PeerDB::new(&log)
} }

View File

@ -3,7 +3,7 @@
use super::methods::{RPCCodedResponse, RequestId, ResponseTermination}; use super::methods::{RPCCodedResponse, RequestId, ResponseTermination};
use super::protocol::{Protocol, RPCError, RPCProtocol, RPCRequest}; use super::protocol::{Protocol, RPCError, RPCProtocol, RPCRequest};
use super::RPCEvent; use super::{RPCReceived, RPCSend};
use crate::rpc::protocol::{InboundFramed, OutboundFramed}; use crate::rpc::protocol::{InboundFramed, OutboundFramed};
use fnv::FnvHashMap; use fnv::FnvHashMap;
use futures::prelude::*; use futures::prelude::*;
@ -33,12 +33,34 @@ pub const RESPONSE_TIMEOUT: u64 = 10;
/// The number of times to retry an outbound upgrade in the case of IO errors. /// The number of times to retry an outbound upgrade in the case of IO errors.
const IO_ERROR_RETRIES: u8 = 3; const IO_ERROR_RETRIES: u8 = 3;
/// Inbound requests are given a sequential `RequestId` to keep track of. All inbound streams are /// Identifier of inbound and outbound substreams from the handler's perspective.
/// identified by their substream ID which is identical to the RPC Id. #[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)]
type InboundRequestId = RequestId; pub struct SubstreamId(usize);
/// Outbound requests are associated with an id that is given by the application that sent the
/// request. /// An error encoutered by the handler.
type OutboundRequestId = RequestId; pub enum HandlerErr {
/// An error ocurred for this peer's request. This can occurr during protocol negotiation,
/// message passing, or if the handler identifies that we are sending an error reponse to the peer.
Inbound {
/// Id of the peer's request for which an error occurred.
id: SubstreamId,
/// Information of the negotiated protocol.
proto: Protocol,
/// The error that ocurred.
error: RPCError,
},
/// An error ocurred for this request. Such error can occurr during protocol negotiation,
/// message passing, or if we successfully received a response from the peer, but this response
/// indicates an error.
Outbound {
/// Application-given Id of the request for which an error occurred.
id: RequestId,
/// Information of the protocol.
proto: Protocol,
/// The error that ocurred.
error: RPCError,
},
}
/// Implementation of `ProtocolsHandler` for the RPC protocol. /// Implementation of `ProtocolsHandler` for the RPC protocol.
pub struct RPCHandler<TSpec> pub struct RPCHandler<TSpec>
@ -48,11 +70,11 @@ where
/// The upgrade for inbound substreams. /// The upgrade for inbound substreams.
listen_protocol: SubstreamProtocol<RPCProtocol<TSpec>>, listen_protocol: SubstreamProtocol<RPCProtocol<TSpec>>,
/// If something bad happened and we should shut down the handler with an error. /// Errors ocurring on outbound and inbound connections queued for reporting back.
pending_error: Vec<(RequestId, Protocol, RPCError)>, pending_errors: Vec<HandlerErr>,
/// Queue of events to produce in `poll()`. /// Queue of events to produce in `poll()`.
events_out: SmallVec<[RPCEvent<TSpec>; 4]>, events_out: SmallVec<[RPCReceived<TSpec>; 4]>,
/// Queue of outbound substreams to open. /// Queue of outbound substreams to open.
dial_queue: SmallVec<[(RequestId, RPCRequest<TSpec>); 4]>, dial_queue: SmallVec<[(RequestId, RPCRequest<TSpec>); 4]>,
@ -62,7 +84,7 @@ where
/// Current inbound substreams awaiting processing. /// Current inbound substreams awaiting processing.
inbound_substreams: FnvHashMap< inbound_substreams: FnvHashMap<
InboundRequestId, SubstreamId,
( (
InboundSubstreamState<TSpec>, InboundSubstreamState<TSpec>,
Option<delay_queue::Key>, Option<delay_queue::Key>,
@ -71,29 +93,22 @@ where
>, >,
/// Inbound substream `DelayQueue` which keeps track of when an inbound substream will timeout. /// Inbound substream `DelayQueue` which keeps track of when an inbound substream will timeout.
inbound_substreams_delay: DelayQueue<InboundRequestId>, inbound_substreams_delay: DelayQueue<SubstreamId>,
/// Map of outbound substreams that need to be driven to completion. The `RequestId` is /// Map of outbound substreams that need to be driven to completion.
/// maintained by the application sending the request. outbound_substreams: FnvHashMap<SubstreamId, OutboundInfo<TSpec>>,
/// For Responses with multiple expected response chunks a counter is added to be able to terminate the stream when the expected number has been received
outbound_substreams: FnvHashMap<
OutboundRequestId,
(
OutboundSubstreamState<TSpec>,
delay_queue::Key,
Protocol,
Option<u64>,
),
>,
/// Inbound substream `DelayQueue` which keeps track of when an inbound substream will timeout. /// Inbound substream `DelayQueue` which keeps track of when an inbound substream will timeout.
outbound_substreams_delay: DelayQueue<OutboundRequestId>, outbound_substreams_delay: DelayQueue<SubstreamId>,
/// Map of outbound items that are queued as the stream processes them. /// Map of outbound items that are queued as the stream processes them.
queued_outbound_items: FnvHashMap<RequestId, Vec<RPCCodedResponse<TSpec>>>, queued_outbound_items: FnvHashMap<SubstreamId, Vec<RPCCodedResponse<TSpec>>>,
/// Sequential ID for waiting substreams. For inbound substreams, this is also the inbound request ID. /// Sequential ID for waiting substreams. For inbound substreams, this is also the inbound request ID.
current_inbound_substream_id: RequestId, current_inbound_substream_id: SubstreamId,
/// Sequential ID for outbound substreams.
current_outbound_substream_id: SubstreamId,
/// Maximum number of concurrent outbound substreams being opened. Value is never modified. /// Maximum number of concurrent outbound substreams being opened. Value is never modified.
max_dial_negotiated: u32, max_dial_negotiated: u32,
@ -112,6 +127,23 @@ where
log: slog::Logger, log: slog::Logger,
} }
/// Contains the information the handler keeps on established outbound substreams.
struct OutboundInfo<TSpec: EthSpec> {
/// State of the substream.
state: OutboundSubstreamState<TSpec>,
/// Key to keep track of the substream's timeout via `self.outbound_substreams_delay`.
delay_key: delay_queue::Key,
/// Info over the protocol this substream is handling.
proto: Protocol,
/// Number of chunks to be seen from the peer's response.
// TODO: removing the option could allow clossing the streams after the number of
// expected responses is met for all protocols.
// TODO: the type of this is wrong
remaining_chunks: Option<usize>,
/// RequestId as given by the application that sent the request.
req_id: RequestId,
}
pub enum InboundSubstreamState<TSpec> pub enum InboundSubstreamState<TSpec>
where where
TSpec: EthSpec, TSpec: EthSpec,
@ -208,7 +240,7 @@ where
} }
InboundSubstreamState::ResponseIdle(substream) => { InboundSubstreamState::ResponseIdle(substream) => {
*self = InboundSubstreamState::ResponsePendingSend { *self = InboundSubstreamState::ResponsePendingSend {
substream: substream, substream,
message: error, message: error,
closing: true, closing: true,
}; };
@ -235,7 +267,7 @@ where
) -> Self { ) -> Self {
RPCHandler { RPCHandler {
listen_protocol, listen_protocol,
pending_error: Vec::new(), pending_errors: Vec::new(),
events_out: SmallVec::new(), events_out: SmallVec::new(),
dial_queue: SmallVec::new(), dial_queue: SmallVec::new(),
dial_negotiated: 0, dial_negotiated: 0,
@ -244,7 +276,8 @@ where
outbound_substreams: FnvHashMap::default(), outbound_substreams: FnvHashMap::default(),
inbound_substreams_delay: DelayQueue::new(), inbound_substreams_delay: DelayQueue::new(),
outbound_substreams_delay: DelayQueue::new(), outbound_substreams_delay: DelayQueue::new(),
current_inbound_substream_id: 1, current_inbound_substream_id: SubstreamId(0),
current_outbound_substream_id: SubstreamId(0),
max_dial_negotiated: 8, max_dial_negotiated: 8,
keep_alive: KeepAlive::Yes, keep_alive: KeepAlive::Yes,
inactive_timeout, inactive_timeout,
@ -300,8 +333,8 @@ impl<TSpec> ProtocolsHandler for RPCHandler<TSpec>
where where
TSpec: EthSpec, TSpec: EthSpec,
{ {
type InEvent = RPCEvent<TSpec>; type InEvent = RPCSend<TSpec>;
type OutEvent = RPCEvent<TSpec>; type OutEvent = Result<RPCReceived<TSpec>, HandlerErr>;
type Error = RPCError; type Error = RPCError;
type InboundProtocol = RPCProtocol<TSpec>; type InboundProtocol = RPCProtocol<TSpec>;
type OutboundProtocol = RPCRequest<TSpec>; type OutboundProtocol = RPCRequest<TSpec>;
@ -316,9 +349,11 @@ where
substream: <Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output, substream: <Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output,
) { ) {
let (req, substream) = substream; let (req, substream) = substream;
// drop the stream and return a 0 id for goodbye "requests" // drop the stream
if let r @ RPCRequest::Goodbye(_) = req { if let RPCRequest::Goodbye(_) = req {
self.events_out.push(RPCEvent::Request(0, r)); self.events_out
.push(RPCReceived::Request(self.current_inbound_substream_id, req));
self.current_inbound_substream_id.0 += 1;
return; return;
} }
@ -334,8 +369,8 @@ where
); );
self.events_out self.events_out
.push(RPCEvent::Request(self.current_inbound_substream_id, req)); .push(RPCReceived::Request(self.current_inbound_substream_id, req));
self.current_inbound_substream_id += 1; self.current_inbound_substream_id.0 += 1;
} }
fn inject_fully_negotiated_outbound( fn inject_fully_negotiated_outbound(
@ -346,43 +381,42 @@ where
self.dial_negotiated -= 1; self.dial_negotiated -= 1;
// add the stream to substreams if we expect a response, otherwise drop the stream. // add the stream to substreams if we expect a response, otherwise drop the stream.
let (mut id, request) = request_info; let (id, request) = request_info;
if request.expect_response() { let expected_responses = request.expected_responses();
// outbound requests can be sent from various aspects of lighthouse which don't if expected_responses > 0 {
// track request ids. In the future these will be flagged as None, currently they
// are flagged as 0. These can overlap. In this case, we pick the highest request
// Id available
if id == 0 && self.outbound_substreams.get(&id).is_some() {
// have duplicate outbound request with no id. Pick one that will not collide
let mut new_id = std::usize::MAX;
while self.outbound_substreams.get(&new_id).is_some() {
// panic all outbound substreams are full
new_id -= 1;
}
trace!(self.log, "New outbound stream id created"; "id" => new_id);
id = RequestId::from(new_id);
}
// new outbound request. Store the stream and tag the output. // new outbound request. Store the stream and tag the output.
let delay_key = self let delay_key = self.outbound_substreams_delay.insert(
.outbound_substreams_delay self.current_outbound_substream_id,
.insert(id, Duration::from_secs(RESPONSE_TIMEOUT)); Duration::from_secs(RESPONSE_TIMEOUT),
let protocol = request.protocol(); );
let response_chunk_count = match request { let proto = request.protocol();
RPCRequest::BlocksByRange(ref req) => Some(req.count),
RPCRequest::BlocksByRoot(ref req) => Some(req.block_roots.len() as u64),
_ => None, // Other requests do not have a known response chunk length,
};
let awaiting_stream = OutboundSubstreamState::RequestPendingResponse { let awaiting_stream = OutboundSubstreamState::RequestPendingResponse {
substream: out, substream: out,
request: request, request,
}; };
if let Some(_) = self.outbound_substreams.insert( let expected_responses = if expected_responses > 1 {
id, // Currently enforced only for multiple responses
(awaiting_stream, delay_key, protocol, response_chunk_count), Some(expected_responses)
) { } else {
crit!(self.log, "Duplicate outbound substream id"; "id" => format!("{:?}", id)); None
};
if self
.outbound_substreams
.insert(
self.current_outbound_substream_id,
OutboundInfo {
state: awaiting_stream,
delay_key,
proto,
remaining_chunks: expected_responses,
req_id: id,
},
)
.is_some()
{
crit!(self.log, "Duplicate outbound substream id"; "id" => format!("{:?}", self.current_outbound_substream_id));
} }
self.current_outbound_substream_id.0 += 1;
} }
self.update_keep_alive(); self.update_keep_alive();
@ -392,113 +426,124 @@ where
// wrong state a response will fail silently. // wrong state a response will fail silently.
fn inject_event(&mut self, rpc_event: Self::InEvent) { fn inject_event(&mut self, rpc_event: Self::InEvent) {
match rpc_event { match rpc_event {
RPCEvent::Request(id, req) => self.send_request(id, req), RPCSend::Request(id, req) => self.send_request(id, req),
RPCEvent::Response(rpc_id, response) => { RPCSend::Response(inbound_id, response) => {
// Variables indicating if the response is an error response or a multi-part // Variables indicating if the response is an error response or a multi-part
// response // response
let res_is_error = response.is_error(); let res_is_error = response.is_error();
let res_is_multiple = response.multiple_responses(); let res_is_multiple = response.multiple_responses();
// check if the stream matching the response still exists // check if the stream matching the response still exists
match self.inbound_substreams.get_mut(&rpc_id) { let (substream_state, protocol) = match self.inbound_substreams.get_mut(&inbound_id)
Some((substream_state, _, protocol)) => { {
match std::mem::replace(substream_state, InboundSubstreamState::Poisoned) { Some((substream_state, _, protocol)) => (substream_state, protocol),
InboundSubstreamState::ResponseIdle(substream) => { None => {
// close the stream if there is no response warn!(self.log, "Stream has expired. Response not sent";
match response { "response" => response.to_string(), "id" => inbound_id);
RPCCodedResponse::StreamTermination(_) => { return;
//trace!(self.log, "Stream termination sent. Ending the stream"); }
*substream_state = };
InboundSubstreamState::Closing(substream);
}
_ => {
if let Some(error_code) = response.error_code() {
self.pending_error.push((
rpc_id,
*protocol,
RPCError::ErrorResponse(error_code),
));
}
// send the response
// if it's a single rpc request or an error, close the stream after
*substream_state =
InboundSubstreamState::ResponsePendingSend {
substream: substream,
message: response,
closing: !res_is_multiple | res_is_error, // close if an error or we are not expecting more responses
};
}
}
}
InboundSubstreamState::ResponsePendingSend {
substream,
message,
closing,
} if res_is_multiple => {
// the stream is in use, add the request to a pending queue
self.queued_outbound_items
.entry(rpc_id)
.or_insert_with(Vec::new)
.push(response);
// return the state // If the response we are sending is an error, report back for handling
*substream_state = InboundSubstreamState::ResponsePendingSend { match response {
substream, RPCCodedResponse::InvalidRequest(ref reason)
message, | RPCCodedResponse::ServerError(ref reason)
closing, | RPCCodedResponse::Unknown(ref reason) => {
}; let code = &response
} .error_code()
InboundSubstreamState::ResponsePendingFlush { substream, closing } .expect("Error response should map to an error code");
if res_is_multiple => let err = HandlerErr::Inbound {
{ id: inbound_id,
// the stream is in use, add the request to a pending queue proto: *protocol,
self.queued_outbound_items error: RPCError::ErrorResponse(*code, reason.clone()),
.entry(rpc_id) };
.or_insert_with(Vec::new) self.pending_errors.push(err);
.push(response); }
_ => {} // not an error, continue.
}
// return the state match std::mem::replace(substream_state, InboundSubstreamState::Poisoned) {
*substream_state = InboundSubstreamState::ResponsePendingFlush { InboundSubstreamState::ResponseIdle(substream) => {
substream, // close the stream if there is no response
closing, match response {
}; RPCCodedResponse::StreamTermination(_) => {
}
InboundSubstreamState::Closing(substream) => {
*substream_state = InboundSubstreamState::Closing(substream); *substream_state = InboundSubstreamState::Closing(substream);
debug!(self.log, "Response not sent. Stream is closing"; "response" => format!("{}",response));
} }
InboundSubstreamState::ResponsePendingSend { _ => {
substream, // send the response
message, // if it's a single rpc request or an error, close the stream after
..
} => {
*substream_state = InboundSubstreamState::ResponsePendingSend { *substream_state = InboundSubstreamState::ResponsePendingSend {
substream, substream,
message, message: response,
closing: true, closing: !res_is_multiple | res_is_error, // close if an error or we are not expecting more responses
}; };
error!(self.log, "Attempted sending multiple responses to a single response request");
}
InboundSubstreamState::ResponsePendingFlush { substream, .. } => {
*substream_state = InboundSubstreamState::ResponsePendingFlush {
substream,
closing: true,
};
error!(self.log, "Attempted sending multiple responses to a single response request");
}
InboundSubstreamState::Poisoned => {
crit!(self.log, "Poisoned inbound substream");
unreachable!("Coding error: Poisoned substream");
} }
} }
} }
None => { InboundSubstreamState::ResponsePendingSend {
warn!(self.log, "Stream has expired. Response not sent"; "response" => response.to_string(), "id" => rpc_id); substream,
message,
closing,
} if res_is_multiple => {
// the stream is in use, add the request to a pending queue
self.queued_outbound_items
.entry(inbound_id)
.or_insert_with(Vec::new)
.push(response);
// return the state
*substream_state = InboundSubstreamState::ResponsePendingSend {
substream,
message,
closing,
};
} }
}; InboundSubstreamState::ResponsePendingFlush { substream, closing }
if res_is_multiple =>
{
// the stream is in use, add the request to a pending queue
self.queued_outbound_items
.entry(inbound_id)
.or_insert_with(Vec::new)
.push(response);
// return the state
*substream_state =
InboundSubstreamState::ResponsePendingFlush { substream, closing };
}
InboundSubstreamState::Closing(substream) => {
*substream_state = InboundSubstreamState::Closing(substream);
debug!(self.log, "Response not sent. Stream is closing"; "response" => format!("{}",response));
}
InboundSubstreamState::ResponsePendingSend {
substream, message, ..
} => {
*substream_state = InboundSubstreamState::ResponsePendingSend {
substream,
message,
closing: true,
};
error!(
self.log,
"Attempted sending multiple responses to a single response request"
);
}
InboundSubstreamState::ResponsePendingFlush { substream, .. } => {
*substream_state = InboundSubstreamState::ResponsePendingFlush {
substream,
closing: true,
};
error!(
self.log,
"Attempted sending multiple responses to a single response request"
);
}
InboundSubstreamState::Poisoned => {
crit!(self.log, "Poisoned inbound substream");
unreachable!("Coding error: Poisoned substream");
}
}
} }
// We do not send errors as responses
RPCEvent::Error(..) => {}
} }
} }
@ -520,7 +565,7 @@ where
self.outbound_io_error_retries = 0; self.outbound_io_error_retries = 0;
// map the error // map the error
let rpc_error = match error { let error = match error {
ProtocolsHandlerUpgrErr::Timer => RPCError::InternalError("Timer failed"), ProtocolsHandlerUpgrErr::Timer => RPCError::InternalError("Timer failed"),
ProtocolsHandlerUpgrErr::Timeout => RPCError::NegotiationTimeout, ProtocolsHandlerUpgrErr::Timeout => RPCError::NegotiationTimeout,
ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(e)) => e, ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(e)) => e,
@ -541,7 +586,11 @@ where
} }
}, },
}; };
self.pending_error.push((id, req.protocol(), rpc_error)); self.pending_errors.push(HandlerErr::Outbound {
id,
proto: req.protocol(),
error,
});
} }
fn connection_keep_alive(&self) -> KeepAlive { fn connection_keep_alive(&self) -> KeepAlive {
@ -559,16 +608,15 @@ where
Self::Error, Self::Error,
>, >,
> { > {
if !self.pending_error.is_empty() { // report failures
let (id, protocol, err) = self.pending_error.remove(0); if !self.pending_errors.is_empty() {
return Poll::Ready(ProtocolsHandlerEvent::Custom(RPCEvent::Error( let err_info = self.pending_errors.remove(0);
id, protocol, err, return Poll::Ready(ProtocolsHandlerEvent::Custom(Err(err_info)));
)));
} }
// return any events that need to be reported // return any events that need to be reported
if !self.events_out.is_empty() { if !self.events_out.is_empty() {
return Poll::Ready(ProtocolsHandlerEvent::Custom(self.events_out.remove(0))); return Poll::Ready(ProtocolsHandlerEvent::Custom(Ok(self.events_out.remove(0))));
} else { } else {
self.events_out.shrink_to_fit(); self.events_out.shrink_to_fit();
} }
@ -576,17 +624,23 @@ where
// purge expired inbound substreams and send an error // purge expired inbound substreams and send an error
loop { loop {
match self.inbound_substreams_delay.poll_next_unpin(cx) { match self.inbound_substreams_delay.poll_next_unpin(cx) {
Poll::Ready(Some(Ok(stream_id))) => { Poll::Ready(Some(Ok(inbound_id))) => {
// handle a stream timeout for various states // handle a stream timeout for various states
if let Some((substream_state, delay_key, _)) = if let Some((substream_state, delay_key, protocol)) =
self.inbound_substreams.get_mut(stream_id.get_ref()) self.inbound_substreams.get_mut(inbound_id.get_ref())
{ {
// the delay has been removed // the delay has been removed
*delay_key = None; *delay_key = None;
self.pending_errors.push(HandlerErr::Inbound {
id: *inbound_id.get_ref(),
proto: *protocol,
error: RPCError::StreamTimeout,
});
let outbound_queue = self let outbound_queue = self
.queued_outbound_items .queued_outbound_items
.entry(stream_id.into_inner()) .entry(inbound_id.into_inner())
.or_insert_with(Vec::new); .or_insert_with(Vec::new);
substream_state.close(outbound_queue); substream_state.close(outbound_queue);
} }
@ -605,20 +659,21 @@ where
// purge expired outbound substreams // purge expired outbound substreams
loop { loop {
match self.outbound_substreams_delay.poll_next_unpin(cx) { match self.outbound_substreams_delay.poll_next_unpin(cx) {
Poll::Ready(Some(Ok(stream_id))) => { Poll::Ready(Some(Ok(outbound_id))) => {
if let Some((_id, _stream, protocol, _)) = if let Some(OutboundInfo { proto, req_id, .. }) =
self.outbound_substreams.remove(stream_id.get_ref()) self.outbound_substreams.remove(outbound_id.get_ref())
{ {
self.update_keep_alive(); self.update_keep_alive();
let outbound_err = HandlerErr::Outbound {
id: req_id,
proto,
error: RPCError::StreamTimeout,
};
// notify the user // notify the user
return Poll::Ready(ProtocolsHandlerEvent::Custom(RPCEvent::Error( return Poll::Ready(ProtocolsHandlerEvent::Custom(Err(outbound_err)));
*stream_id.get_ref(),
protocol,
RPCError::StreamTimeout,
)));
} else { } else {
crit!(self.log, "timed out substream not in the books"; "stream_id" => stream_id.get_ref()); crit!(self.log, "timed out substream not in the books"; "stream_id" => outbound_id.get_ref());
} }
} }
Poll::Ready(Some(Err(e))) => { Poll::Ready(Some(Err(e))) => {
@ -797,155 +852,161 @@ where
} }
// drive outbound streams that need to be processed // drive outbound streams that need to be processed
for request_id in self.outbound_substreams.keys().copied().collect::<Vec<_>>() { for outbound_id in self.outbound_substreams.keys().copied().collect::<Vec<_>>() {
match self.outbound_substreams.entry(request_id) { // get the state and mark it as poisoned
let (mut entry, state) = match self.outbound_substreams.entry(outbound_id) {
Entry::Occupied(mut entry) => { Entry::Occupied(mut entry) => {
match std::mem::replace( let state = std::mem::replace(
&mut entry.get_mut().0, &mut entry.get_mut().state,
OutboundSubstreamState::Poisoned, OutboundSubstreamState::Poisoned,
) { );
OutboundSubstreamState::RequestPendingResponse { (entry, state)
mut substream, }
request, Entry::Vacant(_) => unreachable!(),
} => match substream.poll_next_unpin(cx) { };
Poll::Ready(Some(Ok(response))) => {
if request.multiple_responses() && !response.is_error() {
let substream_entry = entry.get_mut();
let delay_key = &substream_entry.1;
// chunks left after this one
let remaining_chunks = substream_entry
.3
.map(|count| count.saturating_sub(1))
.unwrap_or_else(|| 0);
if remaining_chunks == 0 {
// this is the last expected message, close the stream as all expected chunks have been received
substream_entry.0 =
OutboundSubstreamState::Closing(substream);
} else {
// If the response chunk was expected update the remaining number of chunks expected and reset the Timeout
substream_entry.0 =
OutboundSubstreamState::RequestPendingResponse {
substream,
request,
};
substream_entry.3 = Some(remaining_chunks);
self.outbound_substreams_delay.reset(
delay_key,
Duration::from_secs(RESPONSE_TIMEOUT),
);
}
} else {
// either this is a single response request or we received an
// error
// only expect a single response, close the stream
entry.get_mut().0 = OutboundSubstreamState::Closing(substream);
}
return Poll::Ready(ProtocolsHandlerEvent::Custom( match state {
RPCEvent::Response(request_id, response), OutboundSubstreamState::RequestPendingResponse {
)); mut substream,
request,
} => match substream.poll_next_unpin(cx) {
Poll::Ready(Some(Ok(response))) => {
if request.expected_responses() > 1 && !response.is_error() {
let substream_entry = entry.get_mut();
let delay_key = &substream_entry.delay_key;
// chunks left after this one
let remaining_chunks = substream_entry
.remaining_chunks
.map(|count| count.saturating_sub(1))
.unwrap_or_else(|| 0);
if remaining_chunks == 0 {
// this is the last expected message, close the stream as all expected chunks have been received
substream_entry.state = OutboundSubstreamState::Closing(substream);
} else {
// If the response chunk was expected update the remaining number of chunks expected and reset the Timeout
substream_entry.state =
OutboundSubstreamState::RequestPendingResponse {
substream,
request,
};
substream_entry.remaining_chunks = Some(remaining_chunks);
self.outbound_substreams_delay
.reset(delay_key, Duration::from_secs(RESPONSE_TIMEOUT));
} }
Poll::Ready(None) => { } else {
// stream closed // either this is a single response request or we received an
// if we expected multiple streams send a stream termination, // error only expect a single response, close the stream
// else report the stream terminating only. entry.get_mut().state = OutboundSubstreamState::Closing(substream);
//trace!(self.log, "RPC Response - stream closed by remote"); }
// drop the stream
let delay_key = &entry.get().1;
self.outbound_substreams_delay.remove(delay_key);
entry.remove_entry();
self.update_keep_alive(); // Check what type of response we got and report it accordingly
// notify the application error let id = entry.get().req_id;
if request.multiple_responses() { let proto = entry.get().proto;
// return an end of stream result
return Poll::Ready(ProtocolsHandlerEvent::Custom(
RPCEvent::Response(
request_id,
RPCCodedResponse::StreamTermination(
request.stream_termination(),
),
),
));
} // else we return an error, stream should not have closed early.
return Poll::Ready(ProtocolsHandlerEvent::Custom(
RPCEvent::Error(
request_id,
request.protocol(),
RPCError::IncompleteStream,
),
));
}
Poll::Pending => {
entry.get_mut().0 = OutboundSubstreamState::RequestPendingResponse {
substream,
request,
}
}
Poll::Ready(Some(Err(e))) => {
// drop the stream
let delay_key = &entry.get().1;
self.outbound_substreams_delay.remove(delay_key);
let protocol = entry.get().2;
entry.remove_entry();
self.update_keep_alive();
return Poll::Ready(ProtocolsHandlerEvent::Custom(
RPCEvent::Error(request_id, protocol, e),
));
}
},
OutboundSubstreamState::Closing(mut substream) => {
match Sink::poll_close(Pin::new(&mut substream), cx) {
Poll::Ready(_) => {
// drop the stream and its corresponding timeout
let delay_key = &entry.get().1;
let protocol = entry.get().2;
self.outbound_substreams_delay.remove(delay_key);
entry.remove_entry();
self.update_keep_alive();
// report the stream termination to the user let received = match response {
// RPCCodedResponse::StreamTermination(t) => {
// Streams can be terminated here if a responder tries to Ok(RPCReceived::EndOfStream(id, t))
// continue sending responses beyond what we would expect. Here }
// we simply terminate the stream and report a stream RPCCodedResponse::Success(resp) => Ok(RPCReceived::Response(id, resp)),
// termination to the application RPCCodedResponse::InvalidRequest(ref r)
match protocol { | RPCCodedResponse::ServerError(ref r)
Protocol::BlocksByRange => { | RPCCodedResponse::Unknown(ref r) => {
return Poll::Ready(ProtocolsHandlerEvent::Custom( let code = response.error_code().expect(
RPCEvent::Response( "Response indicating and error should map to an error code",
request_id, );
RPCCodedResponse::StreamTermination( Err(HandlerErr::Outbound {
ResponseTermination::BlocksByRange, id,
), proto,
), error: RPCError::ErrorResponse(code, r.clone()),
)); })
} }
Protocol::BlocksByRoot => { };
return Poll::Ready(ProtocolsHandlerEvent::Custom(
RPCEvent::Response( return Poll::Ready(ProtocolsHandlerEvent::Custom(received));
request_id, }
RPCCodedResponse::StreamTermination( Poll::Ready(None) => {
ResponseTermination::BlocksByRoot, // stream closed
), // if we expected multiple streams send a stream termination,
), // else report the stream terminating only.
)); //trace!(self.log, "RPC Response - stream closed by remote");
} // drop the stream
_ => {} // all other protocols are do not have multiple responses and we do not inform the user, we simply drop the stream. let delay_key = &entry.get().delay_key;
} let request_id = *&entry.get().req_id;
} self.outbound_substreams_delay.remove(delay_key);
Poll::Pending => { entry.remove_entry();
entry.get_mut().0 = OutboundSubstreamState::Closing(substream); self.update_keep_alive();
} // notify the application error
if request.expected_responses() > 1 {
// return an end of stream result
return Poll::Ready(ProtocolsHandlerEvent::Custom(Ok(
RPCReceived::EndOfStream(request_id, request.stream_termination()),
)));
}
// else we return an error, stream should not have closed early.
let outbound_err = HandlerErr::Outbound {
id: request_id,
proto: request.protocol(),
error: RPCError::IncompleteStream,
};
return Poll::Ready(ProtocolsHandlerEvent::Custom(Err(outbound_err)));
}
Poll::Pending => {
entry.get_mut().state =
OutboundSubstreamState::RequestPendingResponse { substream, request }
}
Poll::Ready(Some(Err(e))) => {
// drop the stream
let delay_key = &entry.get().delay_key;
self.outbound_substreams_delay.remove(delay_key);
let outbound_err = HandlerErr::Outbound {
id: entry.get().req_id,
proto: entry.get().proto,
error: e,
};
entry.remove_entry();
self.update_keep_alive();
return Poll::Ready(ProtocolsHandlerEvent::Custom(Err(outbound_err)));
}
},
OutboundSubstreamState::Closing(mut substream) => {
match Sink::poll_close(Pin::new(&mut substream), cx) {
Poll::Ready(_) => {
// drop the stream and its corresponding timeout
let delay_key = &entry.get().delay_key;
let protocol = entry.get().proto;
let request_id = entry.get().req_id;
self.outbound_substreams_delay.remove(delay_key);
entry.remove_entry();
self.update_keep_alive();
// report the stream termination to the user
//
// Streams can be terminated here if a responder tries to
// continue sending responses beyond what we would expect. Here
// we simply terminate the stream and report a stream
// termination to the application
let termination = match protocol {
Protocol::BlocksByRange => Some(ResponseTermination::BlocksByRange),
Protocol::BlocksByRoot => Some(ResponseTermination::BlocksByRoot),
_ => None, // all other protocols are do not have multiple responses and we do not inform the user, we simply drop the stream.
};
if let Some(termination) = termination {
return Poll::Ready(ProtocolsHandlerEvent::Custom(Ok(
RPCReceived::EndOfStream(request_id, termination),
)));
} }
} }
OutboundSubstreamState::Poisoned => { Poll::Pending => {
crit!(self.log, "Poisoned outbound substream"); entry.get_mut().state = OutboundSubstreamState::Closing(substream);
unreachable!("Coding Error: Outbound substream is poisoned")
} }
} }
} }
Entry::Vacant(_) => unreachable!(), OutboundSubstreamState::Poisoned => {
crit!(self.log, "Poisoned outbound substream");
unreachable!("Coding Error: Outbound substream is poisoned")
}
} }
} }
@ -980,7 +1041,7 @@ fn apply_queued_responses<TSpec: EthSpec>(
InboundSubstreamState::Closing(substream) InboundSubstreamState::Closing(substream)
} }
chunk => InboundSubstreamState::ResponsePendingSend { chunk => InboundSubstreamState::ResponsePendingSend {
substream: substream, substream,
message: chunk, message: chunk,
closing: false, closing: false,
}, },
@ -992,3 +1053,14 @@ fn apply_queued_responses<TSpec: EthSpec>(
} }
} }
} }
impl slog::Value for SubstreamId {
fn serialize(
&self,
record: &slog::Record,
key: slog::Key,
serializer: &mut dyn slog::Serializer,
) -> slog::Result {
slog::Value::serialize(&self.0, record, key, serializer)
}
}

View File

@ -9,7 +9,16 @@ use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot};
/* Requests */ /* Requests */
pub type RequestId = usize; /// Identifier of a request.
///
// NOTE: The handler stores the `RequestId` to inform back of responses and errors, but its
// execution is independent of the contents of this type.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RequestId {
    /// Request issued by the router; no per-request id is tracked.
    Router,
    /// Request issued by sync, carrying sync's own numeric request id.
    Sync(usize),
    /// Request issued internally by the behaviour itself
    /// (e.g. the metadata request sent on peer connection).
    Behaviour,
}
/// The STATUS request/response handshake message. /// The STATUS request/response handshake message.
#[derive(Encode, Decode, Clone, Debug, PartialEq)] #[derive(Encode, Decode, Clone, Debug, PartialEq)]
@ -194,7 +203,7 @@ pub enum RPCCodedResponse<T: EthSpec> {
} }
/// The code assigned to an erroneous `RPCResponse`. /// The code assigned to an erroneous `RPCResponse`.
#[derive(Debug, Clone)] #[derive(Debug, Clone, Copy)]
pub enum RPCResponseErrorCode { pub enum RPCResponseErrorCode {
InvalidRequest, InvalidRequest,
ServerError, ServerError,
@ -230,6 +239,15 @@ impl<T: EthSpec> RPCCodedResponse<T> {
} }
} }
/// Builds an RPCCodedResponse from a response code and an ErrorMessage
pub fn from_error_code(response_code: RPCResponseErrorCode, err: String) -> Self {
match response_code {
RPCResponseErrorCode::InvalidRequest => RPCCodedResponse::InvalidRequest(err),
RPCResponseErrorCode::ServerError => RPCCodedResponse::ServerError(err),
RPCResponseErrorCode::Unknown => RPCCodedResponse::Unknown(err),
}
}
/// Specifies which response allows for multiple chunks for the stream handler. /// Specifies which response allows for multiple chunks for the stream handler.
pub fn multiple_responses(&self) -> bool { pub fn multiple_responses(&self) -> bool {
match self { match self {
@ -333,3 +351,18 @@ impl std::fmt::Display for BlocksByRangeRequest {
) )
} }
} }
impl slog::Value for RequestId {
    /// Logs sync requests by their numeric id and the router/behaviour
    /// variants by name.
    fn serialize(
        &self,
        record: &slog::Record,
        key: slog::Key,
        serializer: &mut dyn slog::Serializer,
    ) -> slog::Result {
        // `RequestId` is `Copy`, so matching by value is free.
        match *self {
            RequestId::Sync(id) => slog::Value::serialize(&id, record, key, serializer),
            RequestId::Router => slog::Value::serialize("Router", record, key, serializer),
            RequestId::Behaviour => slog::Value::serialize("Behaviour", record, key, serializer),
        }
    }
}

View File

@ -11,34 +11,69 @@ use libp2p::swarm::{
PollParameters, SubstreamProtocol, PollParameters, SubstreamProtocol,
}; };
use libp2p::{Multiaddr, PeerId}; use libp2p::{Multiaddr, PeerId};
pub use methods::{
MetaData, RPCCodedResponse, RPCResponse, RPCResponseErrorCode, RequestId, ResponseTermination,
StatusMessage,
};
pub use protocol::{Protocol, RPCError, RPCProtocol, RPCRequest};
use slog::{debug, o}; use slog::{debug, o};
use std::marker::PhantomData; use std::marker::PhantomData;
use std::task::{Context, Poll}; use std::task::{Context, Poll};
use std::time::Duration; use std::time::Duration;
use types::EthSpec; use types::EthSpec;
pub(crate) use handler::HandlerErr;
pub(crate) use methods::{MetaData, Ping, RPCCodedResponse, RPCResponse};
pub(crate) use protocol::{RPCProtocol, RPCRequest};
pub use handler::SubstreamId;
pub use methods::{
BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, RPCResponseErrorCode, RequestId,
ResponseTermination, StatusMessage,
};
pub use protocol::{Protocol, RPCError};
pub(crate) mod codec; pub(crate) mod codec;
mod handler; mod handler;
pub mod methods; pub mod methods;
mod protocol; mod protocol;
/// The return type used in the behaviour and the resultant event from the protocols handler. /// RPC events sent from Lighthouse.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub enum RPCEvent<T: EthSpec> { pub enum RPCSend<T: EthSpec> {
/// An inbound/outbound request for RPC protocol. The first parameter is a sequential /// A request sent from Lighthouse.
/// id which tracks an awaiting substream for the response. ///
/// The `RequestId` is given by the application making the request. These
/// go over *outbound* connections.
Request(RequestId, RPCRequest<T>), Request(RequestId, RPCRequest<T>),
/// A response that is being sent or has been received from the RPC protocol. The first parameter returns /// A response sent from Lighthouse.
/// that which was sent with the corresponding request, the second is a single chunk of a ///
/// response. /// The `SubstreamId` must correspond to the RPC-given ID of the original request received from the
Response(RequestId, RPCCodedResponse<T>), /// peer. The second parameter is a single chunk of a response. These go over *inbound*
/// An Error occurred. /// connections.
Error(RequestId, Protocol, RPCError), Response(SubstreamId, RPCCodedResponse<T>),
}
/// RPC events received from outside Lighthouse.
#[derive(Debug, Clone)]
pub enum RPCReceived<T: EthSpec> {
    /// A request received from the outside.
    ///
    /// The `SubstreamId` is given by the `RPCHandler` as it identifies this request with the
    /// *inbound* substream over which it is managed.
    Request(SubstreamId, RPCRequest<T>),
    /// A response received from the outside.
    ///
    /// The `RequestId` corresponds to the application-given ID of the original request sent to
    /// the peer. The second parameter is a single chunk of a response. These go over *outbound*
    /// connections.
    Response(RequestId, RPCResponse<T>),
    /// Marks a request as completed: the stream carrying the response chunks for
    /// `RequestId` has terminated with the given termination marker.
    EndOfStream(RequestId, ResponseTermination),
}
impl<T: EthSpec> std::fmt::Display for RPCSend<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
RPCSend::Request(id, req) => write!(f, "RPC Request(id: {:?}, {})", id, req),
RPCSend::Response(id, res) => write!(f, "RPC Response(id: {:?}, {})", id, res),
}
}
} }
/// Messages sent to the user from the RPC protocol. /// Messages sent to the user from the RPC protocol.
@ -46,38 +81,14 @@ pub struct RPCMessage<TSpec: EthSpec> {
/// The peer that sent the message. /// The peer that sent the message.
pub peer_id: PeerId, pub peer_id: PeerId,
/// The message that was sent. /// The message that was sent.
pub event: RPCEvent<TSpec>, pub event: <RPCHandler<TSpec> as ProtocolsHandler>::OutEvent,
}
impl<T: EthSpec> RPCEvent<T> {
pub fn id(&self) -> usize {
match *self {
RPCEvent::Request(id, _) => id,
RPCEvent::Response(id, _) => id,
RPCEvent::Error(id, _, _) => id,
}
}
}
impl<T: EthSpec> std::fmt::Display for RPCEvent<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
RPCEvent::Request(id, req) => write!(f, "RPC Request(id: {}, {})", id, req),
RPCEvent::Response(id, res) => write!(f, "RPC Response(id: {}, {})", id, res),
RPCEvent::Error(id, prot, err) => write!(
f,
"RPC Error(id: {}, protocol: {:?} error: {:?})",
id, prot, err
),
}
}
} }
/// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level /// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level
/// logic. /// logic.
pub struct RPC<TSpec: EthSpec> { pub struct RPC<TSpec: EthSpec> {
/// Queue of events to processed. /// Queue of events to be processed.
events: Vec<NetworkBehaviourAction<RPCEvent<TSpec>, RPCMessage<TSpec>>>, events: Vec<NetworkBehaviourAction<RPCSend<TSpec>, RPCMessage<TSpec>>>,
/// Slog logger for RPC behaviour. /// Slog logger for RPC behaviour.
log: slog::Logger, log: slog::Logger,
} }
@ -94,11 +105,11 @@ impl<TSpec: EthSpec> RPC<TSpec> {
/// Submits an RPC request. /// Submits an RPC request.
/// ///
/// The peer must be connected for this to succeed. /// The peer must be connected for this to succeed.
pub fn send_rpc(&mut self, peer_id: PeerId, rpc_event: RPCEvent<TSpec>) { pub fn send_rpc(&mut self, peer_id: PeerId, event: RPCSend<TSpec>) {
self.events.push(NetworkBehaviourAction::NotifyHandler { self.events.push(NetworkBehaviourAction::NotifyHandler {
peer_id, peer_id,
handler: NotifyHandler::Any, handler: NotifyHandler::Any,
event: rpc_event, event,
}); });
} }
} }
@ -129,8 +140,7 @@ where
fn inject_connected(&mut self, peer_id: &PeerId) { fn inject_connected(&mut self, peer_id: &PeerId) {
// find the peer's meta-data // find the peer's meta-data
debug!(self.log, "Requesting new peer's metadata"; "peer_id" => format!("{}",peer_id)); debug!(self.log, "Requesting new peer's metadata"; "peer_id" => format!("{}",peer_id));
let rpc_event = let rpc_event = RPCSend::Request(RequestId::Behaviour, RPCRequest::MetaData(PhantomData));
RPCEvent::Request(RequestId::from(0usize), RPCRequest::MetaData(PhantomData));
self.events.push(NetworkBehaviourAction::NotifyHandler { self.events.push(NetworkBehaviourAction::NotifyHandler {
peer_id: peer_id.clone(), peer_id: peer_id.clone(),
handler: NotifyHandler::Any, handler: NotifyHandler::Any,
@ -158,14 +168,14 @@ where
fn inject_event( fn inject_event(
&mut self, &mut self,
source: PeerId, peer_id: PeerId,
_: ConnectionId, _: ConnectionId,
event: <Self::ProtocolsHandler as ProtocolsHandler>::OutEvent, event: <Self::ProtocolsHandler as ProtocolsHandler>::OutEvent,
) { ) {
// send the event to the user // send the event to the user
self.events self.events
.push(NetworkBehaviourAction::GenerateEvent(RPCMessage { .push(NetworkBehaviourAction::GenerateEvent(RPCMessage {
peer_id: source, peer_id,
event, event,
})); }));
} }

View File

@ -290,32 +290,19 @@ impl<TSpec: EthSpec> RPCRequest<TSpec> {
/* These functions are used in the handler for stream management */ /* These functions are used in the handler for stream management */
/// This specifies whether a stream should remain open and await a response, given a request. /// Number of responses expected for this request.
/// A GOODBYE request has no response. pub fn expected_responses(&self) -> usize {
pub fn expect_response(&self) -> bool {
match self { match self {
RPCRequest::Status(_) => true, RPCRequest::Status(_) => 1,
RPCRequest::Goodbye(_) => false, RPCRequest::Goodbye(_) => 0,
RPCRequest::BlocksByRange(_) => true, RPCRequest::BlocksByRange(req) => req.count as usize,
RPCRequest::BlocksByRoot(_) => true, RPCRequest::BlocksByRoot(req) => req.block_roots.len(),
RPCRequest::Ping(_) => true, RPCRequest::Ping(_) => 1,
RPCRequest::MetaData(_) => true, RPCRequest::MetaData(_) => 1,
}
}
/// Returns which methods expect multiple responses from the stream. If this is false and
/// the stream terminates, an error is given.
pub fn multiple_responses(&self) -> bool {
match self {
RPCRequest::Status(_) => false,
RPCRequest::Goodbye(_) => false,
RPCRequest::BlocksByRange(_) => true,
RPCRequest::BlocksByRoot(_) => true,
RPCRequest::Ping(_) => false,
RPCRequest::MetaData(_) => false,
} }
} }
/// Gives the corresponding `Protocol` to this request.
pub fn protocol(&self) -> Protocol { pub fn protocol(&self) -> Protocol {
match self { match self {
RPCRequest::Status(_) => Protocol::Status, RPCRequest::Status(_) => Protocol::Status,
@ -390,7 +377,7 @@ pub enum RPCError {
/// IO Error. /// IO Error.
IoError(String), IoError(String),
/// The peer returned a valid response but the response indicated an error. /// The peer returned a valid response but the response indicated an error.
ErrorResponse(RPCResponseErrorCode), ErrorResponse(RPCResponseErrorCode, String),
/// Timed out waiting for a response. /// Timed out waiting for a response.
StreamTimeout, StreamTimeout,
/// Peer does not support the protocol. /// Peer does not support the protocol.
@ -430,7 +417,11 @@ impl std::fmt::Display for RPCError {
RPCError::SSZDecodeError(ref err) => write!(f, "Error while decoding ssz: {:?}", err), RPCError::SSZDecodeError(ref err) => write!(f, "Error while decoding ssz: {:?}", err),
RPCError::InvalidData => write!(f, "Peer sent unexpected data"), RPCError::InvalidData => write!(f, "Peer sent unexpected data"),
RPCError::IoError(ref err) => write!(f, "IO Error: {}", err), RPCError::IoError(ref err) => write!(f, "IO Error: {}", err),
RPCError::ErrorResponse(ref code) => write!(f, "RPC response was an error: {}", code), RPCError::ErrorResponse(ref code, ref reason) => write!(
f,
"RPC response was an error: {} with reason: {}",
code, reason
),
RPCError::StreamTimeout => write!(f, "Stream Timeout"), RPCError::StreamTimeout => write!(f, "Stream Timeout"),
RPCError::UnsupportedProtocol => write!(f, "Peer does not support the protocol"), RPCError::UnsupportedProtocol => write!(f, "Peer does not support the protocol"),
RPCError::IncompleteStream => write!(f, "Stream ended unexpectedly"), RPCError::IncompleteStream => write!(f, "Stream ended unexpectedly"),
@ -451,7 +442,7 @@ impl std::error::Error for RPCError {
RPCError::IncompleteStream => None, RPCError::IncompleteStream => None,
RPCError::InvalidData => None, RPCError::InvalidData => None,
RPCError::InternalError(_) => None, RPCError::InternalError(_) => None,
RPCError::ErrorResponse(_) => None, RPCError::ErrorResponse(_, _) => None,
RPCError::NegotiationTimeout => None, RPCError::NegotiationTimeout => None,
} }
} }

View File

@ -1,6 +1,7 @@
use crate::behaviour::{Behaviour, BehaviourEvent}; use crate::behaviour::{Behaviour, BehaviourEvent, Request, Response};
use crate::discovery::enr; use crate::discovery::enr;
use crate::multiaddr::Protocol; use crate::multiaddr::Protocol;
use crate::rpc::{RPCResponseErrorCode, RequestId, SubstreamId};
use crate::types::{error, GossipKind}; use crate::types::{error, GossipKind};
use crate::EnrExt; use crate::EnrExt;
use crate::{NetworkConfig, NetworkGlobals}; use crate::{NetworkConfig, NetworkGlobals};
@ -84,6 +85,7 @@ pub struct Service<TSpec: EthSpec> {
impl<TSpec: EthSpec> Service<TSpec> { impl<TSpec: EthSpec> Service<TSpec> {
pub fn new( pub fn new(
executor: environment::TaskExecutor,
config: &NetworkConfig, config: &NetworkConfig,
enr_fork_id: EnrForkId, enr_fork_id: EnrForkId,
log: &slog::Logger, log: &slog::Logger,
@ -122,15 +124,15 @@ impl<TSpec: EthSpec> Service<TSpec> {
let behaviour = Behaviour::new(&local_keypair, config, network_globals.clone(), &log)?; let behaviour = Behaviour::new(&local_keypair, config, network_globals.clone(), &log)?;
// use the executor for libp2p // use the executor for libp2p
struct Executor(tokio::runtime::Handle); struct Executor(environment::TaskExecutor);
impl libp2p::core::Executor for Executor { impl libp2p::core::Executor for Executor {
fn exec(&self, f: Pin<Box<dyn Future<Output = ()> + Send>>) { fn exec(&self, f: Pin<Box<dyn Future<Output = ()> + Send>>) {
self.0.spawn(f); self.0.spawn(f, "libp2p");
} }
} }
SwarmBuilder::new(transport, behaviour, local_peer_id.clone()) SwarmBuilder::new(transport, behaviour, local_peer_id.clone())
.peer_connection_limit(MAX_CONNECTIONS_PER_PEER) .peer_connection_limit(MAX_CONNECTIONS_PER_PEER)
.executor(Box::new(Executor(tokio::runtime::Handle::current()))) .executor(Box::new(Executor(executor)))
.build() .build()
}; };
@ -228,126 +230,154 @@ impl<TSpec: EthSpec> Service<TSpec> {
self.peer_ban_timeout.insert(peer_id, timeout); self.peer_ban_timeout.insert(peer_id, timeout);
} }
/// Sends an RPC request to the given peer, tagged with the caller-chosen
/// `request_id` so that the eventual response or error can be matched back
/// to this request.
pub fn send_request(&mut self, peer_id: PeerId, request_id: RequestId, request: Request) {
    // Delegate straight to the swarm/behaviour.
    self.swarm.send_request(peer_id, request_id, request)
}
/// Informs the peer that the request identified by `stream_id` failed,
/// sending back an error code and a human-readable reason.
pub fn respond_with_error(
    &mut self,
    peer_id: PeerId,
    stream_id: SubstreamId,
    error: RPCResponseErrorCode,
    reason: String,
) {
    // NOTE(review): `_send_error_reponse` is misspelled at its definition in the
    // behaviour; the call must match it until that method is renamed.
    self.swarm._send_error_reponse(peer_id, stream_id, error, reason)
}
/// Sends a successful response for the peer's request identified by `stream_id`.
pub fn send_response(
    &mut self,
    peer_id: PeerId,
    stream_id: SubstreamId,
    response: Response<TSpec>,
) {
    // Delegate straight to the swarm/behaviour.
    self.swarm.send_successful_response(peer_id, stream_id, response)
}
pub async fn next_event(&mut self) -> Libp2pEvent<TSpec> { pub async fn next_event(&mut self) -> Libp2pEvent<TSpec> {
loop { loop {
tokio::select! { tokio::select! {
event = self.swarm.next_event() => { event = self.swarm.next_event() => {
match event { match event {
SwarmEvent::Behaviour(behaviour) => { SwarmEvent::Behaviour(behaviour) => {
return Libp2pEvent::Behaviour(behaviour) return Libp2pEvent::Behaviour(behaviour)
} }
SwarmEvent::ConnectionEstablished { SwarmEvent::ConnectionEstablished {
peer_id, peer_id,
endpoint, endpoint,
num_established, num_established,
} => { } => {
debug!(self.log, "Connection established"; "peer_id"=> peer_id.to_string(), "connections" => num_established.get()); debug!(self.log, "Connection established"; "peer_id" => peer_id.to_string(), "connections" => num_established.get());
// if this is the first connection inform the network layer a new connection // if this is the first connection inform the network layer a new connection
// has been established and update the db // has been established and update the db
if num_established.get() == 1 { if num_established.get() == 1 {
// update the peerdb // update the peerdb
match endpoint { match endpoint {
ConnectedPoint::Listener { .. } => { ConnectedPoint::Listener { .. } => {
self.swarm.peer_manager().connect_ingoing(&peer_id); self.swarm.peer_manager().connect_ingoing(&peer_id);
}
ConnectedPoint::Dialer { .. } => self
.network_globals
.peers
.write()
.connect_outgoing(&peer_id),
} }
ConnectedPoint::Dialer { .. } => self return Libp2pEvent::PeerConnected { peer_id, endpoint };
.network_globals
.peers
.write()
.connect_outgoing(&peer_id),
} }
return Libp2pEvent::PeerConnected { peer_id, endpoint };
} }
} SwarmEvent::ConnectionClosed {
SwarmEvent::ConnectionClosed { peer_id,
peer_id, cause,
cause, endpoint,
endpoint, num_established,
num_established, } => {
} => { debug!(self.log, "Connection closed"; "peer_id"=> peer_id.to_string(), "cause" => cause.to_string(), "connections" => num_established);
debug!(self.log, "Connection closed"; "peer_id"=> peer_id.to_string(), "cause" => cause.to_string(), "connections" => num_established); if num_established == 0 {
if num_established == 0 { // update the peer_db
// update the peer_db self.swarm.peer_manager().notify_disconnect(&peer_id);
self.swarm.peer_manager().notify_disconnect(&peer_id); // the peer has disconnected
// the peer has disconnected return Libp2pEvent::PeerDisconnected {
return Libp2pEvent::PeerDisconnected { peer_id,
peer_id, endpoint,
endpoint, };
}; }
}
SwarmEvent::NewListenAddr(multiaddr) => {
return Libp2pEvent::NewListenAddr(multiaddr)
} }
}
SwarmEvent::NewListenAddr(multiaddr) => {
return Libp2pEvent::NewListenAddr(multiaddr)
}
SwarmEvent::IncomingConnection { SwarmEvent::IncomingConnection {
local_addr, local_addr,
send_back_addr, send_back_addr,
} => { } => {
debug!(self.log, "Incoming connection"; "our_addr" => local_addr.to_string(), "from" => send_back_addr.to_string()) debug!(self.log, "Incoming connection"; "our_addr" => local_addr.to_string(), "from" => send_back_addr.to_string())
} }
SwarmEvent::IncomingConnectionError { SwarmEvent::IncomingConnectionError {
local_addr, local_addr,
send_back_addr, send_back_addr,
error, error,
} => { } => {
debug!(self.log, "Failed incoming connection"; "our_addr" => local_addr.to_string(), "from" => send_back_addr.to_string(), "error" => error.to_string()) debug!(self.log, "Failed incoming connection"; "our_addr" => local_addr.to_string(), "from" => send_back_addr.to_string(), "error" => error.to_string())
} }
SwarmEvent::BannedPeer { SwarmEvent::BannedPeer {
peer_id, peer_id,
endpoint: _, endpoint: _,
} => { } => {
debug!(self.log, "Attempted to dial a banned peer"; "peer_id" => peer_id.to_string()) debug!(self.log, "Attempted to dial a banned peer"; "peer_id" => peer_id.to_string())
} }
SwarmEvent::UnreachableAddr { SwarmEvent::UnreachableAddr {
peer_id, peer_id,
address, address,
error, error,
attempts_remaining, attempts_remaining,
} => { } => {
debug!(self.log, "Failed to dial address"; "peer_id" => peer_id.to_string(), "address" => address.to_string(), "error" => error.to_string(), "attempts_remaining" => attempts_remaining); debug!(self.log, "Failed to dial address"; "peer_id" => peer_id.to_string(), "address" => address.to_string(), "error" => error.to_string(), "attempts_remaining" => attempts_remaining);
self.swarm.peer_manager().notify_disconnect(&peer_id); self.swarm.peer_manager().notify_disconnect(&peer_id);
} }
SwarmEvent::UnknownPeerUnreachableAddr { address, error } => { SwarmEvent::UnknownPeerUnreachableAddr { address, error } => {
debug!(self.log, "Peer not known at dialed address"; "address" => address.to_string(), "error" => error.to_string()); debug!(self.log, "Peer not known at dialed address"; "address" => address.to_string(), "error" => error.to_string());
} }
SwarmEvent::ExpiredListenAddr(multiaddr) => { SwarmEvent::ExpiredListenAddr(multiaddr) => {
debug!(self.log, "Listen address expired"; "multiaddr" => multiaddr.to_string()) debug!(self.log, "Listen address expired"; "multiaddr" => multiaddr.to_string())
} }
SwarmEvent::ListenerClosed { addresses, reason } => { SwarmEvent::ListenerClosed { addresses, reason } => {
debug!(self.log, "Listener closed"; "addresses" => format!("{:?}", addresses), "reason" => format!("{:?}", reason)) debug!(self.log, "Listener closed"; "addresses" => format!("{:?}", addresses), "reason" => format!("{:?}", reason))
} }
SwarmEvent::ListenerError { error } => { SwarmEvent::ListenerError { error } => {
debug!(self.log, "Listener error"; "error" => format!("{:?}", error.to_string())) debug!(self.log, "Listener error"; "error" => format!("{:?}", error.to_string()))
} }
SwarmEvent::Dialing(peer_id) => { SwarmEvent::Dialing(peer_id) => {
debug!(self.log, "Dialing peer"; "peer" => peer_id.to_string()); debug!(self.log, "Dialing peer"; "peer" => peer_id.to_string());
self.swarm.peer_manager().dialing_peer(&peer_id); self.swarm.peer_manager().dialing_peer(&peer_id);
}
} }
} }
} Some(Ok(peer_to_ban)) = self.peers_to_ban.next() => {
Some(Ok(peer_to_ban)) = self.peers_to_ban.next() => { let peer_id = peer_to_ban.into_inner();
let peer_id = peer_to_ban.into_inner(); Swarm::ban_peer_id(&mut self.swarm, peer_id.clone());
Swarm::ban_peer_id(&mut self.swarm, peer_id.clone()); // TODO: Correctly notify protocols of the disconnect
// TODO: Correctly notify protocols of the disconnect // TODO: Also remove peer from the DHT: https://github.com/sigp/lighthouse/issues/629
// TODO: Also remove peer from the DHT: https://github.com/sigp/lighthouse/issues/629 self.swarm.inject_disconnected(&peer_id);
self.swarm.inject_disconnected(&peer_id); // inform the behaviour that the peer has been banned
// inform the behaviour that the peer has been banned self.swarm.peer_banned(peer_id);
self.swarm.peer_banned(peer_id); }
} Some(Ok(peer_to_unban)) = self.peer_ban_timeout.next() => {
Some(Ok(peer_to_unban)) = self.peer_ban_timeout.next() => { debug!(self.log, "Peer has been unbanned"; "peer" => format!("{:?}", peer_to_unban));
debug!(self.log, "Peer has been unbanned"; "peer" => format!("{:?}", peer_to_unban)); let unban_peer = peer_to_unban.into_inner();
let unban_peer = peer_to_unban.into_inner(); self.swarm.peer_unbanned(&unban_peer);
self.swarm.peer_unbanned(&unban_peer); Swarm::unban_peer_id(&mut self.swarm, unban_peer);
Swarm::unban_peer_id(&mut self.swarm, unban_peer); }
}
} }
} }
} }
} }
/// The implementation supports TCP/IP, WebSockets over TCP/IP, noise/secio as the encryption layer, and /// The implementation supports TCP/IP, WebSockets over TCP/IP, noise/secio as the encryption
/// mplex or yamux as the multiplexing layer. /// layer, and mplex or yamux as the multiplexing layer.
fn build_transport( fn build_transport(
local_private_key: Keypair, local_private_key: Keypair,
) -> Result<Boxed<(PeerId, StreamMuxerBox), Error>, Error> { ) -> Result<Boxed<(PeerId, StreamMuxerBox), Error>, Error> {

View File

@ -12,6 +12,21 @@ use types::{EnrForkId, MinimalEthSpec};
type E = MinimalEthSpec; type E = MinimalEthSpec;
use tempdir::TempDir; use tempdir::TempDir;
/// Test harness wrapper around a libp2p service.
///
/// Holds the `exit_future::Signal` created together with the service's task
/// executor (see `build_libp2p_instance`) so the signal lives as long as the
/// service; presumably dropping it triggers the executor's exit future —
/// TODO(review): confirm against `exit_future` semantics.
pub struct Libp2pInstance(LibP2PService<E>, exit_future::Signal);

// Deref to the inner service so existing test code can call service methods
// directly on the wrapper.
impl std::ops::Deref for Libp2pInstance {
    type Target = LibP2PService<E>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl std::ops::DerefMut for Libp2pInstance {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
pub fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { pub fn build_log(level: slog::Level, enabled: bool) -> slog::Logger {
let decorator = slog_term::TermDecorator::new().build(); let decorator = slog_term::TermDecorator::new().build();
let drain = slog_term::FullFormat::new(decorator).build().fuse(); let drain = slog_term::FullFormat::new(decorator).build().fuse();
@ -82,13 +97,20 @@ pub fn build_libp2p_instance(
boot_nodes: Vec<Enr>, boot_nodes: Vec<Enr>,
secret_key: Option<String>, secret_key: Option<String>,
log: slog::Logger, log: slog::Logger,
) -> LibP2PService<E> { ) -> Libp2pInstance {
let port = unused_port("tcp").unwrap(); let port = unused_port("tcp").unwrap();
let config = build_config(port, boot_nodes, secret_key); let config = build_config(port, boot_nodes, secret_key);
// launch libp2p service // launch libp2p service
LibP2PService::new(&config, EnrForkId::default(), &log)
.expect("should build libp2p instance") let (signal, exit) = exit_future::signal();
.1 let executor =
environment::TaskExecutor::new(tokio::runtime::Handle::current(), exit, log.clone());
Libp2pInstance(
LibP2PService::new(executor, &config, EnrForkId::default(), &log)
.expect("should build libp2p instance")
.1,
signal,
)
} }
#[allow(dead_code)] #[allow(dead_code)]
@ -99,8 +121,8 @@ pub fn get_enr(node: &LibP2PService<E>) -> Enr {
// Returns `n` libp2p peers in fully connected topology. // Returns `n` libp2p peers in fully connected topology.
#[allow(dead_code)] #[allow(dead_code)]
pub fn build_full_mesh(log: slog::Logger, n: usize) -> Vec<LibP2PService<E>> { pub fn build_full_mesh(log: slog::Logger, n: usize) -> Vec<Libp2pInstance> {
let mut nodes: Vec<LibP2PService<E>> = (0..n) let mut nodes: Vec<_> = (0..n)
.map(|_| build_libp2p_instance(vec![], None, log.clone())) .map(|_| build_libp2p_instance(vec![], None, log.clone()))
.collect(); .collect();
let multiaddrs: Vec<Multiaddr> = nodes let multiaddrs: Vec<Multiaddr> = nodes
@ -124,7 +146,7 @@ pub fn build_full_mesh(log: slog::Logger, n: usize) -> Vec<LibP2PService<E>> {
// Constructs a pair of nodes with separate loggers. The sender dials the receiver. // Constructs a pair of nodes with separate loggers. The sender dials the receiver.
// This returns a (sender, receiver) pair. // This returns a (sender, receiver) pair.
#[allow(dead_code)] #[allow(dead_code)]
pub async fn build_node_pair(log: &slog::Logger) -> (LibP2PService<E>, LibP2PService<E>) { pub async fn build_node_pair(log: &slog::Logger) -> (Libp2pInstance, Libp2pInstance) {
let sender_log = log.new(o!("who" => "sender")); let sender_log = log.new(o!("who" => "sender"));
let receiver_log = log.new(o!("who" => "receiver")); let receiver_log = log.new(o!("who" => "receiver"));
@ -168,8 +190,8 @@ pub async fn build_node_pair(log: &slog::Logger) -> (LibP2PService<E>, LibP2PSer
// Returns `n` peers in a linear topology // Returns `n` peers in a linear topology
#[allow(dead_code)] #[allow(dead_code)]
pub fn build_linear(log: slog::Logger, n: usize) -> Vec<LibP2PService<E>> { pub fn build_linear(log: slog::Logger, n: usize) -> Vec<Libp2pInstance> {
let mut nodes: Vec<LibP2PService<E>> = (0..n) let mut nodes: Vec<_> = (0..n)
.map(|_| build_libp2p_instance(vec![], None, log.clone())) .map(|_| build_libp2p_instance(vec![], None, log.clone()))
.collect(); .collect();
let multiaddrs: Vec<Multiaddr> = nodes let multiaddrs: Vec<Multiaddr> = nodes

View File

@ -1,3 +1,7 @@
/* These are temporarily disabled due to their non-deterministic behaviour and impending update to
* gossipsub 1.1. We leave these here as a template for future test upgrades
#![cfg(test)] #![cfg(test)]
use crate::types::GossipEncoding; use crate::types::GossipEncoding;
use ::types::{BeaconBlock, EthSpec, MinimalEthSpec, Signature, SignedBeaconBlock}; use ::types::{BeaconBlock, EthSpec, MinimalEthSpec, Signature, SignedBeaconBlock};
@ -164,3 +168,4 @@ async fn test_gossipsub_full_mesh_publish() {
} }
} }
} }
*/

View File

@ -136,7 +136,10 @@ async fn test_secio_noise_fallback() {
let port = common::unused_port("tcp").unwrap(); let port = common::unused_port("tcp").unwrap();
let noisy_config = common::build_config(port, vec![], None); let noisy_config = common::build_config(port, vec![], None);
let mut noisy_node = Service::new(&noisy_config, EnrForkId::default(), &log) let (_signal, exit) = exit_future::signal();
let executor =
environment::TaskExecutor::new(tokio::runtime::Handle::current(), exit, log.clone());
let mut noisy_node = Service::new(executor, &noisy_config, EnrForkId::default(), &log)
.expect("should build a libp2p instance") .expect("should build a libp2p instance")
.1; .1;

View File

@ -1,7 +1,6 @@
#![cfg(test)] #![cfg(test)]
use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::methods::*;
use eth2_libp2p::rpc::*; use eth2_libp2p::{BehaviourEvent, Libp2pEvent, Request, Response};
use eth2_libp2p::{BehaviourEvent, Libp2pEvent, RPCEvent};
use slog::{debug, warn, Level}; use slog::{debug, warn, Level};
use std::time::Duration; use std::time::Duration;
use tokio::time::delay_for; use tokio::time::delay_for;
@ -26,7 +25,7 @@ async fn test_status_rpc() {
let (mut sender, mut receiver) = common::build_node_pair(&log).await; let (mut sender, mut receiver) = common::build_node_pair(&log).await;
// Dummy STATUS RPC message // Dummy STATUS RPC message
let rpc_request = RPCRequest::Status(StatusMessage { let rpc_request = Request::Status(StatusMessage {
fork_digest: [0; 4], fork_digest: [0; 4],
finalized_root: Hash256::from_low_u64_be(0), finalized_root: Hash256::from_low_u64_be(0),
finalized_epoch: Epoch::new(1), finalized_epoch: Epoch::new(1),
@ -35,7 +34,7 @@ async fn test_status_rpc() {
}); });
// Dummy STATUS RPC message // Dummy STATUS RPC message
let rpc_response = RPCResponse::Status(StatusMessage { let rpc_response = Response::Status(StatusMessage {
fork_digest: [0; 4], fork_digest: [0; 4],
finalized_root: Hash256::from_low_u64_be(0), finalized_root: Hash256::from_low_u64_be(0),
finalized_epoch: Epoch::new(1), finalized_epoch: Epoch::new(1),
@ -52,26 +51,19 @@ async fn test_status_rpc() {
debug!(log, "Sending RPC"); debug!(log, "Sending RPC");
sender sender
.swarm .swarm
.send_rpc(peer_id, RPCEvent::Request(10, rpc_request.clone())); .send_request(peer_id, RequestId::Sync(10), rpc_request.clone());
} }
Libp2pEvent::Behaviour(BehaviourEvent::RPC(_, event)) => match event { Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived {
peer_id: _,
id: RequestId::Sync(10),
response,
}) => {
// Should receive the RPC response // Should receive the RPC response
RPCEvent::Response(id, response @ RPCCodedResponse::Success(_)) => { debug!(log, "Sender Received");
if id == 10 { assert_eq!(response, rpc_response.clone());
debug!(log, "Sender Received"); debug!(log, "Sender Completed");
let response = { return;
match response { }
RPCCodedResponse::Success(r) => r,
_ => unreachable!(),
}
};
assert_eq!(response, rpc_response.clone());
debug!(log, "Sender Completed");
return;
}
}
_ => {} // Ignore other RPC messages
},
_ => {} _ => {}
} }
} }
@ -81,23 +73,17 @@ async fn test_status_rpc() {
let receiver_future = async { let receiver_future = async {
loop { loop {
match receiver.next_event().await { match receiver.next_event().await {
Libp2pEvent::Behaviour(BehaviourEvent::RPC(peer_id, event)) => { Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived {
match event { peer_id,
// Should receive sent RPC request id,
RPCEvent::Request(id, request) => { request,
if request == rpc_request { }) => {
// send the response if request == rpc_request {
debug!(log, "Receiver Received"); // send the response
receiver.swarm.send_rpc( debug!(log, "Receiver Received");
peer_id, receiver
RPCEvent::Response( .swarm
id, .send_successful_response(peer_id, id, rpc_response.clone());
RPCCodedResponse::Success(rpc_response.clone()),
),
);
}
}
_ => {} // Ignore other RPC requests
} }
} }
_ => {} // Ignore other events _ => {} // Ignore other events
@ -108,7 +94,7 @@ async fn test_status_rpc() {
tokio::select! { tokio::select! {
_ = sender_future => {} _ = sender_future => {}
_ = receiver_future => {} _ = receiver_future => {}
_ = delay_for(Duration::from_millis(800)) => { _ = delay_for(Duration::from_millis(2000)) => {
panic!("Future timed out"); panic!("Future timed out");
} }
} }
@ -129,7 +115,7 @@ async fn test_blocks_by_range_chunked_rpc() {
let (mut sender, mut receiver) = common::build_node_pair(&log).await; let (mut sender, mut receiver) = common::build_node_pair(&log).await;
// BlocksByRange Request // BlocksByRange Request
let rpc_request = RPCRequest::BlocksByRange(BlocksByRangeRequest { let rpc_request = Request::BlocksByRange(BlocksByRangeRequest {
start_slot: 0, start_slot: 0,
count: messages_to_send, count: messages_to_send,
step: 0, step: 0,
@ -142,7 +128,7 @@ async fn test_blocks_by_range_chunked_rpc() {
message: empty_block, message: empty_block,
signature: Signature::empty_signature(), signature: Signature::empty_signature(),
}; };
let rpc_response = RPCResponse::BlocksByRange(Box::new(empty_signed)); let rpc_response = Response::BlocksByRange(Some(Box::new(empty_signed)));
// keep count of the number of messages received // keep count of the number of messages received
let mut messages_received = 0; let mut messages_received = 0;
@ -155,31 +141,29 @@ async fn test_blocks_by_range_chunked_rpc() {
debug!(log, "Sending RPC"); debug!(log, "Sending RPC");
sender sender
.swarm .swarm
.send_rpc(peer_id, RPCEvent::Request(10, rpc_request.clone())); .send_request(peer_id, RequestId::Sync(10), rpc_request.clone());
} }
Libp2pEvent::Behaviour(BehaviourEvent::RPC(_, event)) => match event { Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived {
// Should receive the RPC response peer_id: _,
RPCEvent::Response(id, response) => { id: RequestId::Sync(10),
if id == 10 { response,
warn!(log, "Sender received a response"); }) => {
match response { warn!(log, "Sender received a response");
RPCCodedResponse::Success(res) => { match response {
assert_eq!(res, rpc_response.clone()); Response::BlocksByRange(Some(_)) => {
messages_received += 1; assert_eq!(response, rpc_response.clone());
warn!(log, "Chunk received"); messages_received += 1;
} warn!(log, "Chunk received");
RPCCodedResponse::StreamTermination(_) => {
// should be exactly 10 messages before terminating
assert_eq!(messages_received, messages_to_send);
// end the test
return;
}
_ => panic!("Invalid RPC received"),
}
} }
Response::BlocksByRange(None) => {
// should be exactly 10 messages before terminating
assert_eq!(messages_received, messages_to_send);
// end the test
return;
}
_ => panic!("Invalid RPC received"),
} }
_ => {} // Ignore other RPC messages }
},
_ => {} // Ignore other behaviour events _ => {} // Ignore other behaviour events
} }
} }
@ -189,36 +173,27 @@ async fn test_blocks_by_range_chunked_rpc() {
let receiver_future = async { let receiver_future = async {
loop { loop {
match receiver.next_event().await { match receiver.next_event().await {
Libp2pEvent::Behaviour(BehaviourEvent::RPC(peer_id, event)) => { Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived {
match event { peer_id,
// Should receive sent RPC request id,
RPCEvent::Request(id, request) => { request,
if request == rpc_request { }) => {
// send the response if request == rpc_request {
warn!(log, "Receiver got request"); // send the response
warn!(log, "Receiver got request");
for _ in 1..=messages_to_send { for _ in 1..=messages_to_send {
receiver.swarm.send_rpc( receiver.swarm.send_successful_response(
peer_id.clone(), peer_id.clone(),
RPCEvent::Response( id,
id, rpc_response.clone(),
RPCCodedResponse::Success(rpc_response.clone()), );
),
);
}
// send the stream termination
receiver.swarm.send_rpc(
peer_id,
RPCEvent::Response(
id,
RPCCodedResponse::StreamTermination(
ResponseTermination::BlocksByRange,
),
),
);
}
} }
_ => {} // Ignore other events // send the stream termination
receiver.swarm.send_successful_response(
peer_id,
id,
Response::BlocksByRange(None),
);
} }
} }
_ => {} // Ignore other events _ => {} // Ignore other events
@ -229,7 +204,7 @@ async fn test_blocks_by_range_chunked_rpc() {
tokio::select! { tokio::select! {
_ = sender_future => {} _ = sender_future => {}
_ = receiver_future => {} _ = receiver_future => {}
_ = delay_for(Duration::from_millis(800)) => { _ = delay_for(Duration::from_millis(2000)) => {
panic!("Future timed out"); panic!("Future timed out");
} }
} }
@ -251,7 +226,7 @@ async fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
let (mut sender, mut receiver) = common::build_node_pair(&log).await; let (mut sender, mut receiver) = common::build_node_pair(&log).await;
// BlocksByRange Request // BlocksByRange Request
let rpc_request = RPCRequest::BlocksByRange(BlocksByRangeRequest { let rpc_request = Request::BlocksByRange(BlocksByRangeRequest {
start_slot: 0, start_slot: 0,
count: messages_to_send, count: messages_to_send,
step: 0, step: 0,
@ -264,7 +239,7 @@ async fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
message: empty_block, message: empty_block,
signature: Signature::empty_signature(), signature: Signature::empty_signature(),
}; };
let rpc_response = RPCResponse::BlocksByRange(Box::new(empty_signed)); let rpc_response = Response::BlocksByRange(Some(Box::new(empty_signed)));
// keep count of the number of messages received // keep count of the number of messages received
let mut messages_received: u64 = 0; let mut messages_received: u64 = 0;
@ -277,28 +252,29 @@ async fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
debug!(log, "Sending RPC"); debug!(log, "Sending RPC");
sender sender
.swarm .swarm
.send_rpc(peer_id, RPCEvent::Request(10, rpc_request.clone())); .send_request(peer_id, RequestId::Sync(10), rpc_request.clone());
} }
Libp2pEvent::Behaviour(BehaviourEvent::RPC(_, event)) => match event { Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived {
// Should receive the RPC response peer_id: _,
RPCEvent::Response(id, response) => { id: RequestId::Sync(10),
if id == 10 { response,
debug!(log, "Sender received a response"); }) =>
match response { // Should receive the RPC response
RPCCodedResponse::Success(res) => { {
assert_eq!(res, rpc_response.clone()); debug!(log, "Sender received a response");
messages_received += 1; match response {
} Response::BlocksByRange(Some(_)) => {
RPCCodedResponse::StreamTermination(_) => { assert_eq!(response, rpc_response.clone());
// should be exactly 10 messages, as requested messages_received += 1;
assert_eq!(messages_received, messages_to_send);
}
_ => panic!("Invalid RPC received"),
}
} }
Response::BlocksByRange(None) => {
// should be exactly 10 messages, as requested
assert_eq!(messages_received, messages_to_send);
}
_ => panic!("Invalid RPC received"),
} }
_ => {} // Ignore other RPC messages }
},
_ => {} // Ignore other behaviour events _ => {} // Ignore other behaviour events
} }
} }
@ -320,21 +296,17 @@ async fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
.await .await
{ {
futures::future::Either::Left(( futures::future::Either::Left((
Libp2pEvent::Behaviour(BehaviourEvent::RPC(peer_id, event)), Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived {
peer_id,
id,
request,
}),
_, _,
)) => { )) => {
match event { if request == rpc_request {
// Should receive sent RPC request // send the response
RPCEvent::Request(id, request) => { warn!(log, "Receiver got request");
if request == rpc_request { message_info = Some((peer_id, id));
// send the response
warn!(log, "Receiver got request");
message_info = Some((peer_id, id));
} else {
continue;
}
}
_ => continue, // Ignore other events, don't send messages until ready
} }
} }
futures::future::Either::Right((_, _)) => {} // The timeout hit, send messages if required futures::future::Either::Right((_, _)) => {} // The timeout hit, send messages if required
@ -344,12 +316,11 @@ async fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
// if we need to send messages send them here. This will happen after a delay // if we need to send messages send them here. This will happen after a delay
if message_info.is_some() { if message_info.is_some() {
messages_sent += 1; messages_sent += 1;
receiver.swarm.send_rpc( let (peer_id, stream_id) = message_info.as_ref().unwrap();
message_info.as_ref().unwrap().0.clone(), receiver.swarm.send_successful_response(
RPCEvent::Response( peer_id.clone(),
message_info.as_ref().unwrap().1.clone(), stream_id.clone(),
RPCCodedResponse::Success(rpc_response.clone()), rpc_response.clone(),
),
); );
debug!(log, "Sending message {}", messages_sent); debug!(log, "Sending message {}", messages_sent);
if messages_sent == messages_to_send + extra_messages_to_send { if messages_sent == messages_to_send + extra_messages_to_send {
@ -363,7 +334,7 @@ async fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
tokio::select! { tokio::select! {
_ = sender_future => {} _ = sender_future => {}
_ = receiver_future => {} _ = receiver_future => {}
_ = delay_for(Duration::from_millis(50000)) => { _ = delay_for(Duration::from_millis(2000)) => {
panic!("Future timed out"); panic!("Future timed out");
} }
} }
@ -382,7 +353,7 @@ async fn test_blocks_by_range_single_empty_rpc() {
let (mut sender, mut receiver) = common::build_node_pair(&log).await; let (mut sender, mut receiver) = common::build_node_pair(&log).await;
// BlocksByRange Request // BlocksByRange Request
let rpc_request = RPCRequest::BlocksByRange(BlocksByRangeRequest { let rpc_request = Request::BlocksByRange(BlocksByRangeRequest {
start_slot: 0, start_slot: 0,
count: 10, count: 10,
step: 0, step: 0,
@ -395,7 +366,7 @@ async fn test_blocks_by_range_single_empty_rpc() {
message: empty_block, message: empty_block,
signature: Signature::empty_signature(), signature: Signature::empty_signature(),
}; };
let rpc_response = RPCResponse::BlocksByRange(Box::new(empty_signed)); let rpc_response = Response::BlocksByRange(Some(Box::new(empty_signed)));
let messages_to_send = 1; let messages_to_send = 1;
@ -410,30 +381,25 @@ async fn test_blocks_by_range_single_empty_rpc() {
debug!(log, "Sending RPC"); debug!(log, "Sending RPC");
sender sender
.swarm .swarm
.send_rpc(peer_id, RPCEvent::Request(10, rpc_request.clone())); .send_request(peer_id, RequestId::Sync(10), rpc_request.clone());
} }
Libp2pEvent::Behaviour(BehaviourEvent::RPC(_, event)) => match event { Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived {
// Should receive the RPC response peer_id: _,
RPCEvent::Response(id, response) => { id: RequestId::Sync(10),
if id == 10 { response,
warn!(log, "Sender received a response"); }) => match response {
match response { Response::BlocksByRange(Some(_)) => {
RPCCodedResponse::Success(res) => { assert_eq!(response, rpc_response.clone());
assert_eq!(res, rpc_response.clone()); messages_received += 1;
messages_received += 1; warn!(log, "Chunk received");
warn!(log, "Chunk received");
}
RPCCodedResponse::StreamTermination(_) => {
// should be exactly 10 messages before terminating
assert_eq!(messages_received, messages_to_send);
// end the test
return;
}
_ => panic!("Invalid RPC received"),
}
}
} }
_ => {} // Ignore other RPC messages Response::BlocksByRange(None) => {
// should be exactly 10 messages before terminating
assert_eq!(messages_received, messages_to_send);
// end the test
return;
}
_ => panic!("Invalid RPC received"),
}, },
_ => {} // Ignore other behaviour events _ => {} // Ignore other behaviour events
} }
@ -444,36 +410,28 @@ async fn test_blocks_by_range_single_empty_rpc() {
let receiver_future = async { let receiver_future = async {
loop { loop {
match receiver.next_event().await { match receiver.next_event().await {
Libp2pEvent::Behaviour(BehaviourEvent::RPC(peer_id, event)) => { Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived {
match event { peer_id,
// Should receive sent RPC request id,
RPCEvent::Request(id, request) => { request,
if request == rpc_request { }) => {
// send the response if request == rpc_request {
warn!(log, "Receiver got request"); // send the response
warn!(log, "Receiver got request");
for _ in 1..=messages_to_send { for _ in 1..=messages_to_send {
receiver.swarm.send_rpc( receiver.swarm.send_successful_response(
peer_id.clone(), peer_id.clone(),
RPCEvent::Response( id,
id, rpc_response.clone(),
RPCCodedResponse::Success(rpc_response.clone()), );
),
);
}
// send the stream termination
receiver.swarm.send_rpc(
peer_id,
RPCEvent::Response(
id,
RPCCodedResponse::StreamTermination(
ResponseTermination::BlocksByRange,
),
),
);
}
} }
_ => {} // Ignore other events // send the stream termination
receiver.swarm.send_successful_response(
peer_id,
id,
Response::BlocksByRange(None),
);
} }
} }
_ => {} // Ignore other events _ => {} // Ignore other events
@ -508,7 +466,7 @@ async fn test_blocks_by_root_chunked_rpc() {
let (mut sender, mut receiver) = common::build_node_pair(&log).await; let (mut sender, mut receiver) = common::build_node_pair(&log).await;
// BlocksByRoot Request // BlocksByRoot Request
let rpc_request = RPCRequest::BlocksByRoot(BlocksByRootRequest { let rpc_request = Request::BlocksByRoot(BlocksByRootRequest {
block_roots: vec![ block_roots: vec![
Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0),
Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0),
@ -522,7 +480,7 @@ async fn test_blocks_by_root_chunked_rpc() {
message: full_block, message: full_block,
signature: Signature::empty_signature(), signature: Signature::empty_signature(),
}; };
let rpc_response = RPCResponse::BlocksByRoot(Box::new(signed_full_block)); let rpc_response = Response::BlocksByRoot(Some(Box::new(signed_full_block)));
// keep count of the number of messages received // keep count of the number of messages received
let mut messages_received = 0; let mut messages_received = 0;
@ -535,28 +493,23 @@ async fn test_blocks_by_root_chunked_rpc() {
debug!(log, "Sending RPC"); debug!(log, "Sending RPC");
sender sender
.swarm .swarm
.send_rpc(peer_id, RPCEvent::Request(10, rpc_request.clone())); .send_request(peer_id, RequestId::Sync(10), rpc_request.clone());
} }
Libp2pEvent::Behaviour(BehaviourEvent::RPC(_, event)) => match event { Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived {
// Should receive the RPC response peer_id: _,
RPCEvent::Response(id, response) => { id: RequestId::Sync(10),
if id == 10 { response,
debug!(log, "Sender received a response"); }) => match response {
match response { Response::BlocksByRoot(Some(_)) => {
RPCCodedResponse::Success(res) => { assert_eq!(response, rpc_response.clone());
assert_eq!(res, rpc_response.clone()); messages_received += 1;
messages_received += 1; debug!(log, "Chunk received");
debug!(log, "Chunk received"); }
} Response::BlocksByRoot(None) => {
RPCCodedResponse::StreamTermination(_) => { // should be exactly messages_to_send
// should be exactly messages_to_send assert_eq!(messages_received, messages_to_send);
assert_eq!(messages_received, messages_to_send); // end the test
// end the test return;
return;
}
_ => {} // Ignore other RPC messages
}
}
} }
_ => {} // Ignore other RPC messages _ => {} // Ignore other RPC messages
}, },
@ -569,38 +522,30 @@ async fn test_blocks_by_root_chunked_rpc() {
let receiver_future = async { let receiver_future = async {
loop { loop {
match receiver.next_event().await { match receiver.next_event().await {
Libp2pEvent::Behaviour(BehaviourEvent::RPC(peer_id, event)) => { Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived {
match event { peer_id,
// Should receive sent RPC request id,
RPCEvent::Request(id, request) => { request,
if request == rpc_request { }) => {
// send the response if request == rpc_request {
debug!(log, "Receiver got request"); // send the response
debug!(log, "Receiver got request");
for _ in 1..=messages_to_send { for _ in 1..=messages_to_send {
receiver.swarm.send_rpc( receiver.swarm.send_successful_response(
peer_id.clone(), peer_id.clone(),
RPCEvent::Response( id,
id, rpc_response.clone(),
RPCCodedResponse::Success(rpc_response.clone()), );
), debug!(log, "Sending message");
);
debug!(log, "Sending message");
}
// send the stream termination
receiver.swarm.send_rpc(
peer_id,
RPCEvent::Response(
id,
RPCCodedResponse::StreamTermination(
ResponseTermination::BlocksByRange,
),
),
);
debug!(log, "Send stream term");
}
} }
_ => {} // Ignore other events // send the stream termination
receiver.swarm.send_successful_response(
peer_id,
id,
Response::BlocksByRange(None),
);
debug!(log, "Send stream term");
} }
} }
_ => {} // Ignore other events _ => {} // Ignore other events
@ -610,7 +555,7 @@ async fn test_blocks_by_root_chunked_rpc() {
tokio::select! { tokio::select! {
_ = sender_future => {} _ = sender_future => {}
_ = receiver_future => {} _ = receiver_future => {}
_ = delay_for(Duration::from_millis(1000)) => { _ = delay_for(Duration::from_millis(2000)) => {
panic!("Future timed out"); panic!("Future timed out");
} }
} }
@ -633,7 +578,7 @@ async fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
let (mut sender, mut receiver) = common::build_node_pair(&log).await; let (mut sender, mut receiver) = common::build_node_pair(&log).await;
// BlocksByRoot Request // BlocksByRoot Request
let rpc_request = RPCRequest::BlocksByRoot(BlocksByRootRequest { let rpc_request = Request::BlocksByRoot(BlocksByRootRequest {
block_roots: vec![ block_roots: vec![
Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0),
Hash256::from_low_u64_be(0), Hash256::from_low_u64_be(0),
@ -654,7 +599,7 @@ async fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
message: full_block, message: full_block,
signature: Signature::empty_signature(), signature: Signature::empty_signature(),
}; };
let rpc_response = RPCResponse::BlocksByRoot(Box::new(signed_full_block)); let rpc_response = Response::BlocksByRoot(Some(Box::new(signed_full_block)));
// keep count of the number of messages received // keep count of the number of messages received
let mut messages_received = 0; let mut messages_received = 0;
@ -667,31 +612,29 @@ async fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
debug!(log, "Sending RPC"); debug!(log, "Sending RPC");
sender sender
.swarm .swarm
.send_rpc(peer_id, RPCEvent::Request(10, rpc_request.clone())); .send_request(peer_id, RequestId::Sync(10), rpc_request.clone());
} }
Libp2pEvent::Behaviour(BehaviourEvent::RPC(_, event)) => match event { Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived {
// Should receive the RPC response peer_id: _,
RPCEvent::Response(id, response) => { id: RequestId::Sync(10),
if id == 10 { response,
debug!(log, "Sender received a response"); }) => {
match response { debug!(log, "Sender received a response");
RPCCodedResponse::Success(res) => { match response {
assert_eq!(res, rpc_response.clone()); Response::BlocksByRoot(Some(_)) => {
messages_received += 1; assert_eq!(response, rpc_response.clone());
debug!(log, "Chunk received"); messages_received += 1;
} debug!(log, "Chunk received");
RPCCodedResponse::StreamTermination(_) => {
// should be exactly messages_to_send
assert_eq!(messages_received, messages_to_send);
// end the test
return;
}
_ => {} // Ignore other RPC messages
}
} }
Response::BlocksByRoot(None) => {
// should be exactly messages_to_send
assert_eq!(messages_received, messages_to_send);
// end the test
return;
}
_ => {} // Ignore other RPC messages
} }
_ => {} // Ignore other RPC messages }
},
_ => {} // Ignore other behaviour events _ => {} // Ignore other behaviour events
} }
} }
@ -713,21 +656,17 @@ async fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
.await .await
{ {
futures::future::Either::Left(( futures::future::Either::Left((
Libp2pEvent::Behaviour(BehaviourEvent::RPC(peer_id, event)), Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived {
peer_id,
id,
request,
}),
_, _,
)) => { )) => {
match event { if request == rpc_request {
// Should receive sent RPC request // send the response
RPCEvent::Request(id, request) => { warn!(log, "Receiver got request");
if request == rpc_request { message_info = Some((peer_id, id));
// send the response
warn!(log, "Receiver got request");
message_info = Some((peer_id, id));
} else {
continue;
}
}
_ => continue, // Ignore other events, don't send messages until ready
} }
} }
futures::future::Either::Right((_, _)) => {} // The timeout hit, send messages if required futures::future::Either::Right((_, _)) => {} // The timeout hit, send messages if required
@ -737,12 +676,11 @@ async fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
// if we need to send messages send them here. This will happen after a delay // if we need to send messages send them here. This will happen after a delay
if message_info.is_some() { if message_info.is_some() {
messages_sent += 1; messages_sent += 1;
receiver.swarm.send_rpc( let (peer_id, stream_id) = message_info.as_ref().unwrap();
message_info.as_ref().unwrap().0.clone(), receiver.swarm.send_successful_response(
RPCEvent::Response( peer_id.clone(),
message_info.as_ref().unwrap().1.clone(), stream_id.clone(),
RPCCodedResponse::Success(rpc_response.clone()), rpc_response.clone(),
),
); );
debug!(log, "Sending message {}", messages_sent); debug!(log, "Sending message {}", messages_sent);
if messages_sent == messages_to_send + extra_messages_to_send { if messages_sent == messages_to_send + extra_messages_to_send {
@ -756,7 +694,7 @@ async fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
tokio::select! { tokio::select! {
_ = sender_future => {} _ = sender_future => {}
_ = receiver_future => {} _ = receiver_future => {}
_ = delay_for(Duration::from_millis(1000)) => { _ = delay_for(Duration::from_millis(2000)) => {
panic!("Future timed out"); panic!("Future timed out");
} }
} }
@ -775,7 +713,7 @@ async fn test_goodbye_rpc() {
let (mut sender, mut receiver) = common::build_node_pair(&log).await; let (mut sender, mut receiver) = common::build_node_pair(&log).await;
// Goodbye Request // Goodbye Request
let rpc_request = RPCRequest::Goodbye(GoodbyeReason::ClientShutdown); let rpc_request = Request::Goodbye(GoodbyeReason::ClientShutdown);
// build the sender future // build the sender future
let sender_future = async { let sender_future = async {
@ -786,7 +724,7 @@ async fn test_goodbye_rpc() {
debug!(log, "Sending RPC"); debug!(log, "Sending RPC");
sender sender
.swarm .swarm
.send_rpc(peer_id, RPCEvent::Request(10, rpc_request.clone())); .send_request(peer_id, RequestId::Sync(10), rpc_request.clone());
} }
_ => {} // Ignore other RPC messages _ => {} // Ignore other RPC messages
} }
@ -797,18 +735,14 @@ async fn test_goodbye_rpc() {
let receiver_future = async { let receiver_future = async {
loop { loop {
match receiver.next_event().await { match receiver.next_event().await {
Libp2pEvent::Behaviour(BehaviourEvent::RPC(_peer_id, event)) => { Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived {
match event { peer_id: _,
// Should receive sent RPC request id: _,
RPCEvent::Request(id, request) => { request,
if request == rpc_request { }) => {
assert_eq!(id, 0); // Should receive sent RPC request
assert_eq!(rpc_request.clone(), request); // receives the goodbye. Nothing left to do assert_eq!(rpc_request.clone(), request); // receives the goodbye. Nothing left to do
return; return;
}
}
_ => {} // Ignore other events
}
} }
_ => {} // Ignore other events _ => {} // Ignore other events
} }
@ -818,7 +752,7 @@ async fn test_goodbye_rpc() {
tokio::select! { tokio::select! {
_ = sender_future => {} _ = sender_future => {}
_ = receiver_future => {} _ = receiver_future => {}
_ = delay_for(Duration::from_millis(1000)) => { _ = delay_for(Duration::from_millis(2000)) => {
panic!("Future timed out"); panic!("Future timed out");
} }
} }

View File

@ -18,7 +18,7 @@ merkle_proof = { path = "../../consensus/merkle_proof" }
eth2_ssz = "0.1.2" eth2_ssz = "0.1.2"
eth2_hashing = "0.1.0" eth2_hashing = "0.1.0"
tree_hash = "0.1.0" tree_hash = "0.1.0"
tokio = { version = "0.2.20", features = ["full"] } tokio = { version = "0.2.21", features = ["full"] }
parking_lot = "0.10.2" parking_lot = "0.10.2"
slog = "2.5.2" slog = "2.5.2"
exit-future = "0.2.0" exit-future = "0.2.0"

View File

@ -24,7 +24,7 @@ pub fn new_env() -> Environment<MinimalEthSpec> {
#[test] #[test]
fn basic() { fn basic() {
let mut env = new_env(); let mut env = new_env();
let log = env.core_context().log.clone(); let log = env.core_context().log().clone();
let mut spec = env.eth2_config().spec.clone(); let mut spec = env.eth2_config().spec.clone();
env.runtime().block_on(async { env.runtime().block_on(async {

View File

@ -10,6 +10,8 @@ genesis = { path = "../genesis" }
lazy_static = "1.4.0" lazy_static = "1.4.0"
matches = "0.1.8" matches = "0.1.8"
tempfile = "3.1.0" tempfile = "3.1.0"
exit-future = "0.2.0"
assert_approx_eq = "1.1.0"
[dependencies] [dependencies]
beacon_chain = { path = "../beacon_chain" } beacon_chain = { path = "../beacon_chain" }
@ -25,7 +27,7 @@ eth2_ssz = "0.1.2"
tree_hash = "0.1.0" tree_hash = "0.1.0"
futures = "0.3.5" futures = "0.3.5"
error-chain = "0.12.2" error-chain = "0.12.2"
tokio = { version = "0.2.20", features = ["full"] } tokio = { version = "0.2.21", features = ["full"] }
parking_lot = "0.10.2" parking_lot = "0.10.2"
smallvec = "1.4.0" smallvec = "1.4.0"
# TODO: Remove rand crate for mainnet # TODO: Remove rand crate for mainnet
@ -34,3 +36,4 @@ fnv = "1.0.6"
rlp = "0.4.5" rlp = "0.4.5"
lazy_static = "1.4.0" lazy_static = "1.4.0"
lighthouse_metrics = { path = "../../common/lighthouse_metrics" } lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
environment = { path = "../../lighthouse/environment" }

View File

@ -28,15 +28,19 @@ const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 1;
const TARGET_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 6; const TARGET_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 6;
/// The time (in slots) before a last seen validator is considered absent and we unsubscribe from the random /// The time (in slots) before a last seen validator is considered absent and we unsubscribe from the random
/// gossip topics that we subscribed to due to the validator connection. /// gossip topics that we subscribed to due to the validator connection.
const LAST_SEEN_VALIDATOR_TIMEOUT: u32 = 150; // 30 mins at a 12s slot time const LAST_SEEN_VALIDATOR_TIMEOUT: u32 = 150;
// 30 mins at a 12s slot time
/// The fraction of a slot that we subscribe to a subnet before the required slot. /// The fraction of a slot that we subscribe to a subnet before the required slot.
/// ///
/// Note: The time is calculated as `time = milliseconds_per_slot / ADVANCE_SUBSCRIPTION_TIME`. /// Note: The time is calculated as `time = milliseconds_per_slot / ADVANCE_SUBSCRIPTION_TIME`.
const ADVANCE_SUBSCRIBE_TIME: u32 = 3; const ADVANCE_SUBSCRIBE_TIME: u32 = 3;
/// The default number of slots before items in hash delay sets used by this class should expire. /// The default number of slots before items in hash delay sets used by this class should expire.
const DEFAULT_EXPIRATION_TIMEOUT: u32 = 3; // 36s at 12s slot time const DEFAULT_EXPIRATION_TIMEOUT: u32 = 3;
// 36s at 12s slot time
/// The default number of slots before items in hash delay sets used by this class should expire.
const DURATION_DIFFERENCE: Duration = Duration::from_millis(1);
#[derive(Debug, PartialEq, Eq, Clone)] #[derive(Debug, Eq, Clone)]
pub enum AttServiceMessage { pub enum AttServiceMessage {
/// Subscribe to the specified subnet id. /// Subscribe to the specified subnet id.
Subscribe(SubnetId), Subscribe(SubnetId),
@ -47,12 +51,45 @@ pub enum AttServiceMessage {
/// Remove the `SubnetId` from the ENR bitfield. /// Remove the `SubnetId` from the ENR bitfield.
EnrRemove(SubnetId), EnrRemove(SubnetId),
/// Discover peers for a particular subnet. /// Discover peers for a particular subnet.
DiscoverPeers(SubnetId), /// The includes the `Instant` we need the discovered peer until.
DiscoverPeers {
subnet_id: SubnetId,
min_ttl: Option<Instant>,
},
}
impl PartialEq for AttServiceMessage {
fn eq(&self, other: &AttServiceMessage) -> bool {
match (self, other) {
(&AttServiceMessage::Subscribe(a), &AttServiceMessage::Subscribe(b)) => a == b,
(&AttServiceMessage::Unsubscribe(a), &AttServiceMessage::Unsubscribe(b)) => a == b,
(&AttServiceMessage::EnrAdd(a), &AttServiceMessage::EnrAdd(b)) => a == b,
(&AttServiceMessage::EnrRemove(a), &AttServiceMessage::EnrRemove(b)) => a == b,
(
&AttServiceMessage::DiscoverPeers { subnet_id, min_ttl },
&AttServiceMessage::DiscoverPeers {
subnet_id: other_subnet_id,
min_ttl: other_min_ttl,
},
) => match (min_ttl, other_min_ttl) {
(Some(min_ttl_instant), Some(other_min_ttl_instant)) => {
min_ttl_instant.saturating_duration_since(other_min_ttl_instant)
< DURATION_DIFFERENCE
&& other_min_ttl_instant.saturating_duration_since(min_ttl_instant)
< DURATION_DIFFERENCE
&& subnet_id == other_subnet_id
}
(None, None) => subnet_id == other_subnet_id,
_ => false,
},
_ => false,
}
}
} }
/// A particular subnet at a given slot. /// A particular subnet at a given slot.
#[derive(PartialEq, Eq, Hash, Clone)] #[derive(PartialEq, Eq, Hash, Clone, Debug)]
struct ExactSubnet { pub struct ExactSubnet {
/// The `SubnetId` associated with this subnet. /// The `SubnetId` associated with this subnet.
pub subnet_id: SubnetId, pub subnet_id: SubnetId,
/// The `Slot` associated with this subnet. /// The `Slot` associated with this subnet.
@ -244,24 +281,18 @@ impl<T: BeaconChainTypes> AttestationService<T> {
return Ok(()); return Ok(());
} }
// check current event log to see if there is a discovery event queued
if self
.events
.iter()
.find(|event| event == &&AttServiceMessage::DiscoverPeers(exact_subnet.subnet_id))
.is_some()
{
// already queued a discovery event
return Ok(());
}
// if the slot is more than epoch away, add an event to start looking for peers // if the slot is more than epoch away, add an event to start looking for peers
if exact_subnet.slot if exact_subnet.slot
< current_slot.saturating_add(TARGET_PEER_DISCOVERY_SLOT_LOOK_AHEAD) < current_slot.saturating_add(TARGET_PEER_DISCOVERY_SLOT_LOOK_AHEAD)
{ {
// then instantly add a discovery request // add one slot to ensure we keep the peer for the subscription slot
self.events let min_ttl = self
.push_back(AttServiceMessage::DiscoverPeers(exact_subnet.subnet_id)); .beacon_chain
.slot_clock
.duration_to_slot(exact_subnet.slot + 1)
.map(|duration| std::time::Instant::now() + duration);
self.send_or_update_discovery_event(exact_subnet.subnet_id, min_ttl);
} else { } else {
// Queue the discovery event to be executed for // Queue the discovery event to be executed for
// TARGET_PEER_DISCOVERY_SLOT_LOOK_AHEAD // TARGET_PEER_DISCOVERY_SLOT_LOOK_AHEAD
@ -296,6 +327,52 @@ impl<T: BeaconChainTypes> AttestationService<T> {
Ok(()) Ok(())
} }
/// Checks if we have a discover peers event already and sends a new event if necessary
///
/// If a message exists for the same subnet, compare the `min_ttl` of the current and
/// existing messages and extend the existing message as necessary.
fn send_or_update_discovery_event(&mut self, subnet_id: SubnetId, min_ttl: Option<Instant>) {
// track whether this message already exists in the event queue
let mut is_duplicate = false;
self.events.iter_mut().for_each(|event| {
match event {
AttServiceMessage::DiscoverPeers {
subnet_id: other_subnet_id,
min_ttl: other_min_ttl,
} => {
if subnet_id == *other_subnet_id {
let other_min_ttl_clone = other_min_ttl.clone();
match (min_ttl, other_min_ttl_clone) {
(Some(min_ttl_instant), Some(other_min_ttl_instant)) =>
// only update the min_ttl if it is greater than the existing min_ttl and a DURATION_DIFFERENCE padding
{
if min_ttl_instant.saturating_duration_since(other_min_ttl_instant)
> DURATION_DIFFERENCE
{
*other_min_ttl = min_ttl;
}
}
(None, Some(_)) => {
// Update the min_ttl to None, because the new message is longer-lived.
*other_min_ttl = None;
}
(Some(_), None) => {} // Don't replace this because the existing message is for a longer-lived peer.
(None, None) => {} // Duplicate message, do nothing.
}
is_duplicate = true;
return;
}
}
_ => {}
};
});
if !is_duplicate {
self.events
.push_back(AttServiceMessage::DiscoverPeers { subnet_id, min_ttl });
}
}
/// Checks the current random subnets and subscriptions to determine if a new subscription for this /// Checks the current random subnets and subscriptions to determine if a new subscription for this
/// subnet is required for the given slot. /// subnet is required for the given slot.
/// ///
@ -436,18 +513,17 @@ impl<T: BeaconChainTypes> AttestationService<T> {
// if we are not already subscribed, then subscribe // if we are not already subscribed, then subscribe
let topic_kind = &GossipKind::CommitteeIndex(subnet_id); let topic_kind = &GossipKind::CommitteeIndex(subnet_id);
if let None = self let already_subscribed = self
.network_globals .network_globals
.gossipsub_subscriptions .gossipsub_subscriptions
.read() .read()
.iter() .iter()
.find(|topic| topic.kind() == topic_kind) .find(|topic| topic.kind() == topic_kind)
{ .is_some();
// not already subscribed to the topic
if !already_subscribed {
// send a discovery request and a subscription // send a discovery request and a subscription
self.events self.send_or_update_discovery_event(subnet_id, None);
.push_back(AttServiceMessage::DiscoverPeers(subnet_id));
self.events self.events
.push_back(AttServiceMessage::Subscribe(subnet_id)); .push_back(AttServiceMessage::Subscribe(subnet_id));
} }
@ -461,8 +537,15 @@ impl<T: BeaconChainTypes> AttestationService<T> {
/// Request a discovery query to find peers for a particular subnet. /// Request a discovery query to find peers for a particular subnet.
fn handle_discover_peers(&mut self, exact_subnet: ExactSubnet) { fn handle_discover_peers(&mut self, exact_subnet: ExactSubnet) {
debug!(self.log, "Searching for peers for subnet"; "subnet" => *exact_subnet.subnet_id, "target_slot" => exact_subnet.slot); debug!(self.log, "Searching for peers for subnet"; "subnet" => *exact_subnet.subnet_id, "target_slot" => exact_subnet.slot);
self.events
.push_back(AttServiceMessage::DiscoverPeers(exact_subnet.subnet_id)); // add one slot to ensure we keep the peer for the subscription slot
let min_ttl = self
.beacon_chain
.slot_clock
.duration_to_slot(exact_subnet.slot + 1)
.map(|duration| std::time::Instant::now() + duration);
self.send_or_update_discovery_event(exact_subnet.subnet_id, min_ttl)
} }
/// A queued subscription is ready. /// A queued subscription is ready.
@ -619,7 +702,7 @@ impl<T: BeaconChainTypes> Stream for AttestationService<T> {
match self.discover_peers.poll_next_unpin(cx) { match self.discover_peers.poll_next_unpin(cx) {
Poll::Ready(Some(Ok(exact_subnet))) => self.handle_discover_peers(exact_subnet), Poll::Ready(Some(Ok(exact_subnet))) => self.handle_discover_peers(exact_subnet),
Poll::Ready(Some(Err(e))) => { Poll::Ready(Some(Err(e))) => {
error!(self.log, "Failed to check for peer discovery requests"; "error"=> format!("{}", e)); error!(self.log, "Failed to check for peer discovery requests"; "error"=> format ! ("{}", e));
} }
Poll::Ready(None) | Poll::Pending => {} Poll::Ready(None) | Poll::Pending => {}
} }

View File

@ -16,10 +16,9 @@ mod tests {
use slog::Logger; use slog::Logger;
use sloggers::{null::NullLoggerBuilder, Build}; use sloggers::{null::NullLoggerBuilder, Build};
use slot_clock::{SlotClock, SystemTimeSlotClock}; use slot_clock::{SlotClock, SystemTimeSlotClock};
use std::time::SystemTime; use std::time::{Duration, SystemTime};
use store::MemoryStore; use store::MemoryStore;
use tempfile::tempdir; use tempfile::tempdir;
use tokio::time::Duration;
use types::{CommitteeIndex, EnrForkId, EthSpec, MinimalEthSpec}; use types::{CommitteeIndex, EnrForkId, EthSpec, MinimalEthSpec};
const SLOT_DURATION_MILLIS: u64 = 200; const SLOT_DURATION_MILLIS: u64 = 200;
@ -192,7 +191,10 @@ mod tests {
assert_matches!( assert_matches!(
events[..3], events[..3],
[ [
AttServiceMessage::DiscoverPeers(_any2), AttServiceMessage::DiscoverPeers {
subnet_id: _any_subnet,
min_ttl: _any_instant
},
AttServiceMessage::Subscribe(_any1), AttServiceMessage::Subscribe(_any1),
AttServiceMessage::EnrAdd(_any3) AttServiceMessage::EnrAdd(_any3)
] ]
@ -240,7 +242,10 @@ mod tests {
assert_matches!( assert_matches!(
events[..3], events[..3],
[ [
AttServiceMessage::DiscoverPeers(_any2), AttServiceMessage::DiscoverPeers {
subnet_id: _any_subnet,
min_ttl: _any_instant
},
AttServiceMessage::Subscribe(_any1), AttServiceMessage::Subscribe(_any1),
AttServiceMessage::EnrAdd(_any3) AttServiceMessage::EnrAdd(_any3)
] ]
@ -278,16 +283,28 @@ mod tests {
.validator_subscriptions(subscriptions) .validator_subscriptions(subscriptions)
.unwrap(); .unwrap();
let min_ttl = Instant::now().checked_add(
attestation_service
.beacon_chain
.slot_clock
.duration_to_slot(current_slot + Slot::new(subscription_slot) + Slot::new(1))
.unwrap(),
);
// just discover peers, don't subscribe yet // just discover peers, don't subscribe yet
let expected = vec![AttServiceMessage::DiscoverPeers(SubnetId::new( let expected = vec![AttServiceMessage::DiscoverPeers {
validator_index, subnet_id: SubnetId::new(validator_index),
))]; min_ttl,
}];
let events = get_events(attestation_service, no_events_expected, 1).await; let events = get_events(attestation_service, no_events_expected, 1).await;
assert_matches!( assert_matches!(
events[..3], events[..3],
[ [
AttServiceMessage::DiscoverPeers(_any1), AttServiceMessage::DiscoverPeers {
subnet_id: _any_subnet,
min_ttl: _any_instant
},
AttServiceMessage::Subscribe(_any2), AttServiceMessage::Subscribe(_any2),
AttServiceMessage::EnrAdd(_any3) AttServiceMessage::EnrAdd(_any3)
] ]
@ -325,9 +342,20 @@ mod tests {
.validator_subscriptions(subscriptions) .validator_subscriptions(subscriptions)
.unwrap(); .unwrap();
let min_ttl = Instant::now().checked_add(
attestation_service
.beacon_chain
.slot_clock
.duration_to_slot(current_slot + Slot::new(subscription_slot) + Slot::new(1))
.unwrap(),
);
// we should discover peers, wait, then subscribe // we should discover peers, wait, then subscribe
let expected = vec![ let expected = vec![
AttServiceMessage::DiscoverPeers(SubnetId::new(validator_index)), AttServiceMessage::DiscoverPeers {
subnet_id: SubnetId::new(validator_index),
min_ttl,
},
AttServiceMessage::Subscribe(SubnetId::new(validator_index)), AttServiceMessage::Subscribe(SubnetId::new(validator_index)),
]; ];
@ -335,7 +363,10 @@ mod tests {
assert_matches!( assert_matches!(
events[..3], events[..3],
[ [
AttServiceMessage::DiscoverPeers(_any1), AttServiceMessage::DiscoverPeers {
subnet_id: _any_subnet,
min_ttl: _any_instant
},
AttServiceMessage::Subscribe(_any2), AttServiceMessage::Subscribe(_any2),
AttServiceMessage::EnrAdd(_any3) AttServiceMessage::EnrAdd(_any3)
] ]
@ -381,7 +412,10 @@ mod tests {
assert_matches!( assert_matches!(
events[..3], events[..3],
[ [
AttServiceMessage::DiscoverPeers(_any1), AttServiceMessage::DiscoverPeers {
subnet_id: _any_subnet,
min_ttl: _any_instant
},
AttServiceMessage::Subscribe(_any2), AttServiceMessage::Subscribe(_any2),
AttServiceMessage::EnrAdd(_any3) AttServiceMessage::EnrAdd(_any3)
] ]
@ -419,17 +453,29 @@ mod tests {
.validator_subscriptions(subscriptions) .validator_subscriptions(subscriptions)
.unwrap(); .unwrap();
let min_ttl = Instant::now().checked_add(
attestation_service
.beacon_chain
.slot_clock
.duration_to_slot(current_slot + Slot::new(subscription_slot) + Slot::new(1))
.unwrap(),
);
// expect discover peers because we will enter TARGET_PEER_DISCOVERY_SLOT_LOOK_AHEAD range // expect discover peers because we will enter TARGET_PEER_DISCOVERY_SLOT_LOOK_AHEAD range
let expected: Vec<AttServiceMessage> = vec![AttServiceMessage::DiscoverPeers( let expected: Vec<AttServiceMessage> = vec![AttServiceMessage::DiscoverPeers {
SubnetId::new(validator_index), subnet_id: SubnetId::new(validator_index),
)]; min_ttl,
}];
let events = get_events(attestation_service, no_events_expected, 5).await; let events = get_events(attestation_service, no_events_expected, 5).await;
assert_matches!( assert_matches!(
events[..3], events[..3],
[ [
AttServiceMessage::DiscoverPeers(_any1), AttServiceMessage::DiscoverPeers {
subnet_id: _any_subnet,
min_ttl: _any_instant
},
AttServiceMessage::Subscribe(_any2), AttServiceMessage::Subscribe(_any2),
AttServiceMessage::EnrAdd(_any3) AttServiceMessage::EnrAdd(_any3)
] ]
@ -470,9 +516,10 @@ mod tests {
for event in events { for event in events {
match event { match event {
AttServiceMessage::DiscoverPeers(_any_subnet) => { AttServiceMessage::DiscoverPeers {
discover_peer_count = discover_peer_count + 1 subnet_id: _any_subnet,
} min_ttl: _any_instant,
} => discover_peer_count = discover_peer_count + 1,
AttServiceMessage::Subscribe(_any_subnet) => subscribe_count = subscribe_count + 1, AttServiceMessage::Subscribe(_any_subnet) => subscribe_count = subscribe_count + 1,
AttServiceMessage::EnrAdd(_any_subnet) => enr_add_count = enr_add_count + 1, AttServiceMessage::EnrAdd(_any_subnet) => enr_add_count = enr_add_count + 1,
_ => unexpected_msg_count = unexpected_msg_count + 1, _ => unexpected_msg_count = unexpected_msg_count + 1,
@ -517,9 +564,10 @@ mod tests {
for event in events { for event in events {
match event { match event {
AttServiceMessage::DiscoverPeers(_any_subnet) => { AttServiceMessage::DiscoverPeers {
discover_peer_count = discover_peer_count + 1 subnet_id: _any_subnet,
} min_ttl: _any_instant,
} => discover_peer_count = discover_peer_count + 1,
AttServiceMessage::Subscribe(_any_subnet) => subscribe_count = subscribe_count + 1, AttServiceMessage::Subscribe(_any_subnet) => subscribe_count = subscribe_count + 1,
AttServiceMessage::EnrAdd(_any_subnet) => enr_add_count = enr_add_count + 1, AttServiceMessage::EnrAdd(_any_subnet) => enr_add_count = enr_add_count + 1,
_ => unexpected_msg_count = unexpected_msg_count + 1, _ => unexpected_msg_count = unexpected_msg_count + 1,

View File

@ -10,8 +10,8 @@ use crate::error;
use crate::service::NetworkMessage; use crate::service::NetworkMessage;
use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError}; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError};
use eth2_libp2p::{ use eth2_libp2p::{
rpc::{RPCCodedResponse, RPCRequest, RPCResponse, RequestId, ResponseTermination}, rpc::{RPCError, RequestId, SubstreamId},
MessageId, NetworkGlobals, PeerId, PubsubMessage, RPCEvent, MessageId, NetworkGlobals, PeerId, PubsubMessage, Request, Response,
}; };
use futures::prelude::*; use futures::prelude::*;
use processor::Processor; use processor::Processor;
@ -43,8 +43,24 @@ pub enum RouterMessage<T: EthSpec> {
PeerDialed(PeerId), PeerDialed(PeerId),
/// Peer has disconnected, /// Peer has disconnected,
PeerDisconnected(PeerId), PeerDisconnected(PeerId),
/// An RPC response/request has been received. /// An RPC request has been received.
RPC(PeerId, RPCEvent<T>), RPCRequestReceived {
peer_id: PeerId,
stream_id: SubstreamId,
request: Request,
},
/// An RPC response has been received.
RPCResponseReceived {
peer_id: PeerId,
request_id: RequestId,
response: Response<T>,
},
/// An RPC request failed
RPCFailed {
peer_id: PeerId,
request_id: RequestId,
error: RPCError,
},
/// A gossip message has been received. The fields are: message id, the peer that sent us this /// A gossip message has been received. The fields are: message id, the peer that sent us this
/// message and the message itself. /// message and the message itself.
PubsubMessage(MessageId, PeerId, PubsubMessage<T>), PubsubMessage(MessageId, PeerId, PubsubMessage<T>),
@ -58,7 +74,7 @@ impl<T: BeaconChainTypes> Router<T> {
beacon_chain: Arc<BeaconChain<T>>, beacon_chain: Arc<BeaconChain<T>>,
network_globals: Arc<NetworkGlobals<T::EthSpec>>, network_globals: Arc<NetworkGlobals<T::EthSpec>>,
network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>, network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
runtime_handle: &tokio::runtime::Handle, executor: environment::TaskExecutor,
log: slog::Logger, log: slog::Logger,
) -> error::Result<mpsc::UnboundedSender<RouterMessage<T::EthSpec>>> { ) -> error::Result<mpsc::UnboundedSender<RouterMessage<T::EthSpec>>> {
let message_handler_log = log.new(o!("service"=> "router")); let message_handler_log = log.new(o!("service"=> "router"));
@ -68,7 +84,7 @@ impl<T: BeaconChainTypes> Router<T> {
// Initialise a message instance, which itself spawns the syncing thread. // Initialise a message instance, which itself spawns the syncing thread.
let processor = Processor::new( let processor = Processor::new(
runtime_handle, executor.clone(),
beacon_chain, beacon_chain,
network_globals.clone(), network_globals.clone(),
network_send.clone(), network_send.clone(),
@ -84,12 +100,15 @@ impl<T: BeaconChainTypes> Router<T> {
}; };
// spawn handler task and move the message handler instance into the spawned thread // spawn handler task and move the message handler instance into the spawned thread
runtime_handle.spawn(async move { executor.spawn(
handler_recv async move {
.for_each(move |msg| future::ready(handler.handle_message(msg))) debug!(log, "Network message router started");
.await; handler_recv
debug!(log, "Network message handler terminated."); .for_each(move |msg| future::ready(handler.handle_message(msg)))
}); .await;
},
"router",
);
Ok(handler_send) Ok(handler_send)
} }
@ -106,11 +125,32 @@ impl<T: BeaconChainTypes> Router<T> {
RouterMessage::PeerDisconnected(peer_id) => { RouterMessage::PeerDisconnected(peer_id) => {
self.processor.on_disconnect(peer_id); self.processor.on_disconnect(peer_id);
} }
// An RPC message request/response has been received RouterMessage::RPCRequestReceived {
RouterMessage::RPC(peer_id, rpc_event) => { peer_id,
self.handle_rpc_message(peer_id, rpc_event); stream_id,
request,
} => {
self.handle_rpc_request(peer_id, stream_id, request);
}
RouterMessage::RPCResponseReceived {
peer_id,
request_id,
response,
} => {
self.handle_rpc_response(peer_id, request_id, response);
}
RouterMessage::RPCFailed {
peer_id,
request_id,
error,
} => {
warn!(self.log, "RPC Error";
"peer_id" => peer_id.to_string(),
"request_id" => request_id,
"error" => error.to_string(),
"client" => self.network_globals.client(&peer_id).to_string());
self.processor.on_rpc_error(peer_id, request_id);
} }
// An RPC message request/response has been received
RouterMessage::PubsubMessage(id, peer_id, gossip) => { RouterMessage::PubsubMessage(id, peer_id, gossip) => {
self.handle_gossip(id, peer_id, gossip); self.handle_gossip(id, peer_id, gossip);
} }
@ -119,32 +159,14 @@ impl<T: BeaconChainTypes> Router<T> {
/* RPC - Related functionality */ /* RPC - Related functionality */
/// Handle RPC messages
fn handle_rpc_message(&mut self, peer_id: PeerId, rpc_message: RPCEvent<T::EthSpec>) {
match rpc_message {
RPCEvent::Request(id, req) => self.handle_rpc_request(peer_id, id, req),
RPCEvent::Response(id, resp) => self.handle_rpc_response(peer_id, id, resp),
RPCEvent::Error(id, _protocol, error) => {
warn!(self.log, "RPC Error"; "peer_id" => peer_id.to_string(), "request_id" => id, "error" => error.to_string(),
"client" => self.network_globals.client(&peer_id).to_string());
self.processor.on_rpc_error(peer_id, id);
}
}
}
/// A new RPC request has been received from the network. /// A new RPC request has been received from the network.
fn handle_rpc_request( fn handle_rpc_request(&mut self, peer_id: PeerId, stream_id: SubstreamId, request: Request) {
&mut self,
peer_id: PeerId,
request_id: RequestId,
request: RPCRequest<T::EthSpec>,
) {
match request { match request {
RPCRequest::Status(status_message) => { Request::Status(status_message) => {
self.processor self.processor
.on_status_request(peer_id, request_id, status_message) .on_status_request(peer_id, stream_id, status_message)
} }
RPCRequest::Goodbye(goodbye_reason) => { Request::Goodbye(goodbye_reason) => {
debug!( debug!(
self.log, "Peer sent Goodbye"; self.log, "Peer sent Goodbye";
"peer_id" => peer_id.to_string(), "peer_id" => peer_id.to_string(),
@ -153,14 +175,12 @@ impl<T: BeaconChainTypes> Router<T> {
); );
self.processor.on_disconnect(peer_id); self.processor.on_disconnect(peer_id);
} }
RPCRequest::BlocksByRange(request) => self Request::BlocksByRange(request) => self
.processor .processor
.on_blocks_by_range_request(peer_id, request_id, request), .on_blocks_by_range_request(peer_id, stream_id, request),
RPCRequest::BlocksByRoot(request) => self Request::BlocksByRoot(request) => self
.processor .processor
.on_blocks_by_root_request(peer_id, request_id, request), .on_blocks_by_root_request(peer_id, stream_id, request),
RPCRequest::Ping(_) => unreachable!("Ping MUST be handled in the behaviour"),
RPCRequest::MetaData(_) => unreachable!("MetaData MUST be handled in the behaviour"),
} }
} }
@ -170,71 +190,20 @@ impl<T: BeaconChainTypes> Router<T> {
&mut self, &mut self,
peer_id: PeerId, peer_id: PeerId,
request_id: RequestId, request_id: RequestId,
error_response: RPCCodedResponse<T::EthSpec>, response: Response<T::EthSpec>,
) { ) {
// an error could have occurred. // an error could have occurred.
match error_response { match response {
RPCCodedResponse::InvalidRequest(error) => { Response::Status(status_message) => {
warn!(self.log, "RPC Invalid Request"; self.processor.on_status_response(peer_id, status_message);
"peer_id" => peer_id.to_string(),
"request_id" => request_id,
"error" => error.to_string(),
"client" => self.network_globals.client(&peer_id).to_string());
self.processor.on_rpc_error(peer_id, request_id);
} }
RPCCodedResponse::ServerError(error) => { Response::BlocksByRange(beacon_block) => {
warn!(self.log, "RPC Server Error" ; self.processor
"peer_id" => peer_id.to_string(), .on_blocks_by_range_response(peer_id, request_id, beacon_block);
"request_id" => request_id,
"error" => error.to_string(),
"client" => self.network_globals.client(&peer_id).to_string());
self.processor.on_rpc_error(peer_id, request_id);
} }
RPCCodedResponse::Unknown(error) => { Response::BlocksByRoot(beacon_block) => {
warn!(self.log, "RPC Unknown Error"; self.processor
"peer_id" => peer_id.to_string(), .on_blocks_by_root_response(peer_id, request_id, beacon_block);
"request_id" => request_id,
"error" => error.to_string(),
"client" => self.network_globals.client(&peer_id).to_string());
self.processor.on_rpc_error(peer_id, request_id);
}
RPCCodedResponse::Success(response) => match response {
RPCResponse::Status(status_message) => {
self.processor.on_status_response(peer_id, status_message);
}
RPCResponse::BlocksByRange(beacon_block) => {
self.processor.on_blocks_by_range_response(
peer_id,
request_id,
Some(beacon_block),
);
}
RPCResponse::BlocksByRoot(beacon_block) => {
self.processor.on_blocks_by_root_response(
peer_id,
request_id,
Some(beacon_block),
);
}
RPCResponse::Pong(_) => {
unreachable!("Ping must be handled in the behaviour");
}
RPCResponse::MetaData(_) => {
unreachable!("Meta data must be handled in the behaviour");
}
},
RPCCodedResponse::StreamTermination(response_type) => {
// have received a stream termination, notify the processing functions
match response_type {
ResponseTermination::BlocksByRange => {
self.processor
.on_blocks_by_range_response(peer_id, request_id, None);
}
ResponseTermination::BlocksByRoot => {
self.processor
.on_blocks_by_root_response(peer_id, request_id, None);
}
}
} }
} }
} }

View File

@ -7,14 +7,13 @@ use beacon_chain::{
}, },
BeaconChain, BeaconChainTypes, BlockError, BlockProcessingOutcome, GossipVerifiedBlock, BeaconChain, BeaconChainTypes, BlockError, BlockProcessingOutcome, GossipVerifiedBlock,
}; };
use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::*;
use eth2_libp2p::rpc::{RPCCodedResponse, RPCEvent, RPCRequest, RPCResponse, RequestId}; use eth2_libp2p::{NetworkGlobals, PeerId, Request, Response};
use eth2_libp2p::{NetworkGlobals, PeerId};
use slog::{debug, error, o, trace, warn}; use slog::{debug, error, o, trace, warn};
use ssz::Encode; use ssz::Encode;
use std::sync::Arc; use std::sync::Arc;
use store::Store; use store::Store;
use tokio::sync::{mpsc, oneshot}; use tokio::sync::mpsc;
use types::{ use types::{
Attestation, ChainSpec, Epoch, EthSpec, Hash256, SignedAggregateAndProof, SignedBeaconBlock, Attestation, ChainSpec, Epoch, EthSpec, Hash256, SignedAggregateAndProof, SignedBeaconBlock,
Slot, Slot,
@ -33,8 +32,6 @@ pub struct Processor<T: BeaconChainTypes> {
chain: Arc<BeaconChain<T>>, chain: Arc<BeaconChain<T>>,
/// A channel to the syncing thread. /// A channel to the syncing thread.
sync_send: mpsc::UnboundedSender<SyncMessage<T::EthSpec>>, sync_send: mpsc::UnboundedSender<SyncMessage<T::EthSpec>>,
/// A oneshot channel for destroying the sync thread.
_sync_exit: oneshot::Sender<()>,
/// A network context to return and handle RPC requests. /// A network context to return and handle RPC requests.
network: HandlerNetworkContext<T::EthSpec>, network: HandlerNetworkContext<T::EthSpec>,
/// The `RPCHandler` logger. /// The `RPCHandler` logger.
@ -44,7 +41,7 @@ pub struct Processor<T: BeaconChainTypes> {
impl<T: BeaconChainTypes> Processor<T> { impl<T: BeaconChainTypes> Processor<T> {
/// Instantiate a `Processor` instance /// Instantiate a `Processor` instance
pub fn new( pub fn new(
runtime_handle: &tokio::runtime::Handle, executor: environment::TaskExecutor,
beacon_chain: Arc<BeaconChain<T>>, beacon_chain: Arc<BeaconChain<T>>,
network_globals: Arc<NetworkGlobals<T::EthSpec>>, network_globals: Arc<NetworkGlobals<T::EthSpec>>,
network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>, network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
@ -53,8 +50,8 @@ impl<T: BeaconChainTypes> Processor<T> {
let sync_logger = log.new(o!("service"=> "sync")); let sync_logger = log.new(o!("service"=> "sync"));
// spawn the sync thread // spawn the sync thread
let (sync_send, _sync_exit) = crate::sync::manager::spawn( let sync_send = crate::sync::manager::spawn(
runtime_handle, executor,
beacon_chain.clone(), beacon_chain.clone(),
network_globals, network_globals,
network_send.clone(), network_send.clone(),
@ -64,7 +61,6 @@ impl<T: BeaconChainTypes> Processor<T> {
Processor { Processor {
chain: beacon_chain, chain: beacon_chain,
sync_send, sync_send,
_sync_exit,
network: HandlerNetworkContext::new(network_send, log.clone()), network: HandlerNetworkContext::new(network_send, log.clone()),
log: log.clone(), log: log.clone(),
} }
@ -89,7 +85,10 @@ impl<T: BeaconChainTypes> Processor<T> {
/// An error occurred during an RPC request. The state is maintained by the sync manager, so /// An error occurred during an RPC request. The state is maintained by the sync manager, so
/// this function notifies the sync manager of the error. /// this function notifies the sync manager of the error.
pub fn on_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId) { pub fn on_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId) {
self.send_to_sync(SyncMessage::RPCError(peer_id, request_id)); // Check if the failed RPC belongs to sync
if let RequestId::Sync(id) = request_id {
self.send_to_sync(SyncMessage::RPCError(peer_id, id));
}
} }
/// Sends a `Status` message to the peer. /// Sends a `Status` message to the peer.
@ -109,7 +108,7 @@ impl<T: BeaconChainTypes> Processor<T> {
"head_slot" => format!("{}", status_message.head_slot), "head_slot" => format!("{}", status_message.head_slot),
); );
self.network self.network
.send_rpc_request(peer_id, RPCRequest::Status(status_message)); .send_processor_request(peer_id, Request::Status(status_message));
} }
} }
@ -119,7 +118,7 @@ impl<T: BeaconChainTypes> Processor<T> {
pub fn on_status_request( pub fn on_status_request(
&mut self, &mut self,
peer_id: PeerId, peer_id: PeerId,
request_id: RequestId, request_id: SubstreamId,
status: StatusMessage, status: StatusMessage,
) { ) {
debug!( debug!(
@ -136,10 +135,10 @@ impl<T: BeaconChainTypes> Processor<T> {
// ignore status responses if we are shutting down // ignore status responses if we are shutting down
if let Some(status_message) = status_message(&self.chain) { if let Some(status_message) = status_message(&self.chain) {
// Say status back. // Say status back.
self.network.send_rpc_response( self.network.send_response(
peer_id.clone(), peer_id.clone(),
Response::Status(status_message),
request_id, request_id,
RPCResponse::Status(status_message),
); );
} }
@ -284,16 +283,16 @@ impl<T: BeaconChainTypes> Processor<T> {
pub fn on_blocks_by_root_request( pub fn on_blocks_by_root_request(
&mut self, &mut self,
peer_id: PeerId, peer_id: PeerId,
request_id: RequestId, request_id: SubstreamId,
request: BlocksByRootRequest, request: BlocksByRootRequest,
) { ) {
let mut send_block_count = 0; let mut send_block_count = 0;
for root in request.block_roots.iter() { for root in request.block_roots.iter() {
if let Ok(Some(block)) = self.chain.store.get_block(root) { if let Ok(Some(block)) = self.chain.store.get_block(root) {
self.network.send_rpc_response( self.network.send_response(
peer_id.clone(), peer_id.clone(),
Response::BlocksByRoot(Some(Box::new(block))),
request_id, request_id,
RPCResponse::BlocksByRoot(Box::new(block)),
); );
send_block_count += 1; send_block_count += 1;
} else { } else {
@ -314,18 +313,15 @@ impl<T: BeaconChainTypes> Processor<T> {
); );
// send stream termination // send stream termination
self.network.send_rpc_error_response( self.network
peer_id, .send_response(peer_id, Response::BlocksByRoot(None), request_id);
request_id,
RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRoot),
);
} }
/// Handle a `BlocksByRange` request from the peer. /// Handle a `BlocksByRange` request from the peer.
pub fn on_blocks_by_range_request( pub fn on_blocks_by_range_request(
&mut self, &mut self,
peer_id: PeerId, peer_id: PeerId,
request_id: RequestId, request_id: SubstreamId,
req: BlocksByRangeRequest, req: BlocksByRangeRequest,
) { ) {
debug!( debug!(
@ -391,10 +387,10 @@ impl<T: BeaconChainTypes> Processor<T> {
&& block.slot() < req.start_slot + req.count * req.step && block.slot() < req.start_slot + req.count * req.step
{ {
blocks_sent += 1; blocks_sent += 1;
self.network.send_rpc_response( self.network.send_response(
peer_id.clone(), peer_id.clone(),
Response::BlocksByRange(Some(Box::new(block))),
request_id, request_id,
RPCResponse::BlocksByRange(Box::new(block)),
); );
} }
} else { } else {
@ -428,11 +424,8 @@ impl<T: BeaconChainTypes> Processor<T> {
} }
// send the stream terminator // send the stream terminator
self.network.send_rpc_error_response( self.network
peer_id, .send_response(peer_id, Response::BlocksByRange(None), request_id);
request_id,
RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange),
);
} }
/// Handle a `BlocksByRange` response from the peer. /// Handle a `BlocksByRange` response from the peer.
@ -449,11 +442,18 @@ impl<T: BeaconChainTypes> Processor<T> {
"peer" => format!("{:?}", peer_id), "peer" => format!("{:?}", peer_id),
); );
self.send_to_sync(SyncMessage::BlocksByRangeResponse { if let RequestId::Sync(id) = request_id {
peer_id, self.send_to_sync(SyncMessage::BlocksByRangeResponse {
request_id, peer_id,
beacon_block, request_id: id,
}); beacon_block,
});
} else {
debug!(
self.log,
"All blocks by range responses should belong to sync"
);
}
} }
/// Handle a `BlocksByRoot` response from the peer. /// Handle a `BlocksByRoot` response from the peer.
@ -469,11 +469,18 @@ impl<T: BeaconChainTypes> Processor<T> {
"peer" => format!("{:?}", peer_id), "peer" => format!("{:?}", peer_id),
); );
self.send_to_sync(SyncMessage::BlocksByRootResponse { if let RequestId::Sync(id) = request_id {
peer_id, self.send_to_sync(SyncMessage::BlocksByRootResponse {
request_id, peer_id,
beacon_block, request_id: id,
}); beacon_block,
});
} else {
debug!(
self.log,
"All Blocks by Root responses should belong to sync"
)
}
} }
/// Template function to be called on a block to determine if the block should be propagated /// Template function to be called on a block to determine if the block should be propagated
@ -905,8 +912,6 @@ pub(crate) fn status_message<T: BeaconChainTypes>(
/// Wraps a Network Channel to employ various RPC related network functionality for the /// Wraps a Network Channel to employ various RPC related network functionality for the
/// processor. /// processor.
/// The Processor doesn't manage it's own request Id's and can therefore only send
/// responses or requests with 0 request Ids.
pub struct HandlerNetworkContext<T: EthSpec> { pub struct HandlerNetworkContext<T: EthSpec> {
/// The network channel to relay messages to the Network service. /// The network channel to relay messages to the Network service.
network_send: mpsc::UnboundedSender<NetworkMessage<T>>, network_send: mpsc::UnboundedSender<NetworkMessage<T>>,
@ -919,6 +924,12 @@ impl<T: EthSpec> HandlerNetworkContext<T> {
Self { network_send, log } Self { network_send, log }
} }
fn inform_network(&mut self, msg: NetworkMessage<T>) {
self.network_send
.send(msg)
.unwrap_or_else(|_| warn!(self.log, "Could not send message to the network service"))
}
pub fn disconnect(&mut self, peer_id: PeerId, reason: GoodbyeReason) { pub fn disconnect(&mut self, peer_id: PeerId, reason: GoodbyeReason) {
warn!( warn!(
&self.log, &self.log,
@ -926,55 +937,42 @@ impl<T: EthSpec> HandlerNetworkContext<T> {
"reason" => format!("{:?}", reason), "reason" => format!("{:?}", reason),
"peer_id" => format!("{:?}", peer_id), "peer_id" => format!("{:?}", peer_id),
); );
self.send_rpc_request(peer_id.clone(), RPCRequest::Goodbye(reason)); self.send_processor_request(peer_id.clone(), Request::Goodbye(reason));
self.network_send self.inform_network(NetworkMessage::Disconnect { peer_id });
.send(NetworkMessage::Disconnect { peer_id })
.unwrap_or_else(|_| {
warn!(
self.log,
"Could not send a Disconnect to the network service"
)
});
} }
pub fn send_rpc_request(&mut self, peer_id: PeerId, rpc_request: RPCRequest<T>) { pub fn send_processor_request(&mut self, peer_id: PeerId, request: Request) {
// the message handler cannot send requests with ids. Id's are managed by the sync self.inform_network(NetworkMessage::SendRequest {
// manager.
let request_id = 0;
self.send_rpc_event(peer_id, RPCEvent::Request(request_id, rpc_request));
}
/// Convenience function to wrap successful RPC Responses.
pub fn send_rpc_response(
&mut self,
peer_id: PeerId,
request_id: RequestId,
rpc_response: RPCResponse<T>,
) {
self.send_rpc_event(
peer_id, peer_id,
RPCEvent::Response(request_id, RPCCodedResponse::Success(rpc_response)), request_id: RequestId::Router,
); request,
})
} }
/// Send an RPCCodedResponse. This handles errors and stream terminations. pub fn send_response(
pub fn send_rpc_error_response(
&mut self, &mut self,
peer_id: PeerId, peer_id: PeerId,
request_id: RequestId, response: Response<T>,
rpc_error_response: RPCCodedResponse<T>, stream_id: SubstreamId,
) { ) {
self.send_rpc_event(peer_id, RPCEvent::Response(request_id, rpc_error_response)); self.inform_network(NetworkMessage::SendResponse {
peer_id,
stream_id,
response,
})
} }
pub fn _send_error_response(
fn send_rpc_event(&mut self, peer_id: PeerId, rpc_event: RPCEvent<T>) { &mut self,
self.network_send peer_id: PeerId,
.send(NetworkMessage::RPC(peer_id, rpc_event)) substream_id: SubstreamId,
.unwrap_or_else(|_| { error: RPCResponseErrorCode,
warn!( reason: String,
self.log, ) {
"Could not send RPC message to the network service" self.inform_network(NetworkMessage::SendError {
) peer_id,
}); error,
substream_id,
reason,
})
} }
} }

View File

@ -7,15 +7,17 @@ use crate::{
use crate::{error, metrics}; use crate::{error, metrics};
use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_chain::{BeaconChain, BeaconChainTypes};
use eth2_libp2p::Service as LibP2PService; use eth2_libp2p::Service as LibP2PService;
use eth2_libp2p::{rpc::RPCRequest, BehaviourEvent, Enr, MessageId, NetworkGlobals, PeerId}; use eth2_libp2p::{
use eth2_libp2p::{Libp2pEvent, PubsubMessage, RPCEvent}; rpc::{RPCResponseErrorCode, RequestId, SubstreamId},
Libp2pEvent, PubsubMessage, Request, Response,
};
use eth2_libp2p::{BehaviourEvent, Enr, MessageId, NetworkGlobals, PeerId};
use futures::prelude::*; use futures::prelude::*;
use rest_types::ValidatorSubscription; use rest_types::ValidatorSubscription;
use slog::{debug, error, info, o, trace}; use slog::{debug, error, info, o, trace};
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use tokio::runtime::Handle; use tokio::sync::mpsc;
use tokio::sync::{mpsc, oneshot};
use tokio::time::Delay; use tokio::time::Delay;
use types::EthSpec; use types::EthSpec;
@ -53,13 +55,12 @@ impl<T: BeaconChainTypes> NetworkService<T> {
pub fn start( pub fn start(
beacon_chain: Arc<BeaconChain<T>>, beacon_chain: Arc<BeaconChain<T>>,
config: &NetworkConfig, config: &NetworkConfig,
runtime_handle: &Handle, executor: environment::TaskExecutor,
network_log: slog::Logger,
) -> error::Result<( ) -> error::Result<(
Arc<NetworkGlobals<T::EthSpec>>, Arc<NetworkGlobals<T::EthSpec>>,
mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>, mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
oneshot::Sender<()>,
)> { )> {
let network_log = executor.log().clone();
// build the network channel // build the network channel
let (network_send, network_recv) = mpsc::unbounded_channel::<NetworkMessage<T::EthSpec>>(); let (network_send, network_recv) = mpsc::unbounded_channel::<NetworkMessage<T::EthSpec>>();
// get a reference to the beacon chain store // get a reference to the beacon chain store
@ -75,7 +76,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
// launch libp2p service // launch libp2p service
let (network_globals, mut libp2p) = let (network_globals, mut libp2p) =
runtime_handle.enter(|| LibP2PService::new(config, enr_fork_id, &network_log))?; LibP2PService::new(executor.clone(), config, enr_fork_id, &network_log)?;
for enr in load_dht::<T::Store, T::EthSpec>(store.clone()) { for enr in load_dht::<T::Store, T::EthSpec>(store.clone()) {
libp2p.swarm.add_enr(enr); libp2p.swarm.add_enr(enr);
@ -88,7 +89,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
beacon_chain.clone(), beacon_chain.clone(),
network_globals.clone(), network_globals.clone(),
network_send.clone(), network_send.clone(),
runtime_handle, executor.clone(),
network_log.clone(), network_log.clone(),
)?; )?;
@ -111,19 +112,23 @@ impl<T: BeaconChainTypes> NetworkService<T> {
propagation_percentage, propagation_percentage,
}; };
let network_exit = runtime_handle.enter(|| spawn_service(network_service))?; spawn_service(executor, network_service)?;
Ok((network_globals, network_send, network_exit)) Ok((network_globals, network_send))
} }
} }
fn spawn_service<T: BeaconChainTypes>( fn spawn_service<T: BeaconChainTypes>(
executor: environment::TaskExecutor,
mut service: NetworkService<T>, mut service: NetworkService<T>,
) -> error::Result<tokio::sync::oneshot::Sender<()>> { ) -> error::Result<()> {
let (network_exit, mut exit_rx) = tokio::sync::oneshot::channel(); let mut exit_rx = executor.exit();
// spawn on the current executor // spawn on the current executor
tokio::spawn(async move { executor.spawn_without_exit(async move {
// TODO: there is something with this code that prevents cargo fmt from doing anything at
// all. Ok, it is worse, the compiler doesn't show errors over this code beyond ast
// checking
loop { loop {
// build the futures to check simultaneously // build the futures to check simultaneously
tokio::select! { tokio::select! {
@ -151,13 +156,18 @@ fn spawn_service<T: BeaconChainTypes>(
info!(service.log, "Network service shutdown"); info!(service.log, "Network service shutdown");
return; return;
} }
// handle a message sent to the network // handle a message sent to the network
Some(message) = service.network_recv.recv() => { Some(message) = service.network_recv.recv() => {
match message { match message {
NetworkMessage::RPC(peer_id, rpc_event) => { NetworkMessage::SendRequest{ peer_id, request, request_id } => {
trace!(service.log, "Sending RPC"; "rpc" => format!("{}", rpc_event)); service.libp2p.send_request(peer_id, request_id, request);
service.libp2p.swarm.send_rpc(peer_id, rpc_event); }
NetworkMessage::SendResponse{ peer_id, response, stream_id } => {
service.libp2p.send_response(peer_id, stream_id, response);
}
NetworkMessage::SendError{ peer_id, error, substream_id, reason } => {
service.libp2p.respond_with_error(peer_id, substream_id, error, reason);
} }
NetworkMessage::Propagate { NetworkMessage::Propagate {
propagation_source, propagation_source,
@ -178,8 +188,8 @@ fn spawn_service<T: BeaconChainTypes>(
info!(service.log, "Random filter did not propagate message"); info!(service.log, "Random filter did not propagate message");
} else { } else {
trace!(service.log, "Propagating gossipsub message"; trace!(service.log, "Propagating gossipsub message";
"propagation_peer" => format!("{:?}", propagation_source), "propagation_peer" => format!("{:?}", propagation_source),
"message_id" => message_id.to_string(), "message_id" => message_id.to_string(),
); );
service service
.libp2p .libp2p
@ -230,124 +240,143 @@ fn spawn_service<T: BeaconChainTypes>(
.attestation_service .attestation_service
.validator_subscriptions(subscriptions); .validator_subscriptions(subscriptions);
} }
}
}
// process any attestation service events
Some(attestation_service_message) = service.attestation_service.next() => {
match attestation_service_message {
// TODO: Implement
AttServiceMessage::Subscribe(subnet_id) => {
service.libp2p.swarm.subscribe_to_subnet(subnet_id);
}
AttServiceMessage::Unsubscribe(subnet_id) => {
service.libp2p.swarm.subscribe_to_subnet(subnet_id);
}
AttServiceMessage::EnrAdd(subnet_id) => {
service.libp2p.swarm.update_enr_subnet(subnet_id, true);
}
AttServiceMessage::EnrRemove(subnet_id) => {
service.libp2p.swarm.update_enr_subnet(subnet_id, false);
}
AttServiceMessage::DiscoverPeers(subnet_id) => {
service.libp2p.swarm.peers_request(subnet_id);
} }
} }
} // process any attestation service events
libp2p_event = service.libp2p.next_event() => { Some(attestation_service_message) = service.attestation_service.next() => {
// poll the swarm match attestation_service_message {
match libp2p_event { // TODO: Implement
Libp2pEvent::Behaviour(event) => match event { AttServiceMessage::Subscribe(subnet_id) => {
BehaviourEvent::RPC(peer_id, rpc_event) => { service.libp2p.swarm.subscribe_to_subnet(subnet_id);
// if we received a Goodbye message, drop and ban the peer
if let RPCEvent::Request(_, RPCRequest::Goodbye(_)) = rpc_event {
//peers_to_ban.push(peer_id.clone());
service.libp2p.disconnect_and_ban_peer(
peer_id.clone(),
std::time::Duration::from_secs(BAN_PEER_TIMEOUT),
);
};
let _ = service
.router_send
.send(RouterMessage::RPC(peer_id, rpc_event))
.map_err(|_| {
debug!(service.log, "Failed to send RPC to router");
});
} }
BehaviourEvent::StatusPeer(peer_id) => { AttServiceMessage::Unsubscribe(subnet_id) => {
let _ = service service.libp2p.swarm.subscribe_to_subnet(subnet_id);
.router_send
.send(RouterMessage::StatusPeer(peer_id))
.map_err(|_| {
debug!(service.log, "Failed to send re-status peer to router");
});
} }
BehaviourEvent::PubsubMessage { AttServiceMessage::EnrAdd(subnet_id) => {
id, service.libp2p.swarm.update_enr_subnet(subnet_id, true);
source, }
message, AttServiceMessage::EnrRemove(subnet_id) => {
.. service.libp2p.swarm.update_enr_subnet(subnet_id, false);
} => { }
// Update prometheus metrics. AttServiceMessage::DiscoverPeers{subnet_id, min_ttl} => {
expose_receive_metrics(&message); service.libp2p.swarm.discover_subnet_peers(subnet_id, min_ttl);
match message { }
// attestation information gets processed in the attestation service }
PubsubMessage::Attestation(ref subnet_and_attestation) => { }
let subnet = &subnet_and_attestation.0; libp2p_event = service.libp2p.next_event() => {
let attestation = &subnet_and_attestation.1; // poll the swarm
// checks if we have an aggregator for the slot. If so, we process match libp2p_event {
// the attestation Libp2pEvent::Behaviour(event) => match event {
if service.attestation_service.should_process_attestation( BehaviourEvent::RequestReceived{peer_id, id, request} => {
&id, if let Request::Goodbye(_) = request {
&source, // if we received a Goodbye message, drop and ban the peer
subnet, //peers_to_ban.push(peer_id.clone());
attestation, // TODO: remove this: https://github.com/sigp/lighthouse/issues/1240
) { service.libp2p.disconnect_and_ban_peer(
peer_id.clone(),
std::time::Duration::from_secs(BAN_PEER_TIMEOUT),
);
};
let _ = service
.router_send
.send(RouterMessage::RPCRequestReceived{peer_id, stream_id:id, request})
.map_err(|_| {
debug!(service.log, "Failed to send RPC to router");
});
}
BehaviourEvent::ResponseReceived{peer_id, id, response} => {
let _ = service
.router_send
.send(RouterMessage::RPCResponseReceived{ peer_id, request_id:id, response })
.map_err(|_| {
debug!(service.log, "Failed to send RPC to router");
});
}
BehaviourEvent::RPCFailed{id, peer_id, error} => {
let _ = service
.router_send
.send(RouterMessage::RPCFailed{ peer_id, request_id:id, error })
.map_err(|_| {
debug!(service.log, "Failed to send RPC to router");
});
}
BehaviourEvent::StatusPeer(peer_id) => {
let _ = service
.router_send
.send(RouterMessage::StatusPeer(peer_id))
.map_err(|_| {
debug!(service.log, "Failed to send re-status peer to router");
});
}
BehaviourEvent::PubsubMessage {
id,
source,
message,
..
} => {
// Update prometheus metrics.
expose_receive_metrics(&message);
match message {
// attestation information gets processed in the attestation service
PubsubMessage::Attestation(ref subnet_and_attestation) => {
let subnet = &subnet_and_attestation.0;
let attestation = &subnet_and_attestation.1;
// checks if we have an aggregator for the slot. If so, we process
// the attestation
if service.attestation_service.should_process_attestation(
&id,
&source,
subnet,
attestation,
) {
let _ = service
.router_send
.send(RouterMessage::PubsubMessage(id, source, message))
.map_err(|_| {
debug!(service.log, "Failed to send pubsub message to router");
});
} else {
metrics::inc_counter(&metrics::GOSSIP_UNAGGREGATED_ATTESTATIONS_IGNORED)
}
}
_ => {
// all else is sent to the router
let _ = service let _ = service
.router_send .router_send
.send(RouterMessage::PubsubMessage(id, source, message)) .send(RouterMessage::PubsubMessage(id, source, message))
.map_err(|_| { .map_err(|_| {
debug!(service.log, "Failed to send pubsub message to router"); debug!(service.log, "Failed to send pubsub message to router");
}); });
} else {
metrics::inc_counter(&metrics::GOSSIP_UNAGGREGATED_ATTESTATIONS_IGNORED)
} }
} }
_ => { }
// all else is sent to the router BehaviourEvent::PeerSubscribed(_, _) => {},
let _ = service }
.router_send Libp2pEvent::NewListenAddr(multiaddr) => {
.send(RouterMessage::PubsubMessage(id, source, message)) service.network_globals.listen_multiaddrs.write().push(multiaddr);
.map_err(|_| { }
debug!(service.log, "Failed to send pubsub message to router"); Libp2pEvent::PeerConnected{ peer_id, endpoint,} => {
}); debug!(service.log, "Peer Connected"; "peer_id" => peer_id.to_string(), "endpoint" => format!("{:?}", endpoint));
} if let eth2_libp2p::ConnectedPoint::Dialer { .. } = endpoint {
let _ = service
.router_send
.send(RouterMessage::PeerDialed(peer_id))
.map_err(|_| {
debug!(service.log, "Failed to send peer dialed to router"); });
} }
} }
BehaviourEvent::PeerSubscribed(_, _) => {}, Libp2pEvent::PeerDisconnected{ peer_id, endpoint,} => {
debug!(service.log, "Peer Disconnected"; "peer_id" => peer_id.to_string(), "endpoint" => format!("{:?}", endpoint));
let _ = service
.router_send
.send(RouterMessage::PeerDisconnected(peer_id))
.map_err(|_| {
debug!(service.log, "Failed to send peer disconnect to router");
});
}
} }
Libp2pEvent::NewListenAddr(multiaddr) => {
service.network_globals.listen_multiaddrs.write().push(multiaddr);
}
Libp2pEvent::PeerConnected{ peer_id, endpoint,} => {
debug!(service.log, "Peer Connected"; "peer_id" => peer_id.to_string(), "endpoint" => format!("{:?}", endpoint));
if let eth2_libp2p::ConnectedPoint::Dialer { .. } = endpoint {
let _ = service
.router_send
.send(RouterMessage::PeerDialed(peer_id))
.map_err(|_| {
debug!(service.log, "Failed to send peer dialed to router"); });
}
}
Libp2pEvent::PeerDisconnected{ peer_id, endpoint,} => {
debug!(service.log, "Peer Disconnected"; "peer_id" => peer_id.to_string(), "endpoint" => format!("{:?}", endpoint));
let _ = service
.router_send
.send(RouterMessage::PeerDisconnected(peer_id))
.map_err(|_| {
debug!(service.log, "Failed to send peer disconnect to router");
});
}
}
} }
} }
@ -361,9 +390,9 @@ fn spawn_service<T: BeaconChainTypes>(
} }
} }
} }
}); }, "network");
Ok(network_exit) Ok(())
} }
/// Returns a `Delay` that triggers shortly after the next change in the beacon chain fork version. /// Returns a `Delay` that triggers shortly after the next change in the beacon chain fork version.
@ -385,8 +414,27 @@ pub enum NetworkMessage<T: EthSpec> {
Subscribe { Subscribe {
subscriptions: Vec<ValidatorSubscription>, subscriptions: Vec<ValidatorSubscription>,
}, },
/// Send an RPC message to the libp2p service. /// Send an RPC request to the libp2p service.
RPC(PeerId, RPCEvent<T>), SendRequest {
peer_id: PeerId,
request: Request,
request_id: RequestId,
},
/// Send a successful Response to the libp2p service.
SendResponse {
peer_id: PeerId,
response: Response<T>,
stream_id: SubstreamId,
},
/// Respond to a peer's request with an error.
SendError {
// TODO: note that this is never used, we just say goodbye without nicely clossing the
// stream assigned to the request
peer_id: PeerId,
error: RPCResponseErrorCode,
reason: String,
substream_id: SubstreamId,
},
/// Publish a list of messages to the gossipsub protocol. /// Publish a list of messages to the gossipsub protocol.
Publish { messages: Vec<PubsubMessage<T>> }, Publish { messages: Vec<PubsubMessage<T>> },
/// Propagate a received gossipsub message. /// Propagate a received gossipsub message.

View File

@ -32,7 +32,9 @@ mod tests {
let enrs = vec![enr1, enr2]; let enrs = vec![enr1, enr2];
let runtime = Runtime::new().unwrap(); let runtime = Runtime::new().unwrap();
let handle = runtime.handle().clone();
let (signal, exit) = exit_future::signal();
let executor = environment::TaskExecutor::new(runtime.handle().clone(), exit, log.clone());
let mut config = NetworkConfig::default(); let mut config = NetworkConfig::default();
config.libp2p_port = 21212; config.libp2p_port = 21212;
@ -42,8 +44,8 @@ mod tests {
// Create a new network service which implicitly gets dropped at the // Create a new network service which implicitly gets dropped at the
// end of the block. // end of the block.
let _ = let _ = NetworkService::start(beacon_chain.clone(), &config, executor).unwrap();
NetworkService::start(beacon_chain.clone(), &config, &handle, log.clone()).unwrap(); drop(signal);
}); });
runtime.shutdown_timeout(tokio::time::Duration::from_millis(300)); runtime.shutdown_timeout(tokio::time::Duration::from_millis(300));

View File

@ -37,9 +37,10 @@ use super::block_processor::{spawn_block_processor, BatchProcessResult, ProcessI
use super::network_context::SyncNetworkContext; use super::network_context::SyncNetworkContext;
use super::peer_sync_info::{PeerSyncInfo, PeerSyncType}; use super::peer_sync_info::{PeerSyncInfo, PeerSyncType};
use super::range_sync::{BatchId, ChainId, RangeSync}; use super::range_sync::{BatchId, ChainId, RangeSync};
use super::RequestId;
use crate::service::NetworkMessage; use crate::service::NetworkMessage;
use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome};
use eth2_libp2p::rpc::{methods::*, RequestId}; use eth2_libp2p::rpc::BlocksByRootRequest;
use eth2_libp2p::types::NetworkGlobals; use eth2_libp2p::types::NetworkGlobals;
use eth2_libp2p::PeerId; use eth2_libp2p::PeerId;
use fnv::FnvHashMap; use fnv::FnvHashMap;
@ -48,7 +49,7 @@ use smallvec::SmallVec;
use std::boxed::Box; use std::boxed::Box;
use std::ops::Sub; use std::ops::Sub;
use std::sync::Arc; use std::sync::Arc;
use tokio::sync::{mpsc, oneshot}; use tokio::sync::mpsc;
use types::{EthSpec, Hash256, SignedBeaconBlock, Slot}; use types::{EthSpec, Hash256, SignedBeaconBlock, Slot};
/// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync
@ -181,17 +182,12 @@ impl SingleBlockRequest {
/// chain. This allows the chain to be /// chain. This allows the chain to be
/// dropped during the syncing process which will gracefully end the `SyncManager`. /// dropped during the syncing process which will gracefully end the `SyncManager`.
pub fn spawn<T: BeaconChainTypes>( pub fn spawn<T: BeaconChainTypes>(
runtime_handle: &tokio::runtime::Handle, executor: environment::TaskExecutor,
beacon_chain: Arc<BeaconChain<T>>, beacon_chain: Arc<BeaconChain<T>>,
network_globals: Arc<NetworkGlobals<T::EthSpec>>, network_globals: Arc<NetworkGlobals<T::EthSpec>>,
network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>, network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
log: slog::Logger, log: slog::Logger,
) -> ( ) -> mpsc::UnboundedSender<SyncMessage<T::EthSpec>> {
mpsc::UnboundedSender<SyncMessage<T::EthSpec>>,
oneshot::Sender<()>,
) {
// generate the exit channel
let (sync_exit, exit_rx) = tokio::sync::oneshot::channel();
// generate the message channel // generate the message channel
let (sync_send, sync_recv) = mpsc::unbounded_channel::<SyncMessage<T::EthSpec>>(); let (sync_send, sync_recv) = mpsc::unbounded_channel::<SyncMessage<T::EthSpec>>();
@ -215,11 +211,8 @@ pub fn spawn<T: BeaconChainTypes>(
// spawn the sync manager thread // spawn the sync manager thread
debug!(log, "Sync Manager started"); debug!(log, "Sync Manager started");
runtime_handle.spawn(async move { executor.spawn(async move { Box::pin(sync_manager.main()).await }, "sync");
futures::future::select(Box::pin(sync_manager.main()), exit_rx).await; sync_send
info!(log.clone(), "Sync Manager shutdown");
});
(sync_send, sync_exit)
} }
impl<T: BeaconChainTypes> SyncManager<T> { impl<T: BeaconChainTypes> SyncManager<T> {

View File

@ -9,3 +9,6 @@ mod range_sync;
pub use manager::SyncMessage; pub use manager::SyncMessage;
pub use peer_sync_info::PeerSyncInfo; pub use peer_sync_info::PeerSyncInfo;
/// Type of id of rpc requests sent by sync
pub type RequestId = usize;

View File

@ -4,9 +4,8 @@
use crate::router::processor::status_message; use crate::router::processor::status_message;
use crate::service::NetworkMessage; use crate::service::NetworkMessage;
use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_chain::{BeaconChain, BeaconChainTypes};
use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::{BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, RequestId};
use eth2_libp2p::rpc::{RPCEvent, RPCRequest, RequestId}; use eth2_libp2p::{Client, NetworkGlobals, PeerId, Request};
use eth2_libp2p::{Client, NetworkGlobals, PeerId};
use slog::{debug, trace, warn}; use slog::{debug, trace, warn};
use std::sync::Arc; use std::sync::Arc;
use tokio::sync::mpsc; use tokio::sync::mpsc;
@ -22,7 +21,7 @@ pub struct SyncNetworkContext<T: EthSpec> {
network_globals: Arc<NetworkGlobals<T>>, network_globals: Arc<NetworkGlobals<T>>,
/// A sequential ID for all RPC requests. /// A sequential ID for all RPC requests.
request_id: RequestId, request_id: usize,
/// Logger for the `SyncNetworkContext`. /// Logger for the `SyncNetworkContext`.
log: slog::Logger, log: slog::Logger,
} }
@ -68,7 +67,7 @@ impl<T: EthSpec> SyncNetworkContext<T> {
"head_slot" => format!("{}", status_message.head_slot), "head_slot" => format!("{}", status_message.head_slot),
); );
let _ = self.send_rpc_request(peer_id, RPCRequest::Status(status_message)); let _ = self.send_rpc_request(peer_id, Request::Status(status_message));
} }
} }
@ -76,7 +75,7 @@ impl<T: EthSpec> SyncNetworkContext<T> {
&mut self, &mut self,
peer_id: PeerId, peer_id: PeerId,
request: BlocksByRangeRequest, request: BlocksByRangeRequest,
) -> Result<RequestId, &'static str> { ) -> Result<usize, &'static str> {
trace!( trace!(
self.log, self.log,
"Sending BlocksByRange Request"; "Sending BlocksByRange Request";
@ -84,14 +83,14 @@ impl<T: EthSpec> SyncNetworkContext<T> {
"count" => request.count, "count" => request.count,
"peer" => format!("{:?}", peer_id) "peer" => format!("{:?}", peer_id)
); );
self.send_rpc_request(peer_id, RPCRequest::BlocksByRange(request)) self.send_rpc_request(peer_id, Request::BlocksByRange(request))
} }
pub fn blocks_by_root_request( pub fn blocks_by_root_request(
&mut self, &mut self,
peer_id: PeerId, peer_id: PeerId,
request: BlocksByRootRequest, request: BlocksByRootRequest,
) -> Result<RequestId, &'static str> { ) -> Result<usize, &'static str> {
trace!( trace!(
self.log, self.log,
"Sending BlocksByRoot Request"; "Sending BlocksByRoot Request";
@ -99,7 +98,7 @@ impl<T: EthSpec> SyncNetworkContext<T> {
"count" => request.block_roots.len(), "count" => request.block_roots.len(),
"peer" => format!("{:?}", peer_id) "peer" => format!("{:?}", peer_id)
); );
self.send_rpc_request(peer_id, RPCRequest::BlocksByRoot(request)) self.send_rpc_request(peer_id, Request::BlocksByRoot(request))
} }
pub fn downvote_peer(&mut self, peer_id: PeerId) { pub fn downvote_peer(&mut self, peer_id: PeerId) {
@ -109,6 +108,10 @@ impl<T: EthSpec> SyncNetworkContext<T> {
"peer" => format!("{:?}", peer_id) "peer" => format!("{:?}", peer_id)
); );
// TODO: Implement reputation // TODO: Implement reputation
// TODO: what if we first close the channel sending a response
// RPCResponseErrorCode::InvalidRequest (or something)
// and then disconnect the peer? either request dc or let the behaviour have that logic
// itself
self.disconnect(peer_id, GoodbyeReason::Fault); self.disconnect(peer_id, GoodbyeReason::Fault);
} }
@ -121,7 +124,7 @@ impl<T: EthSpec> SyncNetworkContext<T> {
); );
// ignore the error if the channel send fails // ignore the error if the channel send fails
let _ = self.send_rpc_request(peer_id.clone(), RPCRequest::Goodbye(reason)); let _ = self.send_rpc_request(peer_id.clone(), Request::Goodbye(reason));
self.network_send self.network_send
.send(NetworkMessage::Disconnect { peer_id }) .send(NetworkMessage::Disconnect { peer_id })
.unwrap_or_else(|_| { .unwrap_or_else(|_| {
@ -135,27 +138,22 @@ impl<T: EthSpec> SyncNetworkContext<T> {
pub fn send_rpc_request( pub fn send_rpc_request(
&mut self, &mut self,
peer_id: PeerId, peer_id: PeerId,
rpc_request: RPCRequest<T>, request: Request,
) -> Result<RequestId, &'static str> { ) -> Result<usize, &'static str> {
let request_id = self.request_id; let request_id = self.request_id;
self.request_id += 1; self.request_id += 1;
self.send_rpc_event(peer_id, RPCEvent::Request(request_id, rpc_request))?; self.send_network_msg(NetworkMessage::SendRequest {
peer_id,
request_id: RequestId::Sync(request_id),
request,
})?;
Ok(request_id) Ok(request_id)
} }
fn send_rpc_event( fn send_network_msg(&mut self, msg: NetworkMessage<T>) -> Result<(), &'static str> {
&mut self, self.network_send.send(msg).map_err(|_| {
peer_id: PeerId, debug!(self.log, "Could not send message to the network service");
rpc_event: RPCEvent<T>, "Network channel send Failed"
) -> Result<(), &'static str> { })
self.network_send
.send(NetworkMessage::RPC(peer_id, rpc_event))
.map_err(|_| {
debug!(
self.log,
"Could not send RPC message to the network service"
);
"Network channel send Failed"
})
} }
} }

View File

@ -1,7 +1,7 @@
use super::manager::SLOT_IMPORT_TOLERANCE; use super::manager::SLOT_IMPORT_TOLERANCE;
use crate::router::processor::status_message; use crate::router::processor::status_message;
use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_chain::{BeaconChain, BeaconChainTypes};
use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::*;
use eth2_libp2p::SyncInfo; use eth2_libp2p::SyncInfo;
use std::ops::Sub; use std::ops::Sub;
use std::sync::Arc; use std::sync::Arc;

View File

@ -1,6 +1,5 @@
use super::chain::EPOCHS_PER_BATCH; use super::chain::EPOCHS_PER_BATCH;
use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::methods::*;
use eth2_libp2p::rpc::RequestId;
use eth2_libp2p::PeerId; use eth2_libp2p::PeerId;
use fnv::FnvHashMap; use fnv::FnvHashMap;
use ssz::Encode; use ssz::Encode;
@ -112,9 +111,9 @@ impl<T: EthSpec> PartialOrd for Batch<T> {
/// This is used to optimise searches for idle peers (peers that have no outbound batch requests). /// This is used to optimise searches for idle peers (peers that have no outbound batch requests).
pub struct PendingBatches<T: EthSpec> { pub struct PendingBatches<T: EthSpec> {
/// The current pending batches. /// The current pending batches.
batches: FnvHashMap<RequestId, Batch<T>>, batches: FnvHashMap<usize, Batch<T>>,
/// A mapping of peers to the number of pending requests. /// A mapping of peers to the number of pending requests.
peer_requests: HashMap<PeerId, HashSet<RequestId>>, peer_requests: HashMap<PeerId, HashSet<usize>>,
} }
impl<T: EthSpec> PendingBatches<T> { impl<T: EthSpec> PendingBatches<T> {
@ -125,7 +124,7 @@ impl<T: EthSpec> PendingBatches<T> {
} }
} }
pub fn insert(&mut self, request_id: RequestId, batch: Batch<T>) -> Option<Batch<T>> { pub fn insert(&mut self, request_id: usize, batch: Batch<T>) -> Option<Batch<T>> {
let peer_request = batch.current_peer.clone(); let peer_request = batch.current_peer.clone();
self.peer_requests self.peer_requests
.entry(peer_request) .entry(peer_request)
@ -134,7 +133,7 @@ impl<T: EthSpec> PendingBatches<T> {
self.batches.insert(request_id, batch) self.batches.insert(request_id, batch)
} }
pub fn remove(&mut self, request_id: RequestId) -> Option<Batch<T>> { pub fn remove(&mut self, request_id: usize) -> Option<Batch<T>> {
if let Some(batch) = self.batches.remove(&request_id) { if let Some(batch) = self.batches.remove(&request_id) {
if let Entry::Occupied(mut entry) = self.peer_requests.entry(batch.current_peer.clone()) if let Entry::Occupied(mut entry) = self.peer_requests.entry(batch.current_peer.clone())
{ {
@ -157,7 +156,7 @@ impl<T: EthSpec> PendingBatches<T> {
/// Adds a block to the batches if the request id exists. Returns None if there is no batch /// Adds a block to the batches if the request id exists. Returns None if there is no batch
/// matching the request id. /// matching the request id.
pub fn add_block(&mut self, request_id: RequestId, block: SignedBeaconBlock<T>) -> Option<()> { pub fn add_block(&mut self, request_id: usize, block: SignedBeaconBlock<T>) -> Option<()> {
let batch = self.batches.get_mut(&request_id)?; let batch = self.batches.get_mut(&request_id)?;
batch.downloaded_blocks.push(block); batch.downloaded_blocks.push(block);
Some(()) Some(())

View File

@ -1,9 +1,8 @@
use super::batch::{Batch, BatchId, PendingBatches}; use super::batch::{Batch, BatchId, PendingBatches};
use crate::sync::block_processor::{spawn_block_processor, BatchProcessResult, ProcessId}; use crate::sync::block_processor::{spawn_block_processor, BatchProcessResult, ProcessId};
use crate::sync::network_context::SyncNetworkContext; use crate::sync::network_context::SyncNetworkContext;
use crate::sync::SyncMessage; use crate::sync::{RequestId, SyncMessage};
use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_chain::{BeaconChain, BeaconChainTypes};
use eth2_libp2p::rpc::RequestId;
use eth2_libp2p::PeerId; use eth2_libp2p::PeerId;
use rand::prelude::*; use rand::prelude::*;
use slog::{crit, debug, warn}; use slog::{crit, debug, warn};

View File

@ -47,8 +47,8 @@ use crate::sync::block_processor::BatchProcessResult;
use crate::sync::manager::SyncMessage; use crate::sync::manager::SyncMessage;
use crate::sync::network_context::SyncNetworkContext; use crate::sync::network_context::SyncNetworkContext;
use crate::sync::PeerSyncInfo; use crate::sync::PeerSyncInfo;
use crate::sync::RequestId;
use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_chain::{BeaconChain, BeaconChainTypes};
use eth2_libp2p::rpc::RequestId;
use eth2_libp2p::{NetworkGlobals, PeerId}; use eth2_libp2p::{NetworkGlobals, PeerId};
use slog::{debug, error, trace}; use slog::{debug, error, trace};
use std::collections::HashSet; use std::collections::HashSet;

View File

@ -20,6 +20,7 @@ use state_processing::per_block_processing::{
}; };
use std::collections::{hash_map, HashMap, HashSet}; use std::collections::{hash_map, HashMap, HashSet};
use std::marker::PhantomData; use std::marker::PhantomData;
use std::ptr;
use types::{ use types::{
typenum::Unsigned, Attestation, AttesterSlashing, BeaconState, BeaconStateError, ChainSpec, typenum::Unsigned, Attestation, AttesterSlashing, BeaconState, BeaconStateError, ChainSpec,
EthSpec, Fork, Hash256, ProposerSlashing, RelativeEpoch, SignedVoluntaryExit, Validator, EthSpec, Fork, Hash256, ProposerSlashing, RelativeEpoch, SignedVoluntaryExit, Validator,
@ -408,6 +409,9 @@ fn prune_validator_hash_map<T, F, E: EthSpec>(
/// Compare two operation pools. /// Compare two operation pools.
impl<T: EthSpec + Default> PartialEq for OperationPool<T> { impl<T: EthSpec + Default> PartialEq for OperationPool<T> {
fn eq(&self, other: &Self) -> bool { fn eq(&self, other: &Self) -> bool {
if ptr::eq(self, other) {
return true;
}
*self.attestations.read() == *other.attestations.read() *self.attestations.read() == *other.attestations.read()
&& *self.attester_slashings.read() == *other.attester_slashings.read() && *self.attester_slashings.read() == *other.attester_slashings.read()
&& *self.proposer_slashings.read() == *other.proposer_slashings.read() && *self.proposer_slashings.read() == *other.proposer_slashings.read()

View File

@ -25,7 +25,7 @@ state_processing = { path = "../../consensus/state_processing" }
types = { path = "../../consensus/types" } types = { path = "../../consensus/types" }
http = "0.2.1" http = "0.2.1"
hyper = "0.13.5" hyper = "0.13.5"
tokio = { version = "0.2", features = ["sync"] } tokio = { version = "0.2.21", features = ["sync"] }
url = "2.1.1" url = "2.1.1"
lazy_static = "1.4.0" lazy_static = "1.4.0"
eth2_config = { path = "../../common/eth2_config" } eth2_config = { path = "../../common/eth2_config" }
@ -36,6 +36,9 @@ parking_lot = "0.10.2"
futures = "0.3.5" futures = "0.3.5"
operation_pool = { path = "../operation_pool" } operation_pool = { path = "../operation_pool" }
rayon = "1.3.0" rayon = "1.3.0"
environment = { path = "../../lighthouse/environment" }
uhttp_sse = "0.5.1"
bus = "2.2.3"
[dev-dependencies] [dev-dependencies]
assert_matches = "1.3.0" assert_matches = "1.3.0"

View File

@ -3,16 +3,22 @@ use crate::response_builder::ResponseBuilder;
use crate::validator::get_state_for_epoch; use crate::validator::get_state_for_epoch;
use crate::{ApiError, ApiResult, UrlQuery}; use crate::{ApiError, ApiResult, UrlQuery};
use beacon_chain::{BeaconChain, BeaconChainTypes, StateSkipConfig}; use beacon_chain::{BeaconChain, BeaconChainTypes, StateSkipConfig};
use hyper::{Body, Request}; use bus::BusReader;
use futures::executor::block_on;
use hyper::body::Bytes;
use hyper::{Body, Request, Response};
use rest_types::{ use rest_types::{
BlockResponse, CanonicalHeadResponse, Committee, HeadBeaconBlock, StateResponse, BlockResponse, CanonicalHeadResponse, Committee, HeadBeaconBlock, StateResponse,
ValidatorRequest, ValidatorResponse, ValidatorRequest, ValidatorResponse,
}; };
use std::io::Write;
use std::sync::Arc; use std::sync::Arc;
use store::Store; use store::Store;
use slog::{error, Logger};
use types::{ use types::{
AttesterSlashing, BeaconState, EthSpec, Hash256, ProposerSlashing, PublicKeyBytes, AttesterSlashing, BeaconState, EthSpec, Hash256, ProposerSlashing, PublicKeyBytes,
RelativeEpoch, Slot, RelativeEpoch, SignedBeaconBlockHash, Slot,
}; };
/// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. /// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`.
@ -122,6 +128,48 @@ pub fn get_block_root<T: BeaconChainTypes>(
ResponseBuilder::new(&req)?.body(&root) ResponseBuilder::new(&req)?.body(&root)
} }
fn make_sse_response_chunk(new_head_hash: SignedBeaconBlockHash) -> std::io::Result<Bytes> {
let mut buffer = Vec::new();
{
let mut sse_message = uhttp_sse::SseMessage::new(&mut buffer);
let untyped_hash: Hash256 = new_head_hash.into();
write!(sse_message.data()?, "{:?}", untyped_hash)?;
}
let bytes: Bytes = buffer.into();
Ok(bytes)
}
pub fn stream_forks<T: BeaconChainTypes>(
log: Logger,
mut events: BusReader<SignedBeaconBlockHash>,
) -> ApiResult {
let (mut sender, body) = Body::channel();
std::thread::spawn(move || {
while let Ok(new_head_hash) = events.recv() {
let chunk = match make_sse_response_chunk(new_head_hash) {
Ok(chunk) => chunk,
Err(e) => {
error!(log, "Failed to make SSE chunk"; "error" => e.to_string());
sender.abort();
break;
}
};
if let Err(bytes) = block_on(sender.send_data(chunk)) {
error!(log, "Couldn't stream piece {:?}", bytes);
}
}
});
let response = Response::builder()
.status(200)
.header("Content-Type", "text/event-stream")
.header("Connection", "Keep-Alive")
.header("Cache-Control", "no-cache")
.header("Access-Control-Allow-Origin", "*")
.body(body)
.map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e)))?;
Ok(response)
}
/// HTTP handler to return the `Fork` of the current head. /// HTTP handler to return the `Fork` of the current head.
pub fn get_fork<T: BeaconChainTypes>( pub fn get_fork<T: BeaconChainTypes>(
req: Request<Body>, req: Request<Body>,

View File

@ -71,6 +71,12 @@ impl From<hyper::error::Error> for ApiError {
} }
} }
impl From<std::io::Error> for ApiError {
fn from(e: std::io::Error) -> ApiError {
ApiError::ServerError(format!("IO error: {:?}", e))
}
}
impl StdError for ApiError { impl StdError for ApiError {
fn cause(&self) -> Option<&dyn StdError> { fn cause(&self) -> Option<&dyn StdError> {
None None

View File

@ -21,6 +21,7 @@ mod url_query;
mod validator; mod validator;
use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_chain::{BeaconChain, BeaconChainTypes};
use bus::Bus;
use client_network::NetworkMessage; use client_network::NetworkMessage;
pub use config::ApiEncodingFormat; pub use config::ApiEncodingFormat;
use error::{ApiError, ApiResult}; use error::{ApiError, ApiResult};
@ -30,12 +31,13 @@ use futures::future::TryFutureExt;
use hyper::server::conn::AddrStream; use hyper::server::conn::AddrStream;
use hyper::service::{make_service_fn, service_fn}; use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Request, Server}; use hyper::{Body, Request, Server};
use parking_lot::Mutex;
use slog::{info, warn}; use slog::{info, warn};
use std::net::SocketAddr; use std::net::SocketAddr;
use std::ops::Deref;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::Arc; use std::sync::Arc;
use tokio::sync::{mpsc, oneshot}; use tokio::sync::mpsc;
use types::SignedBeaconBlockHash;
use url_query::UrlQuery; use url_query::UrlQuery;
pub use crate::helpers::parse_pubkey_bytes; pub use crate::helpers::parse_pubkey_bytes;
@ -51,14 +53,16 @@ pub struct NetworkInfo<T: BeaconChainTypes> {
// Allowing more than 7 arguments. // Allowing more than 7 arguments.
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
pub fn start_server<T: BeaconChainTypes>( pub fn start_server<T: BeaconChainTypes>(
executor: environment::TaskExecutor,
config: &Config, config: &Config,
beacon_chain: Arc<BeaconChain<T>>, beacon_chain: Arc<BeaconChain<T>>,
network_info: NetworkInfo<T>, network_info: NetworkInfo<T>,
db_path: PathBuf, db_path: PathBuf,
freezer_db_path: PathBuf, freezer_db_path: PathBuf,
eth2_config: Eth2Config, eth2_config: Eth2Config,
log: slog::Logger, events: Arc<Mutex<Bus<SignedBeaconBlockHash>>>,
) -> Result<(oneshot::Sender<()>, SocketAddr), hyper::Error> { ) -> Result<SocketAddr, hyper::Error> {
let log = executor.log();
let inner_log = log.clone(); let inner_log = log.clone();
let eth2_config = Arc::new(eth2_config); let eth2_config = Arc::new(eth2_config);
@ -71,6 +75,7 @@ pub fn start_server<T: BeaconChainTypes>(
let network_channel = network_info.network_chan.clone(); let network_channel = network_info.network_chan.clone();
let db_path = db_path.clone(); let db_path = db_path.clone();
let freezer_db_path = freezer_db_path.clone(); let freezer_db_path = freezer_db_path.clone();
let events = events.clone();
async move { async move {
Ok::<_, hyper::Error>(service_fn(move |req: Request<Body>| { Ok::<_, hyper::Error>(service_fn(move |req: Request<Body>| {
@ -83,6 +88,7 @@ pub fn start_server<T: BeaconChainTypes>(
log.clone(), log.clone(),
db_path.clone(), db_path.clone(),
freezer_db_path.clone(), freezer_db_path.clone(),
events.clone(),
) )
})) }))
} }
@ -98,7 +104,7 @@ pub fn start_server<T: BeaconChainTypes>(
let actual_listen_addr = server.local_addr(); let actual_listen_addr = server.local_addr();
// Build a channel to kill the HTTP server. // Build a channel to kill the HTTP server.
let (exit_signal, exit) = oneshot::channel::<()>(); let exit = executor.exit();
let inner_log = log.clone(); let inner_log = log.clone();
let server_exit = async move { let server_exit = async move {
let _ = exit.await; let _ = exit.await;
@ -116,7 +122,8 @@ pub fn start_server<T: BeaconChainTypes>(
inner_log, inner_log,
"HTTP server failed to start, Unable to bind"; "address" => format!("{:?}", e) "HTTP server failed to start, Unable to bind"; "address" => format!("{:?}", e)
) )
}); })
.unwrap_or_else(|_| ());
info!( info!(
log, log,
@ -125,18 +132,7 @@ pub fn start_server<T: BeaconChainTypes>(
"port" => actual_listen_addr.port(), "port" => actual_listen_addr.port(),
); );
tokio::spawn(server_future); executor.spawn_without_exit(server_future, "http");
Ok((exit_signal, actual_listen_addr)) Ok(actual_listen_addr)
}
#[derive(Clone)]
pub struct DBPath(PathBuf);
impl Deref for DBPath {
type Target = PathBuf;
fn deref(&self) -> &Self::Target {
&self.0
}
} }

View File

@ -3,14 +3,16 @@ use crate::{
spec, validator, NetworkChannel, spec, validator, NetworkChannel,
}; };
use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_chain::{BeaconChain, BeaconChainTypes};
use bus::Bus;
use eth2_config::Eth2Config; use eth2_config::Eth2Config;
use eth2_libp2p::NetworkGlobals; use eth2_libp2p::NetworkGlobals;
use hyper::{Body, Error, Method, Request, Response}; use hyper::{Body, Error, Method, Request, Response};
use parking_lot::Mutex;
use slog::debug; use slog::debug;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::Arc; use std::sync::Arc;
use std::time::Instant; use std::time::Instant;
use types::Slot; use types::{SignedBeaconBlockHash, Slot};
// Allowing more than 7 arguments. // Allowing more than 7 arguments.
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
@ -23,6 +25,7 @@ pub async fn route<T: BeaconChainTypes>(
local_log: slog::Logger, local_log: slog::Logger,
db_path: PathBuf, db_path: PathBuf,
freezer_db_path: PathBuf, freezer_db_path: PathBuf,
events: Arc<Mutex<Bus<SignedBeaconBlockHash>>>,
) -> Result<Response<Body>, Error> { ) -> Result<Response<Body>, Error> {
metrics::inc_counter(&metrics::REQUEST_COUNT); metrics::inc_counter(&metrics::REQUEST_COUNT);
let timer = metrics::start_timer(&metrics::REQUEST_RESPONSE_TIME); let timer = metrics::start_timer(&metrics::REQUEST_RESPONSE_TIME);
@ -63,6 +66,10 @@ pub async fn route<T: BeaconChainTypes>(
(&Method::GET, "/beacon/block") => beacon::get_block::<T>(req, beacon_chain), (&Method::GET, "/beacon/block") => beacon::get_block::<T>(req, beacon_chain),
(&Method::GET, "/beacon/block_root") => beacon::get_block_root::<T>(req, beacon_chain), (&Method::GET, "/beacon/block_root") => beacon::get_block_root::<T>(req, beacon_chain),
(&Method::GET, "/beacon/fork") => beacon::get_fork::<T>(req, beacon_chain), (&Method::GET, "/beacon/fork") => beacon::get_fork::<T>(req, beacon_chain),
(&Method::GET, "/beacon/fork/stream") => {
let reader = events.lock().add_rx();
beacon::stream_forks::<T>(log, reader)
}
(&Method::GET, "/beacon/genesis_time") => beacon::get_genesis_time::<T>(req, beacon_chain), (&Method::GET, "/beacon/genesis_time") => beacon::get_genesis_time::<T>(req, beacon_chain),
(&Method::GET, "/beacon/genesis_validators_root") => { (&Method::GET, "/beacon/genesis_validators_root") => {
beacon::get_genesis_validators_root::<T>(req, beacon_chain) beacon::get_genesis_validators_root::<T>(req, beacon_chain)

View File

@ -10,10 +10,10 @@ pub use client::{Client, ClientBuilder, ClientConfig, ClientGenesis};
pub use config::{get_data_dir, get_eth2_testnet_config, get_testnet_dir}; pub use config::{get_data_dir, get_eth2_testnet_config, get_testnet_dir};
pub use eth2_config::Eth2Config; pub use eth2_config::Eth2Config;
use beacon_chain::events::TeeEventHandler;
use beacon_chain::migrate::{BackgroundMigrator, HotColdDB}; use beacon_chain::migrate::{BackgroundMigrator, HotColdDB};
use beacon_chain::{ use beacon_chain::{
builder::Witness, eth1_chain::CachingEth1Backend, events::WebSocketSender, builder::Witness, eth1_chain::CachingEth1Backend, slot_clock::SystemTimeSlotClock,
slot_clock::SystemTimeSlotClock,
}; };
use clap::ArgMatches; use clap::ArgMatches;
use config::get_config; use config::get_config;
@ -30,7 +30,7 @@ pub type ProductionClient<E> = Client<
SystemTimeSlotClock, SystemTimeSlotClock,
CachingEth1Backend<E, HotColdDB<E>>, CachingEth1Backend<E, HotColdDB<E>>,
E, E,
WebSocketSender<E>, TeeEventHandler<E>,
>, >,
>; >;
@ -58,7 +58,7 @@ impl<E: EthSpec> ProductionBeaconNode<E> {
&matches, &matches,
&context.eth2_config.spec_constants, &context.eth2_config.spec_constants,
&context.eth2_config().spec, &context.eth2_config().spec,
context.log.clone(), context.log().clone(),
)?; )?;
Self::new(context, client_config).await Self::new(context, client_config).await
} }
@ -75,7 +75,7 @@ impl<E: EthSpec> ProductionBeaconNode<E> {
let client_config_1 = client_config.clone(); let client_config_1 = client_config.clone();
let client_genesis = client_config.genesis.clone(); let client_genesis = client_config.genesis.clone();
let store_config = client_config.store.clone(); let store_config = client_config.store.clone();
let log = context.log.clone(); let log = context.log().clone();
let db_path = client_config.create_db_path()?; let db_path = client_config.create_db_path()?;
let freezer_db_path_res = client_config.create_freezer_db_path(); let freezer_db_path_res = client_config.create_freezer_db_path();
@ -113,15 +113,17 @@ impl<E: EthSpec> ProductionBeaconNode<E> {
builder.no_eth1_backend()? builder.no_eth1_backend()?
}; };
let builder = builder let (builder, events) = builder
.system_time_slot_clock()? .system_time_slot_clock()?
.websocket_event_handler(client_config.websocket_server.clone())? .tee_event_handler(client_config.websocket_server.clone())?;
let builder = builder
.build_beacon_chain()? .build_beacon_chain()?
.network(&mut client_config.network)? .network(&mut client_config.network)?
.notifier()?; .notifier()?;
let builder = if client_config.rest_api.enabled { let builder = if client_config.rest_api.enabled {
builder.http_server(&client_config, &http_eth2_config)? builder.http_server(&client_config, &http_eth2_config, events)?
} else { } else {
builder builder
}; };

View File

@ -16,7 +16,7 @@ rayon = "1.3.0"
[dependencies] [dependencies]
db-key = "0.0.5" db-key = "0.0.5"
leveldb = "0.8.4" leveldb = "0.8.5"
parking_lot = "0.10.2" parking_lot = "0.10.2"
itertools = "0.9.0" itertools = "0.9.0"
eth2_ssz = "0.1.2" eth2_ssz = "0.1.2"
@ -29,4 +29,4 @@ serde = "1.0.110"
serde_derive = "1.0.110" serde_derive = "1.0.110"
lazy_static = "1.4.0" lazy_static = "1.4.0"
lighthouse_metrics = { path = "../../common/lighthouse_metrics" } lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
lru = "0.4.3" lru = "0.5.1"

View File

@ -51,4 +51,5 @@ fn http_server_genesis_state() {
api_state, db_state, api_state, db_state,
"genesis state from api should match that from the DB" "genesis state from api should match that from the DB"
); );
env.fire_signal();
} }

View File

@ -8,7 +8,8 @@ edition = "2018"
beacon_chain = { path = "../beacon_chain" } beacon_chain = { path = "../beacon_chain" }
types = { path = "../../consensus/types" } types = { path = "../../consensus/types" }
slot_clock = { path = "../../common/slot_clock" } slot_clock = { path = "../../common/slot_clock" }
tokio = { version = "0.2.20", features = ["full"] } tokio = { version = "0.2.21", features = ["full"] }
slog = "2.5.2" slog = "2.5.2"
parking_lot = "0.10.2" parking_lot = "0.10.2"
futures = "0.3.5" futures = "0.3.5"
environment = { path = "../../lighthouse/environment" }

View File

@ -3,23 +3,20 @@
//! This service allows task execution on the beacon node for various functionality. //! This service allows task execution on the beacon node for various functionality.
use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_chain::{BeaconChain, BeaconChainTypes};
use futures::future;
use futures::stream::StreamExt; use futures::stream::StreamExt;
use slog::info;
use slot_clock::SlotClock; use slot_clock::SlotClock;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use tokio::time::{interval_at, Instant}; use tokio::time::{interval_at, Instant};
/// Spawns a timer service which periodically executes tasks for the beacon chain /// Spawns a timer service which periodically executes tasks for the beacon chain
/// TODO: We might not need a `Handle` to the runtime since this function should be pub fn spawn_timer<T: BeaconChainTypes>(
/// called from the context of a runtime and we can simply spawn using task::spawn. executor: environment::TaskExecutor,
/// Check for issues without the Handle.
pub fn spawn<T: BeaconChainTypes>(
beacon_chain: Arc<BeaconChain<T>>, beacon_chain: Arc<BeaconChain<T>>,
milliseconds_per_slot: u64, milliseconds_per_slot: u64,
) -> Result<tokio::sync::oneshot::Sender<()>, &'static str> { ) -> Result<(), &'static str> {
let (exit_signal, exit) = tokio::sync::oneshot::channel(); let log = executor.log();
let start_instant = Instant::now() let start_instant = Instant::now()
+ beacon_chain + beacon_chain
.slot_clock .slot_clock
@ -27,14 +24,15 @@ pub fn spawn<T: BeaconChainTypes>(
.ok_or_else(|| "slot_notifier unable to determine time to next slot")?; .ok_or_else(|| "slot_notifier unable to determine time to next slot")?;
// Warning: `interval_at` panics if `milliseconds_per_slot` = 0. // Warning: `interval_at` panics if `milliseconds_per_slot` = 0.
let timer_future = interval_at(start_instant, Duration::from_millis(milliseconds_per_slot)) let mut interval = interval_at(start_instant, Duration::from_millis(milliseconds_per_slot));
.for_each(move |_| { let timer_future = async move {
while interval.next().await.is_some() {
beacon_chain.per_slot_task(); beacon_chain.per_slot_task();
future::ready(()) }
}); };
let future = futures::future::select(timer_future, exit); executor.spawn(timer_future, "timer");
tokio::spawn(future); info!(log, "Timer service started");
Ok(exit_signal) Ok(())
} }

View File

@ -12,6 +12,7 @@ serde = "1.0.110"
serde_derive = "1.0.110" serde_derive = "1.0.110"
serde_json = "1.0.52" serde_json = "1.0.52"
slog = "2.5.2" slog = "2.5.2"
tokio = { version = "0.2.20", features = ["full"] } tokio = { version = "0.2.21", features = ["full"] }
types = { path = "../../consensus/types" } types = { path = "../../consensus/types" }
ws = "0.9.1" ws = "0.9.1"
environment = { path = "../../lighthouse/environment" }

View File

@ -1,4 +1,4 @@
use slog::{debug, error, info, warn, Logger}; use slog::{debug, error, info, warn};
use std::marker::PhantomData; use std::marker::PhantomData;
use std::net::SocketAddr; use std::net::SocketAddr;
use types::EthSpec; use types::EthSpec;
@ -34,16 +34,10 @@ impl<T: EthSpec> WebSocketSender<T> {
} }
pub fn start_server<T: EthSpec>( pub fn start_server<T: EthSpec>(
executor: environment::TaskExecutor,
config: &Config, config: &Config,
log: &Logger, ) -> Result<(WebSocketSender<T>, SocketAddr), String> {
) -> Result< let log = executor.log();
(
WebSocketSender<T>,
tokio::sync::oneshot::Sender<()>,
SocketAddr,
),
String,
> {
let server_string = format!("{}:{}", config.listen_address, config.port); let server_string = format!("{}:{}", config.listen_address, config.port);
// Create a server that simply ignores any incoming messages. // Create a server that simply ignores any incoming messages.
@ -67,31 +61,26 @@ pub fn start_server<T: EthSpec>(
let broadcaster = server.broadcaster(); let broadcaster = server.broadcaster();
// Produce a signal/channel that can gracefully shutdown the websocket server. // Produce a signal/channel that can gracefully shutdown the websocket server.
let exit_channel = { let exit = executor.exit();
let (exit_channel, exit) = tokio::sync::oneshot::channel(); let log_inner = log.clone();
let broadcaster_inner = server.broadcaster();
let log_inner = log.clone(); let exit_future = async move {
let broadcaster_inner = server.broadcaster(); let _ = exit.await;
let exit_future = async move { if let Err(e) = broadcaster_inner.shutdown() {
let _ = exit.await; warn!(
if let Err(e) = broadcaster_inner.shutdown() { log_inner,
warn!( "Websocket server errored on shutdown";
log_inner, "error" => format!("{:?}", e)
"Websocket server errored on shutdown"; );
"error" => format!("{:?}", e) } else {
); info!(log_inner, "Websocket server shutdown");
} else { }
info!(log_inner, "Websocket server shutdown");
}
};
// Place a future on the handle that will shutdown the websocket server when the
// application exits.
tokio::spawn(exit_future);
exit_channel
}; };
// Place a future on the handle that will shutdown the websocket server when the
// application exits.
executor.runtime_handle().spawn(exit_future);
let log_inner = log.clone(); let log_inner = log.clone();
let _ = std::thread::spawn(move || match server.run() { let _ = std::thread::spawn(move || match server.run() {
@ -122,7 +111,6 @@ pub fn start_server<T: EthSpec>(
sender: Some(broadcaster), sender: Some(broadcaster),
_phantom: PhantomData, _phantom: PhantomData,
}, },
exit_channel,
actual_listen_addr, actual_listen_addr,
)) ))
} }

View File

@ -16,5 +16,5 @@ tempdir = "0.3.7"
serde = "1.0.110" serde = "1.0.110"
serde_yaml = "0.8.11" serde_yaml = "0.8.11"
types = { path = "../../consensus/types"} types = { path = "../../consensus/types"}
eth2-libp2p = { path = "../../beacon_node/eth2-libp2p"} enr = { version = "0.1.0", features = ["libsecp256k1", "ed25519"] }
eth2_ssz = "0.1.2" eth2_ssz = "0.1.2"

View File

@ -7,7 +7,7 @@
//! //!
//! https://github.com/sigp/lighthouse/pull/605 //! https://github.com/sigp/lighthouse/pull/605
use eth2_libp2p::Enr; use enr::{CombinedKey, Enr};
use ssz::{Decode, Encode}; use ssz::{Decode, Encode};
use std::fs::{create_dir_all, File}; use std::fs::{create_dir_all, File};
use std::io::{Read, Write}; use std::io::{Read, Write};
@ -39,7 +39,7 @@ pub const HARDCODED_BOOT_ENR: &[u8] = include_bytes!("../witti-v0-11-3/boot_enr.
pub struct Eth2TestnetConfig<E: EthSpec> { pub struct Eth2TestnetConfig<E: EthSpec> {
pub deposit_contract_address: String, pub deposit_contract_address: String,
pub deposit_contract_deploy_block: u64, pub deposit_contract_deploy_block: u64,
pub boot_enr: Option<Vec<Enr>>, pub boot_enr: Option<Vec<Enr<CombinedKey>>>,
pub genesis_state: Option<BeaconState<E>>, pub genesis_state: Option<BeaconState<E>>,
pub yaml_config: Option<YamlConfig>, pub yaml_config: Option<YamlConfig>,
} }
@ -246,7 +246,7 @@ mod tests {
} }
fn do_test<E: EthSpec>( fn do_test<E: EthSpec>(
boot_enr: Option<Vec<Enr>>, boot_enr: Option<Vec<Enr<CombinedKey>>>,
genesis_state: Option<BeaconState<E>>, genesis_state: Option<BeaconState<E>>,
yaml_config: Option<YamlConfig>, yaml_config: Option<YamlConfig>,
) { ) {

View File

@ -6,7 +6,7 @@ edition = "2018"
[dependencies] [dependencies]
futures = "0.3.5" futures = "0.3.5"
tokio = { version = "0.2.20", features = ["time"] } tokio = { version = "0.2.21", features = ["time"] }
[dev-dependencies] [dev-dependencies]
tokio = { version = "0.2.20", features = ["time", "rt-threaded", "macros"] } tokio = { version = "0.2.21", features = ["time", "rt-threaded", "macros"] }

View File

@ -56,7 +56,9 @@
use prometheus::{HistogramOpts, HistogramTimer, Opts}; use prometheus::{HistogramOpts, HistogramTimer, Opts};
pub use prometheus::{Encoder, Gauge, Histogram, IntCounter, IntGauge, Result, TextEncoder}; pub use prometheus::{
Encoder, Gauge, Histogram, HistogramVec, IntCounter, IntGauge, IntGaugeVec, Result, TextEncoder,
};
/// Collect all the metrics for reporting. /// Collect all the metrics for reporting.
pub fn gather() -> Vec<prometheus::proto::MetricFamily> { pub fn gather() -> Vec<prometheus::proto::MetricFamily> {
@ -99,6 +101,48 @@ pub fn try_create_histogram(name: &str, help: &str) -> Result<Histogram> {
Ok(histogram) Ok(histogram)
} }
/// Attempts to crate a `HistogramVec`, returning `Err` if the registry does not accept the counter
/// (potentially due to naming conflict).
pub fn try_create_histogram_vec(
name: &str,
help: &str,
label_names: &[&str],
) -> Result<HistogramVec> {
let opts = HistogramOpts::new(name, help);
let histogram_vec = HistogramVec::new(opts, label_names)?;
prometheus::register(Box::new(histogram_vec.clone()))?;
Ok(histogram_vec)
}
/// Attempts to crate a `IntGaugeVec`, returning `Err` if the registry does not accept the gauge
/// (potentially due to naming conflict).
pub fn try_create_int_gauge_vec(
name: &str,
help: &str,
label_names: &[&str],
) -> Result<IntGaugeVec> {
let opts = Opts::new(name, help);
let counter_vec = IntGaugeVec::new(opts, label_names)?;
prometheus::register(Box::new(counter_vec.clone()))?;
Ok(counter_vec)
}
pub fn get_int_gauge(int_gauge_vec: &Result<IntGaugeVec>, name: &[&str]) -> Option<IntGauge> {
if let Ok(int_gauge_vec) = int_gauge_vec {
Some(int_gauge_vec.get_metric_with_label_values(name).ok()?)
} else {
None
}
}
pub fn get_histogram(histogram_vec: &Result<HistogramVec>, name: &[&str]) -> Option<Histogram> {
if let Ok(histogram_vec) = histogram_vec {
Some(histogram_vec.get_metric_with_label_values(name).ok()?)
} else {
None
}
}
/// Starts a timer for the given `Histogram`, stopping when it gets dropped or given to `stop_timer(..)`. /// Starts a timer for the given `Histogram`, stopping when it gets dropped or given to `stop_timer(..)`.
pub fn start_timer(histogram: &Result<Histogram>) -> Option<HistogramTimer> { pub fn start_timer(histogram: &Result<Histogram>) -> Option<HistogramTimer> {
if let Ok(histogram) = histogram { if let Ok(histogram) = histogram {
@ -133,6 +177,18 @@ pub fn set_gauge(gauge: &Result<IntGauge>, value: i64) {
} }
} }
pub fn inc_gauge(gauge: &Result<IntGauge>) {
if let Ok(gauge) = gauge {
gauge.inc();
}
}
pub fn dec_gauge(gauge: &Result<IntGauge>) {
if let Ok(gauge) = gauge {
gauge.dec();
}
}
pub fn maybe_set_gauge(gauge: &Result<IntGauge>, value_opt: Option<i64>) { pub fn maybe_set_gauge(gauge: &Result<IntGauge>, value_opt: Option<i64>) {
if let Some(value) = value_opt { if let Some(value) = value_opt {
set_gauge(gauge, value) set_gauge(gauge, value)

View File

@ -5,6 +5,7 @@ use parking_lot::{RwLock, RwLockReadGuard};
use ssz::{Decode, Encode}; use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode}; use ssz_derive::{Decode, Encode};
use std::collections::HashMap; use std::collections::HashMap;
use std::ptr;
use types::{Epoch, Hash256, Slot}; use types::{Epoch, Hash256, Slot};
pub const DEFAULT_PRUNE_THRESHOLD: usize = 256; pub const DEFAULT_PRUNE_THRESHOLD: usize = 256;
@ -51,6 +52,9 @@ pub struct ProtoArrayForkChoice {
impl PartialEq for ProtoArrayForkChoice { impl PartialEq for ProtoArrayForkChoice {
fn eq(&self, other: &Self) -> bool { fn eq(&self, other: &Self) -> bool {
if ptr::eq(self, other) {
return true;
}
*self.proto_array.read() == *other.proto_array.read() *self.proto_array.read() == *other.proto_array.read()
&& *self.votes.read() == *other.votes.read() && *self.votes.read() == *other.votes.read()
&& *self.balances.read() == *other.balances.read() && *self.balances.read() == *other.balances.read()

View File

@ -27,7 +27,7 @@ dirs = "2.0.2"
genesis = { path = "../beacon_node/genesis" } genesis = { path = "../beacon_node/genesis" }
deposit_contract = { path = "../common/deposit_contract" } deposit_contract = { path = "../common/deposit_contract" }
tree_hash = "0.1.0" tree_hash = "0.1.0"
tokio = { version = "0.2.20", features = ["full"] } tokio = { version = "0.2.21", features = ["full"] }
clap_utils = { path = "../common/clap_utils" } clap_utils = { path = "../common/clap_utils" }
eth2-libp2p = { path = "../beacon_node/eth2-libp2p" } eth2-libp2p = { path = "../beacon_node/eth2-libp2p" }
validator_dir = { path = "../common/validator_dir", features = ["insecure_keys"] } validator_dir = { path = "../common/validator_dir", features = ["insecure_keys"] }

View File

@ -46,7 +46,7 @@ pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches<'_>) -> Res
config.lowest_cached_block_number = eth2_testnet_config.deposit_contract_deploy_block; config.lowest_cached_block_number = eth2_testnet_config.deposit_contract_deploy_block;
config.follow_distance = spec.eth1_follow_distance / 2; config.follow_distance = spec.eth1_follow_distance / 2;
let genesis_service = Eth1GenesisService::new(config, env.core_context().log.clone()); let genesis_service = Eth1GenesisService::new(config, env.core_context().log().clone());
env.runtime().block_on(async { env.runtime().block_on(async {
let _ = genesis_service let _ = genesis_service

View File

@ -9,7 +9,7 @@ write_ssz_files = ["beacon_node/write_ssz_files"] # Writes debugging .ssz files
[dependencies] [dependencies]
beacon_node = { "path" = "../beacon_node" } beacon_node = { "path" = "../beacon_node" }
tokio = "0.2.20" tokio = "0.2.21"
slog = { version = "2.5.2", features = ["max_level_trace"] } slog = { version = "2.5.2", features = ["max_level_trace"] }
sloggers = "1.0.0" sloggers = "1.0.0"
types = { "path" = "../consensus/types" } types = { "path" = "../consensus/types" }

View File

@ -6,7 +6,7 @@ edition = "2018"
[dependencies] [dependencies]
clap = "2.33.0" clap = "2.33.0"
tokio = "0.2.20" tokio = "0.2.21"
slog = { version = "2.5.2", features = ["max_level_trace"] } slog = { version = "2.5.2", features = ["max_level_trace"] }
sloggers = "1.0.0" sloggers = "1.0.0"
types = { "path" = "../../consensus/types" } types = { "path" = "../../consensus/types" }
@ -20,6 +20,9 @@ ctrlc = { version = "3.1.4", features = ["termination"] }
futures = "0.3.5" futures = "0.3.5"
parking_lot = "0.10.2" parking_lot = "0.10.2"
slog-json = "2.3.0" slog-json = "2.3.0"
exit-future = "0.2.0"
lazy_static = "1.4.0"
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
[dev-dependencies] [dev-dependencies]
beacon_node = { path = "../../beacon_node" } beacon_node = { path = "../../beacon_node" }

View File

@ -0,0 +1,128 @@
use crate::metrics;
use futures::prelude::*;
use slog::{debug, trace};
use tokio::runtime::Handle;
/// A wrapper over a runtime handle which can spawn async and blocking tasks.
#[derive(Clone)]
pub struct TaskExecutor {
/// The handle to the runtime on which tasks are spawned
pub(crate) handle: Handle,
/// The receiver exit future which on receiving shuts down the task
pub(crate) exit: exit_future::Exit,
pub(crate) log: slog::Logger,
}
impl TaskExecutor {
    /// Create a new task executor.
    ///
    /// Note: this function is mainly useful in tests. A `TaskExecutor` should be normally obtained from
    /// a [`RuntimeContext`](struct.RuntimeContext.html)
    pub fn new(handle: Handle, exit: exit_future::Exit, log: slog::Logger) -> Self {
        Self { handle, exit, log }
    }

    /// Spawn a future on the tokio runtime wrapped in an `exit_future::Exit`. The task is canceled
    /// when the corresponding exit_future `Signal` is fired/dropped.
    ///
    /// This function generates prometheus metrics on number of tasks and task duration.
    pub fn spawn(&self, task: impl Future<Output = ()> + Send + 'static, name: &'static str) {
        let exit = self.exit.clone();
        let log = self.log.clone();

        // Metrics are best-effort: a failed gauge lookup must not prevent the
        // task from being spawned (previously the task was silently dropped).
        let int_gauge = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]);
        if let Some(gauge) = &int_gauge {
            gauge.inc();
        }

        // Task is shutdown before it completes if `exit` receives.
        let future = future::select(Box::pin(task), exit).then(move |either| {
            match either {
                future::Either::Left(_) => trace!(log, "Async task completed"; "task" => name),
                future::Either::Right(_) => {
                    debug!(log, "Async task shutdown, exit received"; "task" => name)
                }
            }
            if let Some(gauge) = int_gauge {
                gauge.dec();
            }
            futures::future::ready(())
        });

        self.handle.spawn(future);
    }

    /// Spawn a future on the tokio runtime. This function does not wrap the task in an `exit_future::Exit`
    /// like [spawn](#method.spawn).
    /// The caller of this function is responsible for wrapping up the task with an `exit_future::Exit` to
    /// ensure that the task gets canceled appropriately.
    /// This function generates prometheus metrics on the number of tasks.
    ///
    /// This is useful in cases where the future to be spawned needs to do additional cleanup work when
    /// the task is completed/canceled (e.g. writing local variables to disk) or the task is created from
    /// some framework which does its own cleanup (e.g. a hyper server).
    pub fn spawn_without_exit(
        &self,
        task: impl Future<Output = ()> + Send + 'static,
        name: &'static str,
    ) {
        // Metrics are best-effort: spawn the task even if the gauge is unavailable.
        let int_gauge = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]);
        if let Some(gauge) = &int_gauge {
            gauge.inc();
        }

        let future = task.then(move |_| {
            if let Some(gauge) = int_gauge {
                gauge.dec();
            }
            futures::future::ready(())
        });

        self.handle.spawn(future);
    }

    /// Spawn a blocking task on a dedicated tokio thread pool wrapped in an exit future.
    /// This function generates prometheus metrics on number of tasks and task duration.
    pub fn spawn_blocking<F>(&self, task: F, name: &'static str)
    where
        F: FnOnce() + Send + 'static,
    {
        let exit = self.exit.clone();
        let log = self.log.clone();

        // Metrics are best-effort: a missing histogram or gauge must not
        // prevent the blocking task from running.
        let timer = metrics::get_histogram(&metrics::BLOCKING_TASKS_HISTOGRAM, &[name])
            .map(|histogram| histogram.start_timer());
        let int_gauge = metrics::get_int_gauge(&metrics::BLOCKING_TASKS_COUNT, &[name]);
        if let Some(gauge) = &int_gauge {
            gauge.inc();
        }

        let join_handle = self.handle.spawn_blocking(task);
        let future = future::select(join_handle, exit).then(move |either| {
            match either {
                future::Either::Left(_) => {
                    trace!(log, "Blocking task completed"; "task" => name)
                }
                future::Either::Right(_) => {
                    debug!(log, "Blocking task shutdown, exit received"; "task" => name)
                }
            }
            if let Some(timer) = timer {
                timer.observe_duration();
            }
            if let Some(gauge) = int_gauge {
                gauge.dec();
            }
            futures::future::ready(())
        });

        self.handle.spawn(future);
    }

    /// Returns the underlying runtime handle.
    pub fn runtime_handle(&self) -> Handle {
        self.handle.clone()
    }

    /// Returns a copy of the `exit_future::Exit`.
    pub fn exit(&self) -> exit_future::Exit {
        self.exit.clone()
    }

    /// Returns a reference to the logger.
    pub fn log(&self) -> &slog::Logger {
        &self.log
    }
}

View File

@ -10,6 +10,8 @@
use eth2_config::Eth2Config; use eth2_config::Eth2Config;
use eth2_testnet_config::Eth2TestnetConfig; use eth2_testnet_config::Eth2TestnetConfig;
use futures::channel::oneshot; use futures::channel::oneshot;
pub use executor::TaskExecutor;
use slog::{info, o, Drain, Level, Logger}; use slog::{info, o, Drain, Level, Logger};
use sloggers::{null::NullLoggerBuilder, Build}; use sloggers::{null::NullLoggerBuilder, Build};
use std::cell::RefCell; use std::cell::RefCell;
@ -17,8 +19,10 @@ use std::ffi::OsStr;
use std::fs::{rename as FsRename, OpenOptions}; use std::fs::{rename as FsRename, OpenOptions};
use std::path::PathBuf; use std::path::PathBuf;
use std::time::{SystemTime, UNIX_EPOCH}; use std::time::{SystemTime, UNIX_EPOCH};
use tokio::runtime::{Builder as RuntimeBuilder, Handle, Runtime}; use tokio::runtime::{Builder as RuntimeBuilder, Runtime};
use types::{EthSpec, InteropEthSpec, MainnetEthSpec, MinimalEthSpec}; use types::{EthSpec, InteropEthSpec, MainnetEthSpec, MinimalEthSpec};
mod executor;
mod metrics;
pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml";
@ -184,10 +188,13 @@ impl<E: EthSpec> EnvironmentBuilder<E> {
/// Consumes the builder, returning an `Environment`. /// Consumes the builder, returning an `Environment`.
pub fn build(self) -> Result<Environment<E>, String> { pub fn build(self) -> Result<Environment<E>, String> {
let (signal, exit) = exit_future::signal();
Ok(Environment { Ok(Environment {
runtime: self runtime: self
.runtime .runtime
.ok_or_else(|| "Cannot build environment without runtime".to_string())?, .ok_or_else(|| "Cannot build environment without runtime".to_string())?,
signal: Some(signal),
exit,
log: self log: self
.log .log
.ok_or_else(|| "Cannot build environment without log".to_string())?, .ok_or_else(|| "Cannot build environment without log".to_string())?,
@ -204,8 +211,7 @@ impl<E: EthSpec> EnvironmentBuilder<E> {
/// `Runtime`, instead it only has access to a `Runtime`. /// `Runtime`, instead it only has access to a `Runtime`.
#[derive(Clone)] #[derive(Clone)]
pub struct RuntimeContext<E: EthSpec> { pub struct RuntimeContext<E: EthSpec> {
pub runtime_handle: Handle, pub executor: TaskExecutor,
pub log: Logger,
pub eth_spec_instance: E, pub eth_spec_instance: E,
pub eth2_config: Eth2Config, pub eth2_config: Eth2Config,
} }
@ -216,8 +222,11 @@ impl<E: EthSpec> RuntimeContext<E> {
/// The generated service will have the `service_name` in all it's logs. /// The generated service will have the `service_name` in all it's logs.
pub fn service_context(&self, service_name: String) -> Self { pub fn service_context(&self, service_name: String) -> Self {
Self { Self {
runtime_handle: self.runtime_handle.clone(), executor: TaskExecutor {
log: self.log.new(o!("service" => service_name)), handle: self.executor.handle.clone(),
exit: self.executor.exit.clone(),
log: self.executor.log.new(o!("service" => service_name)),
},
eth_spec_instance: self.eth_spec_instance.clone(), eth_spec_instance: self.eth_spec_instance.clone(),
eth2_config: self.eth2_config.clone(), eth2_config: self.eth2_config.clone(),
} }
@ -227,12 +236,19 @@ impl<E: EthSpec> RuntimeContext<E> {
pub fn eth2_config(&self) -> &Eth2Config { pub fn eth2_config(&self) -> &Eth2Config {
&self.eth2_config &self.eth2_config
} }
/// Returns a reference to the logger for this service.
pub fn log(&self) -> &slog::Logger {
self.executor.log()
}
} }
/// An environment where Lighthouse services can run. Used to start a production beacon node or /// An environment where Lighthouse services can run. Used to start a production beacon node or
/// validator client, or to run tests that involve logging and async task execution. /// validator client, or to run tests that involve logging and async task execution.
pub struct Environment<E: EthSpec> { pub struct Environment<E: EthSpec> {
runtime: Runtime, runtime: Runtime,
signal: Option<exit_future::Signal>,
exit: exit_future::Exit,
log: Logger, log: Logger,
eth_spec_instance: E, eth_spec_instance: E,
pub eth2_config: Eth2Config, pub eth2_config: Eth2Config,
@ -251,8 +267,11 @@ impl<E: EthSpec> Environment<E> {
/// Returns a `Context` where no "service" has been added to the logger output. /// Returns a `Context` where no "service" has been added to the logger output.
pub fn core_context(&mut self) -> RuntimeContext<E> { pub fn core_context(&mut self) -> RuntimeContext<E> {
RuntimeContext { RuntimeContext {
runtime_handle: self.runtime.handle().clone(), executor: TaskExecutor {
log: self.log.clone(), exit: self.exit.clone(),
handle: self.runtime().handle().clone(),
log: self.log.clone(),
},
eth_spec_instance: self.eth_spec_instance.clone(), eth_spec_instance: self.eth_spec_instance.clone(),
eth2_config: self.eth2_config.clone(), eth2_config: self.eth2_config.clone(),
} }
@ -261,8 +280,11 @@ impl<E: EthSpec> Environment<E> {
/// Returns a `Context` where the `service_name` is added to the logger output. /// Returns a `Context` where the `service_name` is added to the logger output.
pub fn service_context(&mut self, service_name: String) -> RuntimeContext<E> { pub fn service_context(&mut self, service_name: String) -> RuntimeContext<E> {
RuntimeContext { RuntimeContext {
runtime_handle: self.runtime.handle().clone(), executor: TaskExecutor {
log: self.log.new(o!("service" => service_name)), exit: self.exit.clone(),
handle: self.runtime().handle().clone(),
log: self.log.new(o!("service" => service_name.clone())),
},
eth_spec_instance: self.eth_spec_instance.clone(), eth_spec_instance: self.eth_spec_instance.clone(),
eth2_config: self.eth2_config.clone(), eth2_config: self.eth2_config.clone(),
} }
@ -291,6 +313,13 @@ impl<E: EthSpec> Environment<E> {
.shutdown_timeout(std::time::Duration::from_secs(2)) .shutdown_timeout(std::time::Duration::from_secs(2))
} }
/// Fire exit signal which shuts down all spawned services
pub fn fire_signal(&mut self) {
if let Some(signal) = self.signal.take() {
let _ = signal.fire();
}
}
/// Sets the logger (and all child loggers) to log to a file. /// Sets the logger (and all child loggers) to log to a file.
pub fn log_to_json_file( pub fn log_to_json_file(
&mut self, &mut self,

View File

@ -0,0 +1,21 @@
/// Handles async task metrics
use lazy_static::lazy_static;
pub use lighthouse_metrics::*;
lazy_static! {
pub static ref ASYNC_TASKS_COUNT: Result<IntGaugeVec> = try_create_int_gauge_vec(
"async_tasks_count",
"Total number of async tasks spawned using spawn",
&["async_task_count"]
);
pub static ref BLOCKING_TASKS_COUNT: Result<IntGaugeVec> = try_create_int_gauge_vec(
"blocking_tasks_count",
"Total number of async tasks spawned using spawn_blocking",
&["blocking_task_count"]
);
pub static ref BLOCKING_TASKS_HISTOGRAM: Result<HistogramVec> = try_create_histogram_vec(
"blocking_tasks_histogram",
"Time taken by blocking tasks",
&["blocking_task_hist"]
);
}

View File

@ -142,7 +142,7 @@ fn run<E: EthSpec>(
.optional_eth2_testnet_config(optional_testnet_config)? .optional_eth2_testnet_config(optional_testnet_config)?
.build()?; .build()?;
let log = environment.core_context().log; let log = environment.core_context().log().clone();
if let Some(log_path) = matches.value_of("logfile") { if let Some(log_path) = matches.value_of("logfile") {
let path = log_path let path = log_path
@ -217,11 +217,15 @@ fn run<E: EthSpec>(
)) ))
.map_err(|e| format!("Failed to init validator client: {}", e))?; .map_err(|e| format!("Failed to init validator client: {}", e))?;
environment.core_context().runtime_handle.enter(|| { environment
validator .core_context()
.start_service() .executor
.map_err(|e| format!("Failed to start validator client service: {}", e)) .runtime_handle()
})?; .enter(|| {
validator
.start_service()
.map_err(|e| format!("Failed to start validator client service: {}", e))
})?;
Some(validator) Some(validator)
} else { } else {
@ -235,9 +239,9 @@ fn run<E: EthSpec>(
// Block this thread until Crtl+C is pressed. // Block this thread until Crtl+C is pressed.
environment.block_until_ctrl_c()?; environment.block_until_ctrl_c()?;
info!(log, "Shutting down.."); info!(log, "Shutting down..");
environment.fire_signal();
drop(beacon_node); drop(beacon_node);
drop(validator_client); drop(validator_client);

View File

@ -5,8 +5,8 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018" edition = "2018"
[dependencies] [dependencies]
tokio = { version = "0.2.21", features = ["time"] }
web3 = "0.11.0" web3 = "0.11.0"
tokio = { version = "0.2.20", features = ["time"] }
futures = { version = "0.3.5", features = ["compat"] } futures = { version = "0.3.5", features = ["compat"] }
types = { path = "../../consensus/types"} types = { path = "../../consensus/types"}
serde_json = "1.0.52" serde_json = "1.0.52"

View File

@ -12,7 +12,7 @@ types = { path = "../../consensus/types" }
validator_client = { path = "../../validator_client" } validator_client = { path = "../../validator_client" }
parking_lot = "0.10.2" parking_lot = "0.10.2"
futures = "0.3.5" futures = "0.3.5"
tokio = "0.2.20" tokio = "0.2.21"
eth1_test_rig = { path = "../eth1_test_rig" } eth1_test_rig = { path = "../eth1_test_rig" }
env_logger = "0.7.1" env_logger = "0.7.1"
clap = "2.33.0" clap = "2.33.0"

View File

@ -9,7 +9,7 @@ name = "validator_client"
path = "src/lib.rs" path = "src/lib.rs"
[dev-dependencies] [dev-dependencies]
tokio = {version = "0.2.20", features = ["time", "rt-threaded", "macros"]} tokio = { version = "0.2.21", features = ["time", "rt-threaded", "macros"] }
[dependencies] [dependencies]
eth2_ssz = "0.1.2" eth2_ssz = "0.1.2"
@ -27,7 +27,7 @@ serde_json = "1.0.52"
slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] }
slog-async = "2.5.0" slog-async = "2.5.0"
slog-term = "2.5.0" slog-term = "2.5.0"
tokio = {version = "0.2.20", features = ["time"]} tokio = { version = "0.2.21", features = ["time"] }
error-chain = "0.12.2" error-chain = "0.12.2"
bincode = "1.2.1" bincode = "1.2.1"
futures = { version = "0.3.5", features = ["compat"] } futures = { version = "0.3.5", features = ["compat"] }

View File

@ -3,8 +3,7 @@ use crate::{
validator_store::ValidatorStore, validator_store::ValidatorStore,
}; };
use environment::RuntimeContext; use environment::RuntimeContext;
use exit_future::Signal; use futures::StreamExt;
use futures::{FutureExt, StreamExt};
use remote_beacon_node::{PublishStatus, RemoteBeaconNode}; use remote_beacon_node::{PublishStatus, RemoteBeaconNode};
use slog::{crit, debug, info, trace}; use slog::{crit, debug, info, trace};
use slot_clock::SlotClock; use slot_clock::SlotClock;
@ -118,8 +117,8 @@ impl<T, E: EthSpec> Deref for AttestationService<T, E> {
impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> { impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
/// Starts the service which periodically produces attestations. /// Starts the service which periodically produces attestations.
pub fn start_update_service(self, spec: &ChainSpec) -> Result<Signal, String> { pub fn start_update_service(self, spec: &ChainSpec) -> Result<(), String> {
let log = self.context.log.clone(); let log = self.context.log().clone();
let slot_duration = Duration::from_millis(spec.milliseconds_per_slot); let slot_duration = Duration::from_millis(spec.milliseconds_per_slot);
let duration_to_next_slot = self let duration_to_next_slot = self
@ -141,13 +140,11 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
) )
}; };
let (exit_signal, exit_fut) = exit_future::signal(); let executor = self.context.executor.clone();
let runtime_handle = self.context.runtime_handle.clone();
let interval_fut = async move { let interval_fut = async move {
while interval.next().await.is_some() { while interval.next().await.is_some() {
let log = &self.context.log; let log = self.context.log();
if let Err(e) = self.spawn_attestation_tasks(slot_duration) { if let Err(e) = self.spawn_attestation_tasks(slot_duration) {
crit!( crit!(
@ -164,13 +161,8 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
} }
}; };
let future = futures::future::select( executor.spawn(interval_fut, "attestation_service");
Box::pin(interval_fut), Ok(())
exit_fut.map(move |_| info!(log, "Shutdown complete")),
);
runtime_handle.spawn(future);
Ok(exit_signal)
} }
/// For each each required attestation, spawn a new task that downloads, signs and uploads the /// For each each required attestation, spawn a new task that downloads, signs and uploads the
@ -214,7 +206,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
.into_iter() .into_iter()
.for_each(|(committee_index, validator_duties)| { .for_each(|(committee_index, validator_duties)| {
// Spawn a separate task for each attestation. // Spawn a separate task for each attestation.
self.inner.context.runtime_handle.spawn( self.inner.context.executor.runtime_handle().spawn(
self.clone().publish_attestations_and_aggregates( self.clone().publish_attestations_and_aggregates(
slot, slot,
committee_index, committee_index,
@ -243,7 +235,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
validator_duties: Vec<DutyAndProof>, validator_duties: Vec<DutyAndProof>,
aggregate_production_instant: Instant, aggregate_production_instant: Instant,
) -> Result<(), ()> { ) -> Result<(), ()> {
let log = &self.context.log; let log = self.context.log();
// There's not need to produce `Attestation` or `SignedAggregateAndProof` if we do not have // There's not need to produce `Attestation` or `SignedAggregateAndProof` if we do not have
// any validators for the given `slot` and `committee_index`. // any validators for the given `slot` and `committee_index`.
@ -314,7 +306,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
committee_index: CommitteeIndex, committee_index: CommitteeIndex,
validator_duties: &[DutyAndProof], validator_duties: &[DutyAndProof],
) -> Result<Option<Attestation<E>>, String> { ) -> Result<Option<Attestation<E>>, String> {
let log = &self.context.log; let log = self.context.log();
if validator_duties.is_empty() { if validator_duties.is_empty() {
return Ok(None); return Ok(None);
@ -448,7 +440,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
attestation: Attestation<E>, attestation: Attestation<E>,
validator_duties: &[DutyAndProof], validator_duties: &[DutyAndProof],
) -> Result<(), String> { ) -> Result<(), String> {
let log = &self.context.log; let log = self.context.log();
let aggregated_attestation = self let aggregated_attestation = self
.beacon_node .beacon_node
@ -548,6 +540,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use futures::future::FutureExt;
use parking_lot::RwLock; use parking_lot::RwLock;
/// This test is to ensure that a `tokio_timer::Delay` with an instant in the past will still /// This test is to ensure that a `tokio_timer::Delay` with an instant in the past will still

View File

@ -1,7 +1,6 @@
use crate::{duties_service::DutiesService, validator_store::ValidatorStore}; use crate::{duties_service::DutiesService, validator_store::ValidatorStore};
use environment::RuntimeContext; use environment::RuntimeContext;
use exit_future::Signal; use futures::{StreamExt, TryFutureExt};
use futures::{FutureExt, StreamExt, TryFutureExt};
use remote_beacon_node::{PublishStatus, RemoteBeaconNode}; use remote_beacon_node::{PublishStatus, RemoteBeaconNode};
use slog::{crit, error, info, trace}; use slog::{crit, error, info, trace};
use slot_clock::SlotClock; use slot_clock::SlotClock;
@ -113,8 +112,8 @@ impl<T, E: EthSpec> Deref for BlockService<T, E> {
impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> { impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
/// Starts the service that periodically attempts to produce blocks. /// Starts the service that periodically attempts to produce blocks.
pub fn start_update_service(self, spec: &ChainSpec) -> Result<Signal, String> { pub fn start_update_service(self, spec: &ChainSpec) -> Result<(), String> {
let log = self.context.log.clone(); let log = self.context.log().clone();
let duration_to_next_slot = self let duration_to_next_slot = self
.slot_clock .slot_clock
@ -136,7 +135,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
) )
}; };
let runtime_handle = self.inner.context.runtime_handle.clone(); let executor = self.inner.context.executor.clone();
let interval_fut = async move { let interval_fut = async move {
while interval.next().await.is_some() { while interval.next().await.is_some() {
@ -144,20 +143,14 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
} }
}; };
let (exit_signal, exit_fut) = exit_future::signal(); executor.spawn(interval_fut, "block_service");
let future = futures::future::select( Ok(())
Box::pin(interval_fut),
exit_fut.map(move |_| info!(log, "Shutdown complete")),
);
runtime_handle.spawn(future);
Ok(exit_signal)
} }
/// Attempt to produce a block for any block producers in the `ValidatorStore`. /// Attempt to produce a block for any block producers in the `ValidatorStore`.
async fn do_update(&self) -> Result<(), ()> { async fn do_update(&self) -> Result<(), ()> {
let log = &self.context.log; let log = self.context.log();
let slot = self.slot_clock.now().ok_or_else(move || { let slot = self.slot_clock.now().ok_or_else(move || {
crit!(log, "Duties manager failed to read slot clock"); crit!(log, "Duties manager failed to read slot clock");
@ -190,7 +183,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
iter.for_each(|validator_pubkey| { iter.for_each(|validator_pubkey| {
let service = self.clone(); let service = self.clone();
let log = log.clone(); let log = log.clone();
self.inner.context.runtime_handle.spawn( self.inner.context.executor.runtime_handle().spawn(
service service
.publish_block(slot, validator_pubkey) .publish_block(slot, validator_pubkey)
.map_err(move |e| { .map_err(move |e| {
@ -208,7 +201,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
/// Produce a block at the given slot for validator_pubkey /// Produce a block at the given slot for validator_pubkey
async fn publish_block(self, slot: Slot, validator_pubkey: PublicKey) -> Result<(), String> { async fn publish_block(self, slot: Slot, validator_pubkey: PublicKey) -> Result<(), String> {
let log = &self.context.log; let log = self.context.log();
let current_slot = self let current_slot = self
.slot_clock .slot_clock

View File

@ -1,11 +1,10 @@
use crate::{is_synced::is_synced, validator_store::ValidatorStore}; use crate::{is_synced::is_synced, validator_store::ValidatorStore};
use environment::RuntimeContext; use environment::RuntimeContext;
use exit_future::Signal; use futures::StreamExt;
use futures::{FutureExt, StreamExt};
use parking_lot::RwLock; use parking_lot::RwLock;
use remote_beacon_node::{PublishStatus, RemoteBeaconNode}; use remote_beacon_node::{PublishStatus, RemoteBeaconNode};
use rest_types::{ValidatorDuty, ValidatorDutyBytes, ValidatorSubscription}; use rest_types::{ValidatorDuty, ValidatorDutyBytes, ValidatorSubscription};
use slog::{debug, error, info, trace, warn}; use slog::{debug, error, trace, warn};
use slot_clock::SlotClock; use slot_clock::SlotClock;
use std::collections::HashMap; use std::collections::HashMap;
use std::convert::TryInto; use std::convert::TryInto;
@ -439,9 +438,7 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> {
} }
/// Start the service that periodically polls the beacon node for validator duties. /// Start the service that periodically polls the beacon node for validator duties.
pub fn start_update_service(self, spec: &ChainSpec) -> Result<Signal, String> { pub fn start_update_service(self, spec: &ChainSpec) -> Result<(), String> {
let log = self.context.log.clone();
let duration_to_next_slot = self let duration_to_next_slot = self
.slot_clock .slot_clock
.duration_to_next_slot() .duration_to_next_slot()
@ -456,15 +453,14 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> {
) )
}; };
let (exit_signal, exit_fut) = exit_future::signal();
// Run an immediate update before starting the updater service. // Run an immediate update before starting the updater service.
self.inner self.inner
.context .context
.runtime_handle .executor
.runtime_handle()
.spawn(self.clone().do_update()); .spawn(self.clone().do_update());
let runtime_handle = self.inner.context.runtime_handle.clone(); let executor = self.inner.context.executor.clone();
let interval_fut = async move { let interval_fut = async move {
while interval.next().await.is_some() { while interval.next().await.is_some() {
@ -472,18 +468,14 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> {
} }
}; };
let future = futures::future::select( executor.spawn(interval_fut, "duties_service");
Box::pin(interval_fut),
exit_fut.map(move |_| info!(log, "Shutdown complete")),
);
runtime_handle.spawn(future);
Ok(exit_signal) Ok(())
} }
/// Attempt to download the duties of all managed validators for this epoch and the next. /// Attempt to download the duties of all managed validators for this epoch and the next.
async fn do_update(self) -> Result<(), ()> { async fn do_update(self) -> Result<(), ()> {
let log = &self.context.log; let log = self.context.log();
if !is_synced(&self.beacon_node, &self.slot_clock, None).await if !is_synced(&self.beacon_node, &self.slot_clock, None).await
&& !self.allow_unsynced_beacon_node && !self.allow_unsynced_beacon_node
@ -550,7 +542,7 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> {
.await .await
.map_err(move |e| format!("Failed to get duties for epoch {}: {:?}", epoch, e))?; .map_err(move |e| format!("Failed to get duties for epoch {}: {:?}", epoch, e))?;
let log = self.context.log.clone(); let log = self.context.log().clone();
let mut new_validator = 0; let mut new_validator = 0;
let mut new_epoch = 0; let mut new_epoch = 0;
@ -652,7 +644,7 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> {
) )
} }
let log = self.context.log.clone(); let log = self.context.log().clone();
let count = validator_subscriptions.len(); let count = validator_subscriptions.len();
if count == 0 { if count == 0 {

View File

@ -1,9 +1,8 @@
use environment::RuntimeContext; use environment::RuntimeContext;
use exit_future::Signal; use futures::StreamExt;
use futures::{FutureExt, StreamExt};
use parking_lot::RwLock; use parking_lot::RwLock;
use remote_beacon_node::RemoteBeaconNode; use remote_beacon_node::RemoteBeaconNode;
use slog::{debug, info, trace}; use slog::{debug, trace};
use slot_clock::SlotClock; use slot_clock::SlotClock;
use std::ops::Deref; use std::ops::Deref;
use std::sync::Arc; use std::sync::Arc;
@ -100,9 +99,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ForkService<T, E> {
} }
/// Starts the service that periodically polls for the `Fork`. /// Starts the service that periodically polls for the `Fork`.
pub fn start_update_service(self, spec: &ChainSpec) -> Result<Signal, String> { pub fn start_update_service(self, spec: &ChainSpec) -> Result<(), String> {
let log = self.context.log.clone();
let duration_to_next_epoch = self let duration_to_next_epoch = self
.slot_clock .slot_clock
.duration_to_next_epoch(E::slots_per_epoch()) .duration_to_next_epoch(E::slots_per_epoch())
@ -117,15 +114,14 @@ impl<T: SlotClock + 'static, E: EthSpec> ForkService<T, E> {
) )
}; };
let (exit_signal, exit_fut) = exit_future::signal();
// Run an immediate update before starting the updater service. // Run an immediate update before starting the updater service.
self.inner self.inner
.context .context
.runtime_handle .executor
.runtime_handle()
.spawn(self.clone().do_update()); .spawn(self.clone().do_update());
let runtime_handle = self.inner.context.runtime_handle.clone(); let executor = self.inner.context.executor.clone();
let interval_fut = async move { let interval_fut = async move {
while interval.next().await.is_some() { while interval.next().await.is_some() {
@ -133,18 +129,14 @@ impl<T: SlotClock + 'static, E: EthSpec> ForkService<T, E> {
} }
}; };
let future = futures::future::select( executor.spawn(interval_fut, "fork_service");
Box::pin(interval_fut),
exit_fut.map(move |_| info!(log, "Shutdown complete")),
);
runtime_handle.spawn(future);
Ok(exit_signal) Ok(())
} }
/// Attempts to download the `Fork` from the server. /// Attempts to download the `Fork` from the server.
async fn do_update(self) -> Result<(), ()> { async fn do_update(self) -> Result<(), ()> {
let log = &self.context.log; let log = self.context.log();
let fork = self let fork = self
.inner .inner

View File

@ -17,7 +17,6 @@ use clap::ArgMatches;
use config::SLASHING_PROTECTION_FILENAME; use config::SLASHING_PROTECTION_FILENAME;
use duties_service::{DutiesService, DutiesServiceBuilder}; use duties_service::{DutiesService, DutiesServiceBuilder};
use environment::RuntimeContext; use environment::RuntimeContext;
use exit_future::Signal;
use fork_service::{ForkService, ForkServiceBuilder}; use fork_service::{ForkService, ForkServiceBuilder};
use notifier::spawn_notifier; use notifier::spawn_notifier;
use remote_beacon_node::RemoteBeaconNode; use remote_beacon_node::RemoteBeaconNode;
@ -41,7 +40,6 @@ pub struct ProductionValidatorClient<T: EthSpec> {
fork_service: ForkService<SystemTimeSlotClock, T>, fork_service: ForkService<SystemTimeSlotClock, T>,
block_service: BlockService<SystemTimeSlotClock, T>, block_service: BlockService<SystemTimeSlotClock, T>,
attestation_service: AttestationService<SystemTimeSlotClock, T>, attestation_service: AttestationService<SystemTimeSlotClock, T>,
exit_signals: Vec<Signal>,
config: Config, config: Config,
} }
@ -60,10 +58,10 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
/// Instantiates the validator client, _without_ starting the timers to trigger block /// Instantiates the validator client, _without_ starting the timers to trigger block
/// and attestation production. /// and attestation production.
pub async fn new(mut context: RuntimeContext<T>, config: Config) -> Result<Self, String> { pub async fn new(mut context: RuntimeContext<T>, config: Config) -> Result<Self, String> {
let log_1 = context.log.clone(); let log_1 = context.log().clone();
let log_2 = context.log.clone(); let log_2 = context.log().clone();
let log_3 = context.log.clone(); let log_3 = context.log().clone();
let log_4 = context.log.clone(); let log_4 = context.log().clone();
info!( info!(
log_1, log_1,
@ -217,46 +215,32 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
fork_service, fork_service,
block_service, block_service,
attestation_service, attestation_service,
exit_signals: vec![],
config, config,
}) })
} }
pub fn start_service(&mut self) -> Result<(), String> { pub fn start_service(&mut self) -> Result<(), String> {
let duties_exit = self self.duties_service
.duties_service
.clone() .clone()
.start_update_service(&self.context.eth2_config.spec) .start_update_service(&self.context.eth2_config.spec)
.map_err(|e| format!("Unable to start duties service: {}", e))?; .map_err(|e| format!("Unable to start duties service: {}", e))?;
let fork_exit = self self.fork_service
.fork_service
.clone() .clone()
.start_update_service(&self.context.eth2_config.spec) .start_update_service(&self.context.eth2_config.spec)
.map_err(|e| format!("Unable to start fork service: {}", e))?; .map_err(|e| format!("Unable to start fork service: {}", e))?;
let block_exit = self self.block_service
.block_service
.clone() .clone()
.start_update_service(&self.context.eth2_config.spec) .start_update_service(&self.context.eth2_config.spec)
.map_err(|e| format!("Unable to start block service: {}", e))?; .map_err(|e| format!("Unable to start block service: {}", e))?;
let attestation_exit = self self.attestation_service
.attestation_service
.clone() .clone()
.start_update_service(&self.context.eth2_config.spec) .start_update_service(&self.context.eth2_config.spec)
.map_err(|e| format!("Unable to start attestation service: {}", e))?; .map_err(|e| format!("Unable to start attestation service: {}", e))?;
let notifier_exit = spawn_notifier(self).map_err(|e| format!("Failed to start notifier: {}", e))?;
spawn_notifier(self).map_err(|e| format!("Failed to start notifier: {}", e))?;
self.exit_signals = vec![
duties_exit,
fork_exit,
block_exit,
attestation_exit,
notifier_exit,
];
Ok(()) Ok(())
} }

View File

@ -1,16 +1,14 @@
use crate::{is_synced::is_synced, ProductionValidatorClient}; use crate::{is_synced::is_synced, ProductionValidatorClient};
use exit_future::Signal; use futures::StreamExt;
use futures::{FutureExt, StreamExt};
use slog::{error, info}; use slog::{error, info};
use slot_clock::SlotClock; use slot_clock::SlotClock;
use tokio::time::{interval_at, Duration, Instant}; use tokio::time::{interval_at, Duration, Instant};
use types::EthSpec; use types::EthSpec;
/// Spawns a notifier service which periodically logs information about the node. /// Spawns a notifier service which periodically logs information about the node.
pub fn spawn_notifier<T: EthSpec>(client: &ProductionValidatorClient<T>) -> Result<Signal, String> { pub fn spawn_notifier<T: EthSpec>(client: &ProductionValidatorClient<T>) -> Result<(), String> {
let context = client.context.service_context("notifier".into()); let context = client.context.service_context("notifier".into());
let runtime_handle = context.runtime_handle.clone(); let executor = context.executor.clone();
let log = context.log.clone();
let duties_service = client.duties_service.clone(); let duties_service = client.duties_service.clone();
let allow_unsynced_beacon_node = client.config.allow_unsynced_beacon_node; let allow_unsynced_beacon_node = client.config.allow_unsynced_beacon_node;
@ -25,7 +23,7 @@ pub fn spawn_notifier<T: EthSpec>(client: &ProductionValidatorClient<T>) -> Resu
let mut interval = interval_at(start_instant, slot_duration); let mut interval = interval_at(start_instant, slot_duration);
let interval_fut = async move { let interval_fut = async move {
let log = &context.log; let log = context.log();
while interval.next().await.is_some() { while interval.next().await.is_some() {
if !is_synced( if !is_synced(
@ -83,12 +81,6 @@ pub fn spawn_notifier<T: EthSpec>(client: &ProductionValidatorClient<T>) -> Resu
} }
}; };
let (exit_signal, exit) = exit_future::signal(); executor.spawn(interval_fut, "validator_notifier");
let future = futures::future::select( Ok(())
Box::pin(interval_fut),
exit.map(move |_| info!(log, "Shutdown complete")),
);
runtime_handle.spawn(future);
Ok(exit_signal)
} }