Update libp2p (#2101)

This is a bit of a tip-of-the-iceberg PR: it houses a lot of code changes in the libp2p dependency and needs thorough testing before merging. The primary code changes are:

- General libp2p dependency update
- Gossipsub refactor to shift compression into gossipsub, providing performance improvements and an improved API for handling compression (see the sketch below)

Co-authored-by: Paul Hauner <paul@paulhauner.com>
parent b5e81eb6b2
commit 2931b05582
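To illustrate the compression refactor, the sketch below shows roughly how a Snappy transform can plug into the new gossipsub API: gossipsub invokes the transform on every inbound and outbound message, so the application layer only ever sees decompressed payloads. This is a minimal, self-contained sketch, not the real implementation: the `DataTransform` trait shape, its method signatures, and the `String` error type are assumptions made for illustration (the actual trait and the `SnappyTransform` used in the diff live in the sigp/rust-libp2p fork and the eth2_libp2p crate); only the `snap` crate calls are used as-is.

```rust
// Illustrative sketch only; trait shape and signatures are assumed for the example.
use snap::raw::{decompress_len, Decoder, Encoder};

/// Hypothetical transform hooks: gossipsub would call these on every message so that
/// callers only ever deal with uncompressed application data.
pub trait DataTransform {
    /// Decompress raw wire bytes (inbound path).
    fn inbound_transform(&self, raw: &[u8]) -> Result<Vec<u8>, String>;
    /// Compress application data before it hits the wire (outbound path).
    fn outbound_transform(&self, data: &[u8]) -> Result<Vec<u8>, String>;
}

/// Snappy-based transform bounded by the gossipsub `max_transmit_size`.
pub struct SnappyTransform {
    max_size_per_message: usize,
}

impl SnappyTransform {
    pub fn new(max_size_per_message: usize) -> Self {
        Self { max_size_per_message }
    }
}

impl DataTransform for SnappyTransform {
    fn inbound_transform(&self, raw: &[u8]) -> Result<Vec<u8>, String> {
        // Check the decompressed size before doing any work, so a peer cannot make
        // us allocate more than the configured maximum.
        let len = decompress_len(raw).map_err(|e| e.to_string())?;
        if len > self.max_size_per_message {
            return Err(format!("decompressed message too large: {} bytes", len));
        }
        Decoder::new().decompress_vec(raw).map_err(|e| e.to_string())
    }

    fn outbound_transform(&self, data: &[u8]) -> Result<Vec<u8>, String> {
        // Snappy-compress the payload; gossipsub publishes the compressed bytes.
        Encoder::new().compress_vec(data).map_err(|e| e.to_string())
    }
}
```

With a transform of this kind passed to the gossipsub constructor (as in the behaviour diff below, via `new_with_subscription_filter_and_transform`), compression moves out of the Lighthouse publish and decode paths and into gossipsub itself, which also lets the message-id functions operate directly on decompressed data.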
Cargo.lock (generated, 268 changed lines): summary of the dependency changes

- The sigp/rust-libp2p fork is re-pinned from rev 830e6fabb7ee51281a98f5e092f056668adbef25 to rev 97000533e4710183124abde017c6c3d68287c1ae, bumping the fork crates:
  - libp2p 0.30.1 → 0.33.0
  - libp2p-core 0.25.0 → 0.26.0 (no longer depends directly on bytes 0.5.6)
  - libp2p-core-derive 0.20.2 → 0.21.0
  - libp2p-dns 0.25.0 → 0.26.0
  - libp2p-gossipsub 0.25.0 → 0.26.0 (gains a regex dependency)
  - libp2p-identify 0.25.0 → 0.26.0
  - libp2p-mplex 0.25.0 → 0.26.0
  - libp2p-noise 0.27.0 → 0.28.0
  - libp2p-swarm 0.25.0 → 0.26.0
  - libp2p-tcp 0.25.0 → 0.26.0
  - libp2p-websocket 0.26.0 → 0.27.0
  - libp2p-yamux 0.28.0 → 0.29.0
  - multistream-select 0.8.5 → 0.9.1
  - parity-multiaddr 0.9.6 → 0.10.0
- tokio 0.3.5 → 0.3.6 everywhere it is referenced; the new tokio drops lazy_static in favour of once_cell
- async-tls 0.10.0 → 0.11.0
- http 0.2.1 → 0.2.2 for the crates.io release; the agemanning/http lighthouse-branch fork stays at 0.2.1, and the source disambiguators on http, http-body, headers and headers-core references are dropped accordingly
- indexmap 1.6.0 → 1.6.1
- lru 0.6.1 → 0.6.2
- reqwest 0.10.9 → 0.10.10 (drops the wasm-bindgen-test dependency)
- rustls 0.18.1 → 0.19.0 (base64 0.12.3 → 0.13.0)
- socket2 0.3.17 → 0.3.18 (drops the redox_syscall dependency)
- subtle 2.3.0 → 2.4.0 everywhere it is referenced
- webpki-roots 0.20.0 → 0.21.0
@@ -42,7 +42,7 @@ regex = "1.3.9"
 [dependencies.libp2p]
 #version = "0.23.0"
 git = "https://github.com/sigp/rust-libp2p"
-rev = "830e6fabb7ee51281a98f5e092f056668adbef25"
+rev = "97000533e4710183124abde017c6c3d68287c1ae"
 default-features = false
 features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns", "tcp-tokio"]
 
@@ -1,8 +1,7 @@
 use crate::types::{GossipEncoding, GossipKind, GossipTopic};
 use crate::{error, TopicHash};
 use libp2p::gossipsub::{
-    GenericGossipsubConfig, IdentTopic as Topic, PeerScoreParams, PeerScoreThresholds,
-    TopicScoreParams,
+    GossipsubConfig, IdentTopic as Topic, PeerScoreParams, PeerScoreThresholds, TopicScoreParams,
 };
 use std::cmp::max;
 use std::collections::HashMap;
@@ -37,10 +36,7 @@ pub struct PeerScoreSettings<TSpec: EthSpec> {
 }
 
 impl<TSpec: EthSpec> PeerScoreSettings<TSpec> {
-    pub fn new<T>(
-        chain_spec: &ChainSpec,
-        gs_config: &GenericGossipsubConfig<T>,
-    ) -> PeerScoreSettings<TSpec> {
+    pub fn new(chain_spec: &ChainSpec, gs_config: &GossipsubConfig) -> PeerScoreSettings<TSpec> {
         let slot = Duration::from_millis(chain_spec.milliseconds_per_slot);
         let beacon_attestation_subnet_weight = 1.0 / chain_spec.attestation_subnet_count as f64;
         let max_positive_score = (MAX_IN_MESH_SCORE + MAX_FIRST_MESSAGE_DELIVERIES_SCORE)
@@ -6,17 +6,13 @@ use crate::peer_manager::{
 use crate::rpc::*;
 use crate::service::METADATA_FILENAME;
 use crate::types::{
-    subnet_id_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, MessageData,
+    subnet_id_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform,
     SubnetDiscovery,
 };
 use crate::Eth2Enr;
 use crate::{error, metrics, Enr, NetworkConfig, NetworkGlobals, PubsubMessage, TopicHash};
 use futures::prelude::*;
 use handler::{BehaviourHandler, BehaviourHandlerIn, DelegateIn, DelegateOut};
-use libp2p::gossipsub::subscription_filter::{
-    MaxCountSubscriptionFilter, WhitelistSubscriptionFilter,
-};
-use libp2p::gossipsub::PeerScoreThresholds;
 use libp2p::{
     core::{
         connection::{ConnectedPoint, ConnectionId, ListenerId},
@@ -24,13 +20,14 @@ use libp2p::{
         Multiaddr,
     },
     gossipsub::{
-        GenericGossipsub, GenericGossipsubEvent, IdentTopic as Topic, MessageAcceptance,
-        MessageAuthenticity, MessageId,
+        subscription_filter::{MaxCountSubscriptionFilter, WhitelistSubscriptionFilter},
+        Gossipsub as BaseGossipsub, GossipsubEvent, IdentTopic as Topic, MessageAcceptance,
+        MessageAuthenticity, MessageId, PeerScoreThresholds,
     },
     identify::{Identify, IdentifyEvent},
     swarm::{
-        NetworkBehaviour, NetworkBehaviourAction as NBAction, NotifyHandler, PollParameters,
-        ProtocolsHandler,
+        AddressScore, NetworkBehaviour, NetworkBehaviourAction as NBAction, NotifyHandler,
+        PollParameters, ProtocolsHandler,
     },
     PeerId,
 };
@@ -58,8 +55,7 @@ pub const GOSSIPSUB_GREYLIST_THRESHOLD: f64 = -16000.0;
 pub type PeerRequestId = (ConnectionId, SubstreamId);
 
 pub type SubscriptionFilter = MaxCountSubscriptionFilter<WhitelistSubscriptionFilter>;
-pub type Gossipsub = GenericGossipsub<MessageData, SubscriptionFilter>;
-pub type GossipsubEvent = GenericGossipsubEvent<MessageData>;
+pub type Gossipsub = BaseGossipsub<SnappyTransform, SubscriptionFilter>;
 
 /// The types of events than can be obtained from polling the behaviour.
 #[derive(Debug)]
@@ -181,10 +177,14 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
             max_subscriptions_per_request: 100, //this is according to the current go implementation
         };
 
-        let mut gossipsub = Gossipsub::new_with_subscription_filter(
+        // Initialize the compression transform.
+        let snappy_transform = SnappyTransform::new(net_conf.gs_config.max_transmit_size());
+
+        let mut gossipsub = Gossipsub::new_with_subscription_filter_and_transform(
             MessageAuthenticity::Anonymous,
             net_conf.gs_config.clone(),
             filter,
+            snappy_transform,
         )
         .map_err(|e| format!("Could not construct gossipsub: {:?}", e))?;
 
@@ -390,34 +390,30 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
     pub fn publish(&mut self, messages: Vec<PubsubMessage<TSpec>>) {
         for message in messages {
             for topic in message.topics(GossipEncoding::default(), self.enr_fork_id.fork_digest) {
-                match message.encode(GossipEncoding::default()) {
-                    Ok(message_data) => {
-                        if let Err(e) = self.gossipsub.publish(topic.clone().into(), message_data) {
-                            slog::warn!(self.log, "Could not publish message";
-                                "error" => ?e);
-
-                            // add to metrics
-                            match topic.kind() {
-                                GossipKind::Attestation(subnet_id) => {
-                                    if let Some(v) = metrics::get_int_gauge(
-                                        &metrics::FAILED_ATTESTATION_PUBLISHES_PER_SUBNET,
-                                        &[&subnet_id.to_string()],
-                                    ) {
-                                        v.inc()
-                                    };
-                                }
-                                kind => {
-                                    if let Some(v) = metrics::get_int_gauge(
-                                        &metrics::FAILED_PUBLISHES_PER_MAIN_TOPIC,
-                                        &[&format!("{:?}", kind)],
-                                    ) {
-                                        v.inc()
-                                    };
-                                }
-                            }
-                        }
-                    }
-                    Err(e) => crit!(self.log, "Could not publish message"; "error" => e),
-                }
+                let message_data = message.encode(GossipEncoding::default());
+                if let Err(e) = self.gossipsub.publish(topic.clone().into(), message_data) {
+                    slog::warn!(self.log, "Could not publish message";
+                        "error" => ?e);
+
+                    // add to metrics
+                    match topic.kind() {
+                        GossipKind::Attestation(subnet_id) => {
+                            if let Some(v) = metrics::get_int_gauge(
+                                &metrics::FAILED_ATTESTATION_PUBLISHES_PER_SUBNET,
+                                &[&subnet_id.to_string()],
+                            ) {
+                                v.inc()
+                            };
+                        }
+                        kind => {
+                            if let Some(v) = metrics::get_int_gauge(
+                                &metrics::FAILED_PUBLISHES_PER_MAIN_TOPIC,
+                                &[&format!("{:?}", kind)],
+                            ) {
+                                v.inc()
+                            };
+                        }
+                    }
+                }
             }
         }
@@ -642,7 +638,7 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
             } => {
                 // Note: We are keeping track here of the peer that sent us the message, not the
                 // peer that originally published the message.
-                match PubsubMessage::decode(&gs_msg.topic, gs_msg.data()) {
+                match PubsubMessage::decode(&gs_msg.topic, &gs_msg.data) {
                     Err(e) => {
                         debug!(self.log, "Could not decode gossipsub message"; "error" => e);
                         //reject the message
@@ -854,7 +850,10 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
                     });
                 }
                 PeerManagerEvent::SocketUpdated(address) => {
-                    return Poll::Ready(NBAction::ReportObservedAddr { address });
+                    return Poll::Ready(NBAction::ReportObservedAddr {
+                        address,
+                        score: AddressScore::Finite(1),
+                    });
                 }
                 PeerManagerEvent::Status(peer_id) => {
                     // it's time to status. We don't keep a beacon chain reference here, so we inform
@@ -1028,8 +1027,7 @@ impl<TSpec: EthSpec> NetworkBehaviour for Behaviour<TSpec> {
                     trace!(self.log, "Disconnecting newly connected peer"; "peer_id" => %peer_id, "reason" => %goodbye_reason)
                 }
             }
-            self.peers_to_dc
-                .push_back((peer_id.clone(), Some(goodbye_reason)));
+            self.peers_to_dc.push_back((*peer_id, Some(goodbye_reason)));
             // NOTE: We don't inform the peer manager that this peer is disconnecting. It is simply
             // rejected with a goodbye.
             return;
@@ -1041,13 +1039,13 @@ impl<TSpec: EthSpec> NetworkBehaviour for Behaviour<TSpec> {
             ConnectedPoint::Listener { send_back_addr, .. } => {
                 self.peer_manager
                     .connect_ingoing(&peer_id, send_back_addr.clone());
-                self.add_event(BehaviourEvent::PeerConnected(peer_id.clone()));
+                self.add_event(BehaviourEvent::PeerConnected(*peer_id));
                 debug!(self.log, "Connection established"; "peer_id" => %peer_id, "connection" => "Incoming");
             }
             ConnectedPoint::Dialer { address } => {
                 self.peer_manager
                     .connect_outgoing(&peer_id, address.clone());
-                self.add_event(BehaviourEvent::PeerDialed(peer_id.clone()));
+                self.add_event(BehaviourEvent::PeerDialed(*peer_id));
                 debug!(self.log, "Connection established"; "peer_id" => %peer_id, "connection" => "Dialed");
             }
         }
@@ -1122,7 +1120,7 @@ impl<TSpec: EthSpec> NetworkBehaviour for Behaviour<TSpec> {
         // Both these cases, the peer has been previously registered in the sub protocols and
         // potentially the application layer.
        // Inform the application.
-        self.add_event(BehaviourEvent::PeerDisconnected(peer_id.clone()));
+        self.add_event(BehaviourEvent::PeerDisconnected(*peer_id));
         // Inform the behaviour.
         delegate_to_behaviours!(self, inject_disconnected, peer_id);
 
@@ -1260,8 +1258,8 @@ impl<TSpec: EthSpec> NetworkBehaviour for Behaviour<TSpec> {
                     ),
                 });
             }
-            NBAction::ReportObservedAddr { address } => {
-                return Poll::Ready(NBAction::ReportObservedAddr { address })
+            NBAction::ReportObservedAddr { address, score } => {
+                return Poll::Ready(NBAction::ReportObservedAddr { address, score })
             }
         },
         Poll::Pending => break,
@@ -1,12 +1,12 @@
-use crate::types::{GossipKind, MessageData};
+use crate::types::GossipKind;
 use crate::{Enr, PeerIdSerialized};
 use directory::{
     DEFAULT_BEACON_NODE_DIR, DEFAULT_HARDCODED_NETWORK, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR,
 };
 use discv5::{Discv5Config, Discv5ConfigBuilder};
 use libp2p::gossipsub::{
-    FastMessageId, GenericGossipsubConfig, GenericGossipsubConfigBuilder, GenericGossipsubMessage,
-    MessageId, RawGossipsubMessage, ValidationMode,
+    FastMessageId, GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage, MessageId,
+    RawGossipsubMessage, ValidationMode,
 };
 use libp2p::Multiaddr;
 use serde_derive::{Deserialize, Serialize};
@@ -15,14 +15,13 @@ use std::path::PathBuf;
 use std::time::Duration;
 
 pub const GOSSIP_MAX_SIZE: usize = 1_048_576;
-const MESSAGE_DOMAIN_INVALID_SNAPPY: [u8; 4] = [0, 0, 0, 0];
+// We treat uncompressed messages as invalid and never use the INVALID_SNAPPY_DOMAIN as in the
+// specification. We leave it here for posterity.
+// const MESSAGE_DOMAIN_INVALID_SNAPPY: [u8; 4] = [0, 0, 0, 0];
 const MESSAGE_DOMAIN_VALID_SNAPPY: [u8; 4] = [1, 0, 0, 0];
 pub const MESH_N_LOW: usize = 6;
 
-pub type GossipsubConfig = GenericGossipsubConfig<MessageData>;
-pub type GossipsubConfigBuilder = GenericGossipsubConfigBuilder<MessageData>;
-pub type GossipsubMessage = GenericGossipsubMessage<MessageData>;
-
 #[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(default)]
 /// Network configuration for lighthouse.
@@ -110,35 +109,28 @@ impl Default for Config {
 
         // The function used to generate a gossipsub message id
        // We use the first 8 bytes of SHA256(data) for content addressing
-        let fast_gossip_message_id =
-            |message: &RawGossipsubMessage| FastMessageId::from(&Sha256::digest(&message.data)[..]);
+        let fast_gossip_message_id = |message: &RawGossipsubMessage| {
+            FastMessageId::from(&Sha256::digest(&message.data)[..8])
+        };
 
         fn prefix(prefix: [u8; 4], data: &[u8]) -> Vec<u8> {
-            prefix
-                .to_vec()
-                .into_iter()
-                .chain(data.iter().cloned())
-                .collect()
+            let mut vec = Vec::with_capacity(prefix.len() + data.len());
+            vec.extend_from_slice(&prefix);
+            vec.extend_from_slice(data);
+            vec
         }
 
         let gossip_message_id = |message: &GossipsubMessage| {
             MessageId::from(
-                &Sha256::digest(
-                    {
-                        match &message.data.decompressed {
-                            Ok(decompressed) => prefix(MESSAGE_DOMAIN_VALID_SNAPPY, decompressed),
-                            _ => prefix(MESSAGE_DOMAIN_INVALID_SNAPPY, &message.data.raw),
-                        }
-                    }
-                    .as_slice(),
-                )[..20],
+                &Sha256::digest(prefix(MESSAGE_DOMAIN_VALID_SNAPPY, &message.data).as_slice())
+                    [..20],
             )
         };
 
         // gossipsub configuration
         // Note: The topics by default are sent as plain strings. Hashes are an optional
        // parameter.
-        let gs_config = GossipsubConfigBuilder::new()
+        let gs_config = GossipsubConfigBuilder::default()
             .max_transmit_size(GOSSIP_MAX_SIZE)
             .heartbeat_interval(Duration::from_millis(700))
             .mesh_n(8)
@@ -147,6 +139,7 @@ impl Default for Config {
             .gossip_lazy(6)
             .fanout_ttl(Duration::from_secs(60))
             .history_length(6)
+            .max_messages_per_rpc(Some(10))
             .history_gossip(3)
|
.history_gossip(3)
|
||||||
.validate_messages() // require validation before propagation
|
.validate_messages() // require validation before propagation
|
||||||
.validation_mode(ValidationMode::Anonymous)
|
.validation_mode(ValidationMode::Anonymous)
|
||||||
|
@ -241,7 +241,7 @@ impl CombinedKeyExt for CombinedKey {
|
|||||||
pub fn peer_id_to_node_id(peer_id: &PeerId) -> Result<discv5::enr::NodeId, String> {
|
pub fn peer_id_to_node_id(peer_id: &PeerId) -> Result<discv5::enr::NodeId, String> {
|
||||||
// A libp2p peer id byte representation should be 2 length bytes + 4 protobuf bytes + compressed pk bytes
|
// A libp2p peer id byte representation should be 2 length bytes + 4 protobuf bytes + compressed pk bytes
|
||||||
// if generated from a PublicKey with Identity multihash.
|
// if generated from a PublicKey with Identity multihash.
|
||||||
let pk_bytes = &peer_id.as_bytes()[2..];
|
let pk_bytes = &peer_id.to_bytes()[2..];
|
||||||
|
|
||||||
match PublicKey::from_protobuf_encoding(pk_bytes).map_err(|e| {
|
match PublicKey::from_protobuf_encoding(pk_bytes).map_err(|e| {
|
||||||
format!(
|
format!(
|
||||||
|
@ -63,7 +63,6 @@ impl<'de> Deserialize<'de> for PeerIdSerialized {
|
|||||||
pub use crate::types::{error, Enr, GossipTopic, NetworkGlobals, PubsubMessage, SubnetDiscovery};
|
pub use crate::types::{error, Enr, GossipTopic, NetworkGlobals, PubsubMessage, SubnetDiscovery};
|
||||||
pub use behaviour::{BehaviourEvent, Gossipsub, PeerRequestId, Request, Response};
|
pub use behaviour::{BehaviourEvent, Gossipsub, PeerRequestId, Request, Response};
|
||||||
pub use config::Config as NetworkConfig;
|
pub use config::Config as NetworkConfig;
|
||||||
pub use config::{GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage};
|
|
||||||
pub use discovery::{CombinedKeyExt, EnrExt, Eth2Enr};
|
pub use discovery::{CombinedKeyExt, EnrExt, Eth2Enr};
|
||||||
pub use discv5;
|
pub use discv5;
|
||||||
pub use libp2p::bandwidth::BandwidthSinks;
|
pub use libp2p::bandwidth::BandwidthSinks;
|
||||||
|
@ -136,7 +136,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
|||||||
///
|
///
|
||||||
/// Returns true if the peer was accepted into the database.
|
/// Returns true if the peer was accepted into the database.
|
||||||
pub fn dial_peer(&mut self, peer_id: &PeerId) -> bool {
|
pub fn dial_peer(&mut self, peer_id: &PeerId) -> bool {
|
||||||
self.events.push(PeerManagerEvent::Dial(peer_id.clone()));
|
self.events.push(PeerManagerEvent::Dial(*peer_id));
|
||||||
self.connect_peer(peer_id, ConnectingType::Dialing)
|
self.connect_peer(peer_id, ConnectingType::Dialing)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -174,7 +174,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
|||||||
{
|
{
|
||||||
// update the state of the peer.
|
// update the state of the peer.
|
||||||
self.events
|
self.events
|
||||||
.push(PeerManagerEvent::DisconnectPeer(peer_id.clone(), reason));
|
.push(PeerManagerEvent::DisconnectPeer(*peer_id, reason));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -282,7 +282,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
|||||||
|
|
||||||
/// A STATUS message has been received from a peer. This resets the status timer.
|
/// A STATUS message has been received from a peer. This resets the status timer.
|
||||||
pub fn peer_statusd(&mut self, peer_id: &PeerId) {
|
pub fn peer_statusd(&mut self, peer_id: &PeerId) {
|
||||||
self.status_peers.insert(peer_id.clone());
|
self.status_peers.insert(*peer_id);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Adds a gossipsub subscription to a peer in the peerdb.
|
/// Adds a gossipsub subscription to a peer in the peerdb.
|
||||||
@ -495,10 +495,10 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
|||||||
debug!(self.log, "Received a ping request"; "peer_id" => %peer_id, "seq_no" => seq);
|
debug!(self.log, "Received a ping request"; "peer_id" => %peer_id, "seq_no" => seq);
|
||||||
match peer_info.connection_direction {
|
match peer_info.connection_direction {
|
||||||
Some(ConnectionDirection::Incoming) => {
|
Some(ConnectionDirection::Incoming) => {
|
||||||
self.inbound_ping_peers.insert(peer_id.clone());
|
self.inbound_ping_peers.insert(*peer_id);
|
||||||
}
|
}
|
||||||
Some(ConnectionDirection::Outgoing) => {
|
Some(ConnectionDirection::Outgoing) => {
|
||||||
self.outbound_ping_peers.insert(peer_id.clone());
|
self.outbound_ping_peers.insert(*peer_id);
|
||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
warn!(self.log, "Received a ping from a peer with an unknown connection direction"; "peer_id" => %peer_id);
|
warn!(self.log, "Received a ping from a peer with an unknown connection direction"; "peer_id" => %peer_id);
|
||||||
@ -510,15 +510,13 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
|||||||
if meta_data.seq_number < seq {
|
if meta_data.seq_number < seq {
|
||||||
debug!(self.log, "Requesting new metadata from peer";
|
debug!(self.log, "Requesting new metadata from peer";
|
||||||
"peer_id" => %peer_id, "known_seq_no" => meta_data.seq_number, "ping_seq_no" => seq);
|
"peer_id" => %peer_id, "known_seq_no" => meta_data.seq_number, "ping_seq_no" => seq);
|
||||||
self.events
|
self.events.push(PeerManagerEvent::MetaData(*peer_id));
|
||||||
.push(PeerManagerEvent::MetaData(peer_id.clone()));
|
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// if we don't know the meta-data, request it
|
// if we don't know the meta-data, request it
|
||||||
debug!(self.log, "Requesting first metadata from peer";
|
debug!(self.log, "Requesting first metadata from peer";
|
||||||
"peer_id" => %peer_id);
|
"peer_id" => %peer_id);
|
||||||
self.events
|
self.events.push(PeerManagerEvent::MetaData(*peer_id));
|
||||||
.push(PeerManagerEvent::MetaData(peer_id.clone()));
|
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
crit!(self.log, "Received a PING from an unknown peer";
|
crit!(self.log, "Received a PING from an unknown peer";
|
||||||
@ -536,15 +534,13 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
|||||||
if meta_data.seq_number < seq {
|
if meta_data.seq_number < seq {
|
||||||
debug!(self.log, "Requesting new metadata from peer";
|
debug!(self.log, "Requesting new metadata from peer";
|
||||||
"peer_id" => %peer_id, "known_seq_no" => meta_data.seq_number, "pong_seq_no" => seq);
|
"peer_id" => %peer_id, "known_seq_no" => meta_data.seq_number, "pong_seq_no" => seq);
|
||||||
self.events
|
self.events.push(PeerManagerEvent::MetaData(*peer_id));
|
||||||
.push(PeerManagerEvent::MetaData(peer_id.clone()));
|
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// if we don't know the meta-data, request it
|
// if we don't know the meta-data, request it
|
||||||
debug!(self.log, "Requesting first metadata from peer";
|
debug!(self.log, "Requesting first metadata from peer";
|
||||||
"peer_id" => %peer_id);
|
"peer_id" => %peer_id);
|
||||||
self.events
|
self.events.push(PeerManagerEvent::MetaData(*peer_id));
|
||||||
.push(PeerManagerEvent::MetaData(peer_id.clone()));
|
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
crit!(self.log, "Received a PONG from an unknown peer"; "peer_id" => %peer_id);
|
crit!(self.log, "Received a PONG from an unknown peer"; "peer_id" => %peer_id);
|
||||||
@ -677,7 +673,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
|||||||
&& !peers.is_connected_or_dialing(peer_id)
|
&& !peers.is_connected_or_dialing(peer_id)
|
||||||
&& !peers.is_banned(peer_id)
|
&& !peers.is_banned(peer_id)
|
||||||
{
|
{
|
||||||
Some(peer_id.clone())
|
Some(*peer_id)
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
@ -755,18 +751,18 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
|||||||
ConnectingType::IngoingConnected { multiaddr } => {
|
ConnectingType::IngoingConnected { multiaddr } => {
|
||||||
peerdb.connect_ingoing(peer_id, multiaddr, enr);
|
peerdb.connect_ingoing(peer_id, multiaddr, enr);
|
||||||
// start a timer to ping inbound peers.
|
// start a timer to ping inbound peers.
|
||||||
self.inbound_ping_peers.insert(peer_id.clone());
|
self.inbound_ping_peers.insert(*peer_id);
|
||||||
}
|
}
|
||||||
ConnectingType::OutgoingConnected { multiaddr } => {
|
ConnectingType::OutgoingConnected { multiaddr } => {
|
||||||
peerdb.connect_outgoing(peer_id, multiaddr, enr);
|
peerdb.connect_outgoing(peer_id, multiaddr, enr);
|
||||||
// start a timer for to ping outbound peers.
|
// start a timer for to ping outbound peers.
|
||||||
self.outbound_ping_peers.insert(peer_id.clone());
|
self.outbound_ping_peers.insert(*peer_id);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// start a ping and status timer for the peer
|
// start a ping and status timer for the peer
|
||||||
self.status_peers.insert(peer_id.clone());
|
self.status_peers.insert(*peer_id);
|
||||||
|
|
||||||
// increment prometheus metrics
|
// increment prometheus metrics
|
||||||
metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT);
|
metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT);
|
||||||
@ -806,7 +802,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
|||||||
match info.score_state() {
|
match info.score_state() {
|
||||||
ScoreState::Banned => {
|
ScoreState::Banned => {
|
||||||
debug!(log, "Peer has been banned"; "peer_id" => %peer_id, "score" => %info.score());
|
debug!(log, "Peer has been banned"; "peer_id" => %peer_id, "score" => %info.score());
|
||||||
to_ban_peers.push(peer_id.clone());
|
to_ban_peers.push(*peer_id);
|
||||||
}
|
}
|
||||||
ScoreState::Disconnected => {
|
ScoreState::Disconnected => {
|
||||||
debug!(log, "Peer transitioned to disconnect state"; "peer_id" => %peer_id, "score" => %info.score(), "past_state" => %previous_state);
|
debug!(log, "Peer transitioned to disconnect state"; "peer_id" => %peer_id, "score" => %info.score(), "past_state" => %previous_state);
|
||||||
@ -815,18 +811,18 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
|||||||
// Change the state to inform that we are disconnecting the peer.
|
// Change the state to inform that we are disconnecting the peer.
|
||||||
info.disconnecting(false);
|
info.disconnecting(false);
|
||||||
events.push(PeerManagerEvent::DisconnectPeer(
|
events.push(PeerManagerEvent::DisconnectPeer(
|
||||||
peer_id.clone(),
|
*peer_id,
|
||||||
GoodbyeReason::BadScore,
|
GoodbyeReason::BadScore,
|
||||||
));
|
));
|
||||||
} else if info.is_banned() {
|
} else if info.is_banned() {
|
||||||
to_unban_peers.push(peer_id.clone());
|
to_unban_peers.push(*peer_id);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
ScoreState::Healthy => {
|
ScoreState::Healthy => {
|
||||||
debug!(log, "Peer transitioned to healthy state"; "peer_id" => %peer_id, "score" => %info.score(), "past_state" => %previous_state);
|
debug!(log, "Peer transitioned to healthy state"; "peer_id" => %peer_id, "score" => %info.score(), "past_state" => %previous_state);
|
||||||
// unban the peer if it was previously banned.
|
// unban the peer if it was previously banned.
|
||||||
if info.is_banned() {
|
if info.is_banned() {
|
||||||
to_unban_peers.push(peer_id.clone());
|
to_unban_peers.push(*peer_id);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -885,7 +881,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
|||||||
if peer_db.disconnect_and_ban(peer_id) {
|
if peer_db.disconnect_and_ban(peer_id) {
|
||||||
// The peer was currently connected, so we start a disconnection.
|
// The peer was currently connected, so we start a disconnection.
|
||||||
self.events.push(PeerManagerEvent::DisconnectPeer(
|
self.events.push(PeerManagerEvent::DisconnectPeer(
|
||||||
peer_id.clone(),
|
*peer_id,
|
||||||
GoodbyeReason::BadScore,
|
GoodbyeReason::BadScore,
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
@ -960,7 +956,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
|||||||
//disconnected in update_peer_scores
|
//disconnected in update_peer_scores
|
||||||
.filter(|(_, info)| info.score_state() == ScoreState::Healthy)
|
.filter(|(_, info)| info.score_state() == ScoreState::Healthy)
|
||||||
{
|
{
|
||||||
disconnecting_peers.push((*peer_id).clone());
|
disconnecting_peers.push(**peer_id);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -996,7 +992,7 @@ impl<TSpec: EthSpec> Stream for PeerManager<TSpec> {
|
|||||||
loop {
|
loop {
|
||||||
match self.inbound_ping_peers.poll_next_unpin(cx) {
|
match self.inbound_ping_peers.poll_next_unpin(cx) {
|
||||||
Poll::Ready(Some(Ok(peer_id))) => {
|
Poll::Ready(Some(Ok(peer_id))) => {
|
||||||
self.inbound_ping_peers.insert(peer_id.clone());
|
self.inbound_ping_peers.insert(peer_id);
|
||||||
self.events.push(PeerManagerEvent::Ping(peer_id));
|
self.events.push(PeerManagerEvent::Ping(peer_id));
|
||||||
}
|
}
|
||||||
Poll::Ready(Some(Err(e))) => {
|
Poll::Ready(Some(Err(e))) => {
|
||||||
@ -1009,7 +1005,7 @@ impl<TSpec: EthSpec> Stream for PeerManager<TSpec> {
|
|||||||
loop {
|
loop {
|
||||||
match self.outbound_ping_peers.poll_next_unpin(cx) {
|
match self.outbound_ping_peers.poll_next_unpin(cx) {
|
||||||
Poll::Ready(Some(Ok(peer_id))) => {
|
Poll::Ready(Some(Ok(peer_id))) => {
|
||||||
self.outbound_ping_peers.insert(peer_id.clone());
|
self.outbound_ping_peers.insert(peer_id);
|
||||||
self.events.push(PeerManagerEvent::Ping(peer_id));
|
self.events.push(PeerManagerEvent::Ping(peer_id));
|
||||||
}
|
}
|
||||||
Poll::Ready(Some(Err(e))) => {
|
Poll::Ready(Some(Err(e))) => {
|
||||||
@ -1024,7 +1020,7 @@ impl<TSpec: EthSpec> Stream for PeerManager<TSpec> {
|
|||||||
loop {
|
loop {
|
||||||
match self.status_peers.poll_next_unpin(cx) {
|
match self.status_peers.poll_next_unpin(cx) {
|
||||||
Poll::Ready(Some(Ok(peer_id))) => {
|
Poll::Ready(Some(Ok(peer_id))) => {
|
||||||
self.status_peers.insert(peer_id.clone());
|
self.status_peers.insert(peer_id);
|
||||||
self.events.push(PeerManagerEvent::Status(peer_id))
|
self.events.push(PeerManagerEvent::Status(peer_id))
|
||||||
}
|
}
|
||||||
Poll::Ready(Some(Err(e))) => {
|
Poll::Ready(Some(Err(e))) => {
|
||||||
|
@ -322,7 +322,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
|
|||||||
|
|
||||||
/// A peer is being dialed.
|
/// A peer is being dialed.
|
||||||
pub fn dialing_peer(&mut self, peer_id: &PeerId, enr: Option<Enr>) {
|
pub fn dialing_peer(&mut self, peer_id: &PeerId, enr: Option<Enr>) {
|
||||||
let info = self.peers.entry(peer_id.clone()).or_default();
|
let info = self.peers.entry(*peer_id).or_default();
|
||||||
info.enr = enr;
|
info.enr = enr;
|
||||||
|
|
||||||
if info.is_disconnected() {
|
if info.is_disconnected() {
|
||||||
@ -341,7 +341,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
|
|||||||
|
|
||||||
/// Update min ttl of a peer.
|
/// Update min ttl of a peer.
|
||||||
pub fn update_min_ttl(&mut self, peer_id: &PeerId, min_ttl: Instant) {
|
pub fn update_min_ttl(&mut self, peer_id: &PeerId, min_ttl: Instant) {
|
||||||
let info = self.peers.entry(peer_id.clone()).or_default();
|
let info = self.peers.entry(*peer_id).or_default();
|
||||||
|
|
||||||
// only update if the ttl is longer
|
// only update if the ttl is longer
|
||||||
if info.min_ttl.is_none() || Some(min_ttl) > info.min_ttl {
|
if info.min_ttl.is_none() || Some(min_ttl) > info.min_ttl {
|
||||||
@ -382,7 +382,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
|
|||||||
enr: Option<Enr>,
|
enr: Option<Enr>,
|
||||||
direction: ConnectionDirection,
|
direction: ConnectionDirection,
|
||||||
) {
|
) {
|
||||||
let info = self.peers.entry(peer_id.clone()).or_default();
|
let info = self.peers.entry(*peer_id).or_default();
|
||||||
info.enr = enr;
|
info.enr = enr;
|
||||||
|
|
||||||
if info.is_disconnected() {
|
if info.is_disconnected() {
|
||||||
@ -459,7 +459,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
|
|||||||
// peer's score to be a banned state.
|
// peer's score to be a banned state.
|
||||||
pub fn disconnect_and_ban(&mut self, peer_id: &PeerId) -> bool {
|
pub fn disconnect_and_ban(&mut self, peer_id: &PeerId) -> bool {
|
||||||
let log_ref = &self.log;
|
let log_ref = &self.log;
|
||||||
let info = self.peers.entry(peer_id.clone()).or_insert_with(|| {
|
let info = self.peers.entry(*peer_id).or_insert_with(|| {
|
||||||
warn!(log_ref, "Banning unknown peer";
|
warn!(log_ref, "Banning unknown peer";
|
||||||
"peer_id" => %peer_id);
|
"peer_id" => %peer_id);
|
||||||
PeerInfo::default()
|
PeerInfo::default()
|
||||||
@ -517,7 +517,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
|
|||||||
/// If this is called for a banned peer, it will error.
|
/// If this is called for a banned peer, it will error.
|
||||||
pub fn unban(&mut self, peer_id: &PeerId) -> Result<(), &'static str> {
|
pub fn unban(&mut self, peer_id: &PeerId) -> Result<(), &'static str> {
|
||||||
let log_ref = &self.log;
|
let log_ref = &self.log;
|
||||||
let info = self.peers.entry(peer_id.clone()).or_insert_with(|| {
|
let info = self.peers.entry(*peer_id).or_insert_with(|| {
|
||||||
warn!(log_ref, "UnBanning unknown peer";
|
warn!(log_ref, "UnBanning unknown peer";
|
||||||
"peer_id" => %peer_id);
|
"peer_id" => %peer_id);
|
||||||
PeerInfo::default()
|
PeerInfo::default()
|
||||||
@ -557,7 +557,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
|
|||||||
{
|
{
|
||||||
self.banned_peers_count
|
self.banned_peers_count
|
||||||
.remove_banned_peer(info.seen_addresses());
|
.remove_banned_peer(info.seen_addresses());
|
||||||
Some(id.clone())
|
Some(*id)
|
||||||
} else {
|
} else {
|
||||||
// If there is no minimum, this is a coding error.
|
// If there is no minimum, this is a coding error.
|
||||||
crit!(
|
crit!(
|
||||||
@ -584,7 +584,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
|
|||||||
_ => None,
|
_ => None,
|
||||||
})
|
})
|
||||||
.min_by_key(|(_, since)| *since)
|
.min_by_key(|(_, since)| *since)
|
||||||
.map(|(id, _)| id.clone())
|
.map(|(id, _)| *id)
|
||||||
{
|
{
|
||||||
debug!(self.log, "Removing old disconnected peer"; "peer_id" => %to_drop);
|
debug!(self.log, "Removing old disconnected peer"; "peer_id" => %to_drop);
|
||||||
self.peers.remove(&to_drop);
|
self.peers.remove(&to_drop);
|
||||||
|
@ -190,7 +190,7 @@ where
|
|||||||
debug!(self.log, "Requesting new peer's metadata"; "peer_id" => %peer_id);
|
debug!(self.log, "Requesting new peer's metadata"; "peer_id" => %peer_id);
|
||||||
let rpc_event = RPCSend::Request(RequestId::Behaviour, RPCRequest::MetaData(PhantomData));
|
let rpc_event = RPCSend::Request(RequestId::Behaviour, RPCRequest::MetaData(PhantomData));
|
||||||
self.events.push(NetworkBehaviourAction::NotifyHandler {
|
self.events.push(NetworkBehaviourAction::NotifyHandler {
|
||||||
peer_id: peer_id.clone(),
|
peer_id: *peer_id,
|
||||||
handler: NotifyHandler::Any,
|
handler: NotifyHandler::Any,
|
||||||
event: rpc_event,
|
event: rpc_event,
|
||||||
});
|
});
|
||||||
|
@ -9,7 +9,8 @@ use crate::EnrExt;
|
|||||||
use crate::{NetworkConfig, NetworkGlobals, PeerAction, ReportSource};
|
use crate::{NetworkConfig, NetworkGlobals, PeerAction, ReportSource};
|
||||||
use futures::prelude::*;
|
use futures::prelude::*;
|
||||||
use libp2p::core::{
|
use libp2p::core::{
|
||||||
identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed,
|
connection::ConnectionLimits, identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox,
|
||||||
|
transport::Boxed,
|
||||||
};
|
};
|
||||||
use libp2p::{
|
use libp2p::{
|
||||||
bandwidth::{BandwidthLogging, BandwidthSinks},
|
bandwidth::{BandwidthLogging, BandwidthSinks},
|
||||||
@ -28,7 +29,7 @@ use types::{ChainSpec, EnrForkId, EthSpec};
|
|||||||
|
|
||||||
pub const NETWORK_KEY_FILENAME: &str = "key";
|
pub const NETWORK_KEY_FILENAME: &str = "key";
|
||||||
/// The maximum simultaneous libp2p connections per peer.
|
/// The maximum simultaneous libp2p connections per peer.
|
||||||
const MAX_CONNECTIONS_PER_PEER: usize = 1;
|
const MAX_CONNECTIONS_PER_PEER: u32 = 1;
|
||||||
/// The filename to store our local metadata.
|
/// The filename to store our local metadata.
|
||||||
pub const METADATA_FILENAME: &str = "metadata";
|
pub const METADATA_FILENAME: &str = "metadata";
|
||||||
|
|
||||||
@ -123,13 +124,20 @@ impl<TSpec: EthSpec> Service<TSpec> {
|
|||||||
self.0.spawn(f, "libp2p");
|
self.0.spawn(f, "libp2p");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// sets up the libp2p connection limits
|
||||||
|
let limits = ConnectionLimits::default()
|
||||||
|
.with_max_pending_incoming(Some(5))
|
||||||
|
.with_max_pending_outgoing(Some(16))
|
||||||
|
.with_max_established_incoming(Some((config.target_peers as f64 * 1.2) as u32))
|
||||||
|
.with_max_established_outgoing(Some((config.target_peers as f64 * 1.2) as u32))
|
||||||
|
.with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER));
|
||||||
|
|
||||||
(
|
(
|
||||||
SwarmBuilder::new(transport, behaviour, local_peer_id.clone())
|
SwarmBuilder::new(transport, behaviour, local_peer_id)
|
||||||
.notify_handler_buffer_size(std::num::NonZeroUsize::new(32).expect("Not zero"))
|
.notify_handler_buffer_size(std::num::NonZeroUsize::new(7).expect("Not zero"))
|
||||||
.connection_event_buffer_size(64)
|
.connection_event_buffer_size(64)
|
||||||
.incoming_connection_limit(10)
|
.connection_limits(limits)
|
||||||
.outgoing_connection_limit(config.target_peers * 2)
|
|
||||||
.peer_connection_limit(MAX_CONNECTIONS_PER_PEER)
|
|
||||||
.executor(Box::new(Executor(executor)))
|
.executor(Box::new(Executor(executor)))
|
||||||
.build(),
|
.build(),
|
||||||
bandwidth,
|
bandwidth,
|
||||||
@ -146,7 +154,7 @@ impl<TSpec: EthSpec> Service<TSpec> {
|
|||||||
match Swarm::listen_on(&mut swarm, listen_multiaddr.clone()) {
|
match Swarm::listen_on(&mut swarm, listen_multiaddr.clone()) {
|
||||||
Ok(_) => {
|
Ok(_) => {
|
||||||
let mut log_address = listen_multiaddr;
|
let mut log_address = listen_multiaddr;
|
||||||
log_address.push(Protocol::P2p(local_peer_id.clone().into()));
|
log_address.push(Protocol::P2p(local_peer_id.into()));
|
||||||
info!(log, "Listening established"; "address" => %log_address);
|
info!(log, "Listening established"; "address" => %log_address);
|
||||||
}
|
}
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
|
@ -61,7 +61,7 @@ impl<TSpec: EthSpec> NetworkGlobals<TSpec> {
|
|||||||
|
|
||||||
/// Returns the local libp2p PeerID.
|
/// Returns the local libp2p PeerID.
|
||||||
pub fn local_peer_id(&self) -> PeerId {
|
pub fn local_peer_id(&self) -> PeerId {
|
||||||
self.peer_id.read().clone()
|
*self.peer_id.read()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the list of `Multiaddr` that the underlying libp2p instance is listening on.
|
/// Returns the list of `Multiaddr` that the underlying libp2p instance is listening on.
|
||||||
|
@ -13,7 +13,7 @@ pub type EnrBitfield<T: EthSpec> = BitVector<T::SubnetBitfieldLength>;
|
|||||||
pub type Enr = discv5::enr::Enr<discv5::enr::CombinedKey>;
|
pub type Enr = discv5::enr::Enr<discv5::enr::CombinedKey>;
|
||||||
|
|
||||||
pub use globals::NetworkGlobals;
|
pub use globals::NetworkGlobals;
|
||||||
pub use pubsub::{MessageData, PubsubMessage};
|
pub use pubsub::{PubsubMessage, SnappyTransform};
|
||||||
pub use subnet::SubnetDiscovery;
|
pub use subnet::SubnetDiscovery;
|
||||||
pub use sync_state::SyncState;
|
pub use sync_state::SyncState;
|
||||||
pub use topics::{subnet_id_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, CORE_TOPICS};
|
pub use topics::{subnet_id_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, CORE_TOPICS};
|
||||||
|
@ -1,44 +1,18 @@
|
|||||||
//! Handles the encoding and decoding of pubsub messages.
|
//! Handles the encoding and decoding of pubsub messages.
|
||||||
|
|
||||||
use crate::config::GOSSIP_MAX_SIZE;
|
|
||||||
use crate::types::{GossipEncoding, GossipKind, GossipTopic};
|
use crate::types::{GossipEncoding, GossipKind, GossipTopic};
|
||||||
use crate::TopicHash;
|
use crate::TopicHash;
|
||||||
|
use libp2p::gossipsub::{DataTransform, GossipsubMessage, RawGossipsubMessage};
|
||||||
use snap::raw::{decompress_len, Decoder, Encoder};
|
use snap::raw::{decompress_len, Decoder, Encoder};
|
||||||
use ssz::{Decode, Encode};
|
use ssz::{Decode, Encode};
|
||||||
use std::boxed::Box;
|
use std::boxed::Box;
|
||||||
|
use std::io::{Error, ErrorKind};
|
||||||
use types::SubnetId;
|
use types::SubnetId;
|
||||||
use types::{
|
use types::{
|
||||||
Attestation, AttesterSlashing, EthSpec, ProposerSlashing, SignedAggregateAndProof,
|
Attestation, AttesterSlashing, EthSpec, ProposerSlashing, SignedAggregateAndProof,
|
||||||
SignedBeaconBlock, SignedVoluntaryExit,
|
SignedBeaconBlock, SignedVoluntaryExit,
|
||||||
};
|
};
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub struct MessageData {
|
|
||||||
pub raw: Vec<u8>,
|
|
||||||
pub decompressed: Result<Vec<u8>, String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AsRef<[u8]> for MessageData {
|
|
||||||
fn as_ref(&self) -> &[u8] {
|
|
||||||
self.raw.as_ref()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Into<Vec<u8>> for MessageData {
|
|
||||||
fn into(self) -> Vec<u8> {
|
|
||||||
self.raw
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<Vec<u8>> for MessageData {
|
|
||||||
fn from(raw: Vec<u8>) -> Self {
|
|
||||||
Self {
|
|
||||||
decompressed: decompress_snappy(raw.as_ref()),
|
|
||||||
raw,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, PartialEq)]
|
#[derive(Debug, Clone, PartialEq)]
|
||||||
pub enum PubsubMessage<T: EthSpec> {
|
pub enum PubsubMessage<T: EthSpec> {
|
||||||
/// Gossipsub message providing notification of a new block.
|
/// Gossipsub message providing notification of a new block.
|
||||||
@ -55,21 +29,63 @@ pub enum PubsubMessage<T: EthSpec> {
|
|||||||
AttesterSlashing(Box<AttesterSlashing<T>>),
|
AttesterSlashing(Box<AttesterSlashing<T>>),
|
||||||
}
|
}
|
||||||
|
|
||||||
fn decompress_snappy(data: &[u8]) -> Result<Vec<u8>, String> {
|
// Implements the `DataTransform` trait of gossipsub to employ snappy compression
|
||||||
// Exit early if uncompressed data is > GOSSIP_MAX_SIZE
|
pub struct SnappyTransform {
|
||||||
match decompress_len(data) {
|
/// Sets the maximum size we allow gossipsub messages to decompress to.
|
||||||
Ok(n) if n > GOSSIP_MAX_SIZE => {
|
max_size_per_message: usize,
|
||||||
return Err("ssz_snappy decoded data > GOSSIP_MAX_SIZE".into());
|
}
|
||||||
|
|
||||||
|
impl SnappyTransform {
|
||||||
|
pub fn new(max_size_per_message: usize) -> Self {
|
||||||
|
SnappyTransform {
|
||||||
|
max_size_per_message,
|
||||||
}
|
}
|
||||||
Ok(_) => {}
|
}
|
||||||
Err(e) => {
|
}
|
||||||
return Err(format!("{}", e));
|
|
||||||
|
impl DataTransform for SnappyTransform {
|
||||||
|
// Provides the snappy decompression from RawGossipsubMessages
|
||||||
|
fn inbound_transform(
|
||||||
|
&self,
|
||||||
|
raw_message: RawGossipsubMessage,
|
||||||
|
) -> Result<GossipsubMessage, std::io::Error> {
|
||||||
|
// check the length of the raw bytes
|
||||||
|
let len = decompress_len(&raw_message.data)?;
|
||||||
|
if len > self.max_size_per_message {
|
||||||
|
return Err(Error::new(
|
||||||
|
ErrorKind::InvalidData,
|
||||||
|
"ssz_snappy decoded data > GOSSIP_MAX_SIZE",
|
||||||
|
));
|
||||||
}
|
}
|
||||||
};
|
|
||||||
let mut decoder = Decoder::new();
|
let mut decoder = Decoder::new();
|
||||||
match decoder.decompress_vec(data) {
|
let decompressed_data = decoder.decompress_vec(&raw_message.data)?;
|
||||||
Ok(decompressed_data) => Ok(decompressed_data),
|
|
||||||
Err(e) => Err(format!("{}", e)),
|
// Build the GossipsubMessage struct
|
||||||
|
Ok(GossipsubMessage {
|
||||||
|
source: raw_message.source,
|
||||||
|
data: decompressed_data,
|
||||||
|
sequence_number: raw_message.sequence_number,
|
||||||
|
topic: raw_message.topic,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Provides the snappy compression logic to gossipsub.
|
||||||
|
fn outbound_transform(
|
||||||
|
&self,
|
||||||
|
_topic: &TopicHash,
|
||||||
|
data: Vec<u8>,
|
||||||
|
) -> Result<Vec<u8>, std::io::Error> {
|
||||||
|
// Currently we are not employing topic-based compression. Everything is expected to be
|
||||||
|
// snappy compressed.
|
||||||
|
if data.len() > self.max_size_per_message {
|
||||||
|
return Err(Error::new(
|
||||||
|
ErrorKind::InvalidData,
|
||||||
|
"ssz_snappy Encoded data > GOSSIP_MAX_SIZE",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
let mut encoder = Encoder::new();
|
||||||
|
encoder.compress_vec(&data).map_err(Into::into)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -98,48 +114,49 @@ impl<T: EthSpec> PubsubMessage<T> {
|
|||||||
/* Note: This is assuming we are not hashing topics. If we choose to hash topics, these will
|
/* Note: This is assuming we are not hashing topics. If we choose to hash topics, these will
|
||||||
* need to be modified.
|
* need to be modified.
|
||||||
*/
|
*/
|
||||||
pub fn decode(topic: &TopicHash, data: &MessageData) -> Result<Self, String> {
|
pub fn decode(topic: &TopicHash, data: &[u8]) -> Result<Self, String> {
|
||||||
match GossipTopic::decode(topic.as_str()) {
|
match GossipTopic::decode(topic.as_str()) {
|
||||||
Err(_) => Err(format!("Unknown gossipsub topic: {:?}", topic)),
|
Err(_) => Err(format!("Unknown gossipsub topic: {:?}", topic)),
|
||||||
Ok(gossip_topic) => {
|
Ok(gossip_topic) => {
|
||||||
let decompressed_data = match gossip_topic.encoding() {
|
// All topics are currently expected to be compressed and decompressed with snappy.
|
||||||
GossipEncoding::SSZSnappy => data.decompressed.as_ref()?.as_slice(),
|
// This is done in the `SnappyTransform` struct.
|
||||||
};
|
// Therefore compression has already been handled for us by the time we are
|
||||||
|
// decoding the objects here.
|
||||||
|
|
||||||
// the ssz decoders
|
// the ssz decoders
|
||||||
match gossip_topic.kind() {
|
match gossip_topic.kind() {
|
||||||
GossipKind::BeaconAggregateAndProof => {
|
GossipKind::BeaconAggregateAndProof => {
|
||||||
let agg_and_proof =
|
let agg_and_proof = SignedAggregateAndProof::from_ssz_bytes(data)
|
||||||
SignedAggregateAndProof::from_ssz_bytes(decompressed_data)
|
.map_err(|e| format!("{:?}", e))?;
|
||||||
.map_err(|e| format!("{:?}", e))?;
|
|
||||||
Ok(PubsubMessage::AggregateAndProofAttestation(Box::new(
|
Ok(PubsubMessage::AggregateAndProofAttestation(Box::new(
|
||||||
agg_and_proof,
|
agg_and_proof,
|
||||||
)))
|
)))
|
||||||
}
|
}
|
||||||
GossipKind::Attestation(subnet_id) => {
|
GossipKind::Attestation(subnet_id) => {
|
||||||
let attestation = Attestation::from_ssz_bytes(decompressed_data)
|
let attestation =
|
||||||
.map_err(|e| format!("{:?}", e))?;
|
Attestation::from_ssz_bytes(data).map_err(|e| format!("{:?}", e))?;
|
||||||
Ok(PubsubMessage::Attestation(Box::new((
|
Ok(PubsubMessage::Attestation(Box::new((
|
||||||
*subnet_id,
|
*subnet_id,
|
||||||
attestation,
|
attestation,
|
||||||
))))
|
))))
|
||||||
}
|
}
|
||||||
GossipKind::BeaconBlock => {
|
GossipKind::BeaconBlock => {
|
||||||
let beacon_block = SignedBeaconBlock::from_ssz_bytes(decompressed_data)
|
let beacon_block = SignedBeaconBlock::from_ssz_bytes(data)
|
||||||
.map_err(|e| format!("{:?}", e))?;
|
.map_err(|e| format!("{:?}", e))?;
|
||||||
Ok(PubsubMessage::BeaconBlock(Box::new(beacon_block)))
|
Ok(PubsubMessage::BeaconBlock(Box::new(beacon_block)))
|
||||||
}
|
}
|
||||||
GossipKind::VoluntaryExit => {
|
GossipKind::VoluntaryExit => {
|
||||||
let voluntary_exit = SignedVoluntaryExit::from_ssz_bytes(decompressed_data)
|
let voluntary_exit = SignedVoluntaryExit::from_ssz_bytes(data)
|
||||||
.map_err(|e| format!("{:?}", e))?;
|
.map_err(|e| format!("{:?}", e))?;
|
||||||
Ok(PubsubMessage::VoluntaryExit(Box::new(voluntary_exit)))
|
Ok(PubsubMessage::VoluntaryExit(Box::new(voluntary_exit)))
|
||||||
}
|
}
|
||||||
GossipKind::ProposerSlashing => {
|
GossipKind::ProposerSlashing => {
|
||||||
let proposer_slashing = ProposerSlashing::from_ssz_bytes(decompressed_data)
|
let proposer_slashing = ProposerSlashing::from_ssz_bytes(data)
|
||||||
.map_err(|e| format!("{:?}", e))?;
|
.map_err(|e| format!("{:?}", e))?;
|
||||||
Ok(PubsubMessage::ProposerSlashing(Box::new(proposer_slashing)))
|
Ok(PubsubMessage::ProposerSlashing(Box::new(proposer_slashing)))
|
||||||
}
|
}
|
||||||
GossipKind::AttesterSlashing => {
|
GossipKind::AttesterSlashing => {
|
||||||
let attester_slashing = AttesterSlashing::from_ssz_bytes(decompressed_data)
|
let attester_slashing = AttesterSlashing::from_ssz_bytes(data)
|
||||||
.map_err(|e| format!("{:?}", e))?;
|
.map_err(|e| format!("{:?}", e))?;
|
||||||
Ok(PubsubMessage::AttesterSlashing(Box::new(attester_slashing)))
|
Ok(PubsubMessage::AttesterSlashing(Box::new(attester_slashing)))
|
||||||
}
|
}
|
||||||
@ -150,26 +167,18 @@ impl<T: EthSpec> PubsubMessage<T> {
|
|||||||
|
|
||||||
/// Encodes a `PubsubMessage` based on the topic encodings. The first known encoding is used. If
|
/// Encodes a `PubsubMessage` based on the topic encodings. The first known encoding is used. If
|
||||||
/// no encoding is known, and error is returned.
|
/// no encoding is known, and error is returned.
|
||||||
pub fn encode(&self, encoding: GossipEncoding) -> Result<Vec<u8>, String> {
|
pub fn encode(&self, _encoding: GossipEncoding) -> Vec<u8> {
|
||||||
let data = match &self {
|
// Currently do not employ encoding strategies based on the topic. All messages are ssz
|
||||||
|
// encoded.
|
||||||
|
// Also note, that the compression is handled by the `SnappyTransform` struct. Gossipsub will compress the
|
||||||
|
// messages for us.
|
||||||
|
match &self {
|
||||||
PubsubMessage::BeaconBlock(data) => data.as_ssz_bytes(),
|
PubsubMessage::BeaconBlock(data) => data.as_ssz_bytes(),
|
||||||
PubsubMessage::AggregateAndProofAttestation(data) => data.as_ssz_bytes(),
|
PubsubMessage::AggregateAndProofAttestation(data) => data.as_ssz_bytes(),
|
||||||
PubsubMessage::VoluntaryExit(data) => data.as_ssz_bytes(),
|
PubsubMessage::VoluntaryExit(data) => data.as_ssz_bytes(),
|
||||||
PubsubMessage::ProposerSlashing(data) => data.as_ssz_bytes(),
|
PubsubMessage::ProposerSlashing(data) => data.as_ssz_bytes(),
|
||||||
PubsubMessage::AttesterSlashing(data) => data.as_ssz_bytes(),
|
PubsubMessage::AttesterSlashing(data) => data.as_ssz_bytes(),
|
||||||
PubsubMessage::Attestation(data) => data.1.as_ssz_bytes(),
|
PubsubMessage::Attestation(data) => data.1.as_ssz_bytes(),
|
||||||
};
|
|
||||||
match encoding {
|
|
||||||
GossipEncoding::SSZSnappy => {
|
|
||||||
let mut encoder = Encoder::new();
|
|
||||||
match encoder.compress_vec(&data) {
|
|
||||||
Ok(compressed) if compressed.len() > GOSSIP_MAX_SIZE => {
|
|
||||||
Err("ssz_snappy Encoded data > GOSSIP_MAX_SIZE".into())
|
|
||||||
}
|
|
||||||
Ok(compressed) => Ok(compressed),
|
|
||||||
Err(e) => Err(format!("{}", e)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -200,20 +209,3 @@ impl<T: EthSpec> std::fmt::Display for PubsubMessage<T> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_gossip_max_size() {
|
|
||||||
// Cannot decode more than gossip max size
|
|
||||||
let mut encoder = Encoder::new();
|
|
||||||
let payload = encoder.compress_vec(&[0; GOSSIP_MAX_SIZE + 1]).unwrap();
|
|
||||||
let message_data: MessageData = payload.into();
|
|
||||||
assert_eq!(
|
|
||||||
message_data.decompressed.unwrap_err(),
|
|
||||||
"ssz_snappy decoded data > GOSSIP_MAX_SIZE".to_string()
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
@ -3,7 +3,8 @@ use eth2_libp2p::Enr;
|
|||||||
use eth2_libp2p::EnrExt;
|
use eth2_libp2p::EnrExt;
|
||||||
use eth2_libp2p::Multiaddr;
|
use eth2_libp2p::Multiaddr;
|
||||||
use eth2_libp2p::Service as LibP2PService;
|
use eth2_libp2p::Service as LibP2PService;
|
||||||
use eth2_libp2p::{GossipsubConfigBuilder, Libp2pEvent, NetworkConfig};
|
use eth2_libp2p::{Libp2pEvent, NetworkConfig};
|
||||||
|
use libp2p::gossipsub::GossipsubConfigBuilder;
|
||||||
use slog::{debug, error, o, Drain};
|
use slog::{debug, error, o, Drain};
|
||||||
use std::net::{TcpListener, UdpSocket};
|
use std::net::{TcpListener, UdpSocket};
|
||||||
use std::sync::Weak;
|
use std::sync::Weak;
|
||||||
|
@ -1389,7 +1389,7 @@ pub fn serve<T: BeaconChainTypes>(
|
|||||||
|requested_peer_id: String, network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
|
|requested_peer_id: String, network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
|
||||||
blocking_json_task(move || {
|
blocking_json_task(move || {
|
||||||
let peer_id = PeerId::from_bytes(
|
let peer_id = PeerId::from_bytes(
|
||||||
bs58::decode(requested_peer_id.as_str())
|
&bs58::decode(requested_peer_id.as_str())
|
||||||
.into_vec()
|
.into_vec()
|
||||||
.map_err(|e| {
|
.map_err(|e| {
|
||||||
warp_utils::reject::custom_bad_request(format!(
|
warp_utils::reject::custom_bad_request(format!(
|
||||||
|
@ -83,7 +83,7 @@ impl<T: BeaconChainTypes> Worker<T> {
|
|||||||
|
|
||||||
// Indicate to the `Network` service that this message is valid and can be
|
// Indicate to the `Network` service that this message is valid and can be
|
||||||
// propagated on the gossip network.
|
// propagated on the gossip network.
|
||||||
self.propagate_validation_result(message_id, peer_id.clone(), MessageAcceptance::Accept);
|
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept);
|
||||||
|
|
||||||
if !should_import {
|
if !should_import {
|
||||||
return;
|
return;
|
||||||
@ -160,7 +160,7 @@ impl<T: BeaconChainTypes> Worker<T> {
|
|||||||
|
|
||||||
// Indicate to the `Network` service that this message is valid and can be
|
// Indicate to the `Network` service that this message is valid and can be
|
||||||
// propagated on the gossip network.
|
// propagated on the gossip network.
|
||||||
self.propagate_validation_result(message_id, peer_id.clone(), MessageAcceptance::Accept);
|
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept);
|
||||||
|
|
||||||
metrics::inc_counter(&metrics::BEACON_PROCESSOR_AGGREGATED_ATTESTATION_VERIFIED_TOTAL);
|
metrics::inc_counter(&metrics::BEACON_PROCESSOR_AGGREGATED_ATTESTATION_VERIFIED_TOTAL);
|
||||||
|
|
||||||
@ -219,11 +219,7 @@ impl<T: BeaconChainTypes> Worker<T> {
|
|||||||
"slot" => verified_block.block.slot(),
|
"slot" => verified_block.block.slot(),
|
||||||
"hash" => %verified_block.block_root
|
"hash" => %verified_block.block_root
|
||||||
);
|
);
|
||||||
self.propagate_validation_result(
|
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept);
|
||||||
message_id,
|
|
||||||
peer_id.clone(),
|
|
||||||
MessageAcceptance::Accept,
|
|
||||||
);
|
|
||||||
verified_block
|
verified_block
|
||||||
}
|
}
|
||||||
Err(BlockError::ParentUnknown(block)) => {
|
Err(BlockError::ParentUnknown(block)) => {
|
||||||
@ -239,7 +235,7 @@ impl<T: BeaconChainTypes> Worker<T> {
|
|||||||
debug!(self.log, "Could not verify block for gossip, ignoring the block";
|
debug!(self.log, "Could not verify block for gossip, ignoring the block";
|
||||||
"error" => %e);
|
"error" => %e);
|
||||||
// Prevent recurring behaviour by penalizing the peer slightly.
|
// Prevent recurring behaviour by penalizing the peer slightly.
|
||||||
self.gossip_penalize_peer(peer_id.clone(), PeerAction::HighToleranceError);
|
self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError);
|
||||||
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
|
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -258,11 +254,7 @@ impl<T: BeaconChainTypes> Worker<T> {
|
|||||||
| Err(e @ BlockError::GenesisBlock) => {
|
| Err(e @ BlockError::GenesisBlock) => {
|
||||||
warn!(self.log, "Could not verify block for gossip, rejecting the block";
|
warn!(self.log, "Could not verify block for gossip, rejecting the block";
|
||||||
"error" => %e);
|
"error" => %e);
|
||||||
self.propagate_validation_result(
|
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
|
||||||
message_id,
|
|
||||||
peer_id.clone(),
|
|
||||||
MessageAcceptance::Reject,
|
|
||||||
);
|
|
||||||
self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError);
|
self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -337,11 +329,7 @@ impl<T: BeaconChainTypes> Worker<T> {
|
|||||||
let exit = match self.chain.verify_voluntary_exit_for_gossip(voluntary_exit) {
|
let exit = match self.chain.verify_voluntary_exit_for_gossip(voluntary_exit) {
|
||||||
Ok(ObservationOutcome::New(exit)) => exit,
|
Ok(ObservationOutcome::New(exit)) => exit,
|
||||||
Ok(ObservationOutcome::AlreadyKnown) => {
|
Ok(ObservationOutcome::AlreadyKnown) => {
|
||||||
self.propagate_validation_result(
|
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
|
||||||
message_id,
|
|
||||||
peer_id.clone(),
|
|
||||||
MessageAcceptance::Ignore,
|
|
||||||
);
|
|
||||||
debug!(
|
debug!(
|
||||||
self.log,
|
self.log,
|
||||||
"Dropping exit for already exiting validator";
|
"Dropping exit for already exiting validator";
|
||||||
@ -360,11 +348,7 @@ impl<T: BeaconChainTypes> Worker<T> {
|
|||||||
);
|
);
|
||||||
// These errors occur due to a fault in the beacon chain. It is not necessarily
|
// These errors occur due to a fault in the beacon chain. It is not necessarily
|
||||||
// the fault on the peer.
|
// the fault on the peer.
|
||||||
self.propagate_validation_result(
|
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
|
||||||
message_id,
|
|
||||||
peer_id.clone(),
|
|
||||||
MessageAcceptance::Ignore,
|
|
||||||
);
|
|
||||||
// We still penalize a peer slightly to prevent overuse of invalids.
|
// We still penalize a peer slightly to prevent overuse of invalids.
|
||||||
self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError);
|
self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError);
|
||||||
return;
|
return;
|
||||||
@ -416,11 +400,7 @@ impl<T: BeaconChainTypes> Worker<T> {
|
|||||||
"peer" => %peer_id,
|
"peer" => %peer_id,
|
||||||
"error" => ?e
|
"error" => ?e
|
||||||
);
|
);
|
||||||
self.propagate_validation_result(
|
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
|
||||||
message_id,
|
|
||||||
peer_id.clone(),
|
|
||||||
MessageAcceptance::Ignore,
|
|
||||||
);
|
|
||||||
|
|
||||||
// Penalize peer slightly for invalids.
|
// Penalize peer slightly for invalids.
|
||||||
self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError);
|
self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError);
|
||||||
@ -466,11 +446,7 @@ impl<T: BeaconChainTypes> Worker<T> {
|
|||||||
"peer" => %peer_id,
|
"peer" => %peer_id,
|
||||||
"error" => ?e
|
"error" => ?e
|
||||||
);
|
);
|
||||||
self.propagate_validation_result(
|
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
|
||||||
message_id,
|
|
||||||
peer_id.clone(),
|
|
||||||
MessageAcceptance::Ignore,
|
|
||||||
);
|
|
||||||
// Penalize peer slightly for invalids.
|
// Penalize peer slightly for invalids.
|
||||||
self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError);
|
self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError);
|
||||||
return;
|
return;
|
||||||
@ -522,14 +498,10 @@ impl<T: BeaconChainTypes> Worker<T> {
|
|||||||
|
|
||||||
// Peers that are slow or not to spec can spam us with these messages draining our
|
// Peers that are slow or not to spec can spam us with these messages draining our
|
||||||
// bandwidth. We therefore penalize these peers when they do this.
|
// bandwidth. We therefore penalize these peers when they do this.
|
||||||
self.gossip_penalize_peer(peer_id.clone(), PeerAction::LowToleranceError);
|
self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError);
|
||||||
|
|
||||||
// Do not propagate these messages.
|
// Do not propagate these messages.
|
||||||
self.propagate_validation_result(
|
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
|
||||||
message_id,
|
|
||||||
peer_id.clone(),
|
|
||||||
MessageAcceptance::Ignore,
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
AttnError::InvalidSelectionProof { .. } | AttnError::InvalidSignature => {
|
AttnError::InvalidSelectionProof { .. } | AttnError::InvalidSignature => {
|
||||||
/*
|
/*
|
||||||
@ -537,12 +509,8 @@ impl<T: BeaconChainTypes> Worker<T> {
|
|||||||
*
|
*
|
||||||
* The peer has published an invalid consensus message.
|
* The peer has published an invalid consensus message.
|
||||||
*/
|
*/
|
||||||
self.propagate_validation_result(
|
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
|
||||||
message_id,
|
self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError);
|
||||||
peer_id.clone(),
|
|
||||||
MessageAcceptance::Reject,
|
|
||||||
);
|
|
||||||
self.gossip_penalize_peer(peer_id.clone(), PeerAction::LowToleranceError);
|
|
||||||
}
|
}
|
||||||
AttnError::EmptyAggregationBitfield => {
|
AttnError::EmptyAggregationBitfield => {
|
||||||
/*
|
/*
|
||||||
@ -552,12 +520,8 @@ impl<T: BeaconChainTypes> Worker<T> {
|
|||||||
* violation of the spec nor indication of fault.
|
* violation of the spec nor indication of fault.
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
self.propagate_validation_result(
|
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
|
||||||
message_id,
|
self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError);
|
||||||
peer_id.clone(),
|
|
||||||
MessageAcceptance::Reject,
|
|
||||||
);
|
|
||||||
self.gossip_penalize_peer(peer_id.clone(), PeerAction::LowToleranceError);
|
|
||||||
}
|
}
|
||||||
AttnError::AggregatorPubkeyUnknown(_) => {
|
AttnError::AggregatorPubkeyUnknown(_) => {
|
||||||
/*
|
/*
|
||||||
@ -573,12 +537,8 @@ impl<T: BeaconChainTypes> Worker<T> {
|
|||||||
*
|
*
|
||||||
* The peer has published an invalid consensus message.
|
* The peer has published an invalid consensus message.
|
||||||
*/
|
*/
|
||||||
self.propagate_validation_result(
|
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
|
||||||
message_id,
|
self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError);
|
||||||
peer_id.clone(),
|
|
||||||
MessageAcceptance::Reject,
|
|
||||||
);
|
|
||||||
self.gossip_penalize_peer(peer_id.clone(), PeerAction::LowToleranceError);
|
|
||||||
}
|
}
|
||||||
AttnError::AggregatorNotInCommittee { .. } => {
|
AttnError::AggregatorNotInCommittee { .. } => {
|
||||||
/*
|
/*
|
||||||
@ -594,12 +554,8 @@ impl<T: BeaconChainTypes> Worker<T> {
|
|||||||
*
|
*
|
||||||
* The peer has published an invalid consensus message.
|
* The peer has published an invalid consensus message.
|
||||||
*/
|
*/
|
||||||
self.propagate_validation_result(
|
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
|
||||||
message_id,
|
self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError);
|
||||||
peer_id.clone(),
|
|
||||||
MessageAcceptance::Reject,
|
|
||||||
);
|
|
||||||
self.gossip_penalize_peer(peer_id.clone(), PeerAction::LowToleranceError);
|
|
||||||
}
|
}
|
||||||
AttnError::AttestationAlreadyKnown { .. } => {
|
AttnError::AttestationAlreadyKnown { .. } => {
|
||||||
/*
|
/*
|
||||||
@ -632,10 +588,7 @@ impl<T: BeaconChainTypes> Worker<T> {
|
|||||||
"block" => %beacon_block_root,
|
"block" => %beacon_block_root,
|
||||||
"type" => ?attestation_type,
|
"type" => ?attestation_type,
|
||||||
);
|
);
|
||||||
// We still penalize the peer slightly. We don't want this to be a recurring
|
// This is an allowed behaviour.
|
||||||
// behaviour.
|
|
||||||
self.gossip_penalize_peer(peer_id.clone(), PeerAction::HighToleranceError);
|
|
||||||
|
|
||||||
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
|
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
|
||||||
|
|
||||||
return;
|
return;
|
||||||
@ -646,7 +599,7 @@ impl<T: BeaconChainTypes> Worker<T> {
|
|||||||
*
|
*
|
||||||
* The peer is not necessarily faulty.
|
* The peer is not necessarily faulty.
|
||||||
*/
|
*/
|
||||||
trace!(
|
debug!(
|
||||||
self.log,
|
self.log,
|
||||||
"Prior attestation known";
|
"Prior attestation known";
|
||||||
"peer_id" => %peer_id,
|
"peer_id" => %peer_id,
|
||||||
@ -655,7 +608,7 @@ impl<T: BeaconChainTypes> Worker<T> {
|
|||||||
);
|
);
|
||||||
// We still penalize the peer slightly. We don't want this to be a recurring
|
// We still penalize the peer slightly. We don't want this to be a recurring
|
||||||
// behaviour.
|
// behaviour.
|
||||||
self.gossip_penalize_peer(peer_id.clone(), PeerAction::HighToleranceError);
|
self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError);
|
||||||
|
|
||||||
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
|
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
|
||||||
|
|
||||||
@ -668,12 +621,15 @@ impl<T: BeaconChainTypes> Worker<T> {
|
|||||||
*
|
*
|
||||||
* The peer has published an invalid consensus message.
|
* The peer has published an invalid consensus message.
|
||||||
*/
|
*/
|
||||||
self.propagate_validation_result(
|
debug!(
|
||||||
message_id,
|
self.log,
|
||||||
peer_id.clone(),
|
"Validation Index too high";
|
||||||
MessageAcceptance::Reject,
|
"peer_id" => %peer_id,
|
||||||
|
"block" => %beacon_block_root,
|
||||||
|
"type" => ?attestation_type,
|
||||||
);
|
);
|
||||||
self.gossip_penalize_peer(peer_id.clone(), PeerAction::LowToleranceError);
|
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
|
||||||
|
self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError);
|
||||||
}
|
}
|
||||||
AttnError::UnknownHeadBlock { beacon_block_root } => {
|
AttnError::UnknownHeadBlock { beacon_block_root } => {
|
||||||
// Note: its a little bit unclear as to whether or not this block is unknown or
|
// Note: its a little bit unclear as to whether or not this block is unknown or
|
||||||
@ -691,10 +647,7 @@ impl<T: BeaconChainTypes> Worker<T> {
|
|||||||
);
|
);
|
||||||
// we don't know the block, get the sync manager to handle the block lookup
|
// we don't know the block, get the sync manager to handle the block lookup
|
||||||
self.sync_tx
|
self.sync_tx
|
||||||
.send(SyncMessage::UnknownBlockHash(
|
.send(SyncMessage::UnknownBlockHash(peer_id, *beacon_block_root))
|
||||||
peer_id.clone(),
|
|
||||||
*beacon_block_root,
|
|
||||||
))
|
|
||||||
.unwrap_or_else(|_| {
|
.unwrap_or_else(|_| {
|
||||||
warn!(
|
warn!(
|
||||||
self.log,
|
self.log,
|
||||||
@ -722,12 +675,8 @@ impl<T: BeaconChainTypes> Worker<T> {
|
|||||||
*
|
*
|
||||||
* The peer has published an invalid consensus message.
|
* The peer has published an invalid consensus message.
|
||||||
*/
|
*/
|
||||||
self.propagate_validation_result(
|
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
|
||||||
message_id,
|
self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError);
|
||||||
peer_id.clone(),
|
|
||||||
MessageAcceptance::Reject,
|
|
||||||
);
|
|
||||||
self.gossip_penalize_peer(peer_id.clone(), PeerAction::LowToleranceError);
|
|
||||||
}
|
}
|
||||||
AttnError::BadTargetEpoch => {
|
AttnError::BadTargetEpoch => {
|
||||||
/*
|
/*
|
||||||
@ -736,12 +685,8 @@ impl<T: BeaconChainTypes> Worker<T> {
|
|||||||
*
|
*
|
||||||
* The peer has published an invalid consensus message.
|
* The peer has published an invalid consensus message.
|
||||||
*/
|
*/
|
||||||
self.propagate_validation_result(
|
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
|
||||||
message_id,
|
self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError);
|
||||||
peer_id.clone(),
|
|
||||||
MessageAcceptance::Reject,
|
|
||||||
);
|
|
||||||
self.gossip_penalize_peer(peer_id.clone(), PeerAction::LowToleranceError);
|
|
||||||
}
|
}
|
||||||
AttnError::NoCommitteeForSlotAndIndex { .. } => {
|
AttnError::NoCommitteeForSlotAndIndex { .. } => {
|
||||||
/*
|
/*
|
||||||
@ -749,12 +694,8 @@ impl<T: BeaconChainTypes> Worker<T> {
|
|||||||
*
|
*
|
||||||
* The peer has published an invalid consensus message.
|
* The peer has published an invalid consensus message.
|
||||||
*/
|
*/
|
||||||
self.propagate_validation_result(
|
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
|
||||||
message_id,
|
self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError);
|
||||||
peer_id.clone(),
|
|
||||||
MessageAcceptance::Reject,
|
|
||||||
);
|
|
||||||
self.gossip_penalize_peer(peer_id.clone(), PeerAction::LowToleranceError);
|
|
||||||
}
|
}
|
||||||
AttnError::NotExactlyOneAggregationBitSet(_) => {
|
AttnError::NotExactlyOneAggregationBitSet(_) => {
|
||||||
/*
|
/*
|
@@ -762,12 +703,8 @@ impl<T: BeaconChainTypes> Worker<T> {
 *
 * The peer has published an invalid consensus message.
 */
-self.propagate_validation_result(
-message_id,
-peer_id.clone(),
-MessageAcceptance::Reject,
-);
-self.gossip_penalize_peer(peer_id.clone(), PeerAction::LowToleranceError);
+self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
+self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError);
 }
 AttnError::AttestsToFutureBlock { .. } => {
 /*
@@ -775,12 +712,8 @@ impl<T: BeaconChainTypes> Worker<T> {
 *
 * The peer has published an invalid consensus message.
 */
-self.propagate_validation_result(
-message_id,
-peer_id.clone(),
-MessageAcceptance::Reject,
-);
-self.gossip_penalize_peer(peer_id.clone(), PeerAction::LowToleranceError);
+self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
+self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError);
 }

 AttnError::InvalidSubnetId { received, expected } => {
@@ -793,12 +726,8 @@ impl<T: BeaconChainTypes> Worker<T> {
 "expected" => ?expected,
 "received" => ?received,
 );
-self.propagate_validation_result(
-message_id,
-peer_id.clone(),
-MessageAcceptance::Reject,
-);
-self.gossip_penalize_peer(peer_id.clone(), PeerAction::LowToleranceError);
+self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
+self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError);
 }
 AttnError::Invalid(_) => {
 /*
@@ -806,12 +735,8 @@ impl<T: BeaconChainTypes> Worker<T> {
 *
 * The peer has published an invalid consensus message.
 */
-self.propagate_validation_result(
-message_id,
-peer_id.clone(),
-MessageAcceptance::Reject,
-);
-self.gossip_penalize_peer(peer_id.clone(), PeerAction::LowToleranceError);
+self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
+self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError);
 }
 AttnError::InvalidTargetEpoch { .. } => {
 /*
@@ -819,12 +744,8 @@ impl<T: BeaconChainTypes> Worker<T> {
 *
 * The peer has published an invalid consensus message.
 */
-self.propagate_validation_result(
-message_id,
-peer_id.clone(),
-MessageAcceptance::Reject,
-);
-self.gossip_penalize_peer(peer_id.clone(), PeerAction::LowToleranceError);
+self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
+self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError);
 }
 AttnError::InvalidTargetRoot { .. } => {
 /*
@@ -832,12 +753,8 @@ impl<T: BeaconChainTypes> Worker<T> {
 *
 * The peer has published an invalid consensus message.
 */
-self.propagate_validation_result(
-message_id,
-peer_id.clone(),
-MessageAcceptance::Reject,
-);
-self.gossip_penalize_peer(peer_id.clone(), PeerAction::LowToleranceError);
+self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
+self.gossip_penalize_peer(peer_id, PeerAction::LowToleranceError);
 }
 AttnError::TooManySkippedSlots {
 head_block_slot,
@@ -856,12 +773,8 @@ impl<T: BeaconChainTypes> Worker<T> {
 );
 // In this case we wish to penalize gossipsub peers that do this to avoid future
 // attestations that have too many skip slots.
-self.propagate_validation_result(
-message_id,
-peer_id.clone(),
-MessageAcceptance::Reject,
-);
-self.gossip_penalize_peer(peer_id.clone(), PeerAction::MidToleranceError);
+self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
+self.gossip_penalize_peer(peer_id, PeerAction::MidToleranceError);
 }
 AttnError::BeaconChainError(e) => {
 /*
@@ -877,13 +790,9 @@ impl<T: BeaconChainTypes> Worker<T> {
 "peer_id" => %peer_id,
 "error" => ?e,
 );
-self.propagate_validation_result(
-message_id,
-peer_id.clone(),
-MessageAcceptance::Ignore,
-);
+self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
 // Penalize the peer slightly
-self.gossip_penalize_peer(peer_id.clone(), PeerAction::HighToleranceError);
+self.gossip_penalize_peer(peer_id, PeerAction::HighToleranceError);
 }
 }

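
The attestation-error arms above all follow the same two-step shape: report the validation result back to gossipsub (`Reject` for invalid messages, `Ignore` for internal errors) so the message is not propagated, then apply an application-level score penalty to the peer. The `.clone()` calls on `peer_id` disappear because the updated libp2p appears to allow a `PeerId` to be passed by value cheaply. A minimal, self-contained sketch of that pattern; the stubbed types below are hypothetical stand-ins, not the real Lighthouse/libp2p definitions:

    // Hypothetical stand-ins; only the call pattern mirrors the diff above.
    #[allow(dead_code)]
    #[derive(Clone, Copy, Debug)]
    struct PeerId(u64);

    #[derive(Clone, Debug)]
    struct MessageId(String);

    #[allow(dead_code)]
    #[derive(Debug)]
    enum MessageAcceptance {
        Ignore,
        Reject,
    }

    #[allow(dead_code)]
    #[derive(Debug)]
    enum PeerAction {
        HighToleranceError,
        MidToleranceError,
        LowToleranceError,
    }

    struct Worker;

    impl Worker {
        // Tell gossipsub whether to propagate the message and how to score the sender.
        fn propagate_validation_result(&self, id: MessageId, peer: PeerId, acceptance: MessageAcceptance) {
            println!("validation result for {:?} from {:?}: {:?}", id, peer, acceptance);
        }

        // Apply an application-level penalty in the peer manager.
        fn gossip_penalize_peer(&self, peer: PeerId, action: PeerAction) {
            println!("penalize {:?}: {:?}", peer, action);
        }

        // The shape every invalid-attestation arm reduces to after the refactor.
        fn on_invalid_attestation(&self, id: MessageId, peer: PeerId) {
            // PeerId is Copy in this sketch, so it can be used twice without cloning.
            self.propagate_validation_result(id, peer, MessageAcceptance::Reject);
            self.gossip_penalize_peer(peer, PeerAction::LowToleranceError);
        }
    }

    fn main() {
        Worker.on_invalid_attestation(MessageId("msg-1".into()), PeerId(7));
    }
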
@@ -116,7 +116,7 @@ impl<T: BeaconChainTypes> Worker<T> {
 for root in request.block_roots.iter() {
 if let Ok(Some(block)) = self.chain.store.get_block(root) {
 self.send_response(
-peer_id.clone(),
+peer_id,
 Response::BlocksByRoot(Some(Box::new(block))),
 request_id,
 );
@@ -212,7 +212,7 @@ impl<T: BeaconChainTypes> Worker<T> {
 {
 blocks_sent += 1;
 self.send_network_message(NetworkMessage::SendResponse {
-peer_id: peer_id.clone(),
+peer_id,
 response: Response::BlocksByRange(Some(Box::new(block))),
 id: request_id,
 });
@@ -126,11 +126,8 @@ impl<T: BeaconChainTypes> Processor<T> {
 // ignore status responses if we are shutting down
 if let Ok(status_message) = status_message(&self.chain) {
 // Say status back.
-self.network.send_response(
-peer_id.clone(),
-Response::Status(status_message),
-request_id,
-);
+self.network
+.send_response(peer_id, Response::Status(status_message), request_id);
 }

 self.send_beacon_processor_work(BeaconWorkEvent::status_message(peer_id, status))
@@ -713,7 +713,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
 // The sent block is not the correct block, remove the head block and downvote
 // the peer
 let _ = parent_request.downloaded_blocks.pop();
-let peer = parent_request.last_submitted_peer.clone();
+let peer = parent_request.last_submitted_peer;

 warn!(self.log, "Peer sent invalid parent.";
 "peer_id" => %peer,
@@ -759,7 +759,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
 }
 Ok(_) | Err(BlockError::BlockIsAlreadyKnown { .. }) => {
 let process_id = ProcessId::ParentLookup(
-parent_request.last_submitted_peer.clone(),
+parent_request.last_submitted_peer,
 chain_block_hash,
 );
 let blocks = parent_request.downloaded_blocks;
@@ -852,7 +852,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {

 // We continue to search for the chain of blocks from the same peer. Other peers are not
 // guaranteed to have this chain of blocks.
-let peer_id = parent_request.last_submitted_peer.clone();
+let peer_id = parent_request.last_submitted_peer;

 if let Ok(request_id) = self.network.blocks_by_root_request(peer_id, request) {
 // if the request was successful add the queue back into self
@@ -102,11 +102,11 @@ impl<T: EthSpec> BatchInfo<T> {
 );

 for attempt in &self.failed_processing_attempts {
-peers.insert(attempt.peer_id.clone());
+peers.insert(attempt.peer_id);
 }

 for download in &self.failed_download_attempts {
-peers.insert(download.clone());
+peers.insert(*download);
 }

 peers
@@ -606,7 +606,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
 "batch_epoch" => id, "score_adjustment" => %action,
 "original_peer" => %attempt.peer_id, "new_peer" => %processed_attempt.peer_id
 );
-network.report_peer(attempt.peer_id.clone(), action);
+network.report_peer(attempt.peer_id, action);
 } else {
 // The same peer corrected it's previous mistake. There was an error, so we
 // negative score the original peer.
@ -615,7 +615,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
|
|||||||
"batch_epoch" => id, "score_adjustment" => %action,
|
"batch_epoch" => id, "score_adjustment" => %action,
|
||||||
"original_peer" => %attempt.peer_id, "new_peer" => %processed_attempt.peer_id
|
"original_peer" => %attempt.peer_id, "new_peer" => %processed_attempt.peer_id
|
||||||
);
|
);
|
||||||
network.report_peer(attempt.peer_id.clone(), action);
|
network.report_peer(attempt.peer_id, action);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -822,11 +822,11 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
 let mut priorized_peers = self
 .peers
 .iter()
-.map(|(peer, requests)| (failed_peers.contains(peer), requests.len(), peer))
+.map(|(peer, requests)| (failed_peers.contains(peer), requests.len(), *peer))
 .collect::<Vec<_>>();
 // Sort peers prioritizing unrelated peers with less active requests.
 priorized_peers.sort_unstable();
-priorized_peers.get(0).map(|&(_, _, peer)| peer.clone())
+priorized_peers.get(0).map(|&(_, _, peer)| peer)
 };

 if let Some(peer) = new_peer {
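
The change above makes the closure yield `*peer`, so the prioritised tuples own their peer id instead of borrowing it. The selection itself is a lexicographic sort over `(already_failed, active_request_count, peer)`: `false` sorts before `true` and smaller request counts sort first, so index 0 is a peer that has not failed this batch and has the fewest requests in flight. A small sketch of that idea, using a plain `u64` as a stand-in for the peer id:

    use std::collections::{HashMap, HashSet};

    // Stand-in peer id; the real code uses libp2p's PeerId.
    type PeerId = u64;

    // Pick the peer to retry a batch from: prefer peers that have not already
    // failed this batch, then peers with the fewest active requests.
    fn select_peer(peers: &HashMap<PeerId, Vec<u32>>, failed_peers: &HashSet<PeerId>) -> Option<PeerId> {
        let mut prioritized: Vec<(bool, usize, PeerId)> = peers
            .iter()
            .map(|(peer, requests)| (failed_peers.contains(peer), requests.len(), *peer))
            .collect();
        // Tuples sort lexicographically: false < true, then fewer requests first.
        prioritized.sort_unstable();
        prioritized.get(0).map(|&(_, _, peer)| peer)
    }

    fn main() {
        let mut peers: HashMap<PeerId, Vec<u32>> = HashMap::new();
        peers.insert(1, vec![10, 11]); // two requests in flight
        peers.insert(2, vec![]);       // idle
        peers.insert(3, vec![12]);     // one request, but already failed this batch
        let failed: HashSet<PeerId> = HashSet::from([3]);
        assert_eq!(select_peer(&peers, &failed), Some(2));
    }
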
@@ -846,10 +846,10 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
 ) -> ProcessingResult {
 if let Some(batch) = self.batches.get_mut(&batch_id) {
 let request = batch.to_blocks_by_range_request();
-match network.blocks_by_range_request(peer.clone(), request, self.id, batch_id) {
+match network.blocks_by_range_request(peer, request, self.id, batch_id) {
 Ok(request_id) => {
 // inform the batch about the new request
-batch.start_downloading_from_peer(peer.clone(), request_id)?;
+batch.start_downloading_from_peer(peer, request_id)?;
 if self
 .optimistic_start
 .map(|epoch| epoch == batch_id)
@@ -879,7 +879,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
 warn!(self.log, "Could not send batch request";
 "batch_id" => batch_id, "error" => e, &batch);
 // register the failed download and check if the batch can be retried
-batch.start_downloading_from_peer(peer.clone(), 1)?; // fake request_id is not relevant
+batch.start_downloading_from_peer(peer, 1)?; // fake request_id is not relevant
 self.peers
 .get_mut(&peer)
 .map(|request| request.remove(&batch_id));
@@ -922,7 +922,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
 .iter()
 .filter_map(|(peer, requests)| {
 if requests.is_empty() {
-Some(peer.clone())
+Some(*peer)
 } else {
 None
 }