diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index de889717f..98e97972a 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -49,15 +49,24 @@ jobs: - uses: actions/checkout@v1 - name: Build the root Dockerfile run: docker build . - simulator-ubuntu: + eth1-simulator-ubuntu: runs-on: ubuntu-latest needs: cargo-fmt steps: - uses: actions/checkout@v1 - name: Install ganache-cli run: sudo npm install -g ganache-cli - - name: Run the beacon chain sim - run: cargo run --release --bin simulator beacon-chain-sim + - name: Run the beacon chain sim that starts from an eth1 contract + run: cargo run --release --bin simulator eth1-sim + no-eth1-simulator-ubuntu: + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Install ganache-cli + run: sudo npm install -g ganache-cli + - name: Run the beacon chain sim without an eth1 connection + run: cargo run --release --bin simulator no-eth1-sim check-benchmarks: runs-on: ubuntu-latest needs: cargo-fmt diff --git a/Cargo.lock b/Cargo.lock index 8eab992b6..95a6cd428 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,25 +4,27 @@ name = "account_manager" version = "0.0.1" dependencies = [ - "bls 0.1.0", + "bls 0.2.0", "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", - "deposit_contract 0.1.0", + "clap_utils 0.1.0", + "deposit_contract 0.2.0", "dirs 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "environment 0.1.0", + "environment 0.2.0", "eth2_ssz 0.1.2", "eth2_ssz_derive 0.1.0", - "eth2_testnet_config 0.1.0", + "eth2_testnet_config 0.2.0", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", 
"slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "slog-async 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "slog-term 2.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "slog-async 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "slog-term 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", - "types 0.1.0", - "validator_client 0.1.0", - "web3 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "types 0.2.0", + "validator_client 0.2.0", + "web3 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -47,7 +49,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "block-cipher-trait 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "opaque-debug 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -66,21 +68,21 @@ name = "ahash" version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "const-random 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "const-random 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "aho-corasick" -version = "0.7.6" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "amcl" version = "0.2.0" -source = "git+https://github.com/sigp/milagro_bls?branch=eth2.0-v0.10#52fab41dd086951ade699894b690e95ede1efafd" +source = 
"git+https://github.com/sigp/milagro_bls?branch=eth2.0-v0.10#38c6c33925b24c9319a1febfb621ff9bbf6d49f7" dependencies = [ "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -94,19 +96,14 @@ dependencies = [ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "anyhow" -version = "1.0.26" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "arc-swap" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "arrayref" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -135,8 +132,8 @@ name = "asn1_der_derive" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -144,8 +141,8 @@ name = "atty" version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "hermit-abi 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "hermit-abi 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -161,22 +158,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "backtrace" -version = "0.3.41" +version = "0.3.46" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "backtrace-sys 0.1.32 
(registry+https://github.com/rust-lang/crates.io-index)", + "backtrace-sys 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)", "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-demangle 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "backtrace-sys" -version = "0.1.32" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -184,7 +181,7 @@ name = "base64" version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "safemem 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -193,7 +190,7 @@ name = "base64" version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -201,78 +198,83 @@ name = "base64" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "base64" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "beacon_chain" -version = "0.1.0" +version = "0.2.0" dependencies = [ - "environment 0.1.0", - "eth1 0.1.0", - "eth2_config 0.1.0", + "environment 0.2.0", + "eth1 0.2.0", + "eth2_config 0.2.0", "eth2_hashing 0.1.1", "eth2_ssz 0.1.2", 
"eth2_ssz_derive 0.1.0", "eth2_ssz_types 0.2.0", - "exit-future 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "genesis 0.1.0", - "integer-sqrt 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "genesis 0.2.0", + "integer-sqrt 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lighthouse_metrics 0.1.0", + "lighthouse_metrics 0.2.0", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "lru 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", - "merkle_proof 0.1.0", + "merkle_proof 0.2.0", "operation_pool 0.1.0", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "proto_array_fork_choice 0.1.0", + "proto_array_fork_choice 0.2.0", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "safe_arith 0.1.0", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", "serde_yaml 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)", "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "sloggers 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "slot_clock 0.2.0", - "state_processing 0.1.0", - "store 0.1.0", + "state_processing 0.2.0", + "store 0.2.0", "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.1.22 
(registry+https://github.com/rust-lang/crates.io-index)", "tree_hash 0.1.1", - "types 0.1.0", - "websocket_server 0.1.0", + "types 0.2.0", + "websocket_server 0.2.0", ] [[package]] name = "beacon_node" -version = "0.1.0" +version = "0.2.0" dependencies = [ - "beacon_chain 0.1.0", + "beacon_chain 0.2.0", "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", - "client 0.1.0", - "ctrlc 3.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "client 0.2.0", + "ctrlc 3.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "dirs 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", - "environment 0.1.0", - "eth2-libp2p 0.1.0", - "eth2_config 0.1.0", + "environment 0.2.0", + "eth2-libp2p 0.2.0", + "eth2_config 0.2.0", "eth2_ssz 0.1.2", - "eth2_testnet_config 0.1.0", + "eth2_testnet_config 0.2.0", "exit-future 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "genesis 0.1.0", - "logging 0.1.0", - "node_test_rig 0.1.0", + "genesis 0.2.0", + "logging 0.2.0", + "node_test_rig 0.2.0", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "slog-async 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "slog-term 2.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "store 0.1.0", + "slog-async 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "slog-term 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "store 0.2.0", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", - "toml 0.5.5 
(registry+https://github.com/rust-lang/crates.io-index)", - "types 0.1.0", - "version 0.1.0", + "tokio-timer 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", + "types 0.2.0", + "version 0.2.0", ] [[package]] @@ -280,7 +282,7 @@ name = "bigint" version = "4.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "crunchy 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -289,8 +291,8 @@ name = "bincode" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -305,8 +307,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "bitvec" -version = "0.15.2" +version = "0.17.4" source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "radium 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", +] [[package]] name = "blake2" @@ -324,7 +330,7 @@ name = "blake2b_simd" version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayref 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "constant_time_eq 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -336,7 
+342,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "block-padding 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -358,17 +364,17 @@ dependencies = [ [[package]] name = "bls" -version = "0.1.0" +version = "0.2.0" dependencies = [ "eth2_hashing 0.1.1", "eth2_ssz 0.1.2", "eth2_ssz_types 0.2.0", "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "milagro_bls 1.0.0 (git+https://github.com/sigp/milagro_bls?branch=eth2.0-v0.10)", + "milagro_bls 1.0.1 (git+https://github.com/sigp/milagro_bls?branch=eth2.0-v0.10)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_hex 0.1.0", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_hex 0.2.0", "tree_hash 0.1.1", ] @@ -379,23 +385,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "bs58" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "bstr" -version = "0.2.9" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-automata 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 
(registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-automata 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "bumpalo" -version = "3.1.2" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -415,7 +421,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "byteorder" -version = "1.3.2" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -423,19 +429,11 @@ name = "bytes" version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "c2-chacha" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "c_linked_list" version = "1.1.1" @@ -452,7 +450,7 @@ dependencies = [ "ethereum-types 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "quickcheck 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)", "quickcheck_macros 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "tree_hash 0.1.1", ] @@ -476,12 +474,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "chrono" -version = "0.4.10" +version = "0.4.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "num-integer 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", "num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -498,6 +496,18 @@ dependencies = [ "vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "clap_utils" +version = "0.1.0" +dependencies = [ + "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", + "dirs 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_ssz 0.1.2", + "eth2_testnet_config 0.2.0", + "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "types 0.2.0", +] + [[package]] name = "clear_on_drop" version = "0.2.3" @@ -508,40 +518,40 @@ dependencies = [ [[package]] name = "client" -version = "0.1.0" +version = "0.2.0" dependencies = [ - "beacon_chain 0.1.0", + "beacon_chain 0.2.0", "dirs 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "environment 0.1.0", - "error-chain 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)", - "eth1 0.1.0", - "eth2-libp2p 0.1.0", - "eth2_config 0.1.0", + "environment 0.2.0", + "error-chain 0.12.2 (registry+https://github.com/rust-lang/crates.io-index)", + "eth1 0.2.0", + "eth2-libp2p 0.2.0", + "eth2_config 0.2.0", "eth2_ssz 0.1.2", - "exit-future 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "genesis 0.1.0", + "genesis 0.2.0", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lighthouse_metrics 0.1.0", - "network 0.1.0", + "lighthouse_metrics 0.2.0", + "network 0.2.0", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "prometheus 0.7.0 
(registry+https://github.com/rust-lang/crates.io-index)", "reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)", - "rest_api 0.1.0", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "rest_api 0.2.0", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "serde_yaml 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)", "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "slog-async 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "slog-async 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "sloggers 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "slot_clock 0.2.0", - "store 0.1.0", + "store 0.2.0", + "timer 0.2.0", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "toml 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", "tree_hash 0.1.1", - "types 0.1.0", + "types 0.2.0", "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "websocket_server 0.1.0", + "websocket_server 0.2.0", ] [[package]] @@ -554,7 +564,7 @@ dependencies = [ [[package]] name = "colored" -version = "1.9.2" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -564,14 +574,14 @@ dependencies = [ [[package]] name = "compare_fields" -version = "0.1.0" +version = "0.2.0" dependencies = [ - "compare_fields_derive 0.1.0", + "compare_fields_derive 0.2.0", ] [[package]] name = "compare_fields_derive" -version = "0.1.0" +version = "0.2.0" dependencies = [ "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", "syn 0.15.44 
(registry+https://github.com/rust-lang/crates.io-index)", @@ -583,25 +593,25 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "const-random" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "const-random-macro 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro-hack 0.5.11 (registry+https://github.com/rust-lang/crates.io-index)", + "const-random-macro 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro-hack 0.5.15 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "const-random-macro" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro-hack 0.5.11 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", + "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro-hack 0.5.15 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -614,7 +624,7 @@ name = "cookie" version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -624,29 +634,29 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cookie 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", - "failure 0.1.6 
(registry+https://github.com/rust-lang/crates.io-index)", + "failure 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "publicsuffix 1.5.4 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)", "try_from 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "core-foundation" -version = "0.6.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "core-foundation-sys 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "core-foundation-sys" -version = "0.6.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -659,31 +669,31 @@ dependencies = [ [[package]] name = "criterion" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", "cast 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", - 
"criterion-plot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "csv 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "criterion-plot 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "csv 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_os 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_xoshiro 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "oorandom 11.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "plotters 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", "tinytemplate 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "criterion-plot" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cast 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -701,49 +711,44 @@ version = "0.7.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-channel 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-channel 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-epoch 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "crossbeam-channel" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "crossbeam-deque" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-epoch 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "crossbeam-epoch" -version = "0.8.0" +version = "0.8.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", - "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "crossbeam-queue" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "memoffset 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", + "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -752,24 +757,15 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "crossbeam-utils" -version = "0.6.6" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "crossbeam-utils" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - 
"autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -795,22 +791,22 @@ dependencies = [ [[package]] name = "csv" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bstr 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", - "csv-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "ryu 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "bstr 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "csv-core 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "ryu 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "csv-core" -version = "0.1.6" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -824,10 +820,10 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.1.3" +version = "3.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "nix 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)", + "nix 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -845,7 +841,7 @@ name = "curve25519-dalek" version = 
"1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "clear_on_drop 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -857,7 +853,7 @@ name = "curve25519-dalek" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "subtle 2.2.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -866,7 +862,7 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.1.2" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -876,19 +872,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "deposit_contract" -version = "0.1.0" +version = "0.2.0" dependencies = [ "eth2_ssz 0.1.2", "ethabi 11.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", "tree_hash 0.1.1", - "types 0.1.0", + "types 0.2.0", ] [[package]] name = "derivative" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", @@ -904,11 +900,21 @@ dependencies 
= [ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "derive_more" +version = "0.99.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "digest" version = "0.8.1" @@ -932,8 +938,8 @@ version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_users 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_users 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -942,13 +948,13 @@ name = "dns-parser" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "dtoa" -version = "0.4.4" +version = "0.4.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -964,25 +970,25 @@ dependencies = [ [[package]] name = "ef_tests" -version = "0.1.0" +version = "0.2.0" dependencies = [ - "bls 0.1.0", + "bls 0.2.0", "cached_tree_hash 0.1.0", - "compare_fields 0.1.0", + "compare_fields 0.2.0", "eth2_ssz 0.1.2", "eth2_ssz_derive 0.1.0", "ethereum-types 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "serde_repr 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "serde_yaml 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)", - "state_processing 0.1.0", - "swap_or_not_shuffle 0.1.0", + "state_processing 0.2.0", + "swap_or_not_shuffle 0.2.0", "tree_hash 0.1.1", "tree_hash_derive 0.2.0", - "types 0.1.0", + "types 0.2.0", "walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1001,20 +1007,21 @@ dependencies = [ [[package]] name = "enr" -version = "0.1.0" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +version = "0.1.0-alpha.3" +source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ - "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", - "bs58 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", - "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", 
- "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libsecp256k1 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "base64 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bs58 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "ed25519-dalek 1.0.0-pre.3 (registry+https://github.com/rust-lang/crates.io-index)", + "hex 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libsecp256k1 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - "rlp 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "rlp 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "sha3 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", + "zeroize 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1025,7 +1032,7 @@ dependencies = [ "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", "humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1037,124 +1044,126 @@ dependencies = [ "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", "humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 
(registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "environment" -version = "0.1.0" +version = "0.2.0" dependencies = [ - "beacon_node 0.1.0", + "beacon_node 0.2.0", "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ctrlc 3.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "ctrlc 3.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "eth2_config 0.1.0", - "eth2_testnet_config 0.1.0", + "eth2_config 0.2.0", + "eth2_testnet_config 0.2.0", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "logging 0.1.0", + "logging 0.2.0", "parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "slog-async 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "slog-async 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "slog-json 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "slog-term 2.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "slog-term 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "sloggers 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "types 0.1.0", + "types 0.2.0", ] [[package]] name = "error-chain" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "backtrace 0.3.41 (registry+https://github.com/rust-lang/crates.io-index)", - "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "backtrace 0.3.46 
(registry+https://github.com/rust-lang/crates.io-index)", + "version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "eth1" -version = "0.1.0" +version = "0.2.0" dependencies = [ - "environment 0.1.0", - "eth1_test_rig 0.1.0", + "environment 0.2.0", + "eth1_test_rig 0.2.0", "eth2_hashing 0.1.1", "eth2_ssz 0.1.2", "eth2_ssz_derive 0.1.0", - "exit-future 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "libflate 0.1.27 (registry+https://github.com/rust-lang/crates.io-index)", - "lighthouse_metrics 0.1.0", - "merkle_proof 0.1.0", + "lighthouse_metrics 0.2.0", + "merkle_proof 0.2.0", "parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", "reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "state_processing 0.1.0", + "state_processing 0.2.0", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "toml 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", "tree_hash 0.1.1", - "types 0.1.0", - "web3 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "types 0.2.0", + "web3 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "eth1_test_rig" -version = "0.1.0" +version = "0.2.0" dependencies = [ - "deposit_contract 0.1.0", + 
"deposit_contract 0.2.0", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "types 0.1.0", - "web3 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "types 0.2.0", + "web3 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "eth2-libp2p" -version = "0.1.0" +version = "0.2.0" dependencies = [ "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", "dirs 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "enr 0.1.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "error-chain 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)", + "error-chain 0.12.2 (registry+https://github.com/rust-lang/crates.io-index)", "eth2_ssz 0.1.2", "eth2_ssz_derive 0.1.0", + "eth2_ssz_types 0.2.0", "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "hashmap_delay 0.2.0", "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "lighthouse_metrics 0.1.0", + "libp2p 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "lighthouse_metrics 0.2.0", "lru 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 
(registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "sha2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "slog-async 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "slog-async 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "slog-stdlog 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "slog-term 2.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "slog-term 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "snap 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io-timeout 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "types 0.1.0", + "types 0.2.0", "unsigned-varint 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "version 0.1.0", + "version 0.2.0", + "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "eth2_config" -version = "0.1.0" +version = "0.2.0" dependencies = [ - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "toml 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", - "types 0.1.0", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", + "types 0.2.0", ] [[package]] @@ -1162,24 +1171,24 @@ name = "eth2_hashing" version = 
"0.1.1" dependencies = [ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", + "ring 0.16.12 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "sha2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-test 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-test 0.3.10 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "eth2_interop_keypairs" -version = "0.1.0" +version = "0.2.0" dependencies = [ "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", "eth2_hashing 0.1.1", "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "milagro_bls 1.0.0 (git+https://github.com/sigp/milagro_bls?branch=eth2.0-v0.10)", - "num-bigint 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "milagro_bls 1.0.1 (git+https://github.com/sigp/milagro_bls?branch=eth2.0-v0.10)", + "num-bigint 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "serde_yaml 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1189,7 +1198,7 @@ version = "0.1.2" dependencies = [ "eth2_ssz_derive 0.1.0", "ethereum-types 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1205,26 +1214,26 @@ name = 
"eth2_ssz_types" version = "0.2.0" dependencies = [ "eth2_ssz 0.1.2", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_hex 0.1.0", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_hex 0.2.0", "serde_yaml 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)", "tree_hash 0.1.1", "tree_hash_derive 0.2.0", - "typenum 1.11.2 (registry+https://github.com/rust-lang/crates.io-index)", + "typenum 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "eth2_testnet_config" -version = "0.1.0" +version = "0.2.0" dependencies = [ - "eth2-libp2p 0.1.0", + "eth2-libp2p 0.2.0", "eth2_ssz 0.1.2", "reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "serde_yaml 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)", "tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", - "types 0.1.0", + "types 0.2.0", ] [[package]] @@ -1232,12 +1241,26 @@ name = "ethabi" version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "error-chain 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)", + "error-chain 0.12.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethereum-types 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 
(registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", + "tiny-keccak 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ethabi" +version = "9.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "error-chain 0.12.2 (registry+https://github.com/rust-lang/crates.io-index)", + "ethereum-types 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", "tiny-keccak 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1248,8 +1271,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "ethereum-types 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", "tiny-keccak 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "uint 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1315,21 +1338,21 @@ dependencies = [ [[package]] name = "failure" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "backtrace 0.3.41 (registry+https://github.com/rust-lang/crates.io-index)", - "failure_derive 0.1.6 
(registry+https://github.com/rust-lang/crates.io-index)", + "backtrace 0.3.46 (registry+https://github.com/rust-lang/crates.io-index)", + "failure_derive 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "failure_derive" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", "synstructure 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1343,7 +1366,7 @@ name = "fixed-hash" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "heapsize 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1355,7 +1378,7 @@ name = "fixed-hash" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "static_assertions 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1363,16 +1386,16 @@ dependencies = [ [[package]] 
name = "flate2" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "libz-sys 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "miniz_oxide 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "miniz_oxide 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1423,7 +1446,7 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1436,31 +1459,31 @@ name = "generic-array" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "typenum 1.11.2 (registry+https://github.com/rust-lang/crates.io-index)", + "typenum 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "genesis" -version = "0.1.0" +version = "0.2.0" dependencies = [ - "environment 0.1.0", - "eth1 0.1.0", - "eth1_test_rig 0.1.0", + "environment 0.2.0", + "eth1 0.2.0", + "eth1_test_rig 0.2.0", "eth2_hashing 0.1.1", "eth2_ssz 0.1.2", "exit-future 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "int_to_bytes 0.1.0", 
- "merkle_proof 0.1.0", + "int_to_bytes 0.2.0", + "merkle_proof 0.2.0", "parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "state_processing 0.1.0", + "state_processing 0.2.0", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", "tree_hash 0.1.1", - "types 0.1.0", + "types 0.2.0", ] [[package]] @@ -1470,7 +1493,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "c_linked_list 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "get_if_addrs-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1480,7 +1503,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "gcc 0.3.55 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1489,7 +1512,7 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "wasi 
0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1498,16 +1521,16 @@ name = "h2" version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", - "indexmap 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "indexmap 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "string 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1524,6 +1547,14 @@ dependencies = [ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "hashmap_delay" +version = "0.2.0" +dependencies = [ + "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "heapsize" version = "0.4.2" @@ -1532,20 +1563,12 @@ dependencies = [ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "heck" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "unicode-segmentation 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "hermit-abi" -version = "0.1.6" +version = 
"0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1555,7 +1578,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "hex" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -1593,7 +1616,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1630,8 +1653,8 @@ dependencies = [ "language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", "mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)", "traitobject 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1651,19 +1674,19 @@ dependencies = [ "http-body 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + 
"itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-buf 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-threadpool 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-tcp 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-threadpool 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", "want 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1675,8 +1698,8 @@ dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", - "native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 
(registry+https://github.com/rust-lang/crates.io-index)", + "native-tls 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1686,7 +1709,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-normalization 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-normalization 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1696,7 +1719,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-normalization 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-normalization 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1712,7 +1735,7 @@ name = "impl-codec" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1720,7 +1743,7 @@ name = "impl-rlp" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "rlp 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "rlp 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1728,7 +1751,7 @@ name = "impl-serde" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 
(registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1736,20 +1759,20 @@ name = "impl-serde" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "indexmap" -version = "1.3.0" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "int_to_bytes" -version = "0.1.0" +version = "0.2.0" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1758,7 +1781,7 @@ dependencies = [ [[package]] name = "integer-sqrt" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -1766,12 +1789,12 @@ name = "iovec" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "ipnet" -version = "2.1.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -1784,15 +1807,15 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "js-sys" -version = "0.3.35" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", 
] [[package]] @@ -1802,9 +1825,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "jsonrpc-core" +version = "14.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1838,26 +1873,28 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "lcli" -version = "0.1.0" +version = "0.2.0" dependencies = [ "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", - "deposit_contract 0.1.0", + "clap_utils 0.1.0", + "deposit_contract 0.2.0", "dirs 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "environment 0.1.0", - "eth1_test_rig 0.1.0", + "environment 0.2.0", + "eth1_test_rig 0.2.0", + "eth2-libp2p 0.2.0", "eth2_ssz 0.1.2", - "eth2_testnet_config 0.1.0", + "eth2_testnet_config 0.2.0", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "genesis 0.1.0", + "genesis 0.2.0", "hex 0.3.2 
(registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "serde_yaml 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)", - "simple_logger 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "state_processing 0.1.0", + "simple_logger 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "state_processing 0.2.0", "tree_hash 0.1.1", - "types 0.1.0", + "types 0.2.0", "web3 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1868,7 +1905,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "db-key 0.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "leveldb-sys 2.0.5 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1876,12 +1913,12 @@ name = "leveldb-sys" version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "libc" -version = "0.2.66" +version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -1898,71 +1935,70 @@ dependencies = [ [[package]] name = "libp2p" version = "0.13.2" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = 
"git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "enr 0.1.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-core-derive 0.13.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-deflate 0.5.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-discv5 0.1.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-dns 0.13.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-floodsub 0.13.1 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-gossipsub 0.1.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-identify 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-kad 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-mdns 0.13.1 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-mplex 0.13.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-noise 0.11.1 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-ping 0.13.1 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-plaintext 0.13.1 
(git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-secio 0.13.1 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-tcp 0.13.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-uds 0.13.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-wasm-ext 0.6.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-websocket 0.13.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-yamux 0.13.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "parity-multihash 0.2.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-core-derive 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-deflate 0.5.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-discv5 0.1.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-dns 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-floodsub 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-gossipsub 0.1.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-identify 0.13.2 
(git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-kad 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-mdns 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-mplex 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-noise 0.11.1 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-ping 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-plaintext 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-secio 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-tcp 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-uds 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-wasm-ext 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-websocket 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-yamux 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "parity-multihash 0.2.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-codec 0.1.1 
(registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "wasm-timer 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "libp2p-core" version = "0.13.2" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ "asn1_der 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", - "bs58 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bs58 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "ed25519-dalek 1.0.0-pre.3 (registry+https://github.com/rust-lang/crates.io-index)", - "failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "failure 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libsecp256k1 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "libsecp256k1 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "multistream-select 0.6.1 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "parity-multiaddr 0.6.0 
(git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "parity-multihash 0.2.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "multistream-select 0.6.1 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "parity-multihash 0.2.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "protobuf 2.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", - "rw-stream-sink 0.1.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "ring 0.16.12 (registry+https://github.com/rust-lang/crates.io-index)", + "rw-stream-sink 0.1.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "sha2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "unsigned-varint 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1973,50 +2009,50 @@ dependencies = [ [[package]] name = "libp2p-core-derive" 
version = "0.13.0" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "libp2p-deflate" version = "0.5.0" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ - "flate2 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)", + "flate2 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "libp2p-discv5" version = "0.1.0" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ "arrayvec 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "bigint 4.4.1 
(registry+https://github.com/rust-lang/crates.io-index)", "digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "enr 0.1.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "enr 0.1.0-alpha.3 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "hex 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "hkdf 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libsecp256k1 0.3.1 (git+https://github.com/SigP/libsecp256k1?branch=ecdh_generalise)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libsecp256k1 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl 0.10.26 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "parity-multihash 0.2.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", - "rlp 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl 0.10.29 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-multiaddr 0.6.0 
(git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "parity-multihash 0.2.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", + "rlp 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "sha2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-udp 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-udp 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "zeroize 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2024,10 +2060,10 @@ dependencies = [ [[package]] name = "libp2p-dns" version = "0.13.0" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-dns-unofficial 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2035,61 +2071,61 @@ dependencies = [ [[package]] 
name = "libp2p-floodsub" version = "0.13.1" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ - "bs58 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bs58 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "cuckoofilter 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "protobuf 2.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "libp2p-gossipsub" version = "0.1.0" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ "base64 0.10.1 
(registry+https://github.com/rust-lang/crates.io-index)", "bs58 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "lru 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", "protobuf 2.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "sha2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", "unsigned-varint 0.2.3 
(registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "libp2p-identify" version = "0.13.2" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "protobuf 2.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "unsigned-varint 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "wasm-timer 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2097,24 +2133,24 @@ dependencies = [ [[package]] name = "libp2p-kad" 
version = "0.13.2" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ "arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)", "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "parity-multihash 0.2.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "parity-multihash 0.2.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "protobuf 2.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "sha2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-codec 0.1.1 
(registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "uint 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", "unsigned-varint 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2124,21 +2160,21 @@ dependencies = [ [[package]] name = "libp2p-mdns" version = "0.13.1" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ - "data-encoding 2.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "data-encoding 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "dns-parser 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "parity-multiaddr 0.6.0 
(git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-udp 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-udp 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "wasm-timer 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2146,35 +2182,35 @@ dependencies = [ [[package]] name = "libp2p-mplex" version = "0.13.0" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-codec 
0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "unsigned-varint 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "libp2p-noise" version = "0.11.1" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "curve25519-dalek 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "protobuf 2.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", + "ring 0.16.12 (registry+https://github.com/rust-lang/crates.io-index)", "snow 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "x25519-dalek 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "zeroize 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2182,16 +2218,16 @@ dependencies = [ [[package]] name = "libp2p-ping" version = "0.13.1" -source = 
"git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "wasm-timer 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2199,56 +2235,56 @@ dependencies = [ [[package]] name = "libp2p-plaintext" version = "0.13.1" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ "bytes 0.4.12 
(registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "protobuf 2.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rw-stream-sink 0.1.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "rw-stream-sink 0.1.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "libp2p-secio" version = "0.13.1" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ "aes-ctr 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "ctr 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "hmac 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", - "js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", + "js-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + 
"libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "parity-send-wrapper 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "protobuf 2.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", - "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", - "rw-stream-sink 0.1.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "ring 0.16.12 (registry+https://github.com/rust-lang/crates.io-index)", + "rw-stream-sink 0.1.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "sha2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "twofish 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", "wasm-bindgen-futures 0.3.27 (registry+https://github.com/rust-lang/crates.io-index)", - "web-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", + "web-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "libp2p-swarm" version = "0.3.0" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = 
"git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "wasm-timer 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2256,57 +2292,57 @@ dependencies = [ [[package]] name = "libp2p-tcp" version = "0.13.0" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "get_if_addrs 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", - "ipnet 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "ipnet 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-tcp 0.1.3 
(registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-tcp 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "libp2p-uds" version = "0.13.0" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-uds 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-uds 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "libp2p-wasm-ext" version = "0.6.0" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "js-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", + "libp2p-core 0.13.2 
(git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "parity-send-wrapper 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", "wasm-bindgen-futures 0.3.27 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "libp2p-websocket" version = "0.13.0" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "rw-stream-sink 0.1.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "rw-stream-sink 0.1.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "soketto 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-rustls 
0.10.3 (registry+https://github.com/rust-lang/crates.io-index)", "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "webpki-roots 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2315,42 +2351,28 @@ dependencies = [ [[package]] name = "libp2p-yamux" version = "0.13.0" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "yamux 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "libsecp256k1" -version = "0.3.1" -source = "git+https://github.com/SigP/libsecp256k1?branch=ecdh_generalise#5858db8d1b280417f5866582df2cd0c63983d928" -dependencies = [ - "arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "hmac-drbg 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", - "sha2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "subtle 2.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "typenum 1.11.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "libsecp256k1" -version = "0.3.4" +version = "0.3.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayref 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "crunchy 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "hmac-drbg 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "sha2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "subtle 2.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "typenum 1.11.2 (registry+https://github.com/rust-lang/crates.io-index)", + "typenum 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2359,34 +2381,35 @@ version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "pkg-config 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)", "vcpkg 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "lighthouse" -version = "0.1.1" +version = "0.2.0" dependencies = [ "account_manager 0.0.1", - "beacon_node 0.1.0", + "beacon_node 0.2.0", "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", + "clap_utils 0.1.0", "env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "environment 0.1.0", + "environment 0.2.0", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "logging 0.1.0", + "logging 0.2.0", "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "slog-async 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "slog-term 2.4.2 
(registry+https://github.com/rust-lang/crates.io-index)", + "slog-async 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "slog-term 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "sloggers 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "types 0.1.0", - "validator_client 0.1.0", + "types 0.2.0", + "validator_client 0.2.0", ] [[package]] name = "lighthouse_metrics" -version = "0.1.0" +version = "0.2.0" dependencies = [ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "prometheus 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2402,7 +2425,7 @@ name = "lock_api" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "owning_ref 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "owning_ref 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2411,15 +2434,15 @@ name = "lock_api" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "lock_api" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2440,12 +2463,12 @@ dependencies = [ [[package]] name = "logging" -version = "0.1.0" +version = "0.2.0" dependencies = [ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lighthouse_metrics 0.1.0", + "lighthouse_metrics 0.2.0", "slog 2.5.2 
(registry+https://github.com/rust-lang/crates.io-index)", - "slog-term 2.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "slog-term 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2476,23 +2499,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "memchr" -version = "2.3.0" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] name = "memoffset" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "merkle_proof" -version = "0.1.0" +version = "0.2.0" dependencies = [ "eth2_hashing 0.1.1", "ethereum-types 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2504,14 +2524,13 @@ dependencies = [ [[package]] name = "milagro_bls" -version = "1.0.0" -source = "git+https://github.com/sigp/milagro_bls?branch=eth2.0-v0.10#52fab41dd086951ade699894b690e95ede1efafd" +version = "1.0.1" +source = "git+https://github.com/sigp/milagro_bls?branch=eth2.0-v0.10#38c6c33925b24c9319a1febfb621ff9bbf6d49f7" dependencies = [ "amcl 0.2.0 (git+https://github.com/sigp/milagro_bls?branch=eth2.0-v0.10)", - "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "hex 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - "yaml-rust 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", "zeroize 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2530,7 +2549,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" 
[[package]] name = "mime_guess" -version = "2.0.1" +version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2539,7 +2558,7 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "adler32 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2555,7 +2574,7 @@ dependencies = [ "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2580,7 +2599,7 @@ version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2598,30 +2617,30 @@ dependencies = [ [[package]] name = "multistream-select" version = "0.6.1" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", 
"futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "unsigned-varint 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "native-tls" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl 0.10.26 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl 0.10.29 (registry+https://github.com/rust-lang/crates.io-index)", "openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.53 (registry+https://github.com/rust-lang/crates.io-index)", - "schannel 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", - "security-framework 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "security-framework-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.55 (registry+https://github.com/rust-lang/crates.io-index)", + "schannel 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", + "security-framework 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "security-framework-sys 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2631,63 +2650,66 @@ version = "0.2.33" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ 
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "network" -version = "0.1.0" +version = "0.2.0" dependencies = [ - "beacon_chain 0.1.0", - "error-chain 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)", - "eth2-libp2p 0.1.0", + "beacon_chain 0.2.0", + "error-chain 0.12.2 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2-libp2p 0.2.0", "eth2_ssz 0.1.2", "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "genesis 0.1.0", + "genesis 0.2.0", + "hashmap_delay 0.2.0", "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - "rlp 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "rest_types 0.2.0", + "rlp 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "sloggers 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "store 0.1.0", + "slot_clock 0.2.0", + "smallvec 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "store 0.2.0", "tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", "tree_hash 0.1.1", - "types 0.1.0", + "types 0.2.0", ] [[package]] name = "nix" -version = "0.14.1" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 1.2.1 
(registry+https://github.com/rust-lang/crates.io-index)", "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)", "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "node_test_rig" -version = "0.1.0" +version = "0.2.0" dependencies = [ - "beacon_node 0.1.0", - "environment 0.1.0", - "eth2_config 0.1.0", + "beacon_node 0.2.0", + "environment 0.2.0", + "eth2_config 0.2.0", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "genesis 0.1.0", - "remote_beacon_node 0.1.0", + "genesis 0.2.0", + "remote_beacon_node 0.2.0", "reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", - "types 0.1.0", + "types 0.2.0", "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "validator_client 0.1.0", + "validator_client 0.2.0", ] [[package]] @@ -2697,21 +2719,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "nohash-hasher" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "nom" -version = "4.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "num-bigint" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "autocfg 1.0.0 
(registry+https://github.com/rust-lang/crates.io-index)", @@ -2738,13 +2751,18 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.11.1" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "hermit-abi 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "hermit-abi 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "oorandom" +version = "11.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "opaque-debug" version = "0.2.3" @@ -2752,15 +2770,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "openssl" -version = "0.10.26" +version = "0.10.29" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.53 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.55 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2770,12 +2788,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "openssl-sys" -version = "0.9.53" +version = "0.9.55" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "cc 1.0.50 
(registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "pkg-config 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)", "vcpkg 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2786,19 +2804,19 @@ version = "0.1.0" dependencies = [ "eth2_ssz 0.1.2", "eth2_ssz_derive 0.1.0", - "int_to_bytes 0.1.0", + "int_to_bytes 0.2.0", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "state_processing 0.1.0", - "store 0.1.0", - "types 0.1.0", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "state_processing 0.2.0", + "store 0.2.0", + "types 0.2.0", ] [[package]] name = "owning_ref" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2810,22 +2828,22 @@ version = "3.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "arrayvec 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "parity-multiaddr" version = "0.6.0" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = 
"git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ - "arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "bs58 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayref 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "bs58 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "data-encoding 2.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-multihash 0.2.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)", + "data-encoding 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-multihash 0.2.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)", "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "unsigned-varint 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2833,7 +2851,7 @@ dependencies = [ [[package]] name = "parity-multihash" version = "0.2.0" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ "blake2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2846,13 +2864,13 
@@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "1.1.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "bitvec 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", + "bitvec 0.17.4 (registry+https://github.com/rust-lang/crates.io-index)", "byte-slice-cast 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2884,17 +2902,26 @@ name = "parking_lot" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "lock_api 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "lock_api 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot_core 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "parking_lot" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lock_api 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot_core 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "parking_lot_core" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2908,7 +2935,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2923,13 +2950,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "parking_lot_core" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "percent-encoding" version = "1.0.1" @@ -2945,6 +2985,17 @@ name = 
"pkg-config" version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "plotters" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "js-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", + "web-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "ppv-lite86" version = "0.2.6" @@ -2976,13 +3027,8 @@ dependencies = [ [[package]] name = "proc-macro-hack" -version = "0.5.11" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] name = "proc-macro2" @@ -2994,7 +3040,7 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.7" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3015,16 +3061,16 @@ dependencies = [ [[package]] name = "proto_array_fork_choice" -version = "0.1.0" +version = "0.2.0" dependencies = [ "eth2_ssz 0.1.2", "eth2_ssz_derive 0.1.0", "itertools 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "serde_yaml 0.8.11 
(registry+https://github.com/rust-lang/crates.io-index)", - "types 0.1.0", + "types 0.2.0", ] [[package]] @@ -3037,10 +3083,10 @@ name = "publicsuffix" version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "error-chain 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)", + "error-chain 0.12.2 (registry+https://github.com/rust-lang/crates.io-index)", "idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3080,18 +3126,23 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "radium" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "rand" version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3101,7 +3152,7 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.3.1 
(registry+https://github.com/rust-lang/crates.io-index)", "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3114,7 +3165,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3125,7 +3176,7 @@ version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3143,8 +3194,8 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3160,10 +3211,10 @@ 
dependencies = [ [[package]] name = "rand_chacha" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3217,7 +3268,7 @@ name = "rand_jitter" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3229,21 +3280,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "rand_os" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "rand_pcg" version = "0.1.2" @@ -3269,20 +3311,12 @@ dependencies = [ "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "rand_xoshiro" -version = "0.3.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "rayon" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)", "rayon-core 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3292,11 +3326,11 @@ name = "rayon-core" version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -3314,54 +3348,53 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "redox_users" -version = "0.3.1" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "getrandom 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", - "rust-argon2 0.5.1 
(registry+https://github.com/rust-lang/crates.io-index)", + "rust-argon2 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "regex" -version = "1.3.3" +version = "1.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-syntax 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", + "aho-corasick 0.7.10 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.6.17 (registry+https://github.com/rust-lang/crates.io-index)", "thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "regex-automata" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "regex-syntax" -version = "0.6.13" +version = "0.6.17" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "remote_beacon_node" -version = "0.1.0" +version = "0.2.0" dependencies = [ - "eth2_config 0.1.0", + "eth2_config 0.2.0", "eth2_ssz 0.1.2", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "operation_pool 0.1.0", - "proto_array_fork_choice 0.1.0", + "proto_array_fork_choice 0.2.0", "reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)", - "rest_api 0.1.0", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", - "types 0.1.0", + "rest_types 0.2.0", + "serde 1.0.106 
(registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", + "types 0.2.0", "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3383,24 +3416,24 @@ dependencies = [ "cookie 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "cookie_store 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "encoding_rs 0.8.22 (registry+https://github.com/rust-lang/crates.io-index)", - "flate2 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)", + "flate2 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", "hyper-tls 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", - "mime_guess 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "mime_guess 2.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "native-tls 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", "serde_urlencoded 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - 
"tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-threadpool 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-threadpool 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "uuid 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", "winreg 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3408,53 +3441,69 @@ dependencies = [ [[package]] name = "rest_api" -version = "0.1.0" +version = "0.2.0" dependencies = [ - "beacon_chain 0.1.0", - "bls 0.1.0", - "eth2-libp2p 0.1.0", - "eth2_config 0.1.0", + "beacon_chain 0.2.0", + "bls 0.2.0", + "eth2-libp2p 0.2.0", + "eth2_config 0.2.0", "eth2_ssz 0.1.2", "eth2_ssz_derive 0.1.0", - "exit-future 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lighthouse_metrics 0.1.0", - "network 0.1.0", - "node_test_rig 0.1.0", + "lighthouse_metrics 0.2.0", + "network 0.2.0", + "node_test_rig 0.2.0", "operation_pool 0.1.0", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "remote_beacon_node 0.1.0", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - 
"serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "remote_beacon_node 0.2.0", + "rest_types 0.2.0", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", "serde_yaml 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)", "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "slog-async 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "slog-term 2.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "slog-async 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "slog-term 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "slot_clock 0.2.0", - "state_processing 0.1.0", - "store 0.1.0", + "state_processing 0.2.0", + "store 0.2.0", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", "tree_hash 0.1.1", - "types 0.1.0", + "types 0.2.0", "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "version 0.1.0", + "version 0.2.0", +] + +[[package]] +name = "rest_types" +version = "0.2.0" +dependencies = [ + "bls 0.2.0", + "eth2_hashing 0.1.1", + "eth2_ssz 0.1.2", + "eth2_ssz_derive 0.1.0", + "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "state_processing 0.2.0", + "tree_hash 0.1.1", + "types 0.2.0", ] [[package]] name = "ring" -version = "0.16.9" +version = "0.16.12" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "spin 0.5.2 
(registry+https://github.com/rust-lang/crates.io-index)", "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "web-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", + "web-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3465,7 +3514,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "rlp" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3473,12 +3522,13 @@ dependencies = [ [[package]] name = "rust-argon2" -version = "0.5.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", "blake2b_simd 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "constant_time_eq 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -3506,24 +3556,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", + "ring 0.16.12 (registry+https://github.com/rust-lang/crates.io-index)", "sct 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "webpki 0.21.0 (registry+https://github.com/rust-lang/crates.io-index)", + "webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = 
"rw-stream-sink" version = "0.1.2" -source = "git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5#49c95c4c4242f1c9f08558a3daac5e9ecac290d5" +source = "git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3#37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "ryu" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -3540,12 +3590,12 @@ name = "same-file" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "schannel" -version = "0.1.16" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3569,7 +3619,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "scopeguard" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -3577,27 +3627,29 @@ name = "sct" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", + "ring 0.16.12 (registry+https://github.com/rust-lang/crates.io-index)", "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "security-framework" -version = "0.3.4" +version = "0.4.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "core-foundation 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", - "core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "security-framework-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "core-foundation 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "core-foundation-sys 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", + "security-framework-sys 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "security-framework-sys" -version = "0.3.3" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "core-foundation-sys 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -3620,38 +3672,38 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "serde" -version = "1.0.104" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "serde_derive" -version = "1.0.104" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.13 
(registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "serde_hex" -version = "0.1.0" +version = "0.2.0" dependencies = [ "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "serde_json" -version = "1.0.44" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "ryu 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "ryu 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -3659,9 +3711,9 @@ name = "serde_repr" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -3669,9 +3721,9 @@ name = "serde_urlencoded" version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "dtoa 0.4.4 
(registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "dtoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3680,9 +3732,9 @@ name = "serde_yaml" version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "dtoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "dtoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "linked-hash-map 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "yaml-rust 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3727,29 +3779,29 @@ dependencies = [ [[package]] name = "simple_logger" -version = "1.4.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", - "chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", - "colored 1.9.2 (registry+https://github.com/rust-lang/crates.io-index)", + "chrono 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", + "colored 1.9.3 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "simulator" -version = "0.1.0" +version = "0.2.0" dependencies = [ "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.7.1 
(registry+https://github.com/rust-lang/crates.io-index)", - "eth1_test_rig 0.1.0", + "eth1_test_rig 0.2.0", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "node_test_rig 0.1.0", + "node_test_rig 0.2.0", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "types 0.1.0", - "validator_client 0.1.0", + "types 0.2.0", + "validator_client 0.2.0", ] [[package]] @@ -3769,12 +3821,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "slog-async" -version = "2.3.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ + "crossbeam-channel 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "take_mut 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -3782,9 +3835,9 @@ name = "slog-json" version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "chrono 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3793,7 +3846,7 @@ name = "slog-kvfilter" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "regex 1.3.3 
(registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3802,7 +3855,7 @@ name = "slog-scope" version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "arc-swap 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "arc-swap 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3831,14 +3884,14 @@ dependencies = [ [[package]] name = "slog-term" -version = "2.4.2" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", - "chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", + "chrono 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "term 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", - "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -3846,17 +3899,17 @@ name = "sloggers" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", + "chrono 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", "libflate 0.1.27 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.3.7 
(registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "slog-async 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "slog-async 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "slog-kvfilter 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "slog-scope 4.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "slog-stdlog 3.0.5 (registry+https://github.com/rust-lang/crates.io-index)", - "slog-term 2.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "slog-term 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "trackable 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3865,9 +3918,9 @@ name = "slot_clock" version = "0.2.0" dependencies = [ "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lighthouse_metrics 0.1.0", + "lighthouse_metrics 0.2.0", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "types 0.1.0", + "types 0.2.0", ] [[package]] @@ -3880,7 +3933,12 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.2.0" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "snap" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -3888,9 +3946,9 @@ name = "snow" version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayref 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", + "ring 0.16.12 
(registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "subtle 2.2.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3902,7 +3960,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "flate2 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)", + "flate2 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "http 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", "httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3910,15 +3968,10 @@ dependencies = [ "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "sha1 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "sourcefile" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "spin" version = "0.5.2" @@ -3931,30 +3984,30 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "state_processing" -version = "0.1.0" +version = "0.2.0" dependencies = [ - "beacon_chain 0.1.0", - "bls 0.1.0", - "criterion 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "beacon_chain 0.2.0", + "bls 0.2.0", + "criterion 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.7.1 
(registry+https://github.com/rust-lang/crates.io-index)", "eth2_hashing 0.1.1", "eth2_ssz 0.1.2", "eth2_ssz_types 0.2.0", - "int_to_bytes 0.1.0", - "integer-sqrt 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "int_to_bytes 0.2.0", + "integer-sqrt 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "merkle_proof 0.1.0", + "merkle_proof 0.2.0", "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "safe_arith 0.1.0", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "serde_yaml 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)", - "store 0.1.0", + "store 0.2.0", "tree_hash 0.1.1", "tree_hash_derive 0.2.0", - "types 0.1.0", + "types 0.2.0", ] [[package]] @@ -3969,27 +4022,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "store" -version = "0.1.0" +version = "0.2.0" dependencies = [ - "criterion 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "criterion 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "db-key 0.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "eth2_ssz 0.1.2", "eth2_ssz_derive 0.1.0", "itertools 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "leveldb 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)", - "lighthouse_metrics 0.1.0", + "lighthouse_metrics 0.2.0", "lru 0.4.3 
(registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "sloggers 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "state_processing 0.1.0", + "state_processing 0.2.0", "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "tree_hash 0.1.1", - "types 0.1.0", + "types 0.2.0", ] [[package]] @@ -4025,9 +4078,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "swap_or_not_shuffle" -version = "0.1.0" +version = "0.2.0" dependencies = [ - "criterion 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "criterion 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "eth2_hashing 0.1.1", "ethereum-types 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4046,11 +4099,11 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.13" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -4059,9 +4112,9 @@ name = "synstructure" version = 
"0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -4090,7 +4143,7 @@ version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", "remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4111,12 +4164,12 @@ name = "termcolor" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "test_random_derive" -version = "0.1.0" +version = "0.2.0" dependencies = [ "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4130,14 +4183,6 @@ dependencies = [ "unicode-width 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "thread_local" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - 
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "thread_local" version = "1.0.1" @@ -4148,14 +4193,26 @@ dependencies = [ [[package]] name = "time" -version = "0.1.42" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "timer" +version = "0.2.0" +dependencies = [ + "beacon_chain 0.2.0", + "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)", + "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", + "slot_clock 0.2.0", + "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", + "types 0.2.0", +] + [[package]] name = "tiny-keccak" version = "1.5.0" @@ -4169,8 +4226,8 @@ name = "tinytemplate" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -4181,19 +4238,19 @@ dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - 
"tokio-current-thread 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-fs 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-sync 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-threadpool 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-udp 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-uds 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-current-thread 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-fs 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-sync 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-tcp 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-threadpool 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-udp 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-uds 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -4208,12 +4265,12 @@ dependencies = [ [[package]] name = 
"tokio-codec" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -4228,19 +4285,19 @@ dependencies = [ "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", "scoped-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-current-thread" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -4256,26 +4313,26 @@ dependencies = [ [[package]] name = "tokio-executor" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-utils 0.6.6 
(registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-fs" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-threadpool 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-threadpool 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-io" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4290,26 +4347,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-reactor" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 
(registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-sync 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-sync 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -4321,13 +4378,13 @@ dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "rustls 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "webpki 0.21.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-sync" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4336,31 +4393,31 @@ dependencies = [ [[package]] name = "tokio-tcp" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.12 
(registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-threadpool" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -4374,13 +4431,13 @@ dependencies = [ [[package]] name = "tokio-timer" -version = "0.2.12" +version = "0.2.13" source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -4389,22 +4446,22 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "native-tls 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-udp" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] 
@@ -4415,37 +4472,37 @@ dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", "mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tokio-uds" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)", "mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.12 
(registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "toml" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -4453,16 +4510,16 @@ name = "trackable" version = "0.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "trackable_derive 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "trackable_derive 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "trackable_derive" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -4474,14 +4531,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" name = "tree_hash" version = "0.1.1" dependencies = [ - "criterion 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "criterion 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "eth2_hashing 0.1.1", "ethereum-types 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "tree_hash_derive 0.2.0", - "types 0.1.0", + "types 0.2.0", ] [[package]] @@ -4511,7 +4568,7 @@ version = "0.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "block-cipher-trait 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "opaque-debug 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -4522,43 +4579,43 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "typenum" -version = "1.11.2" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "types" -version = "0.1.0" +version = "0.2.0" dependencies = [ - "bls 0.1.0", + "bls 0.2.0", "cached_tree_hash 0.1.0", - "compare_fields 0.1.0", - "compare_fields_derive 0.1.0", - "criterion 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "derivative 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "compare_fields 0.2.0", + "compare_fields_derive 0.2.0", + "criterion 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "derivative 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "dirs 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", "eth2_hashing 0.1.1", - "eth2_interop_keypairs 0.1.0", + "eth2_interop_keypairs 0.2.0", "eth2_ssz 0.1.2", "eth2_ssz_derive 0.1.0", "eth2_ssz_types 0.2.0", "ethereum-types 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "int_to_bytes 0.1.0", + "int_to_bytes 0.2.0", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "merkle_proof 0.1.0", + "merkle_proof 0.2.0", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "rand_xorshift 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 1.3.0 
(registry+https://github.com/rust-lang/crates.io-index)", "safe_arith 0.1.0", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", "serde_yaml 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)", "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "swap_or_not_shuffle 0.1.0", + "swap_or_not_shuffle 0.2.0", "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "test_random_derive 0.1.0", + "test_random_derive 0.2.0", "tree_hash 0.1.1", "tree_hash_derive 0.2.0", ] @@ -4568,7 +4625,7 @@ name = "uint" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "crunchy 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "heapsize 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4579,7 +4636,7 @@ name = "uint" version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "crunchy 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "static_assertions 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4611,17 +4668,12 @@ 
dependencies = [ [[package]] name = "unicode-normalization" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "unicode-segmentation" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "unicode-width" version = "0.1.7" @@ -4643,7 +4695,7 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -4681,39 +4733,41 @@ dependencies = [ [[package]] name = "validator_client" -version = "0.1.0" +version = "0.2.0" dependencies = [ "bincode 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "bls 0.1.0", + "bls 0.2.0", "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", - "deposit_contract 0.1.0", + "deposit_contract 0.2.0", "dirs 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "environment 0.1.0", - "error-chain 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)", - "eth2_config 0.1.0", - "eth2_interop_keypairs 0.1.0", + "environment 0.2.0", + "error-chain 0.12.2 (registry+https://github.com/rust-lang/crates.io-index)", + "eth2_config 0.2.0", + "eth2_interop_keypairs 0.2.0", "eth2_ssz 0.1.2", "eth2_ssz_derive 0.1.0", "exit-future 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", - "logging 0.1.0", + 
"libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)", + "logging 0.2.0", "parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "remote_beacon_node 0.1.0", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "remote_beacon_node 0.2.0", + "rest_types 0.2.0", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "slog-async 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "slog-term 2.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "slog-async 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "slog-term 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "slot_clock 0.2.0", "tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", "tree_hash 0.1.1", - "types 0.1.0", + "types 0.2.0", + "web3 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -4728,7 +4782,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "version" -version = "0.1.0" +version = "0.2.0" dependencies = [ "target_info 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -4755,7 +4809,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ 
"same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -4775,25 +4829,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "wasm-bindgen" -version = "0.2.58" +version = "0.2.60" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-macro 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-macro 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.58" +version = "0.2.60" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bumpalo 3.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "bumpalo 3.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-shared 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-shared 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -4803,83 +4857,68 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", - "web-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", + "js-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", + "web-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.8" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", - "web-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", + "js-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", + "web-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.58" +version = "0.2.60" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-macro-support 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-macro-support 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.58" +version = "0.2.60" source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-backend 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-shared 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-backend 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-shared 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.58" +version = "0.2.60" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "wasm-bindgen-test" -version = "0.3.8" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "console_error_panic_hook 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", + "js-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", "scoped-tls 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-futures 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-test-macro 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-futures 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-test-macro 0.3.10 
(registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.8" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "wasm-bindgen-webidl" -version = "0.2.58" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "anyhow 1.0.26 (registry+https://github.com/rust-lang/crates.io-index)", - "heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-backend 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", - "weedle 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -4888,23 +4927,20 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", + "js-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", "send_wrapper 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", - "web-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.2.13 
(registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", + "web-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "web-sys" -version = "0.3.35" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "anyhow 1.0.26 (registry+https://github.com/rust-lang/crates.io-index)", - "js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", - "sourcefile 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", - "wasm-bindgen-webidl 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "js-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -4922,13 +4958,13 @@ dependencies = [ "hyper-tls 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 11.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "native-tls 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 
(registry+https://github.com/rust-lang/crates.io-index)", "tokio-timer 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-uds 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4936,11 +4972,39 @@ dependencies = [ ] [[package]] -name = "webpki" -version = "0.21.0" +name = "web3" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "derive_more 0.99.5 (registry+https://github.com/rust-lang/crates.io-index)", + "ethabi 9.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "ethereum-types 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.12.35 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper-tls 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 14.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "native-tls 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-uds 0.1.7 
(registry+https://github.com/rust-lang/crates.io-index)", + "url 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "websocket 0.21.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "webpki" +version = "0.21.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "ring 0.16.12 (registry+https://github.com/rust-lang/crates.io-index)", "untrusted 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -4949,7 +5013,7 @@ name = "webpki-roots" version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "webpki 0.21.0 (registry+https://github.com/rust-lang/crates.io-index)", + "webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -4959,15 +5023,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "base64 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", "bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.10.16 (registry+https://github.com/rust-lang/crates.io-index)", - "native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "native-tls 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", "sha1 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-tls 0.2.1 
(registry+https://github.com/rust-lang/crates.io-index)", "unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4975,27 +5039,18 @@ dependencies = [ [[package]] name = "websocket_server" -version = "0.1.0" +version = "0.2.0" dependencies = [ - "exit-future 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", - "types 0.1.0", + "types 0.2.0", "ws 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "weedle" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "nom 4.2.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "winapi" version = "0.2.8" @@ -5022,7 +5077,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "winapi-util" -version = "0.1.3" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5046,7 +5101,7 @@ name = "ws" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 
1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5093,12 +5148,12 @@ dependencies = [ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "nohash-hasher 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "nohash-hasher 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -5130,12 +5185,11 @@ dependencies = [ "checksum aes-soft 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "cfd7e7ae3f9a1fb5c03b389fc6bb9a51400d0c13053f0dca698c832bfd893a0d" "checksum aesni 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2f70a6b5f971e473091ab7cfb5ffac6cde81666c4556751d8d5620ead8abf100" "checksum ahash 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)" = "6f33b5018f120946c1dcf279194f238a9f146725593ead1c08fa47ff22b0b5d3" -"checksum aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "58fb5e95d83b38284460a5fda7d6470aa0b8844d283a0b614b8535e880800d2d" +"checksum aho-corasick 0.7.10 (registry+https://github.com/rust-lang/crates.io-index)" = 
"8716408b8bc624ed7f65d223ddb9ac2d044c0547b6fa4b0d554f3a9540496ada" "checksum amcl 0.2.0 (git+https://github.com/sigp/milagro_bls?branch=eth2.0-v0.10)" = "" "checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" -"checksum anyhow 1.0.26 (registry+https://github.com/rust-lang/crates.io-index)" = "7825f6833612eb2414095684fcf6c635becf3ce97fe48cf6421321e93bfbd53c" -"checksum arc-swap 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "d7b8a9123b8027467bce0099fe556c628a53c8d83df0507084c31e9ba2e39aff" -"checksum arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "0d382e583f07208808f6b1249e60848879ba3543f57c32277bf52d69c2f0f0ee" +"checksum arc-swap 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d663a8e9a99154b5fb793032533f6328da35e23aac63d5c152279aa8ba356825" +"checksum arrayref 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" "checksum arrayvec 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)" = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" "checksum arrayvec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" "checksum asn1_der 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6fce6b6a0ffdafebd82c87e79e3f40e8d2c523e5fea5566ff6b90509bf98d638" @@ -5143,99 +5197,99 @@ dependencies = [ "checksum atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" "checksum autocfg 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" "checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" -"checksum backtrace 0.3.41 (registry+https://github.com/rust-lang/crates.io-index)" = "a4ed64ae6d9ebfd9893193c4b2532b1292ec97bd8271c9d7d0fa90cd78a34cba" -"checksum backtrace-sys 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)" = "5d6575f128516de27e3ce99689419835fce9643a9b215a14d2b5b685be018491" +"checksum backtrace 0.3.46 (registry+https://github.com/rust-lang/crates.io-index)" = "b1e692897359247cc6bb902933361652380af0f1b7651ae5c5013407f30e109e" +"checksum backtrace-sys 0.1.36 (registry+https://github.com/rust-lang/crates.io-index)" = "78848718ee1255a2485d1309ad9cdecfc2e7d0362dd11c6829364c6b35ae1bc7" "checksum base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" "checksum base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" +"checksum base64 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7d5ca2cd0adc3f48f9e9ea5a6bbdf9ccc0bfade884847e484d452414c7ccffb3" "checksum base64 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = "489d6c0ed21b11d038c31b6ceccca973e65d73ba3bd8ecb9a2babf5546164643" "checksum bigint 4.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ebecac13b3c745150d7b6c3ea7572d372f09d627c2077e893bf26c5c7f70d282" "checksum bincode 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5753e2a71534719bf3f4e57006c3a4f0d2c672a4b676eec84161f763eca87dbf" "checksum bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5" "checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" -"checksum bitvec 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"a993f74b4c99c1908d156b8d2e0fb6277736b0ecbd833982fd1241d39b2766a6" +"checksum bitvec 0.17.4 (registry+https://github.com/rust-lang/crates.io-index)" = "41262f11d771fd4a61aa3ce019fca363b4b6c282fca9da2a31186d3965a47a5c" "checksum blake2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "94cb07b0da6a73955f8fb85d24c466778e70cda767a568229b104f0264089330" "checksum blake2b_simd 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)" = "d8fb2d74254a3a0b5cac33ac9f8ed0e44aa50378d9dbb2e5d83bd21ed1dc2c8a" "checksum block-buffer 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" "checksum block-cipher-trait 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1c924d49bd09e7c06003acda26cd9742e796e34282ec6c1189404dee0c1f4774" "checksum block-padding 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" "checksum bs58 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "c95ee6bba9d950218b6cc910cf62bc9e0a171d0f4537e3627b0f54d08549b188" -"checksum bs58 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b170cd256a3f9fa6b9edae3e44a7dfdfc77e8124dbc3e2612d75f9c3e2396dae" -"checksum bstr 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "3ede750122d9d1f87919570cb2cccee38c84fbc8c5599b25c289af40625b7030" -"checksum bumpalo 3.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5fb8038c1ddc0a5f73787b130f4cc75151e96ed33e417fde765eb5a81e3532f4" +"checksum bs58 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb" +"checksum bstr 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "2889e6d50f394968c8bf4240dc3f2a7eb4680844d27308f798229ac9d4725f41" +"checksum bumpalo 3.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = 
"12ae9db68ad7fac5fe51304d20f016c911539251075a214f8e663babefa35187" "checksum byte-slice-cast 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "b0a5e3906bcbf133e33c1d4d95afc664ad37fbdb9f6568d8043e7ea8c27d93d3" "checksum byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" "checksum byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0fc10e8cc6b2580fda3f36eb6dc5316657f812a3df879a44a66fc9f0fdbc4855" -"checksum byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c3dd8985a7111efc5c80b44e23ecdd8c007de8ade3b96595387e812b957cf5" +"checksum byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" "checksum bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)" = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c" -"checksum c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "214238caa1bf3a496ec3392968969cab8549f96ff30652c9e56885329315f6bb" "checksum c_linked_list 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4964518bd3b4a8190e832886cdc0da9794f12e8e6c1613a9e90ff331c4c8724b" "checksum cast 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4b9434b9a5aa1450faa3f9cb14ea0e8c53bb5d2b3c1bfd1ab4fc03e9f33fbfb0" "checksum cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)" = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" "checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" -"checksum chrono 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)" = "31850b4a4d6bae316f7a09e691c944c28299298837edc0a03f755618c23cbc01" +"checksum chrono 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)" = 
"80094f509cf8b5ae86a4966a39b3ff66cd7e2a3e594accec3743ff3fabeab5b2" "checksum clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9" "checksum clear_on_drop 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "97276801e127ffb46b66ce23f35cc96bd454fa311294bced4bbace7baa8b1d17" "checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -"checksum colored 1.9.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8815e2ab78f3a59928fc32e141fbeece88320a240e43f47b2fd64ea3a88a5b3d" +"checksum colored 1.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = "f4ffc801dacf156c5854b9df4f425a626539c3a6ef7893cc0c5084a23f0b6c59" "checksum console_error_panic_hook 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "b8d976903543e0c48546a91908f21588a680a8c8f984df9a5d69feccb2b2a211" -"checksum const-random 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7b641a8c9867e341f3295564203b1c250eb8ce6cb6126e007941f78c4d2ed7fe" -"checksum const-random-macro 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c750ec12b83377637110d5a57f5ae08e895b06c4b16e2bdbf1a94ef717428c59" +"checksum const-random 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "2f1af9ac737b2dd2d577701e59fd09ba34822f6f2ebdb30a7647405d9e55e16a" +"checksum const-random-macro 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "25e4c606eb459dd29f7c57b2e0879f2b6f14ee130918c2b78ccb58a9624e6c7a" "checksum constant_time_eq 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" "checksum cookie 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "888604f00b3db336d2af898ec3c1d5d0ddf5e6d462220f2ededc33a87ac4bbd5" "checksum cookie_store 0.7.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "46750b3f362965f197996c4448e4a0935e791bf7d6631bfce9ee0af3d24c919c" -"checksum core-foundation 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "25b9e03f145fd4f2bf705e07b900cd41fc636598fe5dc452fd0db1441c3f496d" -"checksum core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e7ca8a5221364ef15ce201e8ed2f609fc312682a8f4e0e3d4aa5879764e0fa3b" +"checksum core-foundation 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" +"checksum core-foundation-sys 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" "checksum crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" -"checksum criterion 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "938703e165481c8d612ea3479ac8342e5615185db37765162e762ec3523e2fc6" -"checksum criterion-plot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "eccdc6ce8bbe352ca89025bee672aa6d24f4eb8c53e3a8b5d1bc58011da072a2" +"checksum criterion 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1fc755679c12bda8e5523a71e4d654b6bf2e14bd838dfc48cde6559a05caf7d1" +"checksum criterion-plot 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a01e15e0ea58e8234f96146b1f91fa9d0e4dd7a38da93ff7a75d42c0b9d3a545" "checksum crossbeam 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "bd66663db5a988098a89599d4857919b3acf7f61402e61365acfd3919857b9be" "checksum crossbeam 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "69323bff1fb41c635347b8ead484a5ca6c3f11914d784170b158d8449ab07f8e" -"checksum crossbeam-channel 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"acec9a3b0b3559f15aee4f90746c4e5e293b701c0f7d3925d24e01645267b68c" -"checksum crossbeam-deque 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3aa945d63861bfe624b55d153a39684da1e8c0bc8fba932f7ee3a3c16cea3ca" -"checksum crossbeam-epoch 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5064ebdbf05ce3cb95e45c8b086f72263f4166b29b97f6baff7ef7fe047b55ac" -"checksum crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b" +"checksum crossbeam-channel 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "cced8691919c02aac3cb0a1bc2e9b73d89e832bf9a06fc579d4e71b68a2da061" +"checksum crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285" +"checksum crossbeam-epoch 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" "checksum crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c695eeca1e7173472a32221542ae469b3e9aac3a4fc81f7696bcad82029493db" -"checksum crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6" -"checksum crossbeam-utils 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ce446db02cdc3165b94ae73111e570793400d0794e46125cc4056c81cbb039f4" +"checksum crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" "checksum crunchy 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "a2f4a431c5c9f662e1200b7c7f02c34e91361150e382089a8f2dec3ba680cbda" "checksum crunchy 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" "checksum crypto-mac 0.7.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" -"checksum csv 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "11f8cbd084b9a431d52dfac0b8428a26b68f1061138a7bea18aa56b9cdf55266" -"checksum csv-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "9b5cadb6b25c77aeff80ba701712494213f4a8418fcda2ee11b6560c3ad0bf4c" +"checksum csv 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "00affe7f6ab566df61b4be3ce8cf16bc2576bca0963ceb0955e45d514bf9a279" +"checksum csv-core 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" "checksum ctr 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "022cd691704491df67d25d006fe8eca083098253c4d43516c2206479c58c6736" -"checksum ctrlc 3.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c7dfd2d8b4c82121dfdff120f818e09fc4380b0b7e17a742081a89b94853e87f" +"checksum ctrlc 3.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "7a4ba686dff9fa4c1c9636ce1010b0cf98ceb421361b0bb3d6faeec43bd217a7" "checksum cuckoofilter 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8dd43f7cfaffe0a386636a10baea2ee05cc50df3b77bea4a456c9572a939bf1f" "checksum curve25519-dalek 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8b7dcd30ba50cdf88b55b033456138b7c0ac4afdc436d82e1b79f370f24cc66d" "checksum curve25519-dalek 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "26778518a7f6cffa1d25a44b602b62b979bd88adb9e99ffec546998cf3404839" -"checksum data-encoding 2.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f4f47ca1860a761136924ddd2422ba77b2ea54fe8cc75b9040804a0d9d32ad97" +"checksum data-encoding 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "11c0346158a19b3627234e15596f5e465c360fcdb97d817bcb255e0510f5a788" "checksum db-key 0.0.5 
(registry+https://github.com/rust-lang/crates.io-index)" = "b72465f46d518f6015d9cf07f7f3013a95dd6b9c2747c3d65ae0cce43929d14f" -"checksum derivative 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "942ca430eef7a3806595a6737bc388bf51adb888d3fc0dd1b50f1c170167ee3a" +"checksum derivative 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3c6d883546668a3e2011b6a716a7330b82eabb0151b138217f632c8243e17135" "checksum derive_more 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a141330240c921ec6d074a3e188a7c7ef95668bb95e7d44fa0e5778ec2a7afe" +"checksum derive_more 0.99.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e2323f3f47db9a0e77ce7a300605d8d2098597fc451ed1a97bb1f6411bb550a7" "checksum digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" "checksum dirs 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "13aea89a5c93364a98e9b37b2fa237effbb694d5cfe01c5b70941f7eb087d5e3" "checksum dirs-sys 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "afa0b23de8fd801745c471deffa6e12d248f962c9fd4b4c33787b055599bde7b" "checksum dns-parser 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c4d33be9473d06f75f58220f71f7a9317aca647dc061dbd3c361b0bef505fbea" -"checksum dtoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ea57b42383d091c85abcc2706240b94ab2a8fa1fc81c10ff23c4de06e2a90b5e" +"checksum dtoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "4358a9e11b9a09cf52383b451b49a169e8d797b68aa02301ff586d70d9661ea3" "checksum ed25519-dalek 1.0.0-pre.3 (registry+https://github.com/rust-lang/crates.io-index)" = "978710b352437433c97b2bff193f2fb1dfd58a093f863dd95e225a19baa599a2" "checksum either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3" "checksum encoding_rs 0.8.22 
(registry+https://github.com/rust-lang/crates.io-index)" = "cd8d03faa7fe0c1431609dfad7bbe827af30f82e1e2ae6f7ee4fca6bd764bc28" -"checksum enr 0.1.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" +"checksum enr 0.1.0-alpha.3 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" "checksum env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "aafcde04e90a5226a6443b7aabdb016ba2f8307c847d524724bd9b346dd1a2d3" "checksum env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" -"checksum error-chain 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3ab49e9dcb602294bc42f9a7dfc9bc6e936fca4418ea300dbfb84fe16de0b7d9" +"checksum error-chain 0.12.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d371106cc88ffdfb1eabd7111e432da544f16f3e2d7bf1dfe8bf575f1df045cd" "checksum ethabi 11.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "97652a7d1f2504d6c885c87e242a06ccef5bd3054093d3fb742d8fb64806231a" "checksum ethabi 8.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ebdeeea85a6d217b9fcc862906d7e283c047e04114165c433756baf5dce00a6c" +"checksum ethabi 9.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "965126c64662832991f5a748893577630b558e47fa94e7f35aefcd20d737cef7" "checksum ethbloom 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3932e82d64d347a045208924002930dc105a138995ccdc1479d0f05f0359f17c" "checksum ethbloom 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "32cfe1c169414b709cf28aa30c74060bdb830a03a8ba473314d079ac79d80a5f" "checksum ethereum-types 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "62d1bc682337e2c5ec98930853674dd2b4bd5d0d246933a9e98e5280f7c76c5f" "checksum ethereum-types 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"ba744248e3553a393143d5ebb68939fc3a4ec0c22a269682535f5ffe7fed728c" "checksum exit-future 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "d8013f441e38e31c670e7f34ec8f1d5d3a2bd9d303c1ff83976ca886005e8f48" -"checksum failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f8273f13c977665c5db7eb2b99ae520952fe5ac831ae4cd09d80c4c7042b5ed9" -"checksum failure_derive 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0bc225b78e0391e4b8683440bf2e63c2deeeb2ce5189eab46e2b68c6d3725d08" +"checksum failure 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "b8529c2421efa3066a5cbd8063d2244603824daccb6936b079010bb2aa89464b" +"checksum failure_derive 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "030a733c8287d6213886dd487564ff5c8f6aae10278b3588ed177f9d18f8d231" "checksum fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" "checksum fixed-hash 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d1a683d1234507e4f3bf2736eeddf0de1dc65996dc0164d57eba0a74bcf29489" "checksum fixed-hash 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3367952ceb191f4ab95dd5685dc163ac539e36202f9fcfd0cb22f9f9c542fefc" -"checksum flate2 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)" = "6bd6d6f4752952feb71363cffc9ebac9411b75b87c6ab6058c40c8900cf43c0f" +"checksum flate2 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)" = "2cfff41391129e0a856d6d822600b8d71179d46879e310417eb9c762eb178b42" "checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" "checksum foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" "checksum foreign-types-shared 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = 
"00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" @@ -5253,10 +5307,9 @@ dependencies = [ "checksum hashbrown 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e1de41fb8dba9714efd92241565cdff73f78508c95697dd56787d3cba27e2353" "checksum hashbrown 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8e6073d0ca812575946eb5f35ff68dbe519907b25c42530389ff946dc84c6ead" "checksum heapsize 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1679e6ea370dee694f91f1dc469bf94cf8f52051d147aec3e1f9497c6fc22461" -"checksum heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" -"checksum hermit-abi 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "eff2656d88f158ce120947499e971d743c05dbcbed62e5bd2f38f1698bbc3772" +"checksum hermit-abi 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8a0d737e0f947a1864e93d33fdef4af8445a00d1ed8dc0c8ddb73139ea6abf15" "checksum hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "805026a5d0141ffc30abb3be3173848ad46a1b1664fe632428479619a3644d77" -"checksum hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "023b39be39e3a2da62a94feb433e91e8bcd37676fbc8bea371daf52b7a769a3e" +"checksum hex 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" "checksum hkdf 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3fa08a006102488bd9cd5b8013aabe84955cf5ae22e304c2caf655b633aefae3" "checksum hmac 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" "checksum hmac-drbg 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" @@ -5274,14 +5327,15 @@ dependencies = [ "checksum impl-rlp 0.2.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "8f7a72f11830b52333f36e3b09a288333888bf54380fd0ac0790a3c31ab0f3c5" "checksum impl-serde 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "58e3cae7e99c7ff5a995da2cf78dd0a5383740eda71d98cf7b1910c301ac69b8" "checksum impl-serde 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5bbe9ea9b182f0fb1cabbd61f4ff9b7b7b9197955e95a7e4c27de5055eb29ff8" -"checksum indexmap 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712d7b3ea5827fcb9d4fda14bf4da5f136f0db2ae9c8f4bd4e2d1c6fde4e6db2" -"checksum integer-sqrt 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ea155abb3ba6f382a75f1418988c05fe82959ed9ce727de427f9cfd425b0c903" +"checksum indexmap 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "076f042c5b7b98f31d205f1249267e12a6518c1481e9dae9764af19b707d2292" +"checksum integer-sqrt 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "f65877bf7d44897a473350b1046277941cee20b263397e90869c50b6e766088b" "checksum iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" -"checksum ipnet 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f2f4b06b21db0228860c8dfd17d2106c49c7c6bd07477a4036985347d84def04" +"checksum ipnet 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" "checksum itertools 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f56a2d0bc861f9165be4eb3442afd3c236d8a98afd426f65d92324ae1091a484" -"checksum itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "501266b7edd0174f8530248f87f99c88fbe60ca4ef3dd486835b8d8d53136f7f" -"checksum js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)" = "7889c7c36282151f6bf465be4700359318aef36baa951462382eae49e9577cf9" +"checksum itoa 0.4.5 
(registry+https://github.com/rust-lang/crates.io-index)" = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e" +"checksum js-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)" = "6a27d435371a2fa5b6d2b028a74bbdb1234f308da363226a2854ca3ff8ba7055" "checksum jsonrpc-core 11.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "97b83fdc5e0218128d0d270f2f2e7a5ea716f3240c8518a58bc89e6716ba8581" +"checksum jsonrpc-core 14.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "25525f6002338fb4debb5167a89a0b47f727a5a48418417545ad3429758b7fec" "checksum keccak 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" "checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" @@ -5289,90 +5343,92 @@ dependencies = [ "checksum lazycell 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b294d6fa9ee409a054354afc4352b0b9ef7ca222c69b8812cbea9e7d2bf3783f" "checksum leveldb 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8438a36a31c982ac399c4477d7e3c62cc7a6bf91bb6f42837b7e1033359fcbad" "checksum leveldb-sys 2.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "71f46429bb70612c3e939aaeed27ffd31a24a773d21728a1a426e4089d6778d2" -"checksum libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)" = "d515b1f41455adea1313a4a2ac8a8a477634fbae63cc6100e3aebb207ce61558" +"checksum libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)" = "99e85c08494b21a9054e7fe1374a732aeadaff3980b6990b94bfd3a70f690005" "checksum libflate 0.1.27 (registry+https://github.com/rust-lang/crates.io-index)" = "d9135df43b1f5d0e333385cb6e7897ecd1a43d7d11b91ac003f4d2c2d2401fdd" 
-"checksum libp2p 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum libp2p-core-derive 0.13.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum libp2p-deflate 0.5.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum libp2p-discv5 0.1.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum libp2p-dns 0.13.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum libp2p-floodsub 0.13.1 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum libp2p-gossipsub 0.1.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum libp2p-identify 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum libp2p-kad 0.13.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum libp2p-mdns 0.13.1 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum libp2p-mplex 0.13.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum libp2p-noise 0.11.1 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum libp2p-ping 0.13.1 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum libp2p-plaintext 0.13.1 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum libp2p-secio 0.13.1 
(git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum libp2p-tcp 0.13.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum libp2p-uds 0.13.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum libp2p-wasm-ext 0.6.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum libp2p-websocket 0.13.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum libp2p-yamux 0.13.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum libsecp256k1 0.3.1 (git+https://github.com/SigP/libsecp256k1?branch=ecdh_generalise)" = "" -"checksum libsecp256k1 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "df6edf84fd62aad1c93932b39324eaeda3912c1d26bc18dfaee6293848e49a50" +"checksum libp2p 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum libp2p-core 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum libp2p-core-derive 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum libp2p-deflate 0.5.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum libp2p-discv5 0.1.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum libp2p-dns 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum libp2p-floodsub 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum libp2p-gossipsub 
0.1.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum libp2p-identify 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum libp2p-kad 0.13.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum libp2p-mdns 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum libp2p-mplex 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum libp2p-noise 0.11.1 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum libp2p-ping 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum libp2p-plaintext 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum libp2p-secio 0.13.1 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum libp2p-swarm 0.3.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum libp2p-tcp 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum libp2p-uds 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum libp2p-wasm-ext 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum libp2p-websocket 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum libp2p-yamux 0.13.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum libsecp256k1 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "1fc1e2c808481a63dc6da2074752fdd4336a3c8fcc68b83db6f1fd5224ae7962" 
"checksum libz-sys 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)" = "2eb5e43362e38e2bca2fd5f5134c4d4564a23a5c28e9b95411652021a8675ebe" "checksum linked-hash-map 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ae91b68aebc4ddb91978b11a1b02ddd8602a05ec19002801c5666000e05e0f83" "checksum lock_api 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "62ebf1391f6acad60e5c8b43706dde4582df75c06698ab44511d15016bc2442c" "checksum lock_api 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ed946d4529956a20f2d63ebe1b69996d5a2137c91913fe3ebbeff957f5bca7ff" -"checksum lock_api 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "79b2de95ecb4691949fea4716ca53cdbcfccb2c612e19644a8bad05edcf9f47b" +"checksum lock_api 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" "checksum log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" "checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" "checksum lru 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "5d8f669d42c72d18514dfca8115689c5f6370a17d980cb5bd777a67f404594c8" "checksum lru 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0609345ddee5badacf857d4f547e0e5a2e987db77085c24cd887f73573a04237" "checksum matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" "checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" -"checksum memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3197e20c7edb283f87c071ddfc7a2cca8f8e0b888c242959846a6fce03c72223" -"checksum memoffset 0.5.3 
(registry+https://github.com/rust-lang/crates.io-index)" = "75189eb85871ea5c2e2c15abbdd541185f63b408415e5051f5cac122d8c774b9" -"checksum milagro_bls 1.0.0 (git+https://github.com/sigp/milagro_bls?branch=eth2.0-v0.10)" = "" +"checksum memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" +"checksum memoffset 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b4fc2c02a7e374099d4ee95a193111f72d2110197fe200272371758f6c3643d8" +"checksum milagro_bls 1.0.1 (git+https://github.com/sigp/milagro_bls?branch=eth2.0-v0.10)" = "" "checksum mime 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ba626b8a6de5da682e1caa06bdb42a335aee5a84db8e5046a3e8ab17ba0a3ae0" "checksum mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)" = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" -"checksum mime_guess 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1a0ed03949aef72dbdf3116a383d7b38b4768e6f960528cd6a6044aa9ed68599" -"checksum miniz_oxide 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6f3f74f726ae935c3f514300cc6773a0c9492abc5e972d42ba0c0ebb88757625" +"checksum mime_guess 2.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" +"checksum miniz_oxide 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "aa679ff6578b1cddee93d7e82e263b94a575e0bfced07284eb0c037c1d2416a5" "checksum mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)" = "302dec22bcf6bae6dfb69c647187f4b4d0fb6f535521f7bc022430ce8e12008f" "checksum mio-extras 2.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "52403fe290012ce777c4626790c8951324a2b9e3316b3143779c72b029742f19" "checksum mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)" = "966257a94e196b11bb43aca423754d87429960a768de9414f3691d6957abf125" "checksum 
miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" -"checksum multistream-select 0.6.1 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum native-tls 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4b2df1a4c22fd44a62147fd8f13dd0f95c9d8ca7b2610299b2a2f9cf8964274e" +"checksum multistream-select 0.6.1 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum native-tls 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "2b0d88c06fe90d5ee94048ba40409ef1d9315d86f6f38c2efdaad4fb50c58b2d" "checksum net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88" -"checksum nix 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6c722bee1037d430d0f8e687bbdbf222f27cc6e4e68d5caf630857bb2b6dbdce" +"checksum nix 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)" = "50e4785f2c3b7589a0d0c1dd60285e1188adac4006e8abd6dd578e1567027363" "checksum nodrop 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" -"checksum nohash-hasher 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4e657a6ec97f9a3ba46f6f7034ea6db9fcd5b71d25ef1074b7bc03da49be0e8e" -"checksum nom 4.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2ad2a91a8e869eeb30b9cb3119ae87773a8f4ae617f41b1eb9c154b2905f7bd6" -"checksum num-bigint 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f6f115de20ad793e857f76da2563ff4a09fbcfd6fe93cca0c5d996ab5f3ee38d" +"checksum nohash-hasher 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "721a2bf1c26159ebf17e0a980bc4ce61f4b2fec5ec3b42d42fddd7a84a9e538f" +"checksum num-bigint 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = 
"090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" "checksum num-integer 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba" "checksum num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096" -"checksum num_cpus 1.11.1 (registry+https://github.com/rust-lang/crates.io-index)" = "76dac5ed2a876980778b8b85f75a71b6cbf0db0b1232ee12f826bccb00d09d72" +"checksum num_cpus 1.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +"checksum oorandom 11.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ebcec7c9c2a95cacc7cd0ecb89d8a8454eca13906f6deb55258ffff0adeb9405" "checksum opaque-debug 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" -"checksum openssl 0.10.26 (registry+https://github.com/rust-lang/crates.io-index)" = "3a3cc5799d98e1088141b8e01ff760112bbd9f19d850c124500566ca6901a585" +"checksum openssl 0.10.29 (registry+https://github.com/rust-lang/crates.io-index)" = "cee6d85f4cb4c4f59a6a85d5b68a233d280c82e29e822913b9c8b129fbf20bdd" "checksum openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" -"checksum openssl-sys 0.9.53 (registry+https://github.com/rust-lang/crates.io-index)" = "465d16ae7fc0e313318f7de5cecf57b2fbe7511fd213978b457e1c96ff46736f" -"checksum owning_ref 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "49a4b8ea2179e6a2e27411d3bca09ca6dd630821cf6894c6c7c8467a8ee7ef13" +"checksum openssl-sys 0.9.55 (registry+https://github.com/rust-lang/crates.io-index)" = "7717097d810a0f2e2323f9e5d11e71608355e24828410b55b9d4f18aa5f9a5d8" +"checksum owning_ref 0.4.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "6ff55baddef9e4ad00f88b6c743a2a8062d4c6ade126c2a528644b8e444d52ce" "checksum parity-codec 3.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "2b9df1283109f542d8852cd6b30e9341acc2137481eb6157d2e62af68b0afec9" -"checksum parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum parity-multihash 0.2.0 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f747c06d9f3b2ad387ac881b9667298c81b1243aa9833f086e05996937c35507" +"checksum parity-multiaddr 0.6.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum parity-multihash 0.2.0 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum parity-scale-codec 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "329c8f7f4244ddb5c37c103641027a76c530e65e8e4b8240b29f81ea40508b17" "checksum parity-send-wrapper 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f" +"checksum parking_lot 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e" "checksum parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ab41b4aed082705d1056416ae4468b6ea99d52599ecf3169b00088d43113e337" "checksum parking_lot 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fa7767817701cce701d5585b9c4db3cdd02086398322c1d7e8bf5094a96a2ce7" "checksum parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" "checksum parking_lot_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"94c8c7923936b28d546dfd14d4472eaf34c99b14e1c973a32b3e6d4eb04298c9" "checksum parking_lot_core 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cb88cb1cb3790baa6776844f968fea3be44956cf184fa1be5a03341f5491278c" "checksum parking_lot_core 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" +"checksum parking_lot_core 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0e136c1904604defe99ce5fd71a28d473fa60a12255d511aa78a9ddf11237aeb" "checksum percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" "checksum percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" "checksum pkg-config 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)" = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677" +"checksum plotters 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "4e3bb8da247d27ae212529352020f3e5ee16e83c0c258061d27b08ab92675eeb" "checksum ppv-lite86 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" "checksum primitive-types 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2288eb2a39386c4bc817974cc413afe173010dc80e470fcb1e9a35580869f024" "checksum primitive-types 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e4336f4f5d5524fa60bcbd6fe626f9223d8142a50e7053e979acdf0da41ab975" -"checksum proc-macro-hack 0.5.11 (registry+https://github.com/rust-lang/crates.io-index)" = "ecd45702f76d6d3c75a80564378ae228a85f0b59d2f3ed43c91b4a69eb2ebfc5" +"checksum proc-macro-hack 0.5.15 (registry+https://github.com/rust-lang/crates.io-index)" = "0d659fe7c6d27f25e9d80a1a094c223f5246f6a6596453e09d7229bf42750b63" "checksum proc-macro2 0.4.30 
(registry+https://github.com/rust-lang/crates.io-index)" = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" -"checksum proc-macro2 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "0319972dcae462681daf4da1adeeaa066e3ebd29c69be96c6abb1259d2ee2bcc" +"checksum proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)" = "df246d292ff63439fea9bc8c0a270bed0e390d5ebd4db4ba15aba81111b5abe3" "checksum prometheus 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5567486d5778e2c6455b1b90ff1c558f29e751fc018130fa182e15828e728af1" "checksum protobuf 2.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "40361836defdd5871ff7e84096c6f6444af7fc157f8ef1789f54f147687caa20" "checksum publicsuffix 1.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3bbaa49075179162b49acac1c6aa45fb4dafb5f13cf6794276d77bc7fd95757b" @@ -5380,14 +5436,15 @@ dependencies = [ "checksum quickcheck 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a44883e74aa97ad63db83c4bf8ca490f02b2fc02f92575e720c8551e843c945f" "checksum quickcheck_macros 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d7dfc1c4a1e048f5cc7d36a4c4118dfcf31d217c79f4b9a61bad65d68185752c" "checksum quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" -"checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" +"checksum quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f" +"checksum radium 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" "checksum rand 0.3.23 (registry+https://github.com/rust-lang/crates.io-index)" = "64ac302d8f83c0c1974bf758f6b041c6c8ada916fbb44a609158ca8b064cc76c" 
"checksum rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" "checksum rand 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c618c47cd3ebd209790115ab837de41425723956ad3ce2e6a7f09890947cacb9" "checksum rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" "checksum rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" "checksum rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" -"checksum rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "03a2a90da8c7523f554344f921aa97283eadf6ac484a6d2a7d0212fa7f8d6853" +"checksum rand_chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" "checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" "checksum rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" "checksum rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" @@ -5396,47 +5453,45 @@ dependencies = [ "checksum rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" "checksum rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" "checksum rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = 
"7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" -"checksum rand_os 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a788ae3edb696cfcba1c19bfd388cc4b8c21f8a408432b199c072825084da58a" "checksum rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" "checksum rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" "checksum rand_xorshift 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "77d416b86801d23dde1aa643023b775c3a462efc0ed96443add11546cdf1dca8" -"checksum rand_xoshiro 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0e18c91676f670f6f0312764c759405f13afb98d5d73819840cf72a518487bff" "checksum rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "db6ce3297f9c85e16621bb8cca38a06779ffc31bb8184e1be4bed2be4678a098" "checksum rayon-core 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "08a89b46efaf957e52b18062fb2f4660f8b8a4dde1807ca002690868ef2c85a9" "checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" "checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" -"checksum redox_users 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4ecedbca3bf205f8d8f5c2b44d83cd0690e39ee84b951ed649e9f1841132b66d" -"checksum regex 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b5508c1941e4e7cb19965abef075d35a9a8b5cdf0846f30b4050e9b55dc55e87" -"checksum regex-automata 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "92b73c2a1770c255c240eaa4ee600df1704a38dc3feaa6e949e7fcd4f8dc09f9" -"checksum regex-syntax 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = 
"e734e891f5b408a29efbf8309e656876276f49ab6a6ac208600b4419bd893d90" +"checksum redox_users 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431" +"checksum regex 1.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "a6020f034922e3194c711b82a627453881bc4682166cabb07134a10c26ba7692" +"checksum regex-automata 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" +"checksum regex-syntax 0.6.17 (registry+https://github.com/rust-lang/crates.io-index)" = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae" "checksum remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e" "checksum reqwest 0.9.24 (registry+https://github.com/rust-lang/crates.io-index)" = "f88643aea3c1343c804950d7bf983bd2067f5ab59db6d613a08e05572f2714ab" -"checksum ring 0.16.9 (registry+https://github.com/rust-lang/crates.io-index)" = "6747f8da1f2b1fabbee1aaa4eb8a11abf9adef0bf58a41cee45db5d59cecdfac" +"checksum ring 0.16.12 (registry+https://github.com/rust-lang/crates.io-index)" = "1ba5a8ec64ee89a76c98c549af81ff14813df09c3e6dc4766c3856da48597a0c" "checksum rle-decode-fast 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cabe4fa914dec5870285fa7f71f602645da47c486e68486d2b4ceb4a343e90ac" -"checksum rlp 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3a44d5ae8afcb238af8b75640907edc6c931efcfab2c854e81ed35fa080f84cd" -"checksum rust-argon2 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4ca4eaef519b494d1f2848fc602d18816fed808a981aedf4f1f00ceb7c9d32cf" +"checksum rlp 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "4a7d3f9bed94764eac15b8f14af59fac420c236adaff743b7bcc88e265cb4345" +"checksum rust-argon2 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"2bc8af4bda8e1ff4932523b94d3dd20ee30a87232323eda55903ffd71d2fb017" "checksum rustc-demangle 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" "checksum rustc-hex 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" "checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" "checksum rustls 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b25a18b1bf7387f0145e7f8324e700805aade3842dd3db2e74e4cdeb4677c09e" -"checksum rw-stream-sink 0.1.2 (git+https://github.com/SigP/rust-libp2p/?rev=49c95c4c4242f1c9f08558a3daac5e9ecac290d5)" = "" -"checksum ryu 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bfa8506c1de11c9c4e4c38863ccbe02a305c8188e85a05a784c9e11e1c3910c8" +"checksum rw-stream-sink 0.1.2 (git+https://github.com/SigP/rust-libp2p?rev=37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3)" = "" +"checksum ryu 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "535622e6be132bccd223f4bb2b8ac8d53cda3c7a6394944d3b2b33fb974f9d76" "checksum safemem 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" "checksum same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -"checksum schannel 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "87f550b06b6cba9c8b8be3ee73f391990116bf527450d2556e9b9ce263b9a021" +"checksum schannel 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)" = "039c25b130bd8c1321ee2d7de7fde2659fa9c2744e4bb29711cfc852ea53cd19" "checksum scoped-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "332ffa32bf586782a3efaeb58f127980944bbc8c4d6913a86107ac2a5ab24b28" "checksum 
scoped-tls 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" "checksum scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "94258f53601af11e6a49f722422f6e3425c52b06245a5cf9bc09908b174f5e27" -"checksum scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d" +"checksum scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" "checksum sct 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c" -"checksum security-framework 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8ef2429d7cefe5fd28bd1d2ed41c944547d4ff84776f5935b456da44593a16df" -"checksum security-framework-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "e31493fc37615debb8c5090a7aeb4a9730bc61e77ab10b9af59f1a202284f895" +"checksum security-framework 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "572dfa3a0785509e7a44b5b4bebcf94d41ba34e9ed9eb9df722545c3b3c4144a" +"checksum security-framework-sys 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8ddb15a5fec93b7021b8a9e96009c5d8d51c15673569f7c0f6b7204e5b7b404f" "checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" "checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" "checksum send_wrapper 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a0eddf2e8f50ced781f288c19f18621fa72a3779e3cb58dbf23b07469b0abeb4" -"checksum serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)" = 
"414115f25f818d7dfccec8ee535d76949ae78584fc4f79a6f45a904bf8ab4449" -"checksum serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)" = "128f9e303a5a29922045a830221b8f78ec74a5f544944f3d5984f8ec3895ef64" -"checksum serde_json 1.0.44 (registry+https://github.com/rust-lang/crates.io-index)" = "48c575e0cc52bdd09b47f330f646cf59afc586e9c4e3ccd6fc1f625b8ea1dad7" +"checksum serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)" = "36df6ac6412072f67cf767ebbde4133a5b2e88e76dc6187fa7104cd16f783399" +"checksum serde_derive 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)" = "9e549e3abf4fb8621bd1609f11dfc9f5e50320802273b12f3811a67e6716ea6c" +"checksum serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)" = "da07b57ee2623368351e9a0488bb0b261322a15a6e0ae53e243cbdc0f4208da9" "checksum serde_repr 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "cd02c7587ec314570041b2754829f84d873ced14a96d1fd1823531e11db40573" "checksum serde_urlencoded 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)" = "642dd69105886af2efd227f75a520ec9b44a820d65bc133a9131f7d229fd165a" "checksum serde_yaml 0.8.11 (registry+https://github.com/rust-lang/crates.io-index)" = "691b17f19fc1ec9d94ec0b5864859290dff279dbd7b03f017afda54eb36c3c35" @@ -5444,23 +5499,23 @@ dependencies = [ "checksum sha1 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" "checksum sha2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "27044adfd2e1f077f649f59deb9490d3941d674002f7d062870a60ebe9bd47a0" "checksum sha3 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dd26bc0e7a2e3a7c959bc494caf58b72ee0c71d67704e9520f736ca7e4853ecf" -"checksum simple_logger 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "109facdf91db4b79de557313b5e031f0f8a86373e316bf01158190aa68bcc74e" +"checksum simple_logger 1.6.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "fea0c4611f32f4c2bac73754f22dca1f57e6c1945e0590dae4e5f2a077b92367" "checksum slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23" "checksum slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" "checksum slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1cc9c640a4adbfbcc11ffb95efe5aa7af7309e002adab54b185507dbf2377b99" -"checksum slog-async 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e544d16c6b230d84c866662fe55e31aacfca6ae71e6fc49ae9a311cb379bfc2f" +"checksum slog-async 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "51b3336ce47ce2f96673499fc07eb85e3472727b9a7a2959964b002c2ce8fbbb" "checksum slog-json 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ddc0d2aff1f8f325ef660d9a0eb6e6dcd20b30b3f581a5897f58bf42d061c37a" "checksum slog-kvfilter 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ae939ed7d169eed9699f4f5cd440f046f5dc5dfc27c19e3cd311619594c175e0" "checksum slog-scope 4.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7c44c89dd8b0ae4537d1ae318353eaf7840b4869c536e31c41e963d1ea523ee6" "checksum slog-stdlog 3.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f1c469573d1e3f36f9eee66cd132206caf47b50c94b1f6c6e7b4d8235e9ecf01" "checksum slog-stdlog 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "be4d87903baf655da2d82bc3ac3f7ef43868c58bf712b3a661fda72009304c23" -"checksum slog-term 2.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "54b50e85b73c2bd42ceb97b6ded235576d405bd1e974242ccfe634fa269f6da7" +"checksum slog-term 2.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "124501187c410b6a46fe8a47a48435ae462fae4e02d03c558d358f40b17308cb" "checksum sloggers 0.3.5 
(registry+https://github.com/rust-lang/crates.io-index)" = "d41aa58f9a02e205e21117ffa08e94c37f06e1f1009be2639b621f351a75796d" "checksum smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "f7b0758c52e15a8b5e3691eae6cc559f08eee9406e548a4477ba4e67770a82b6" -"checksum smallvec 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5c2fb2ec9bcd216a5b0d0ccf31ab17b5ed1d627960edff65bbe95d3ce221cefc" +"checksum smallvec 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "05720e22615919e4734f6a99ceae50d00226c3c5aca406e102ebc33298214e0a" +"checksum snap 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f7fb9b0bb877b35a1cc1474a3b43d9c226a2625311760cdda2cbccbc0c7a8376" "checksum snow 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "afb767eee7d257ba202f0b9b08673bc13b22281632ef45267b19f13100accd2f" "checksum soketto 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "bceb1a3a15232d013d9a3b7cac9e5ce8e2313f348f01d4bc1097e5e53aa07095" -"checksum sourcefile 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4bf77cb82ba8453b42b6ae1d692e4cdc92f9a47beaf89a847c8be83f4e328ad3" "checksum spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" "checksum stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dba1a27d3efae4351c8051072d619e3ade2820635c3958d826bfea39d59b54c8" "checksum static_assertions 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "c19be23126415861cb3a23e501d34a708f7f9b2183c5252d690941c2e69199d5" @@ -5471,7 +5526,7 @@ dependencies = [ "checksum subtle 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" "checksum subtle 2.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7c65d530b10ccaeac294f349038a597e435b18fb456aadd0840a623f83b9e941" "checksum syn 
0.15.44 (registry+https://github.com/rust-lang/crates.io-index)" = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" -"checksum syn 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)" = "1e4ff033220a41d1a57d8125eab57bf5263783dfdcc18688b1dacc6ce9651ef8" +"checksum syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)" = "0df0eb663f387145cab623dea85b09c2c5b4b0aef44e945d928e682fce71bb03" "checksum synstructure 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)" = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" "checksum take_mut 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60" "checksum target_info 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c63f48baada5c52e65a29eef93ab4f8982681b67f9e8d29c7b05abcfec2b9ffe" @@ -5480,48 +5535,46 @@ dependencies = [ "checksum term 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c0863a3345e70f61d613eab32ee046ccd1bcc5f9105fe402c61fcd0c13eeb8b5" "checksum termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f" "checksum textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -"checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b" "checksum thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" -"checksum time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" +"checksum time 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)" = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" 
"checksum tiny-keccak 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d8a021c69bb74a44ccedb824a046447e2c84a01df9e5c20779750acb38e11b2" "checksum tinytemplate 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "57a3c6667d3e65eb1bc3aed6fd14011c6cbc3a0665218ab7f5daf040b9ec371a" "checksum tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)" = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6" "checksum tokio-buf 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8fb220f46c53859a4b7ec083e41dec9778ff0b1851c0942b211edb89e0ccdc46" -"checksum tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5c501eceaf96f0e1793cf26beb63da3d11c738c4a943fdf3746d81d64684c39f" +"checksum tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "25b2998660ba0e70d18684de5d06b70b70a3a747469af9dea7618cc59e75976b" "checksum tokio-core 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "aeeffbbb94209023feaef3c196a41cbcdafa06b4a6f893f68779bb5e53796f71" -"checksum tokio-current-thread 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "d16217cad7f1b840c5a97dfb3c43b0c871fef423a6e8d2118c604e843662a443" +"checksum tokio-current-thread 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "b1de0e32a83f131e002238d7ccde18211c0a5397f60cbfffcb112868c2e0e20e" "checksum tokio-dns-unofficial 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "82c65483db54eb91b4ef3a9389a3364558590faf30ce473141707c0e16fda975" -"checksum tokio-executor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "ca6df436c42b0c3330a82d855d2ef017cd793090ad550a6bc2184f4b933532ab" -"checksum tokio-fs 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "3fe6dc22b08d6993916647d108a1a7d15b9cd29c4f4496c62b92c45b5041b7af" -"checksum tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = 
"5090db468dad16e1a7a54c8c67280c5e4b544f3d3e018f0b913b400261f85926" +"checksum tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "fb2d1b8f4548dbf5e1f7818512e9c406860678f29c300cdf0ebac72d1a3a1671" +"checksum tokio-fs 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "297a1206e0ca6302a0eed35b700d292b275256f596e2f3fea7729d5e629b6ff4" +"checksum tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "57fc868aae093479e3131e3d165c93b1c7474109d13c90ec0dda2a1bbfff0674" "checksum tokio-io-timeout 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "135ce81f15cfd7982fac684f9057a1299eebeb79e98a8a709969b9aa51123129" -"checksum tokio-reactor 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "6732fe6b53c8d11178dcb77ac6d9682af27fc6d4cb87789449152e5377377146" +"checksum tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "09bc590ec4ba8ba87652da2068d150dcada2cfa2e07faae270a5e0409aa51351" "checksum tokio-rustls 0.10.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2d7cf08f990090abd6c6a73cab46fed62f85e8aef8b99e4b918a9f4a637f0676" -"checksum tokio-sync 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "d06554cce1ae4a50f42fba8023918afa931413aded705b560e29600ccf7c6d76" -"checksum tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1d14b10654be682ac43efee27401d792507e30fd8d26389e1da3b185de2e4119" -"checksum tokio-threadpool 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "f0c32ffea4827978e9aa392d2f743d973c1dfa3730a2ed3f22ce1e6984da848c" +"checksum tokio-sync 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "edfe50152bc8164fcc456dab7891fa9bf8beaf01c5ee7e1dd43a397c3cf87dee" +"checksum tokio-tcp 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "98df18ed66e3b72e742f185882a9e201892407957e45fbff8da17ae7a7c51f72" +"checksum tokio-threadpool 0.1.18 
(registry+https://github.com/rust-lang/crates.io-index)" = "df720b6581784c118f0eb4310796b12b1d242a7eb95f716a8367855325c25f89" "checksum tokio-timer 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6131e780037787ff1b3f8aad9da83bca02438b72277850dd6ad0d455e0e20efc" -"checksum tokio-timer 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "1739638e364e558128461fc1ad84d997702c8e31c2e6b18fb99842268199e827" +"checksum tokio-timer 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)" = "93044f2d313c95ff1cb7809ce9a7a05735b012288a888b62d4434fd58c94f296" "checksum tokio-tls 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "354b8cd83825b3c20217a9dc174d6a0c67441a2fae5c41bcb1ea6679f6ae0f7c" -"checksum tokio-udp 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f02298505547f73e60f568359ef0d016d5acd6e830ab9bc7c4a5b3403440121b" +"checksum tokio-udp 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "e2a0b10e610b39c38b031a2fcab08e4b82f16ece36504988dcbd81dbba650d82" "checksum tokio-uds 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "65ae5d255ce739e8537221ed2942e0445f4b3b813daebac1c0050ddaaa3587f9" -"checksum tokio-uds 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "037ffc3ba0e12a0ab4aca92e5234e0dedeb48fddf6ccd260f1f150a36a9f2445" -"checksum toml 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)" = "01d1404644c8b12b16bfcffa4322403a91a451584daaaa7c28d3152e6cbc98cf" +"checksum tokio-uds 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "5076db410d6fdc6523df7595447629099a1fdc47b3d9f896220780fa48faf798" +"checksum toml 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ffc92d160b1eef40665be3a05630d003936a3bc7da7421277846c2613e92c71a" "checksum trackable 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)" = "11475c3c53b075360eac9794965822cb053996046545f91cf61d90e00b72efa5" -"checksum trackable_derive 0.1.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "0f4062d54dd240bde289717d6b4af18048c3dd552f01a0fd93824f5fc4d2d084" +"checksum trackable_derive 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "edcf0b9b2caa5f4804ef77aeee1b929629853d806117c48258f402b69737e65c" "checksum traitobject 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "efd1f82c56340fdf16f2a953d7bda4f8fdffba13d93b00844c25572110b26079" "checksum try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" "checksum try_from 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "283d3b89e1368717881a9d51dad843cc435380d8109c9e47d38780a324698d8b" "checksum twofish 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712d261e83e727c8e2dbb75dacac67c36e35db36a958ee504f2164fc052434e1" "checksum typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1410f6f91f21d1612654e7cc69193b0334f909dcf2c790c4826254fbb86f8887" -"checksum typenum 1.11.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6d2783fe2d6b8c1101136184eb41be8b1ad379e4657050b8aaff0c79ee7575f9" +"checksum typenum 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" "checksum uint 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2143cded94692b156c356508d92888acc824db5bffc0b4089732264c6fcf86d4" "checksum uint 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e75a4cdd7b87b28840dba13c483b9a88ee6bbf16ba5c951ee1ecfcf723078e0d" "checksum unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7f4765f83163b74f957c797ad9253caf97f103fb064d3999aea9568d09fc8a33" "checksum unicase 2.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" "checksum unicode-bidi 0.3.4 
(registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" -"checksum unicode-normalization 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "b561e267b2326bb4cebfc0ef9e68355c7abe6c6f522aeac2f5bf95d56c59bdcf" -"checksum unicode-segmentation 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" +"checksum unicode-normalization 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4" "checksum unicode-width 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "caaa9d531767d1ff2150b9332433f32a24622147e5ebb1f26409d5da67afd479" "checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" "checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" @@ -5538,28 +5591,27 @@ dependencies = [ "checksum walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d" "checksum want 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230" "checksum wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)" = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" -"checksum wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "5205e9afdf42282b192e2310a5b463a6d1c1d774e30dc3c791ac37ab42d2616c" -"checksum wasm-bindgen-backend 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "11cdb95816290b525b32587d76419facd99662a07e59d3cdb560488a819d9a45" +"checksum wasm-bindgen 0.2.60 
(registry+https://github.com/rust-lang/crates.io-index)" = "2cc57ce05287f8376e998cbddfb4c8cb43b84a7ec55cf4551d7c00eef317a47f" +"checksum wasm-bindgen-backend 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)" = "d967d37bf6c16cca2973ca3af071d0a2523392e4a594548155d89a678f4237cd" "checksum wasm-bindgen-futures 0.3.27 (registry+https://github.com/rust-lang/crates.io-index)" = "83420b37346c311b9ed822af41ec2e82839bfe99867ec6c54e2da43b7538771c" -"checksum wasm-bindgen-futures 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8bbdd49e3e28b40dec6a9ba8d17798245ce32b019513a845369c641b275135d9" -"checksum wasm-bindgen-macro 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "574094772ce6921576fb6f2e3f7497b8a76273b6db092be18fc48a082de09dc3" -"checksum wasm-bindgen-macro-support 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "e85031354f25eaebe78bb7db1c3d86140312a911a106b2e29f9cc440ce3e7668" -"checksum wasm-bindgen-shared 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "f5e7e61fc929f4c0dddb748b102ebf9f632e2b8d739f2016542b4de2965a9601" -"checksum wasm-bindgen-test 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "98fd0ec352c44d1726b6c2bec524612b1c81e34a7d858f597a6c71f8e018c82e" -"checksum wasm-bindgen-test-macro 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "97837a6e83ab24a4b3a38d44a257e13335b54f4b4548b2c9d71edd0bf570cb4f" -"checksum wasm-bindgen-webidl 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "ef012a0d93fc0432df126a8eaf547b2dce25a8ce9212e1d3cbeef5c11157975d" +"checksum wasm-bindgen-futures 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)" = "7add542ea1ac7fdaa9dc25e031a6af33b7d63376292bd24140c637d00d1c312a" +"checksum wasm-bindgen-macro 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)" = "8bd151b63e1ea881bb742cd20e1d6127cef28399558f3b5d415289bc41eee3a4" +"checksum wasm-bindgen-macro-support 0.2.60 
(registry+https://github.com/rust-lang/crates.io-index)" = "d68a5b36eef1be7868f668632863292e37739656a80fc4b9acec7b0bd35a4931" +"checksum wasm-bindgen-shared 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)" = "daf76fe7d25ac79748a37538b7daeed1c7a6867c92d3245c12c6222e4a20d639" +"checksum wasm-bindgen-test 0.3.10 (registry+https://github.com/rust-lang/crates.io-index)" = "648da3460c6d2aa04b715a936329e2e311180efe650b2127d6267f4193ccac14" +"checksum wasm-bindgen-test-macro 0.3.10 (registry+https://github.com/rust-lang/crates.io-index)" = "cf2f86cd78a2aa7b1fb4bb6ed854eccb7f9263089c79542dca1576a1518a8467" "checksum wasm-timer 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "aa3e01d234bb71760e685cfafa5e2c96f8ad877c161a721646356651069e26ac" -"checksum web-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)" = "aaf97caf6aa8c2b1dac90faf0db529d9d63c93846cca4911856f78a83cebf53b" +"checksum web-sys 0.3.37 (registry+https://github.com/rust-lang/crates.io-index)" = "2d6f51648d8c56c366144378a33290049eafdd784071077f6fe37dae64c1c4cb" +"checksum web3 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a0631c83208cf420eeb2ed9b6cb2d5fc853aa76a43619ccec2a3d52d741f1261" "checksum web3 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "076f34ed252d74a8521e3b013254b1a39f94a98f23aae7cfc85cda6e7b395664" -"checksum webpki 0.21.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d7e664e770ac0110e2384769bcc59ed19e329d81f555916a6e072714957b81b4" +"checksum webpki 0.21.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f1f50e1972865d6b1adb54167d1c8ed48606004c2c9d0ea5f1eeb34d95e863ef" "checksum webpki-roots 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)" = "91cd5736df7f12a964a5067a12c62fa38e1bd8080aff1f80bc29be7c80d19ab4" "checksum websocket 0.21.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c9faed2bff8af2ea6b9f8b917d3d00b467583f6781fe3def174a9e33c879703" -"checksum 
weedle 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3bb43f70885151e629e2a19ce9e50bd730fd436cfd4b666894c9ce4de9141164" "checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" "checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" "checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" "checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -"checksum winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4ccfbf554c6ad11084fb7517daca16cfdcaccbdadba4fc336f032a8b12c2ad80" +"checksum winapi-util 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" "checksum winreg 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" "checksum ws 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c51a2c47b5798ccc774ffb93ff536aec7c4275d722fd9c740c83cdd1af1f2d94" diff --git a/Cargo.toml b/Cargo.toml index 9d25d3dfc..453577d23 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,6 +5,7 @@ members = [ "eth2/state_processing", "eth2/types", "eth2/utils/bls", + "eth2/utils/clap_utils", "eth2/utils/compare_fields", "eth2/utils/compare_fields_derive", "eth2/utils/deposit_contract", @@ -13,12 +14,14 @@ members = [ "eth2/utils/eth2_testnet_config", "eth2/utils/logging", "eth2/utils/eth2_hashing", + "eth2/utils/hashmap_delay", 
"eth2/utils/lighthouse_metrics", "eth2/utils/merkle_proof", "eth2/utils/int_to_bytes", "eth2/utils/safe_arith", "eth2/utils/serde_hex", "eth2/utils/slot_clock", + "eth2/utils/rest_types", "eth2/utils/ssz", "eth2/utils/ssz_derive", "eth2/utils/ssz_types", @@ -28,14 +31,15 @@ members = [ "eth2/utils/tree_hash_derive", "eth2/utils/test_random_derive", "beacon_node", - "beacon_node/store", - "beacon_node/client", - "beacon_node/rest_api", - "beacon_node/network", - "beacon_node/eth2-libp2p", - "beacon_node/version", - "beacon_node/eth1", "beacon_node/beacon_chain", + "beacon_node/client", + "beacon_node/eth1", + "beacon_node/eth2-libp2p", + "beacon_node/network", + "beacon_node/rest_api", + "beacon_node/store", + "beacon_node/timer", + "beacon_node/version", "beacon_node/websocket_server", "tests/simulator", "tests/ef_tests", diff --git a/Makefile b/Makefile index 7462ee56e..bebf7f826 100644 --- a/Makefile +++ b/Makefile @@ -2,12 +2,16 @@ EF_TESTS = "tests/ef_tests" -# Builds the entire workspace in release (optimized). +# Builds the Lighthouse binary in release (optimized). # # Binaries will most likely be found in `./target/release` install: cargo install --path lighthouse --force --locked +# Builds the lcli binary in release (optimized). +install-lcli: + cargo install --path lcli --force --locked + # Runs the full workspace tests in **release**, without downloading any additional # test vectors. 
test-release: diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index ad91ad4ef..9588767fa 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -24,5 +24,7 @@ hex = "0.3" validator_client = { path = "../validator_client" } rayon = "1.2.0" eth2_testnet_config = { path = "../eth2/utils/eth2_testnet_config" } -web3 = "0.8.0" +web3 = "0.10.0" futures = "0.1.25" +clap_utils = { path = "../eth2/utils/clap_utils" } +tokio = "0.1.22" diff --git a/account_manager/src/cli.rs b/account_manager/src/cli.rs index 07685fb70..ad84e0bd1 100644 --- a/account_manager/src/cli.rs +++ b/account_manager/src/cli.rs @@ -1,3 +1,4 @@ +use crate::deposits; use clap::{App, Arg, SubCommand}; pub fn cli_app<'a, 'b>() -> App<'a, 'b> { @@ -7,6 +8,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .subcommand( SubCommand::with_name("validator") .about("Generate or manage Etheruem 2.0 validators.") + .subcommand(deposits::cli_app()) .subcommand( SubCommand::with_name("new") .about("Create a new Ethereum 2.0 validator.") @@ -52,14 +54,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .help("The password file to unlock the eth1 account (see --index)"), ) - .arg( - Arg::with_name("testnet-dir") - .long("testnet-dir") - .value_name("DIRECTORY") - .takes_value(true) - .help("The directory from which to read the deposit contract / - address. Defaults to the current Lighthouse testnet."), - ) .subcommand( SubCommand::with_name("insecure") .about("Produce insecure, ephemeral validators. 
DO NOT USE TO STORE VALUE.") diff --git a/account_manager/src/deposits.rs b/account_manager/src/deposits.rs new file mode 100644 index 000000000..5f6cc82fc --- /dev/null +++ b/account_manager/src/deposits.rs @@ -0,0 +1,255 @@ +use clap::{App, Arg, ArgMatches}; +use clap_utils; +use environment::Environment; +use futures::{ + future::{self, loop_fn, Loop}, + Future, +}; +use slog::{info, Logger}; +use std::fs; +use std::path::PathBuf; +use std::time::{Duration, Instant}; +use tokio::timer::Delay; +use types::EthSpec; +use validator_client::validator_directory::ValidatorDirectoryBuilder; +use web3::{ + transports::Ipc, + types::{Address, SyncInfo, SyncState}, + Transport, Web3, +}; + +const SYNCING_STATE_RETRY_DELAY: Duration = Duration::from_secs(2); + +pub fn cli_app<'a, 'b>() -> App<'a, 'b> { + App::new("deposited") + .about("Creates new Lighthouse validator keys and directories. Each newly-created validator + will have a deposit transaction formed and submitted to the deposit contract via + --eth1-ipc. This application will only write each validator keys to disk if the deposit + transaction returns successfully from the eth1 node. The process exits immediately if any + Eth1 tx fails. Does not wait for Eth1 confirmation blocks, so there is no guarantee that a + deposit will be accepted in the Eth1 chain. Before key generation starts, this application + will wait until the eth1 indicates that it is not syncing via the eth_syncing endpoint") + .arg( + Arg::with_name("validator-dir") + .long("validator-dir") + .value_name("VALIDATOR_DIRECTORY") + .help("The path where the validator directories will be created. 
Defaults to ~/.lighthouse/validators") + .takes_value(true), + ) + .arg( + Arg::with_name("eth1-ipc") + .long("eth1-ipc") + .value_name("ETH1_IPC_PATH") + .help("Path to an Eth1 JSON-RPC IPC endpoint") + .takes_value(true) + .required(true) + ) + .arg( + Arg::with_name("from-address") + .long("from-address") + .value_name("FROM_ETH1_ADDRESS") + .help("The address that will submit the eth1 deposit. Must be unlocked on the node + at --eth1-ipc.") + .takes_value(true) + .required(true) + ) + .arg( + Arg::with_name("deposit-gwei") + .long("deposit-gwei") + .value_name("DEPOSIT_GWEI") + .help("The GWEI value of the deposit amount. Defaults to the minimum amount + required for an active validator (MAX_EFFECTIVE_BALANCE.") + .takes_value(true), + ) + .arg( + Arg::with_name("count") + .long("count") + .value_name("DEPOSIT_COUNT") + .help("The number of deposits to create, regardless of how many already exist") + .conflicts_with("limit") + .takes_value(true), + ) + .arg( + Arg::with_name("at-most") + .long("at-most") + .value_name("VALIDATOR_COUNT") + .help("Observe the number of validators in --validator-dir, only creating enough to + ensure reach the given count. Never deletes an existing validator.") + .conflicts_with("count") + .takes_value(true), + ) +} + +pub fn cli_run(matches: &ArgMatches, mut env: Environment) -> Result<(), String> { + let spec = env.core_context().eth2_config.spec; + let log = env.core_context().log; + + let validator_dir = clap_utils::parse_path_with_default_in_home_dir( + matches, + "validator_dir", + PathBuf::new().join(".lighthouse").join("validators"), + )?; + let eth1_ipc_path: PathBuf = clap_utils::parse_required(matches, "eth1-ipc")?; + let from_address: Address = clap_utils::parse_required(matches, "from-address")?; + let deposit_gwei = clap_utils::parse_optional(matches, "deposit-gwei")? 
+ .unwrap_or_else(|| spec.max_effective_balance); + let count: Option = clap_utils::parse_optional(matches, "count")?; + let at_most: Option = clap_utils::parse_optional(matches, "at-most")?; + + let starting_validator_count = existing_validator_count(&validator_dir)?; + + let n = match (count, at_most) { + (Some(_), Some(_)) => Err("Cannot supply --count and --at-most".to_string()), + (None, None) => Err("Must supply either --count or --at-most".to_string()), + (Some(count), None) => Ok(count), + (None, Some(at_most)) => Ok(at_most.saturating_sub(starting_validator_count)), + }?; + + if n == 0 { + info!( + log, + "No need to produce and validators, exiting"; + "--count" => count, + "--at-most" => at_most, + "existing_validators" => starting_validator_count, + ); + return Ok(()); + } + + let deposit_contract = env + .testnet + .as_ref() + .ok_or_else(|| "Unable to run account manager without a testnet dir".to_string())? + .deposit_contract_address() + .map_err(|e| format!("Unable to parse deposit contract address: {}", e))?; + + if deposit_contract == Address::zero() { + return Err("Refusing to deposit to the zero address. Check testnet configuration.".into()); + } + + let (_event_loop_handle, transport) = + Ipc::new(eth1_ipc_path).map_err(|e| format!("Unable to connect to eth1 IPC: {:?}", e))?; + let web3 = Web3::new(transport); + + env.runtime() + .block_on(poll_until_synced(web3.clone(), log.clone()))?; + + for i in 0..n { + let tx_hash_log = log.clone(); + + env.runtime() + .block_on( + ValidatorDirectoryBuilder::default() + .spec(spec.clone()) + .custom_deposit_amount(deposit_gwei) + .thread_random_keypairs() + .submit_eth1_deposit(web3.clone(), from_address, deposit_contract) + .map(move |(builder, tx_hash)| { + info!( + tx_hash_log, + "Validator deposited"; + "eth1_tx_hash" => format!("{:?}", tx_hash), + "index" => format!("{}/{}", i + 1, n), + ); + builder + }), + )? + .create_directory(validator_dir.clone())? + .write_keypair_files()? 
+ .write_eth1_data_file()? + .build()?; + } + + let ending_validator_count = existing_validator_count(&validator_dir)?; + let delta = ending_validator_count.saturating_sub(starting_validator_count); + + info!( + log, + "Success"; + "validators_created_and_deposited" => delta, + ); + + Ok(()) +} + +/// Returns the number of validators that exist in the given `validator_dir`. +/// +/// This function just assumes any file is a validator directory, making it likely to return a +/// higher number than accurate but never a lower one. +fn existing_validator_count(validator_dir: &PathBuf) -> Result { + fs::read_dir(&validator_dir) + .map(|iter| iter.count()) + .map_err(|e| format!("Unable to read {:?}: {}", validator_dir, e)) +} + +/// Run a poll on the `eth_syncing` endpoint, blocking until the node is synced. +fn poll_until_synced(web3: Web3, log: Logger) -> impl Future + Send +where + T: Transport + Send + 'static, + ::Out: Send, +{ + loop_fn((web3.clone(), log.clone()), move |(web3, log)| { + web3.clone() + .eth() + .syncing() + .map_err(|e| format!("Unable to read syncing state from eth1 node: {:?}", e)) + .and_then::<_, Box + Send>>(move |sync_state| { + match sync_state { + SyncState::Syncing(SyncInfo { + current_block, + highest_block, + .. 
+ }) => { + info!( + log, + "Waiting for eth1 node to sync"; + "est_highest_block" => format!("{}", highest_block), + "current_block" => format!("{}", current_block), + ); + + Box::new( + Delay::new(Instant::now() + SYNCING_STATE_RETRY_DELAY) + .map_err(|e| format!("Failed to trigger delay: {:?}", e)) + .and_then(|_| future::ok(Loop::Continue((web3, log)))), + ) + } + SyncState::NotSyncing => Box::new( + web3.clone() + .eth() + .block_number() + .map_err(|e| { + format!("Unable to read block number from eth1 node: {:?}", e) + }) + .and_then::<_, Box + Send>>( + |block_number| { + if block_number > 0.into() { + info!( + log, + "Eth1 node is synced"; + "head_block" => format!("{}", block_number), + ); + Box::new(future::ok(Loop::Break((web3, log)))) + } else { + Box::new( + Delay::new(Instant::now() + SYNCING_STATE_RETRY_DELAY) + .map_err(|e| { + format!("Failed to trigger delay: {:?}", e) + }) + .and_then(|_| { + info!( + log, + "Waiting for eth1 node to sync"; + "current_block" => 0, + ); + future::ok(Loop::Continue((web3, log))) + }), + ) + } + }, + ), + ), + } + }) + }) + .map(|_| ()) +} diff --git a/account_manager/src/lib.rs b/account_manager/src/lib.rs index 101c7634e..4f3c80ec7 100644 --- a/account_manager/src/lib.rs +++ b/account_manager/src/lib.rs @@ -1,4 +1,5 @@ mod cli; +mod deposits; use clap::ArgMatches; use deposit_contract::DEPOSIT_GAS; @@ -6,7 +7,7 @@ use environment::{Environment, RuntimeContext}; use eth2_testnet_config::Eth2TestnetConfig; use futures::{future, Future, IntoFuture, Stream}; use rayon::prelude::*; -use slog::{crit, error, info, Logger}; +use slog::{error, info, Logger}; use std::fs; use std::fs::File; use std::io::Read; @@ -21,20 +22,8 @@ use web3::{ pub use cli::cli_app; -/// Run the account manager, logging an error if the operation did not succeed. 
-pub fn run(matches: &ArgMatches, mut env: Environment) { - let log = env.core_context().log.clone(); - match run_account_manager(matches, env) { - Ok(()) => (), - Err(e) => crit!(log, "Account manager failed"; "error" => e), - } -} - /// Run the account manager, returning an error if the operation did not succeed. -fn run_account_manager( - matches: &ArgMatches, - mut env: Environment, -) -> Result<(), String> { +pub fn run(matches: &ArgMatches, mut env: Environment) -> Result<(), String> { let context = env.core_context(); let log = context.log.clone(); @@ -60,6 +49,7 @@ fn run_account_manager( match matches.subcommand() { ("validator", Some(matches)) => match matches.subcommand() { + ("deposited", Some(matches)) => deposits::cli_run(matches, env)?, ("new", Some(matches)) => run_new_validator_subcommand(matches, datadir, env)?, _ => { return Err("Invalid 'validator new' command. See --help.".to_string()); diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 9ed5a2758..7239edbf7 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner ", "Age Manning ", "Age Manning "] edition = "2018" @@ -32,10 +32,10 @@ eth2_ssz_derive = "0.1.0" state_processing = { path = "../../eth2/state_processing" } tree_hash = "0.1.0" types = { path = "../../eth2/types" } +tokio = "0.1.22" eth1 = { path = "../eth1" } websocket_server = { path = "../websocket_server" } futures = "0.1.25" -exit-future = "0.1.3" genesis = { path = "../genesis" } integer-sqrt = "0.1" rand = "0.7.2" diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index c04e9b1e3..e76bd8daf 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1,3 +1,7 @@ +use crate::block_verification::{ + check_block_relevancy, get_block_root, signature_verify_chain_segment, BlockError, + 
FullyVerifiedBlock, GossipVerifiedBlock, IntoFullyVerifiedBlock, +}; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; use crate::events::{EventHandler, EventKind}; @@ -5,6 +9,7 @@ use crate::fork_choice::{Error as ForkChoiceError, ForkChoice}; use crate::head_tracker::HeadTracker; use crate::metrics; use crate::migrate::Migrate; +use crate::naive_aggregation_pool::{Error as NaiveAggregationError, NaiveAggregationPool}; use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::shuffling_cache::ShufflingCache; use crate::snapshot_cache::SnapshotCache; @@ -12,23 +17,20 @@ use crate::timeout_rw_lock::TimeoutRwLock; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::BeaconSnapshot; use operation_pool::{OperationPool, PersistedOperationPool}; -use slog::{debug, error, info, trace, warn, Logger}; +use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; -use ssz::Encode; use state_processing::per_block_processing::errors::{ AttestationValidationError, AttesterSlashingValidationError, ExitValidationError, ProposerSlashingValidationError, }; use state_processing::{ common::get_indexed_attestation, per_block_processing, per_slot_processing, - signature_sets::indexed_attestation_signature_set_from_pubkeys, BlockProcessingError, - BlockSignatureStrategy, BlockSignatureVerifier, + signature_sets::indexed_attestation_signature_set_from_pubkeys, BlockSignatureStrategy, }; use std::borrow::Cow; use std::cmp::Ordering; use std::collections::HashMap; use std::collections::HashSet; -use std::fs; use std::io::prelude::*; use std::sync::Arc; use std::time::{Duration, Instant}; @@ -36,24 +38,14 @@ use store::iter::{ BlockRootsIterator, ParentRootBlockIterator, ReverseBlockRootIterator, ReverseStateRootIterator, StateRootsIterator, }; -use store::{Error as DBError, StateBatch, Store}; -use tree_hash::TreeHash; +use store::{Error as DBError, Store}; use 
types::*; // Text included in blocks. // Must be 32-bytes or panic. // // |-------must be this long------| -pub const GRAFFITI: &str = "sigp/lighthouse-0.1.1-prerelease"; - -/// If true, everytime a block is processed the pre-state, post-state and block are written to SSZ -/// files in the temp directory. -/// -/// Only useful for testing. -const WRITE_BLOCK_PROCESSING_SSZ: bool = cfg!(feature = "write_ssz_files"); - -/// Maximum block slot number. Block with slots bigger than this constant will NOT be processed. -const MAXIMUM_BLOCK_SLOT_NUMBER: u64 = 4_294_967_296; // 2^32 +pub const GRAFFITI: &str = "sigp/lighthouse-0.2.0-prerelease"; /// The time-out before failure during an operation to take a read/write RwLock on the canonical /// head. @@ -61,14 +53,14 @@ const HEAD_LOCK_TIMEOUT: Duration = Duration::from_secs(1); /// The time-out before failure during an operation to take a read/write RwLock on the block /// processing cache. -const BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1); +pub const BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1); /// The time-out before failure during an operation to take a read/write RwLock on the /// attestation cache. const ATTESTATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1); /// The time-out before failure during an operation to take a read/write RwLock on the /// validator pubkey cache. -const VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1); +pub const VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1); pub const BEACON_CHAIN_DB_KEY: [u8; 32] = [0; 32]; pub const OP_POOL_DB_KEY: [u8; 32] = [0; 32]; @@ -76,42 +68,40 @@ pub const ETH1_CACHE_DB_KEY: [u8; 32] = [0; 32]; pub const FORK_CHOICE_DB_KEY: [u8; 32] = [0; 32]; #[derive(Debug, PartialEq)] -pub enum BlockProcessingOutcome { - /// Block was valid and imported into the block graph. 
- Processed { - block_root: Hash256, - }, - InvalidSignature, - /// The parent block was unknown. - ParentUnknown { - parent: Hash256, - reference_location: &'static str, - }, - /// The block slot is greater than the present slot. - FutureSlot { - present_slot: Slot, - block_slot: Slot, - }, - /// The block state_root does not match the generated state. - StateRootMismatch { - block: Hash256, - local: Hash256, - }, - /// The block was a genesis block, these blocks cannot be re-imported. - GenesisBlock, - /// The slot is finalized, no need to import. - WouldRevertFinalizedSlot { - block_slot: Slot, - finalized_slot: Slot, - }, - /// Block is already known, no need to re-import. - BlockIsAlreadyKnown, - /// The block slot exceeds the MAXIMUM_BLOCK_SLOT_NUMBER. - BlockSlotLimitReached, - /// The block could not be applied to the state, it is invalid. - PerBlockProcessingError(BlockProcessingError), +pub enum AttestationType { + /// An attestation with a single-signature that has been published in accordance with the naive + /// aggregation strategy. + /// + /// These attestations may have come from a `committee_index{subnet_id}_beacon_attestation` + /// gossip subnet or they have have come directly from a validator attached to our API. + /// + /// If `should_store == true`, the attestation will be added to the `NaiveAggregationPool`. + Unaggregated { should_store: bool }, + /// An attestation with one more more signatures that has passed through the aggregation phase + /// of the naive aggregation scheme. + /// + /// These attestations must have come from the `beacon_aggregate_and_proof` gossip subnet. + Aggregated, } +/// The result of a chain segment processing. +#[derive(Debug)] +pub enum ChainSegmentResult { + /// Processing this chain segment finished successfully. + Successful { imported_blocks: usize }, + /// There was an error processing this chain segment. Before the error, some blocks could + /// have been imported. 
+ Failed { + imported_blocks: usize, + error: BlockError, + }, +} + +/// The accepted clock drift for nodes gossiping blocks and attestations (spec v0.11.0). See: +/// +/// https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/p2p-interface.md#configuration +pub const MAXIMUM_GOSSIP_CLOCK_DISPARITY: Duration = Duration::from_millis(500); + #[derive(Debug, PartialEq)] pub enum AttestationProcessingOutcome { Processed, @@ -160,6 +150,7 @@ pub enum StateSkipConfig { WithoutStateRoots, } +#[derive(Debug, PartialEq)] pub struct HeadInfo { pub slot: Slot, pub block_root: Hash256, @@ -193,12 +184,20 @@ pub struct BeaconChain { /// Stores all operations (e.g., `Attestation`, `Deposit`, etc) that are candidates for /// inclusion in a block. pub op_pool: OperationPool, + /// A pool of attestations dedicated to the "naive aggregation strategy" defined in the eth2 + /// specs. + /// + /// This pool accepts `Attestation` objects that only have one aggregation bit set and provides + /// a method to get an aggregated `Attestation` for some `AttestationData`. + pub naive_aggregation_pool: NaiveAggregationPool, /// Provides information from the Ethereum 1 (PoW) chain. pub eth1_chain: Option>, /// Stores a "snapshot" of the chain at the time the head-of-the-chain block was received. pub(crate) canonical_head: TimeoutRwLock>, /// The root of the genesis block. pub genesis_block_root: Hash256, + /// The root of the list of genesis validators, used during syncing. + pub genesis_validators_root: Hash256, /// A state-machine that is updated with information from the network and chooses a canonical /// head block. pub fork_choice: ForkChoice, @@ -207,11 +206,13 @@ pub struct BeaconChain { /// Used to track the heads of the beacon chain. pub(crate) head_tracker: Arc, /// A cache dedicated to block processing. - pub(crate) block_processing_cache: TimeoutRwLock>, + pub(crate) snapshot_cache: TimeoutRwLock>, /// Caches the shuffling for a given epoch and state root. 
pub(crate) shuffling_cache: TimeoutRwLock, /// Caches a map of `validator_index -> validator_pubkey`. pub(crate) validator_pubkey_cache: TimeoutRwLock, + /// A list of any hard-coded forks that have been disabled. + pub disabled_forks: Vec, /// Logging to CLI, etc. pub(crate) log: Logger, } @@ -689,7 +690,59 @@ impl BeaconChain { .map_err(Into::into) } - /// Produce an `Attestation` that is valid for the given `slot` and `index`. + /// Returns the attestation slot and committee index for a given validator index. + /// + /// Information is read from the current state, so only information from the present and prior + /// epoch is available. + pub fn validator_attestation_slot_and_index( + &self, + validator_index: usize, + epoch: Epoch, + ) -> Result, Error> { + let as_epoch = |slot: Slot| slot.epoch(T::EthSpec::slots_per_epoch()); + let head_state = &self.head()?.beacon_state; + + let mut state = if epoch == as_epoch(head_state.slot) { + self.head()?.beacon_state + } else { + // The block proposer shuffling is not affected by the state roots, so we don't need to + // calculate them. + self.state_at_slot( + epoch.start_slot(T::EthSpec::slots_per_epoch()), + StateSkipConfig::WithoutStateRoots, + )? + }; + + state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + + if as_epoch(state.slot) != epoch { + return Err(Error::InvariantViolated(format!( + "Epochs in consistent in attestation duties lookup: state: {}, requested: {}", + as_epoch(state.slot), + epoch + ))); + } + + if let Some(attestation_duty) = + state.get_attestation_duties(validator_index, RelativeEpoch::Current)? + { + Ok(Some((attestation_duty.slot, attestation_duty.index))) + } else { + Ok(None) + } + } + + /// Returns an aggregated `Attestation`, if any, that has a matching `attestation.data`. + /// + /// The attestation will be obtained from `self.naive_aggregation_pool`. 
+ pub fn get_aggregated_attestation( + &self, + data: &AttestationData, + ) -> Result>, Error> { + self.naive_aggregation_pool.get(data).map_err(Into::into) + } + + /// Produce a raw unsigned `Attestation` that is valid for the given `slot` and `index`. /// /// Always attests to the canonical chain. pub fn produce_attestation( @@ -805,14 +858,21 @@ impl BeaconChain { /// /// - Whilst the `attestation` is added to fork choice, the head is not updated. That must be /// done separately. + /// + /// The `store_raw` parameter determines if this attestation is to be stored in the operation + /// pool. `None` indicates the attestation is not stored in the operation pool (we don't have a + /// validator required to aggregate these attestations). `Some(true)` indicates we are storing a + /// raw un-aggregated attestation from a subnet into the `op_pool` which is short-lived and `Some(false)` + /// indicates that we are storing an aggregate attestation in the `op_pool`. pub fn process_attestation( &self, attestation: Attestation, + attestation_type: AttestationType, ) -> Result { metrics::inc_counter(&metrics::ATTESTATION_PROCESSING_REQUESTS); let timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_TIMES); - let outcome = self.process_attestation_internal(attestation.clone()); + let outcome = self.process_attestation_internal(attestation.clone(), attestation_type); match &outcome { Ok(outcome) => match outcome { @@ -866,6 +926,7 @@ impl BeaconChain { pub fn process_attestation_internal( &self, attestation: Attestation, + attestation_type: AttestationType, ) -> Result { let initial_validation_timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_INITIAL_VALIDATION_TIMES); @@ -1104,15 +1165,73 @@ impl BeaconChain { } // Provide the valid attestation to op pool, which may choose to retain the - // attestation for inclusion in a future block. + // attestation for inclusion in a future block. 
If we receive an attestation from a + // subnet without a validator responsible for aggregating it, we don't store it in the + // op pool. if self.eth1_chain.is_some() { - self.op_pool.insert_attestation( - attestation, - &fork, - genesis_validators_root, - &self.spec, - )?; - }; + match attestation_type { + AttestationType::Unaggregated { should_store } if should_store => { + match self.naive_aggregation_pool.insert(&attestation) { + Ok(outcome) => trace!( + self.log, + "Stored unaggregated attestation"; + "outcome" => format!("{:?}", outcome), + "index" => attestation.data.index, + "slot" => attestation.data.slot.as_u64(), + ), + Err(NaiveAggregationError::SlotTooLow { + slot, + lowest_permissible_slot, + }) => { + trace!( + self.log, + "Refused to store unaggregated attestation"; + "lowest_permissible_slot" => lowest_permissible_slot.as_u64(), + "slot" => slot.as_u64(), + ); + } + Err(e) => error!( + self.log, + "Failed to store unaggregated attestation"; + "error" => format!("{:?}", e), + "index" => attestation.data.index, + "slot" => attestation.data.slot.as_u64(), + ), + } + } + AttestationType::Unaggregated { .. } => trace!( + self.log, + "Did not store unaggregated attestation"; + "index" => attestation.data.index, + "slot" => attestation.data.slot.as_u64(), + ), + AttestationType::Aggregated => { + let index = attestation.data.index; + let slot = attestation.data.slot; + + match self.op_pool.insert_attestation( + attestation, + &fork, + genesis_validators_root, + &self.spec, + ) { + Ok(_) => {} + Err(e) => { + error!( + self.log, + "Failed to add attestation to op pool"; + "error" => format!("{:?}", e), + "index" => index, + "slot" => slot.as_u64(), + ); + } + } + } + } + } + + // Update the metrics. + metrics::inc_counter(&metrics::ATTESTATION_PROCESSING_SUCCESSES); Ok(AttestationProcessingOutcome::Processed) } else { @@ -1267,285 +1386,294 @@ impl BeaconChain { } } - /// Accept some block and attempt to add it to block DAG. 
+ /// Attempt to verify and import a chain of blocks to `self`. /// - /// Will accept blocks from prior slots, however it will reject any block from a future slot. - pub fn process_block( + /// The provided blocks _must_ each reference the previous block via `block.parent_root` (i.e., + /// be a chain). An error will be returned if this is not the case. + /// + /// This operation is not atomic; if one of the blocks in the chain is invalid then some prior + /// blocks might be imported. + /// + /// This method is generally much more efficient than importing each block using + /// `Self::process_block`. + pub fn process_chain_segment( + &self, + chain_segment: Vec>, + ) -> ChainSegmentResult { + let mut filtered_chain_segment = Vec::with_capacity(chain_segment.len()); + let mut imported_blocks = 0; + + // Produce a list of the parent root and slot of the child of each block. + // + // E.g., `children[0] == (chain_segment[1].parent_root(), chain_segment[1].slot())` + let children = chain_segment + .iter() + .skip(1) + .map(|block| (block.parent_root(), block.slot())) + .collect::>(); + + for (i, block) in chain_segment.into_iter().enumerate() { + let block_root = get_block_root(&block); + + if let Some((child_parent_root, child_slot)) = children.get(i) { + // If this block has a child in this chain segment, ensure that its parent root matches + // the root of this block. + // + // Without this check it would be possible to have a block verified using the + // incorrect shuffling. That would be bad, mmkay. + if block_root != *child_parent_root { + return ChainSegmentResult::Failed { + imported_blocks, + error: BlockError::NonLinearParentRoots, + }; + } + + // Ensure that the slots are strictly increasing throughout the chain segment. 
+ if *child_slot <= block.slot() { + return ChainSegmentResult::Failed { + imported_blocks, + error: BlockError::NonLinearSlots, + }; + } + } + + match check_block_relevancy(&block, Some(block_root), self) { + // If the block is relevant, add it to the filtered chain segment. + Ok(_) => filtered_chain_segment.push((block_root, block)), + // If the block is already known, simply ignore this block. + Err(BlockError::BlockIsAlreadyKnown) => continue, + // If the block is the genesis block, simply ignore this block. + Err(BlockError::GenesisBlock) => continue, + // If the block is for a finalized slot, simply ignore this block. + // + // The block is either: + // + // 1. In the canonical finalized chain. + // 2. In some non-canonical chain at a slot that has been finalized already. + // + // In the case of (1), there's no need to re-import and later blocks in this + // segment might be useful. + // + // In the case of (2), skipping the block is valid since we should never import it. + // However, we will potentially get a `ParentUnknown` on a later block. The sync + // protocol will need to ensure this is handled gracefully. + Err(BlockError::WouldRevertFinalizedSlot { .. }) => continue, + // If there was an error whilst determining if the block was invalid, return that + // error. + Err(BlockError::BeaconChainError(e)) => { + return ChainSegmentResult::Failed { + imported_blocks, + error: BlockError::BeaconChainError(e), + } + } + // If the block was decided to be irrelevant for any other reason, don't include + // this block or any of its children in the filtered chain segment. + _ => break, + } + } + + while !filtered_chain_segment.is_empty() { + // Determine the epoch of the first block in the remaining segment.
+ let start_epoch = filtered_chain_segment + .first() + .map(|(_root, block)| block) + .expect("chain_segment cannot be empty") + .slot() + .epoch(T::EthSpec::slots_per_epoch()); + + // The `last_index` indicates the position of the last block that is in the current + // epoch of `start_epoch`. + let last_index = filtered_chain_segment + .iter() + .position(|(_root, block)| { + block.slot().epoch(T::EthSpec::slots_per_epoch()) > start_epoch + }) + .unwrap_or_else(|| filtered_chain_segment.len()); + + // Split off the first section blocks that are all either within the current epoch of + // the first block. These blocks can all be signature-verified with the same + // `BeaconState`. + let mut blocks = filtered_chain_segment.split_off(last_index); + std::mem::swap(&mut blocks, &mut filtered_chain_segment); + + // Verify the signature of the blocks, returning early if the signature is invalid. + let signature_verified_blocks = match signature_verify_chain_segment(blocks, self) { + Ok(blocks) => blocks, + Err(error) => { + return ChainSegmentResult::Failed { + imported_blocks, + error, + } + } + }; + + // Import the blocks into the chain. + for signature_verified_block in signature_verified_blocks { + match self.process_block(signature_verified_block) { + Ok(_) => imported_blocks += 1, + Err(error) => { + return ChainSegmentResult::Failed { + imported_blocks, + error, + } + } + } + } + } + + ChainSegmentResult::Successful { imported_blocks } + } + + /// Returns `Ok(GossipVerifiedBlock)` if the supplied `block` should be forwarded onto the + /// gossip network. The block is not imported into the chain, it is just partially verified. + /// + /// The returned `GossipVerifiedBlock` should be provided to `Self::process_block` immediately + /// after it is returned, unless some other circumstance decides it should not be imported at + /// all. 
+ /// + /// ## Errors + /// + /// Returns an `Err` if the given block was invalid, or an error was encountered during + pub fn verify_block_for_gossip( &self, block: SignedBeaconBlock, - ) -> Result { - let outcome = self.process_block_internal(block.clone()); + ) -> Result, BlockError> { + let slot = block.message.slot; + let graffiti_string = String::from_utf8(block.message.body.graffiti[..].to_vec()) + .unwrap_or_else(|_| format!("{:?}", &block.message.body.graffiti[..])); - match &outcome { - Ok(outcome) => match outcome { - BlockProcessingOutcome::Processed { block_root } => { - trace!( - self.log, - "Beacon block imported"; - "block_root" => format!("{:?}", block_root), - "block_slot" => format!("{:?}", block.slot().as_u64()), - ); - let _ = self.event_handler.register(EventKind::BeaconBlockImported { - block_root: *block_root, - block: Box::new(block), - }); - } - other => { - trace!( - self.log, - "Beacon block rejected"; - "reason" => format!("{:?}", other), - ); - let _ = self.event_handler.register(EventKind::BeaconBlockRejected { - reason: format!("Invalid block: {:?}", other), - block: Box::new(block), - }); - } - }, + match GossipVerifiedBlock::new(block, self) { + Ok(verified) => { + debug!( + self.log, + "Successfully processed gossip block"; + "graffiti" => graffiti_string, + "slot" => slot, + "root" => format!("{:?}", verified.block_root()), + ); + + Ok(verified) + } Err(e) => { - error!( + debug!( + self.log, + "Rejected gossip block"; + "error" => format!("{:?}", e), + "graffiti" => graffiti_string, + "slot" => slot, + ); + + Err(e) + } + } + } + + /// Returns `Ok(block_root)` if the given `unverified_block` was successfully verified and + /// imported into the chain. + /// + /// Items that implement `IntoFullyVerifiedBlock` include: + /// + /// - `SignedBeaconBlock` + /// - `GossipVerifiedBlock` + /// + /// ## Errors + /// + /// Returns an `Err` if the given block was invalid, or an error was encountered during + /// verification. 
+ pub fn process_block>( + &self, + unverified_block: B, + ) -> Result { + // Start the Prometheus timer. + let full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); + + // Increment the Prometheus counter for block processing requests. + metrics::inc_counter(&metrics::BLOCK_PROCESSING_REQUESTS); + + // Clone the block so we can provide it to the event handler. + let block = unverified_block.block().clone(); + + // A small closure to group the verification and import errors. + let import_block = |unverified_block: B| -> Result { + let fully_verified = unverified_block.into_fully_verified_block(self)?; + self.import_block(fully_verified) + }; + + // Verify and import the block. + let result = match import_block(unverified_block) { + // The block was successfully verified and imported. Yay. + Ok(block_root) => { + trace!( + self.log, + "Beacon block imported"; + "block_root" => format!("{:?}", block_root), + "block_slot" => format!("{:?}", block.slot().as_u64()), + ); + + // Increment the Prometheus counter for block processing successes. + metrics::inc_counter(&metrics::BLOCK_PROCESSING_SUCCESSES); + + let _ = self.event_handler.register(EventKind::BeaconBlockImported { + block_root: block_root, + block: Box::new(block), + }); + + Ok(block_root) + } + // There was an error whilst attempting to verify and import the block. The block might + // be partially verified or partially imported. + Err(BlockError::BeaconChainError(e)) => { + crit!( self.log, "Beacon block processing error"; "error" => format!("{:?}", e), ); + let _ = self.event_handler.register(EventKind::BeaconBlockRejected { reason: format!("Internal error: {:?}", e), block: Box::new(block), }); - } - } - outcome + Err(BlockError::BeaconChainError(e)) + } + // The block failed verification. 
+ Err(other) => { + trace!( + self.log, + "Beacon block rejected"; + "reason" => format!("{:?}", other), + ); + + let _ = self.event_handler.register(EventKind::BeaconBlockRejected { + reason: format!("Invalid block: {:?}", other), + block: Box::new(block), + }); + + Err(other) + } + }; + + // Stop the Prometheus timer. + metrics::stop_timer(full_timer); + + result } - /// Accept some block and attempt to add it to block DAG. + /// Accepts a fully-verified block and imports it into the chain without performing any + /// additional verification. /// - /// Will accept blocks from prior slots, however it will reject any block from a future slot. - fn process_block_internal( + /// An error is returned if the block was unable to be imported. It may be partially imported + /// (i.e., this function is not atomic). + fn import_block( &self, - signed_block: SignedBeaconBlock, - ) -> Result { - metrics::inc_counter(&metrics::BLOCK_PROCESSING_REQUESTS); - let full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); - + fully_verified_block: FullyVerifiedBlock, + ) -> Result { + let signed_block = fully_verified_block.block; let block = &signed_block.message; - - let finalized_slot = self - .head_info()? - .finalized_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()); - - if block.slot == 0 { - return Ok(BlockProcessingOutcome::GenesisBlock); - } - - if block.slot >= MAXIMUM_BLOCK_SLOT_NUMBER { - return Ok(BlockProcessingOutcome::BlockSlotLimitReached); - } - - if block.slot <= finalized_slot { - return Ok(BlockProcessingOutcome::WouldRevertFinalizedSlot { - block_slot: block.slot, - finalized_slot, - }); - } - - // Reject any block if its parent is not known to fork choice. - // - // A block that is not in fork choice is either: - // - // - Not yet imported: we should reject this block because we should only import a child - // after its parent has been fully imported. 
- // - Pre-finalized: if the parent block is _prior_ to finalization, we should ignore it - // because it will revert finalization. Note that the finalized block is stored in fork - // choice, so we will not reject any child of the finalized block (this is relevant during - // genesis). - if !self.fork_choice.contains_block(&block.parent_root) { - return Ok(BlockProcessingOutcome::ParentUnknown { - parent: block.parent_root, - reference_location: "fork_choice", - }); - } - - let block_root_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_BLOCK_ROOT); - - let block_root = block.canonical_root(); - - metrics::stop_timer(block_root_timer); - - if block_root == self.genesis_block_root { - return Ok(BlockProcessingOutcome::GenesisBlock); - } - - let present_slot = self.slot()?; - - if block.slot > present_slot { - return Ok(BlockProcessingOutcome::FutureSlot { - present_slot, - block_slot: block.slot, - }); - } - - // Check if the block is already known. We know it is post-finalization, so it is - // sufficient to check the fork choice. - if self.fork_choice.contains_block(&block_root) { - return Ok(BlockProcessingOutcome::BlockIsAlreadyKnown); - } - - // Records the time taken to load the block and state from the database during block - // processing. - let db_read_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_READ); - - let cached_snapshot = self - .block_processing_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .and_then(|mut block_processing_cache| { - block_processing_cache.try_remove(block.parent_root) - }); - - let (parent_block, parent_state) = if let Some(snapshot) = cached_snapshot { - (snapshot.beacon_block, snapshot.beacon_state) - } else { - // Load the blocks parent block from the database, returning invalid if that block is not - // found. - let parent_block = match self.get_block(&block.parent_root)? 
{ - Some(block) => block, - None => { - return Ok(BlockProcessingOutcome::ParentUnknown { - parent: block.parent_root, - reference_location: "database", - }); - } - }; - - // Load the parent blocks state from the database, returning an error if it is not found. - // It is an error because if we know the parent block we should also know the parent state. - let parent_state_root = parent_block.state_root(); - let parent_state = self - .get_state(&parent_state_root, Some(parent_block.slot()))? - .ok_or_else(|| { - Error::DBInconsistent(format!("Missing state {:?}", parent_state_root)) - })?; - - (parent_block, parent_state) - }; - - metrics::stop_timer(db_read_timer); - - write_block(&block, block_root, &self.log); - - let catchup_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CATCHUP_STATE); - - // Keep a batch of any states that were "skipped" (block-less) in between the parent state - // slot and the block slot. These will be stored in the database. - let mut intermediate_states = StateBatch::new(); - - // Transition the parent state to the block slot. - let mut state: BeaconState = parent_state; - let distance = block.slot.as_u64().saturating_sub(state.slot.as_u64()); - for i in 0..distance { - let state_root = if i == 0 { - parent_block.state_root() - } else { - // This is a new state we've reached, so stage it for storage in the DB. - // Computing the state root here is time-equivalent to computing it during slot - // processing, but we get early access to it. 
- let state_root = state.update_tree_hash_cache()?; - intermediate_states.add_state(state_root, &state)?; - state_root - }; - - per_slot_processing(&mut state, Some(state_root), &self.spec)?; - } - - metrics::stop_timer(catchup_timer); - - let committee_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_COMMITTEE); - - state.build_committee_cache(RelativeEpoch::Previous, &self.spec)?; - state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; - - metrics::stop_timer(committee_timer); - - write_state( - &format!("state_pre_block_{}", block_root), - &state, - &self.log, - ); - - let signature_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_SIGNATURE); - - let signature_verification_result = { - let validator_pubkey_cache = self - .validator_pubkey_cache - .try_write_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or_else(|| Error::ValidatorPubkeyCacheLockTimeout)?; - - BlockSignatureVerifier::verify_entire_block( - &state, - |validator_index| { - // Disallow access to any validator pubkeys that are not in the current beacon - // state. - if validator_index < state.validators.len() { - validator_pubkey_cache - .get(validator_index) - .map(|pk| Cow::Borrowed(pk.as_point())) - } else { - None - } - }, - &signed_block, - Some(block_root), - &self.spec, - ) - }; - - if signature_verification_result.is_err() { - return Ok(BlockProcessingOutcome::InvalidSignature); - } - - metrics::stop_timer(signature_timer); - - let core_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CORE); - - // Apply the received block to its parent state (which has been transitioned into this - // slot). - match per_block_processing( - &mut state, - &signed_block, - Some(block_root), - // Signatures were verified earlier in this function. 
- BlockSignatureStrategy::NoVerification, - &self.spec, - ) { - Err(BlockProcessingError::BeaconStateError(e)) => { - return Err(Error::BeaconStateError(e)) - } - Err(e) => return Ok(BlockProcessingOutcome::PerBlockProcessingError(e)), - _ => {} - } - - metrics::stop_timer(core_timer); - - let state_root_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_STATE_ROOT); - - let state_root = state.update_tree_hash_cache()?; - - metrics::stop_timer(state_root_timer); - - write_state( - &format!("state_post_block_{}", block_root), - &state, - &self.log, - ); - - if block.state_root != state_root { - return Ok(BlockProcessingOutcome::StateRootMismatch { - block: block.state_root, - local: state_root, - }); - } + let block_root = fully_verified_block.block_root; + let state = fully_verified_block.state; + let parent_block = fully_verified_block.parent_block; + let intermediate_states = fully_verified_block.intermediate_states; let fork_choice_register_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_FORK_CHOICE_REGISTER); @@ -1588,7 +1716,7 @@ impl BeaconChain { // Register the new block with the fork choice service. if let Err(e) = self .fork_choice - .process_block(self, &state, &block, block_root) + .process_block(self, &state, block, block_root) { error!( self.log, @@ -1618,24 +1746,24 @@ impl BeaconChain { // solution would be to use a database transaction (once our choice of database and API // settles down). 
// See: https://github.com/sigp/lighthouse/issues/692 - self.store.put_state(&state_root, &state)?; + self.store.put_state(&block.state_root, &state)?; self.store.put_block(&block_root, signed_block.clone())?; - self.block_processing_cache + self.snapshot_cache .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .map(|mut block_processing_cache| { - block_processing_cache.insert(BeaconSnapshot { + .map(|mut snapshot_cache| { + snapshot_cache.insert(BeaconSnapshot { + beacon_state: state, + beacon_state_root: signed_block.state_root(), beacon_block: signed_block, beacon_block_root: block_root, - beacon_state: state, - beacon_state_root: state_root, }); }) .unwrap_or_else(|| { error!( self.log, "Failed to obtain cache write lock"; - "lock" => "block_processing_cache", + "lock" => "snapshot_cache", "task" => "process block" ); }); @@ -1644,9 +1772,7 @@ impl BeaconChain { metrics::inc_counter(&metrics::BLOCK_PROCESSING_SUCCESSES); - metrics::stop_timer(full_timer); - - Ok(BlockProcessingOutcome::Processed { block_root }) + Ok(block_root) } /// Produce a new block at the given `slot`. @@ -1815,9 +1941,9 @@ impl BeaconChain { // Try and obtain the snapshot for `beacon_block_root` from the snapshot cache, falling // back to a database read if that fails. 
let new_head = self - .block_processing_cache + .snapshot_cache .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .and_then(|block_processing_cache| block_processing_cache.get_cloned(beacon_block_root)) + .and_then(|snapshot_cache| snapshot_cache.get_cloned(beacon_block_root)) .map::, _>(|snapshot| Ok(snapshot)) .unwrap_or_else(|| { let beacon_block = self @@ -1916,16 +2042,16 @@ impl BeaconChain { metrics::stop_timer(update_head_timer); - self.block_processing_cache + self.snapshot_cache .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .map(|mut block_processing_cache| { - block_processing_cache.update_head(beacon_block_root); + .map(|mut snapshot_cache| { + snapshot_cache.update_head(beacon_block_root); }) .unwrap_or_else(|| { error!( self.log, "Failed to obtain cache write lock"; - "lock" => "block_processing_cache", + "lock" => "snapshot_cache", "task" => "update head" ); }); @@ -1947,6 +2073,16 @@ impl BeaconChain { Ok(()) } + /// Called by the timer on every slot. + /// + /// Performs slot-based pruning. + pub fn per_slot_task(&self) { + trace!(self.log, "Running beacon chain per slot tasks"); + if let Some(slot) = self.slot_clock.now() { + self.naive_aggregation_pool.prune(slot); + } + } + /// Called after `self` has had a new block finalized. /// /// Performs pruning and finality-based optimizations. @@ -1972,16 +2108,16 @@ impl BeaconChain { } else { self.fork_choice.prune()?; - self.block_processing_cache + self.snapshot_cache .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .map(|mut block_processing_cache| { - block_processing_cache.prune(new_finalized_epoch); + .map(|mut snapshot_cache| { + snapshot_cache.prune(new_finalized_epoch); }) .unwrap_or_else(|| { error!( self.log, "Failed to obtain cache write lock"; - "lock" => "block_processing_cache", + "lock" => "snapshot_cache", "task" => "prune" ); }); @@ -2069,6 +2205,22 @@ impl BeaconChain { Ok(dump) } + /// Gets the current `EnrForkId`. 
+ pub fn enr_fork_id(&self) -> EnrForkId { + // If we are unable to read the slot clock we assume that it is prior to genesis and + // therefore use the genesis slot. + let slot = self.slot().unwrap_or_else(|_| self.spec.genesis_slot); + + self.spec.enr_fork_id(slot, self.genesis_validators_root) + } + + /// Calculates the `Duration` to the next fork, if one exists. + pub fn duration_to_next_fork(&self) -> Option { + let epoch = self.spec.next_fork_epoch()?; + self.slot_clock + .duration_to_slot(epoch.start_slot(T::EthSpec::slots_per_epoch())) + } + pub fn dump_as_dot(&self, output: &mut W) { let canonical_head_hash = self .canonical_head @@ -2187,49 +2339,6 @@ impl Drop for BeaconChain { } } -fn write_state(prefix: &str, state: &BeaconState, log: &Logger) { - if WRITE_BLOCK_PROCESSING_SSZ { - let root = state.tree_hash_root(); - let filename = format!("{}_slot_{}_root_{}.ssz", prefix, state.slot, root); - let mut path = std::env::temp_dir().join("lighthouse"); - let _ = fs::create_dir_all(path.clone()); - path = path.join(filename); - - match fs::File::create(path.clone()) { - Ok(mut file) => { - let _ = file.write_all(&state.as_ssz_bytes()); - } - Err(e) => error!( - log, - "Failed to log state"; - "path" => format!("{:?}", path), - "error" => format!("{:?}", e) - ), - } - } -} - -fn write_block(block: &BeaconBlock, root: Hash256, log: &Logger) { - if WRITE_BLOCK_PROCESSING_SSZ { - let filename = format!("block_slot_{}_root{}.ssz", block.slot, root); - let mut path = std::env::temp_dir().join("lighthouse"); - let _ = fs::create_dir_all(path.clone()); - path = path.join(filename); - - match fs::File::create(path.clone()) { - Ok(mut file) => { - let _ = file.write_all(&block.as_ssz_bytes()); - } - Err(e) => error!( - log, - "Failed to log block"; - "path" => format!("{:?}", path), - "error" => format!("{:?}", e) - ), - } - } -} - impl From for Error { fn from(e: DBError) -> Error { Error::DBError(e) @@ -2247,3 +2356,12 @@ impl From for Error { 
Error::BeaconStateError(e) } } + +impl ChainSegmentResult { + pub fn to_block_error(self) -> Result<(), BlockError> { + match self { + ChainSegmentResult::Failed { error, .. } => Err(error), + ChainSegmentResult::Successful { .. } => Ok(()), + } + } +} diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs new file mode 100644 index 000000000..5dce07b04 --- /dev/null +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -0,0 +1,808 @@ +//! Provides `SignedBeaconBlock` verification logic. +//! +//! Specifically, it provides the following: +//! +//! - Verification for gossip blocks (i.e., should we gossip some block from the network). +//! - Verification for normal blocks (e.g., some block received on the RPC during a parent lookup). +//! - Verification for chain segments (e.g., some chain of blocks received on the RPC during a +//! sync). +//! +//! The primary source of complexity here is that we wish to avoid doing duplicate work as a block +//! moves through the verification process. For example, if some block is verified for gossip, we +//! do not wish to re-verify the block proposal signature or re-hash the block. Or, if we've +//! verified the signatures of a block during a chain segment import, we do not wish to verify each +//! signature individually again. +//! +//! The incremental processing steps (e.g., signatures verified but not the state transition) is +//! represented as a sequence of wrapper-types around the block. There is a linear progression of +//! types, starting at a `SignedBeaconBlock` and finishing with a `Fully VerifiedBlock` (see +//! diagram below). +//! +//! ```ignore +//! START +//! | +//! â–¼ +//! SignedBeaconBlock +//! |--------------- +//! | | +//! | â–¼ +//! | GossipVerifiedBlock +//! | | +//! |--------------- +//! | +//! â–¼ +//! SignatureVerifiedBlock +//! | +//! â–¼ +//! FullyVerifiedBlock +//! | +//! â–¼ +//! END +//! +//! 
``` +use crate::validator_pubkey_cache::ValidatorPubkeyCache; +use crate::{ + beacon_chain::{ + BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, MAXIMUM_GOSSIP_CLOCK_DISPARITY, + VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, + }, + metrics, BeaconChain, BeaconChainError, BeaconChainTypes, BeaconSnapshot, +}; +use parking_lot::RwLockReadGuard; +use slot_clock::SlotClock; +use state_processing::{ + block_signature_verifier::{ + BlockSignatureVerifier, Error as BlockSignatureVerifierError, G1Point, + }, + per_block_processing, per_slot_processing, BlockProcessingError, BlockSignatureStrategy, + SlotProcessingError, +}; +use std::borrow::Cow; +use store::{Error as DBError, StateBatch}; +use types::{ + BeaconBlock, BeaconState, BeaconStateError, ChainSpec, CloneConfig, EthSpec, Hash256, + RelativeEpoch, SignedBeaconBlock, Slot, +}; + +mod block_processing_outcome; + +pub use block_processing_outcome::BlockProcessingOutcome; + +/// Maximum block slot number. Block with slots bigger than this constant will NOT be processed. +const MAXIMUM_BLOCK_SLOT_NUMBER: u64 = 4_294_967_296; // 2^32 + +/// Returned when a block was not verified. A block is not verified for two reasons: +/// +/// - The block is malformed/invalid (indicated by all results other than `BeaconChainError`. +/// - We encountered an error whilst trying to verify the block (a `BeaconChainError`). +#[derive(Debug, PartialEq)] +pub enum BlockError { + /// The parent block was unknown. + ParentUnknown(Hash256), + /// The block slot is greater than the present slot. + FutureSlot { + present_slot: Slot, + block_slot: Slot, + }, + /// The block state_root does not match the generated state. + StateRootMismatch { block: Hash256, local: Hash256 }, + /// The block was a genesis block, these blocks cannot be re-imported. + GenesisBlock, + /// The slot is finalized, no need to import. + WouldRevertFinalizedSlot { + block_slot: Slot, + finalized_slot: Slot, + }, + /// Block is already known, no need to re-import. 
+ BlockIsAlreadyKnown, + /// The block slot exceeds the MAXIMUM_BLOCK_SLOT_NUMBER. + BlockSlotLimitReached, + /// The proposal signature is invalid. + ProposalSignatureInvalid, + /// A signature in the block is invalid (exactly which is unknown). + InvalidSignature, + /// The provided block is from an earlier slot than its parent. + BlockIsNotLaterThanParent { block_slot: Slot, state_slot: Slot }, + /// At least one block in the chain segment did not have its parent root set to the root of + /// the prior block. + NonLinearParentRoots, + /// The slots of the blocks in the chain segment were not strictly increasing. I.e., a child + /// had lower slot than a parent. + NonLinearSlots, + /// The block failed the specification's `per_block_processing` function, it is invalid. + PerBlockProcessingError(BlockProcessingError), + /// There was an error whilst processing the block. It is not necessarily invalid. + BeaconChainError(BeaconChainError), +} + +impl From for BlockError { + fn from(e: BlockSignatureVerifierError) -> Self { + BlockError::BeaconChainError(BeaconChainError::BlockSignatureVerifierError(e)) + } +} + +impl From for BlockError { + fn from(e: BeaconChainError) -> Self { + BlockError::BeaconChainError(e) + } +} + +impl From for BlockError { + fn from(e: BeaconStateError) -> Self { + BlockError::BeaconChainError(BeaconChainError::BeaconStateError(e)) + } +} + +impl From for BlockError { + fn from(e: SlotProcessingError) -> Self { + BlockError::BeaconChainError(BeaconChainError::SlotProcessingError(e)) + } +} + +impl From for BlockError { + fn from(e: DBError) -> Self { + BlockError::BeaconChainError(BeaconChainError::DBError(e)) + } +} + +/// Verify all signatures (except deposit signatures) on all blocks in the `chain_segment`. If all +/// signatures are valid, the `chain_segment` is mapped to a `Vec` that can +/// later be transformed into a `FullyVerifiedBlock` without re-checking the signatures.
If any +/// signature in the block is invalid, an `Err` is returned (it is not possible to known _which_ +/// signature was invalid). +/// +/// ## Errors +/// +/// The given `chain_segment` must span no more than two epochs, otherwise an error will be +/// returned. +pub fn signature_verify_chain_segment( + chain_segment: Vec<(Hash256, SignedBeaconBlock)>, + chain: &BeaconChain, +) -> Result>, BlockError> { + let (mut parent, slot) = if let Some(block) = chain_segment.first().map(|(_, block)| block) { + let parent = load_parent(&block.message, chain)?; + (parent, block.slot()) + } else { + return Ok(vec![]); + }; + + let highest_slot = chain_segment + .last() + .map(|(_, block)| block.slot()) + .unwrap_or_else(|| slot); + + let state = cheap_state_advance_to_obtain_committees( + &mut parent.beacon_state, + highest_slot, + &chain.spec, + )?; + + let pubkey_cache = get_validator_pubkey_cache(chain)?; + let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); + + for (block_root, block) in &chain_segment { + signature_verifier.include_all_signatures(block, Some(*block_root))?; + } + + if signature_verifier.verify().is_err() { + return Err(BlockError::InvalidSignature); + } + + drop(pubkey_cache); + + let mut signature_verified_blocks = chain_segment + .into_iter() + .map(|(block_root, block)| SignatureVerifiedBlock { + block, + block_root, + parent: None, + }) + .collect::>(); + + if let Some(signature_verified_block) = signature_verified_blocks.first_mut() { + signature_verified_block.parent = Some(parent); + } + + Ok(signature_verified_blocks) +} + +/// A wrapper around a `SignedBeaconBlock` that indicates it has been approved for re-gossiping on +/// the p2p network. +pub struct GossipVerifiedBlock { + pub block: SignedBeaconBlock, + block_root: Hash256, + parent: BeaconSnapshot, +} + +/// A wrapper around a `SignedBeaconBlock` that indicates that all signatures (except the deposit +/// signatures) have been verified. 
+pub struct SignatureVerifiedBlock { + block: SignedBeaconBlock, + block_root: Hash256, + parent: Option>, +} + +/// A wrapper around a `SignedBeaconBlock` that indicates that this block is fully verified and +/// ready to import into the `BeaconChain`. The validation includes: +/// +/// - Parent is known +/// - Signatures +/// - State root check +/// - Per block processing +/// +/// Note: a `FullyVerifiedBlock` is not _forever_ valid to be imported, it may later become invalid +/// due to finality or some other event. A `FullyVerifiedBlock` should be imported into the +/// `BeaconChain` immediately after it is instantiated. +pub struct FullyVerifiedBlock { + pub block: SignedBeaconBlock, + pub block_root: Hash256, + pub state: BeaconState, + pub parent_block: SignedBeaconBlock, + pub intermediate_states: StateBatch, +} + +/// Implemented on types that can be converted into a `FullyVerifiedBlock`. +/// +/// Used to allow functions to accept blocks at various stages of verification. +pub trait IntoFullyVerifiedBlock { + fn into_fully_verified_block( + self, + chain: &BeaconChain, + ) -> Result, BlockError>; + + fn block(&self) -> &SignedBeaconBlock; +} + +impl GossipVerifiedBlock { + /// Instantiates `Self`, a wrapper that indicates the given `block` is safe to be re-gossiped + /// on the p2p network. + /// + /// Returns an error if the block is invalid, or if the block was unable to be verified. + pub fn new( + block: SignedBeaconBlock, + chain: &BeaconChain, + ) -> Result { + // Do not gossip or process blocks from future slots. + let present_slot_with_tolerance = chain + .slot_clock + .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) + .ok_or_else(|| BeaconChainError::UnableToReadSlot)?; + if block.slot() > present_slot_with_tolerance { + return Err(BlockError::FutureSlot { + present_slot: present_slot_with_tolerance, + block_slot: block.slot(), + }); + } + + // Do not gossip a block from a finalized slot. 
+ check_block_against_finalized_slot(&block.message, chain)?; + + // TODO: add check for the `(block.proposer_index, block.slot)` tuple once we have v0.11.0 + + let mut parent = load_parent(&block.message, chain)?; + let block_root = get_block_root(&block); + + let state = cheap_state_advance_to_obtain_committees( + &mut parent.beacon_state, + block.slot(), + &chain.spec, + )?; + + let pubkey_cache = get_validator_pubkey_cache(chain)?; + + let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); + + signature_verifier.include_block_proposal(&block, Some(block_root))?; + + if signature_verifier.verify().is_ok() { + Ok(Self { + block, + block_root, + parent, + }) + } else { + Err(BlockError::ProposalSignatureInvalid) + } + } + + pub fn block_root(&self) -> Hash256 { + self.block_root + } +} + +impl IntoFullyVerifiedBlock for GossipVerifiedBlock { + /// Completes verification of the wrapped `block`. + fn into_fully_verified_block( + self, + chain: &BeaconChain, + ) -> Result, BlockError> { + let fully_verified = SignatureVerifiedBlock::from_gossip_verified_block(self, chain)?; + fully_verified.into_fully_verified_block(chain) + } + + fn block(&self) -> &SignedBeaconBlock { + &self.block + } +} + +impl SignatureVerifiedBlock { + /// Instantiates `Self`, a wrapper that indicates that all signatures (except the deposit + /// signatures) are valid (i.e., signed by the correct public keys). + /// + /// Returns an error if the block is invalid, or if the block was unable to be verified. 
+ pub fn new( + block: SignedBeaconBlock, + chain: &BeaconChain, + ) -> Result { + let mut parent = load_parent(&block.message, chain)?; + let block_root = get_block_root(&block); + + let state = cheap_state_advance_to_obtain_committees( + &mut parent.beacon_state, + block.slot(), + &chain.spec, + )?; + + let pubkey_cache = get_validator_pubkey_cache(chain)?; + + let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); + + signature_verifier.include_all_signatures(&block, Some(block_root))?; + + if signature_verifier.verify().is_ok() { + Ok(Self { + block, + block_root, + parent: Some(parent), + }) + } else { + Err(BlockError::InvalidSignature) + } + } + + /// Finishes signature verification on the provided `GossipVerifedBlock`. Does not re-verify + /// the proposer signature. + pub fn from_gossip_verified_block( + from: GossipVerifiedBlock, + chain: &BeaconChain, + ) -> Result { + let mut parent = from.parent; + let block = from.block; + + let state = cheap_state_advance_to_obtain_committees( + &mut parent.beacon_state, + block.slot(), + &chain.spec, + )?; + + let pubkey_cache = get_validator_pubkey_cache(chain)?; + + let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); + + signature_verifier.include_all_signatures_except_proposal(&block)?; + + if signature_verifier.verify().is_ok() { + Ok(Self { + block, + block_root: from.block_root, + parent: Some(parent), + }) + } else { + Err(BlockError::InvalidSignature) + } + } +} + +impl IntoFullyVerifiedBlock for SignatureVerifiedBlock { + /// Completes verification of the wrapped `block`. 
+ fn into_fully_verified_block( + self, + chain: &BeaconChain, + ) -> Result, BlockError> { + let block = self.block; + let parent = self + .parent + .map(Result::Ok) + .unwrap_or_else(|| load_parent(&block.message, chain))?; + + FullyVerifiedBlock::from_signature_verified_components( + block, + self.block_root, + parent, + chain, + ) + } + + fn block(&self) -> &SignedBeaconBlock { + &self.block + } +} + +impl IntoFullyVerifiedBlock for SignedBeaconBlock { + /// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock` + /// and then using that implementation of `IntoFullyVerifiedBlock` to complete verification. + fn into_fully_verified_block( + self, + chain: &BeaconChain, + ) -> Result, BlockError> { + SignatureVerifiedBlock::new(self, chain)?.into_fully_verified_block(chain) + } + + fn block(&self) -> &SignedBeaconBlock { + &self + } +} + +impl FullyVerifiedBlock { + /// Instantiates `Self`, a wrapper that indicates that the given `block` is fully valid. See + /// the struct-level documentation for more information. + /// + /// Note: this function does not verify block signatures, it assumes they are valid. Signature + /// verification must be done upstream (e.g., via a `SignatureVerifiedBlock` + /// + /// Returns an error if the block is invalid, or if the block was unable to be verified. + pub fn from_signature_verified_components( + block: SignedBeaconBlock, + block_root: Hash256, + parent: BeaconSnapshot, + chain: &BeaconChain, + ) -> Result { + // Reject any block if its parent is not known to fork choice. + // + // A block that is not in fork choice is either: + // + // - Not yet imported: we should reject this block because we should only import a child + // after its parent has been fully imported. + // - Pre-finalized: if the parent block is _prior_ to finalization, we should ignore it + // because it will revert finalization. 
Note that the finalized block is stored in fork + // choice, so we will not reject any child of the finalized block (this is relevant during + // genesis). + if !chain.fork_choice.contains_block(&block.parent_root()) { + return Err(BlockError::ParentUnknown(block.parent_root())); + } + + /* + * Perform cursory checks to see if the block is even worth processing. + */ + + check_block_relevancy(&block, Some(block_root), chain)?; + + /* + * Advance the given `parent.beacon_state` to the slot of the given `block`. + */ + + let catchup_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CATCHUP_STATE); + + // Keep a batch of any states that were "skipped" (block-less) in between the parent state + // slot and the block slot. These will be stored in the database. + let mut intermediate_states = StateBatch::new(); + + // The block must have a higher slot than its parent. + if block.slot() <= parent.beacon_state.slot { + return Err(BlockError::BlockIsNotLaterThanParent { + block_slot: block.slot(), + state_slot: parent.beacon_state.slot, + }); + } + + // Transition the parent state to the block slot. + let mut state = parent.beacon_state; + let distance = block.slot().as_u64().saturating_sub(state.slot.as_u64()); + for i in 0..distance { + let state_root = if i == 0 { + parent.beacon_block.state_root() + } else { + // This is a new state we've reached, so stage it for storage in the DB. + // Computing the state root here is time-equivalent to computing it during slot + // processing, but we get early access to it. + let state_root = state.update_tree_hash_cache()?; + intermediate_states.add_state(state_root, &state)?; + state_root + }; + + per_slot_processing(&mut state, Some(state_root), &chain.spec)?; + } + + metrics::stop_timer(catchup_timer); + + /* + * Build the committee caches on the state. 
+ */ + + let committee_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_COMMITTEE); + + state.build_committee_cache(RelativeEpoch::Previous, &chain.spec)?; + state.build_committee_cache(RelativeEpoch::Current, &chain.spec)?; + + metrics::stop_timer(committee_timer); + + /* + * Perform `per_block_processing` on the block and state, returning early if the block is + * invalid. + */ + + let core_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CORE); + + if let Err(err) = per_block_processing( + &mut state, + &block, + Some(block_root), + // Signatures were verified earlier in this function. + BlockSignatureStrategy::NoVerification, + &chain.spec, + ) { + match err { + // Capture `BeaconStateError` so that we can easily distinguish between a block + // that's invalid and one that caused an internal error. + BlockProcessingError::BeaconStateError(e) => return Err(e.into()), + other => return Err(BlockError::PerBlockProcessingError(other)), + } + }; + + metrics::stop_timer(core_timer); + + /* + * Calculate the state root of the newly modified state + */ + + let state_root_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_STATE_ROOT); + + let state_root = state.update_tree_hash_cache()?; + + metrics::stop_timer(state_root_timer); + + /* + * Check to ensure the state root on the block matches the one we have calculated. + */ + + if block.state_root() != state_root { + return Err(BlockError::StateRootMismatch { + block: block.state_root(), + local: state_root, + }); + } + + Ok(Self { + block, + block_root, + state, + parent_block: parent.beacon_block, + intermediate_states, + }) + } +} + +/// Returns `Ok(())` if the block is later than the finalized slot on `chain`. +/// +/// Returns an error if the block is earlier or equal to the finalized slot, or there was an error +/// verifying that condition. 
+fn check_block_against_finalized_slot( + block: &BeaconBlock, + chain: &BeaconChain, +) -> Result<(), BlockError> { + let finalized_slot = chain + .head_info()? + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + + if block.slot <= finalized_slot { + Err(BlockError::WouldRevertFinalizedSlot { + block_slot: block.slot, + finalized_slot, + }) + } else { + Ok(()) + } +} + +/// Performs simple, cheap checks to ensure that the block is relevant to be imported. +/// +/// `Ok(block_root)` is returned if the block passes these checks and should progress with +/// verification (viz., it is relevant). +/// +/// Returns an error if the block fails one of these checks (viz., is not relevant) or an error is +/// experienced whilst attempting to verify. +pub fn check_block_relevancy( + signed_block: &SignedBeaconBlock, + block_root: Option, + chain: &BeaconChain, +) -> Result { + let block = &signed_block.message; + + // Do not process blocks from the future. + if block.slot > chain.slot()? { + return Err(BlockError::FutureSlot { + present_slot: chain.slot()?, + block_slot: block.slot, + }); + } + + // Do not re-process the genesis block. + if block.slot == 0 { + return Err(BlockError::GenesisBlock); + } + + // This is an artificial (non-spec) restriction that provides some protection from overflow + // abuses. + if block.slot >= MAXIMUM_BLOCK_SLOT_NUMBER { + return Err(BlockError::BlockSlotLimitReached); + } + + // Do not process a block from a finalized slot. + check_block_against_finalized_slot(block, chain)?; + + let block_root = block_root.unwrap_or_else(|| get_block_root(&signed_block)); + + // Check if the block is already known. We know it is post-finalization, so it is + // sufficient to check the fork choice. + if chain.fork_choice.contains_block(&block_root) { + return Err(BlockError::BlockIsAlreadyKnown); + } + + Ok(block_root) +} + +/// Returns the canonical root of the given `block`. 
+/// +/// Use this function to ensure that we report the block hashing time Prometheus metric. +pub fn get_block_root(block: &SignedBeaconBlock) -> Hash256 { + let block_root_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_BLOCK_ROOT); + + let block_root = block.canonical_root(); + + metrics::stop_timer(block_root_timer); + + block_root +} + +/// Load the parent snapshot (block and state) of the given `block`. +/// +/// Returns `Err(BlockError::ParentUnknown)` if the parent is not found, or if an error occurs +/// whilst attempting the operation. +fn load_parent( + block: &BeaconBlock, + chain: &BeaconChain, +) -> Result, BlockError> { + let db_read_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_READ); + + // Reject any block if its parent is not known to fork choice. + // + // A block that is not in fork choice is either: + // + // - Not yet imported: we should reject this block because we should only import a child + // after its parent has been fully imported. + // - Pre-finalized: if the parent block is _prior_ to finalization, we should ignore it + // because it will revert finalization. Note that the finalized block is stored in fork + // choice, so we will not reject any child of the finalized block (this is relevant during + // genesis). + if !chain.fork_choice.contains_block(&block.parent_root) { + return Err(BlockError::ParentUnknown(block.parent_root)); + } + + // Load the parent block and state from disk, returning early if it's not available. + let result = chain + .snapshot_cache + .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) + .and_then(|mut snapshot_cache| snapshot_cache.try_remove(block.parent_root)) + .map(|snapshot| Ok(Some(snapshot))) + .unwrap_or_else(|| { + // Load the blocks parent block from the database, returning invalid if that block is not + // found. + // + // We don't return a DBInconsistent error here since it's possible for a block to + // exist in fork choice but not in the database yet. 
In such a case we simply + // indicate that we don't yet know the parent. + let parent_block = if let Some(block) = chain.get_block(&block.parent_root)? { + block + } else { + return Ok(None); + }; + + // Load the parent blocks state from the database, returning an error if it is not found. + // It is an error because if we know the parent block we should also know the parent state. + let parent_state_root = parent_block.state_root(); + let parent_state = chain + .get_state(&parent_state_root, Some(parent_block.slot()))? + .ok_or_else(|| { + BeaconChainError::DBInconsistent(format!( + "Missing state {:?}", + parent_state_root + )) + })?; + + Ok(Some(BeaconSnapshot { + beacon_block: parent_block, + beacon_block_root: block.parent_root, + beacon_state: parent_state, + beacon_state_root: parent_state_root, + })) + }) + .map_err(BlockError::BeaconChainError)? + .ok_or_else(|| BlockError::ParentUnknown(block.parent_root)); + + metrics::stop_timer(db_read_timer); + + result +} + +/// Performs a cheap (time-efficient) state advancement so the committees for `slot` can be +/// obtained from `state`. +/// +/// The state advancement is "cheap" since it does not generate state roots. As a result, the +/// returned state might be holistically invalid but the committees will be correct (since they do +/// not rely upon state roots). +/// +/// If the given `state` can already serve the `slot`, the committees will be built on the `state` +/// and `Cow::Borrowed(state)` will be returned. Otherwise, the state will be cloned, cheaply +/// advanced and then returned as a `Cow::Owned`. The end result is that the given `state` is never +/// mutated to be invalid (in fact, it is never changed beyond a simple committee cache build). 
+fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>( + state: &'a mut BeaconState, + block_slot: Slot, + spec: &ChainSpec, +) -> Result>, BlockError> { + let block_epoch = block_slot.epoch(E::slots_per_epoch()); + + if state.current_epoch() == block_epoch { + state.build_committee_cache(RelativeEpoch::Current, spec)?; + + Ok(Cow::Borrowed(state)) + } else if state.slot > block_slot { + Err(BlockError::BlockIsNotLaterThanParent { + block_slot, + state_slot: state.slot, + }) + } else { + let mut state = state.clone_with(CloneConfig::committee_caches_only()); + + while state.current_epoch() < block_epoch { + // Don't calculate state roots since they aren't required for calculating + // shuffling (achieved by providing Hash256::zero()). + per_slot_processing(&mut state, Some(Hash256::zero()), spec).map_err(|e| { + BlockError::BeaconChainError(BeaconChainError::SlotProcessingError(e)) + })?; + } + + state.build_committee_cache(RelativeEpoch::Current, spec)?; + + Ok(Cow::Owned(state)) + } +} + +/// Obtains a read-locked `ValidatorPubkeyCache` from the `chain`. +fn get_validator_pubkey_cache( + chain: &BeaconChain, +) -> Result, BlockError> { + chain + .validator_pubkey_cache + .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) + .ok_or_else(|| BeaconChainError::ValidatorPubkeyCacheLockTimeout) + .map_err(BlockError::BeaconChainError) +} + +/// Produces an _empty_ `BlockSignatureVerifier`. +/// +/// The signature verifier is empty because it does not yet have any of this block's signatures +/// added to it. Use `Self::apply_to_signature_verifier` to apply the signatures. +fn get_signature_verifier<'a, E: EthSpec>( + state: &'a BeaconState, + validator_pubkey_cache: &'a ValidatorPubkeyCache, + spec: &'a ChainSpec, +) -> BlockSignatureVerifier<'a, E, impl Fn(usize) -> Option> + Clone> { + BlockSignatureVerifier::new( + state, + move |validator_index| { + // Disallow access to any validator pubkeys that are not in the current beacon + // state. 
+ if validator_index < state.validators.len() { + validator_pubkey_cache + .get(validator_index) + .map(|pk| Cow::Borrowed(pk.as_point())) + } else { + None + } + }, + spec, + ) +} diff --git a/beacon_node/beacon_chain/src/block_verification/block_processing_outcome.rs b/beacon_node/beacon_chain/src/block_verification/block_processing_outcome.rs new file mode 100644 index 000000000..cdfa35cab --- /dev/null +++ b/beacon_node/beacon_chain/src/block_verification/block_processing_outcome.rs @@ -0,0 +1,105 @@ +use crate::{BeaconChainError, BlockError}; +use state_processing::BlockProcessingError; +use types::{Hash256, Slot}; + +/// This is a legacy object that is being kept around to reduce merge conflicts. +/// +/// As soon as this is merged into master, it should be removed as soon as possible. +#[derive(Debug, PartialEq)] +pub enum BlockProcessingOutcome { + /// Block was valid and imported into the block graph. + Processed { + block_root: Hash256, + }, + InvalidSignature, + /// The proposal signature in invalid. + ProposalSignatureInvalid, + /// The parent block was unknown. + ParentUnknown(Hash256), + /// The block slot is greater than the present slot. + FutureSlot { + present_slot: Slot, + block_slot: Slot, + }, + /// The block state_root does not match the generated state. + StateRootMismatch { + block: Hash256, + local: Hash256, + }, + /// The block was a genesis block, these blocks cannot be re-imported. + GenesisBlock, + /// The slot is finalized, no need to import. + WouldRevertFinalizedSlot { + block_slot: Slot, + finalized_slot: Slot, + }, + /// Block is already known, no need to re-import. + BlockIsAlreadyKnown, + /// The block slot exceeds the MAXIMUM_BLOCK_SLOT_NUMBER. + BlockSlotLimitReached, + /// The provided block is from an earlier slot than its parent. + BlockIsNotLaterThanParent { + block_slot: Slot, + state_slot: Slot, + }, + /// At least one block in the chain segement did not have it's parent root set to the root of + /// the prior block. 
+ NonLinearParentRoots, + /// The slots of the blocks in the chain segment were not strictly increasing. I.e., a child + /// had lower slot than a parent. + NonLinearSlots, + /// The block could not be applied to the state, it is invalid. + PerBlockProcessingError(BlockProcessingError), +} + +impl BlockProcessingOutcome { + pub fn shim( + result: Result, + ) -> Result { + match result { + Ok(block_root) => Ok(BlockProcessingOutcome::Processed { block_root }), + Err(BlockError::ParentUnknown(root)) => Ok(BlockProcessingOutcome::ParentUnknown(root)), + Err(BlockError::FutureSlot { + present_slot, + block_slot, + }) => Ok(BlockProcessingOutcome::FutureSlot { + present_slot, + block_slot, + }), + Err(BlockError::StateRootMismatch { block, local }) => { + Ok(BlockProcessingOutcome::StateRootMismatch { block, local }) + } + Err(BlockError::GenesisBlock) => Ok(BlockProcessingOutcome::GenesisBlock), + Err(BlockError::WouldRevertFinalizedSlot { + block_slot, + finalized_slot, + }) => Ok(BlockProcessingOutcome::WouldRevertFinalizedSlot { + block_slot, + finalized_slot, + }), + Err(BlockError::BlockIsAlreadyKnown) => Ok(BlockProcessingOutcome::BlockIsAlreadyKnown), + Err(BlockError::BlockSlotLimitReached) => { + Ok(BlockProcessingOutcome::BlockSlotLimitReached) + } + Err(BlockError::ProposalSignatureInvalid) => { + Ok(BlockProcessingOutcome::ProposalSignatureInvalid) + } + Err(BlockError::InvalidSignature) => Ok(BlockProcessingOutcome::InvalidSignature), + Err(BlockError::BlockIsNotLaterThanParent { + block_slot, + state_slot, + }) => Ok(BlockProcessingOutcome::BlockIsNotLaterThanParent { + block_slot, + state_slot, + }), + Err(BlockError::NonLinearParentRoots) => { + Ok(BlockProcessingOutcome::NonLinearParentRoots) + } + Err(BlockError::NonLinearSlots) => Ok(BlockProcessingOutcome::NonLinearSlots), + Err(BlockError::PerBlockProcessingError(e)) => { + Ok(BlockProcessingOutcome::PerBlockProcessingError(e)) + } + Err(BlockError::BeaconChainError(e)) => Err(e), + } + } +} diff 
--git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 01bded409..7a433aab4 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -88,6 +88,7 @@ pub struct BeaconChainBuilder { pubkey_cache_path: Option, validator_pubkey_cache: Option, spec: ChainSpec, + disabled_forks: Vec, log: Option, } @@ -122,6 +123,7 @@ where head_tracker: None, pubkey_cache_path: None, data_dir: None, + disabled_forks: Vec::new(), validator_pubkey_cache: None, spec: TEthSpec::default_spec(), log: None, @@ -168,6 +170,12 @@ where self } + /// Sets a list of hard-coded forks that will not be activated. + pub fn disabled_forks(mut self, disabled_forks: Vec) -> Self { + self.disabled_forks = disabled_forks; + self + } + /// Attempt to load an existing eth1 cache from the builder's `Store`. pub fn get_persisted_eth1_backend(&self) -> Result, String> { let store = self @@ -421,7 +429,10 @@ where op_pool: self .op_pool .ok_or_else(|| "Cannot build without op pool".to_string())?, + // TODO: allow for persisting and loading the pool from disk. 
+ naive_aggregation_pool: <_>::default(), eth1_chain: self.eth1_chain, + genesis_validators_root: canonical_head.beacon_state.genesis_validators_root, canonical_head: TimeoutRwLock::new(canonical_head.clone()), genesis_block_root: self .genesis_block_root @@ -433,12 +444,13 @@ where .event_handler .ok_or_else(|| "Cannot build without an event handler".to_string())?, head_tracker: Arc::new(self.head_tracker.unwrap_or_default()), - block_processing_cache: TimeoutRwLock::new(SnapshotCache::new( + snapshot_cache: TimeoutRwLock::new(SnapshotCache::new( DEFAULT_SNAPSHOT_CACHE_SIZE, canonical_head, )), shuffling_cache: TimeoutRwLock::new(ShufflingCache::new()), validator_pubkey_cache: TimeoutRwLock::new(validator_pubkey_cache), + disabled_forks: self.disabled_forks, log: log.clone(), }; @@ -661,7 +673,7 @@ mod test { #[test] fn recent_genesis() { - let validator_count = 8; + let validator_count = 1; let genesis_time = 13_371_337; let log = get_logger(); diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 7605a2214..ffe32f340 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -1,11 +1,14 @@ use crate::eth1_chain::Error as Eth1ChainError; use crate::fork_choice::Error as ForkChoiceError; +use crate::naive_aggregation_pool::Error as NaiveAggregationError; use operation_pool::OpPoolError; use ssz::DecodeError; use ssz_types::Error as SszTypesError; -use state_processing::per_block_processing::errors::AttestationValidationError; -use state_processing::BlockProcessingError; -use state_processing::SlotProcessingError; +use state_processing::{ + block_signature_verifier::Error as BlockSignatureVerifierError, + per_block_processing::errors::AttestationValidationError, + signature_sets::Error as SignatureSetError, BlockProcessingError, SlotProcessingError, +}; use std::time::Duration; use types::*; @@ -57,15 +60,20 @@ pub enum BeaconChainError { 
IncorrectStateForAttestation(RelativeEpochError), InvalidValidatorPubkeyBytes(DecodeError), ValidatorPubkeyCacheIncomplete(usize), - SignatureSetError(state_processing::signature_sets::Error), + SignatureSetError(SignatureSetError), BlockSignatureVerifierError(state_processing::block_signature_verifier::Error), DuplicateValidatorPublicKey, ValidatorPubkeyCacheFileError(String), + OpPoolError(OpPoolError), + NaiveAggregationError(NaiveAggregationError), } easy_from_to!(SlotProcessingError, BeaconChainError); easy_from_to!(AttestationValidationError, BeaconChainError); easy_from_to!(SszTypesError, BeaconChainError); +easy_from_to!(OpPoolError, BeaconChainError); +easy_from_to!(NaiveAggregationError, BeaconChainError); +easy_from_to!(BlockSignatureVerifierError, BeaconChainError); #[derive(Debug, PartialEq)] pub enum BlockProductionError { diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index b3ad966a8..66e125ddd 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -1,7 +1,6 @@ use crate::metrics; use eth1::{Config as Eth1Config, Eth1Block, Service as HttpService}; use eth2_hashing::hash; -use exit_future::Exit; use futures::Future; use slog::{debug, error, trace, Logger}; use ssz::{Decode, Encode}; @@ -287,7 +286,10 @@ impl> CachingEth1Backend { } /// Starts the routine which connects to the external eth1 node and updates the caches. 
- pub fn start(&self, exit: Exit) -> impl Future { + pub fn start( + &self, + exit: tokio::sync::oneshot::Receiver<()>, + ) -> impl Future { self.core.auto_update(exit) } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 97d419290..0ab3594ae 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -4,6 +4,7 @@ extern crate lazy_static; mod beacon_chain; mod beacon_snapshot; +mod block_verification; pub mod builder; mod errors; pub mod eth1_chain; @@ -12,6 +13,7 @@ mod fork_choice; mod head_tracker; mod metrics; pub mod migrate; +mod naive_aggregation_pool; mod persisted_beacon_chain; mod shuffling_cache; mod snapshot_cache; @@ -20,11 +22,12 @@ mod timeout_rw_lock; mod validator_pubkey_cache; pub use self::beacon_chain::{ - AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BlockProcessingOutcome, - StateSkipConfig, + AttestationProcessingOutcome, AttestationType, BeaconChain, BeaconChainTypes, + ChainSegmentResult, StateSkipConfig, }; pub use self::beacon_snapshot::BeaconSnapshot; pub use self::errors::{BeaconChainError, BlockProductionError}; +pub use block_verification::{BlockError, BlockProcessingOutcome, GossipVerifiedBlock}; pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; pub use events::EventHandler; pub use fork_choice::ForkChoice; diff --git a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs new file mode 100644 index 000000000..6718fd2f3 --- /dev/null +++ b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs @@ -0,0 +1,480 @@ +use parking_lot::RwLock; +use std::collections::HashMap; +use types::{Attestation, AttestationData, EthSpec, Slot}; + +/// The number of slots that will be stored in the pool. 
+/// +/// For example, if `SLOTS_RETAINED == 3` and the pool is pruned at slot `6`, then all attestations +/// at slots less than `4` will be dropped and any future attestation with a slot less than `4` +/// will be refused. +const SLOTS_RETAINED: usize = 3; + +/// The maximum number of distinct `AttestationData` that will be stored in each slot. +/// +/// This is a DoS protection measure. +const MAX_ATTESTATIONS_PER_SLOT: usize = 16_384; + +/// Returned upon successfully inserting an attestation into the pool. +#[derive(Debug, PartialEq)] +pub enum InsertOutcome { + /// The `attestation.data` had not been seen before and was added to the pool. + NewAttestationData { committee_index: usize }, + /// A validator signature for the given `attestation.data` was already known. No changes were + /// made. + SignatureAlreadyKnown { committee_index: usize }, + /// The `attestation.data` was known, but a signature for the given validator was not yet + /// known. The signature was aggregated into the pool. + SignatureAggregated { committee_index: usize }, +} + +#[derive(Debug, PartialEq)] +pub enum Error { + /// The given `attestation.data.slot` was too low to be stored. No changes were made. + SlotTooLow { + slot: Slot, + lowest_permissible_slot: Slot, + }, + /// The given `attestation.aggregation_bits` field was empty. + NoAggregationBitsSet, + /// The given `attestation.aggregation_bits` field had more than one signature. The number of + /// signatures found is included. + MoreThanOneAggregationBitSet(usize), + /// We have reached the maximum number of unique `AttestationData` that can be stored in a + /// slot. This is a DoS protection function. + ReachedMaxAttestationsPerSlot(usize), + /// The given `attestation.aggregation_bits` field had a different length to the one currently + /// stored. This indicates a fairly serious error somewhere in the code that called this + /// function. 
+ InconsistentBitfieldLengths, + /// The function to obtain a map index failed, this is an internal error. + InvalidMapIndex(usize), + /// The given `attestation` was for the incorrect slot. This is an internal error. + IncorrectSlot { expected: Slot, attestation: Slot }, +} + +/// A collection of `Attestation` objects, keyed by their `attestation.data`. Enforces that all +/// `attestation` are from the same slot. +struct AggregatedAttestationMap { + map: HashMap>, + slot: Slot, +} + +impl AggregatedAttestationMap { + /// Create an empty collection that will only contain attestation for the given `slot`. + pub fn new(slot: Slot) -> Self { + Self { + slot, + map: <_>::default(), + } + } + + /// Insert an attestation into `self`, aggregating it into the pool. + /// + /// The given attestation (`a`) must only have one signature and be from the slot that `self` + /// was initialized with. + pub fn insert(&mut self, a: &Attestation) -> Result { + if a.data.slot != self.slot { + return Err(Error::IncorrectSlot { + expected: self.slot, + attestation: a.data.slot, + }); + } + + let set_bits = a + .aggregation_bits + .iter() + .enumerate() + .filter(|(_i, bit)| *bit) + .map(|(i, _bit)| i) + .collect::>(); + + let committee_index = set_bits + .first() + .copied() + .ok_or_else(|| Error::NoAggregationBitsSet)?; + + if set_bits.len() > 1 { + return Err(Error::MoreThanOneAggregationBitSet(set_bits.len())); + } + + if let Some(existing_attestation) = self.map.get_mut(&a.data) { + if existing_attestation + .aggregation_bits + .get(committee_index) + .map_err(|_| Error::InconsistentBitfieldLengths)? 
+ { + Ok(InsertOutcome::SignatureAlreadyKnown { committee_index }) + } else { + existing_attestation.aggregate(a); + Ok(InsertOutcome::SignatureAggregated { committee_index }) + } + } else { + if self.map.len() >= MAX_ATTESTATIONS_PER_SLOT { + return Err(Error::ReachedMaxAttestationsPerSlot( + MAX_ATTESTATIONS_PER_SLOT, + )); + } + + self.map.insert(a.data.clone(), a.clone()); + Ok(InsertOutcome::NewAttestationData { committee_index }) + } + } + + /// Returns an aggregated `Attestation` with the given `data`, if any. + /// + /// The given `a.data.slot` must match the slot that `self` was initialized with. + pub fn get(&self, data: &AttestationData) -> Result>, Error> { + if data.slot != self.slot { + return Err(Error::IncorrectSlot { + expected: self.slot, + attestation: data.slot, + }); + } + + Ok(self.map.get(data).cloned()) + } +} + +/// A pool of `Attestation` that is specially designed to store "unaggregated" attestations from +/// the native aggregation scheme. +/// +/// **The `NaiveAggregationPool` does not do any signature or attestation verification. It assumes +/// that all `Attestation` objects provided are valid.** +/// +/// ## Details +/// +/// The pool sorts the `Attestation` by `attestation.data.slot`, then by `attestation.data`. +/// +/// As each unaggregated attestation is added it is aggregated with any existing `attestation` with +/// the same `AttestationData`. Considering that the pool only accepts attestations with a single +/// signature, there should only ever be a single aggregated `Attestation` for any given +/// `AttestationData`. +/// +/// The pool has a capacity for `SLOTS_RETAINED` slots, when a new `attestation.data.slot` is +/// provided, the oldest slot is dropped and replaced with the new slot. 
The pool can also be +/// pruned by supplying a `current_slot`; all existing attestations with a slot lower than +/// `current_slot - SLOTS_RETAINED` will be removed and any future attestation with a slot lower +/// than that will also be refused. Pruning is done automatically based upon the attestations it +/// receives and it can be triggered manually. +pub struct NaiveAggregationPool { + lowest_permissible_slot: RwLock, + maps: RwLock>>, +} + +impl Default for NaiveAggregationPool { + fn default() -> Self { + Self { + lowest_permissible_slot: RwLock::new(Slot::new(0)), + maps: RwLock::new(vec![]), + } + } +} + +impl NaiveAggregationPool { + /// Insert an attestation into `self`, aggregating it into the pool. + /// + /// The given attestation (`a`) must only have one signature and have an + /// `attestation.data.slot` that is not lower than `self.lowest_permissible_slot`. + /// + /// The pool may be pruned if the given `attestation.data` has a slot higher than any + /// previously seen. + pub fn insert(&self, attestation: &Attestation) -> Result { + let lowest_permissible_slot = *self.lowest_permissible_slot.read(); + + // Reject any attestations that are too old. + if attestation.data.slot < lowest_permissible_slot { + return Err(Error::SlotTooLow { + slot: attestation.data.slot, + lowest_permissible_slot, + }); + } + + // Prune the pool if this attestation indicates that the current slot has advanced. + if (lowest_permissible_slot + SLOTS_RETAINED as u64) < attestation.data.slot + 1 { + self.prune(attestation.data.slot) + } + + let index = self.get_map_index(attestation.data.slot); + + self.maps + .write() + .get_mut(index) + .ok_or_else(|| Error::InvalidMapIndex(index))? + .insert(attestation) + } + + /// Returns an aggregated `Attestation` with the given `data`, if any. 
+ pub fn get(&self, data: &AttestationData) -> Result>, Error> { + self.maps + .read() + .iter() + .find(|map| map.slot == data.slot) + .map(|map| map.get(data)) + .unwrap_or_else(|| Ok(None)) + } + + /// Removes any attestations with a slot lower than `current_slot` and bars any future + /// attestations with a slot lower than `current_slot - SLOTS_RETAINED`. + pub fn prune(&self, current_slot: Slot) { + // Taking advantage of saturating subtraction on `Slot`. + let lowest_permissible_slot = current_slot - Slot::from(SLOTS_RETAINED); + + self.maps + .write() + .retain(|map| map.slot >= lowest_permissible_slot); + + *self.lowest_permissible_slot.write() = lowest_permissible_slot; + } + + /// Returns the index of `self.maps` that matches `slot`. + /// + /// If there is no existing map for this slot one will be created. If `self.maps.len() >= + /// SLOTS_RETAINED`, the map with the lowest slot will be replaced. + fn get_map_index(&self, slot: Slot) -> usize { + let mut maps = self.maps.write(); + + if let Some(index) = maps.iter().position(|map| map.slot == slot) { + return index; + } + + if maps.len() < SLOTS_RETAINED || maps.is_empty() { + let index = maps.len(); + maps.push(AggregatedAttestationMap::new(slot)); + return index; + } + + let index = maps + .iter() + .enumerate() + .min_by_key(|(_i, map)| map.slot) + .map(|(i, _map)| i) + .expect("maps cannot be empty due to previous .is_empty() check"); + + maps[index] = AggregatedAttestationMap::new(slot); + + index + } +} + +#[cfg(test)] +mod tests { + use super::*; + use ssz_types::BitList; + use types::{ + test_utils::{generate_deterministic_keypair, test_random_instance}, + Fork, Hash256, + }; + + type E = types::MainnetEthSpec; + + fn get_attestation(slot: Slot) -> Attestation { + let mut a: Attestation = test_random_instance(); + a.data.slot = slot; + a.aggregation_bits = BitList::with_capacity(4).expect("should create bitlist"); + a + } + + fn sign(a: &mut Attestation, i: usize, genesis_validators_root: 
Hash256) { + a.sign( + &generate_deterministic_keypair(i).sk, + i, + &Fork::default(), + genesis_validators_root, + &E::default_spec(), + ) + .expect("should sign attestation"); + } + + fn unset_bit(a: &mut Attestation, i: usize) { + a.aggregation_bits + .set(i, false) + .expect("should unset aggregation bit") + } + + #[test] + fn single_attestation() { + let mut a = get_attestation(Slot::new(0)); + + let pool = NaiveAggregationPool::default(); + + assert_eq!( + pool.insert(&a), + Err(Error::NoAggregationBitsSet), + "should not accept attestation without any signatures" + ); + + sign(&mut a, 0, Hash256::random()); + + assert_eq!( + pool.insert(&a), + Ok(InsertOutcome::NewAttestationData { committee_index: 0 }), + "should accept new attestation" + ); + assert_eq!( + pool.insert(&a), + Ok(InsertOutcome::SignatureAlreadyKnown { committee_index: 0 }), + "should acknowledge duplicate signature" + ); + + let retrieved = pool + .get(&a.data) + .expect("should not error while getting attestation") + .expect("should get an attestation"); + assert_eq!( + retrieved, a, + "retrieved attestation should equal the one inserted" + ); + + sign(&mut a, 1, Hash256::random()); + + assert_eq!( + pool.insert(&a), + Err(Error::MoreThanOneAggregationBitSet(2)), + "should not accept attestation with multiple signatures" + ); + } + + #[test] + fn multiple_attestations() { + let mut a_0 = get_attestation(Slot::new(0)); + let mut a_1 = a_0.clone(); + + let genesis_validators_root = Hash256::random(); + sign(&mut a_0, 0, genesis_validators_root); + sign(&mut a_1, 1, genesis_validators_root); + + let pool = NaiveAggregationPool::default(); + + assert_eq!( + pool.insert(&a_0), + Ok(InsertOutcome::NewAttestationData { committee_index: 0 }), + "should accept a_0" + ); + assert_eq!( + pool.insert(&a_1), + Ok(InsertOutcome::SignatureAggregated { committee_index: 1 }), + "should accept a_1" + ); + + let retrieved = pool + .get(&a_0.data) + .expect("should not error while getting attestation") + 
.expect("should get an attestation"); + + let mut a_01 = a_0.clone(); + a_01.aggregate(&a_1); + + assert_eq!( + retrieved, a_01, + "retrieved attestation should be aggregated" + ); + + /* + * Throw a different attestation data in there and ensure it isn't aggregated + */ + + let mut a_different = a_0.clone(); + let different_root = Hash256::from_low_u64_be(1337); + unset_bit(&mut a_different, 0); + sign(&mut a_different, 2, genesis_validators_root); + assert!(a_different.data.beacon_block_root != different_root); + a_different.data.beacon_block_root = different_root; + + assert_eq!( + pool.insert(&a_different), + Ok(InsertOutcome::NewAttestationData { committee_index: 2 }), + "should accept a_different" + ); + + assert_eq!( + pool.get(&a_0.data) + .expect("should not error while getting attestation") + .expect("should get an attestation"), + retrieved, + "should not have aggregated different attestation data" + ); + } + + #[test] + fn auto_pruning() { + let mut base = get_attestation(Slot::new(0)); + sign(&mut base, 0, Hash256::random()); + + let pool = NaiveAggregationPool::default(); + + for i in 0..SLOTS_RETAINED * 2 { + let slot = Slot::from(i); + let mut a = base.clone(); + a.data.slot = slot; + + assert_eq!( + pool.insert(&a), + Ok(InsertOutcome::NewAttestationData { committee_index: 0 }), + "should accept new attestation" + ); + + if i < SLOTS_RETAINED { + let len = i + 1; + assert_eq!( + pool.maps.read().len(), + len, + "the pool should have length {}", + len + ); + } else { + assert_eq!( + pool.maps.read().len(), + SLOTS_RETAINED, + "the pool should have length SLOTS_RETAINED" + ); + + let mut pool_slots = pool + .maps + .read() + .iter() + .map(|map| map.slot) + .collect::>(); + + pool_slots.sort_unstable(); + + for (j, pool_slot) in pool_slots.iter().enumerate() { + let expected_slot = slot - (SLOTS_RETAINED - 1 - j) as u64; + assert_eq!( + *pool_slot, expected_slot, + "the slot of the map should be {}", + expected_slot + ) + } + } + } + } + + #[test] + 
fn max_attestations() { + let mut base = get_attestation(Slot::new(0)); + sign(&mut base, 0, Hash256::random()); + + let pool = NaiveAggregationPool::default(); + + for i in 0..=MAX_ATTESTATIONS_PER_SLOT { + let mut a = base.clone(); + a.data.beacon_block_root = Hash256::from_low_u64_be(i as u64); + + if i < MAX_ATTESTATIONS_PER_SLOT { + assert_eq!( + pool.insert(&a), + Ok(InsertOutcome::NewAttestationData { committee_index: 0 }), + "should accept attestation below limit" + ); + } else { + assert_eq!( + pool.insert(&a), + Err(Error::ReachedMaxAttestationsPerSlot( + MAX_ATTESTATIONS_PER_SLOT + )), + "should not accept attestation above limit" + ); + } + } + } +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index bc4271aec..91f3a833c 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -7,8 +7,7 @@ use crate::{ builder::{BeaconChainBuilder, Witness}, eth1_chain::CachingEth1Backend, events::NullEventHandler, - AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BlockProcessingOutcome, - StateSkipConfig, + AttestationProcessingOutcome, AttestationType, BeaconChain, BeaconChainTypes, StateSkipConfig, }; use genesis::interop_genesis_state; use rayon::prelude::*; @@ -263,20 +262,15 @@ where let (block, new_state) = self.build_block(state.clone(), slot, block_strategy); - let outcome = self + let block_root = self .chain .process_block(block) .expect("should not error during block processing"); self.chain.fork_choice().expect("should find head"); - if let BlockProcessingOutcome::Processed { block_root } = outcome { - head_block_root = Some(block_root); - - self.add_free_attestations(&attestation_strategy, &new_state, block_root, slot); - } else { - panic!("block should be successfully processed: {:?}", outcome); - } + head_block_root = Some(block_root); + self.add_free_attestations(&attestation_strategy, &new_state, block_root, slot); state = new_state; 
slot += 1; @@ -310,20 +304,16 @@ where let (block, new_state) = self.build_block(state.clone(), slot, block_strategy); - let outcome = self + let block_root = self .chain .process_block(block) .expect("should not error during block processing"); self.chain.fork_choice().expect("should find head"); - if let BlockProcessingOutcome::Processed { block_root } = outcome { - let attestation_strategy = AttestationStrategy::SomeValidators(validators.to_vec()); - self.add_free_attestations(&attestation_strategy, &new_state, block_root, slot); - (block_root.into(), new_state) - } else { - panic!("block should be successfully processed: {:?}", outcome); - } + let attestation_strategy = AttestationStrategy::SomeValidators(validators.to_vec()); + self.add_free_attestations(&attestation_strategy, &new_state, block_root, slot); + (block_root.into(), new_state) } /// `add_block()` repeated `num_blocks` times. @@ -478,7 +468,7 @@ where .for_each(|attestation| { match self .chain - .process_attestation(attestation) + .process_attestation(attestation, AttestationType::Aggregated) .expect("should not error during attestation processing") { // PastEpoch can occur if we fork over several epochs diff --git a/beacon_node/beacon_chain/tests/attestation_tests.rs b/beacon_node/beacon_chain/tests/attestation_tests.rs index 52ccd67c4..137746c7f 100644 --- a/beacon_node/beacon_chain/tests/attestation_tests.rs +++ b/beacon_node/beacon_chain/tests/attestation_tests.rs @@ -6,7 +6,7 @@ extern crate lazy_static; use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, HarnessType, }; -use beacon_chain::AttestationProcessingOutcome; +use beacon_chain::{AttestationProcessingOutcome, AttestationType}; use state_processing::per_slot_processing; use types::{ test_utils::generate_deterministic_keypair, AggregateSignature, BitList, EthSpec, Hash256, @@ -56,7 +56,7 @@ fn attestation_validity() { .expect("should get at least one attestation"); assert_eq!( - 
chain.process_attestation(valid_attestation.clone()), + chain.process_attestation(valid_attestation.clone(), AttestationType::Aggregated), Ok(AttestationProcessingOutcome::Processed), "should accept valid attestation" ); @@ -71,7 +71,7 @@ fn attestation_validity() { assert_eq!( harness .chain - .process_attestation(epoch_mismatch_attestation), + .process_attestation(epoch_mismatch_attestation, AttestationType::Aggregated), Ok(AttestationProcessingOutcome::BadTargetEpoch), "should not accept attestation where the slot is not in the same epoch as the target" ); @@ -85,7 +85,9 @@ fn attestation_validity() { early_attestation.data.slot = (current_epoch + 1).start_slot(MainnetEthSpec::slots_per_epoch()); assert_eq!( - harness.chain.process_attestation(early_attestation), + harness + .chain + .process_attestation(early_attestation, AttestationType::Aggregated), Ok(AttestationProcessingOutcome::FutureEpoch { attestation_epoch: current_epoch + 1, current_epoch @@ -118,7 +120,9 @@ fn attestation_validity() { .expect("should get at least one late attestation"); assert_eq!( - harness.chain.process_attestation(late_attestation), + harness + .chain + .process_attestation(late_attestation, AttestationType::Aggregated), Ok(AttestationProcessingOutcome::PastEpoch { attestation_epoch: current_epoch - 2, current_epoch @@ -134,7 +138,9 @@ fn attestation_validity() { bad_target_attestation.data.target.root = Hash256::from_low_u64_be(42); assert_eq!( - harness.chain.process_attestation(bad_target_attestation), + harness + .chain + .process_attestation(bad_target_attestation, AttestationType::Aggregated), Ok(AttestationProcessingOutcome::UnknownTargetRoot( Hash256::from_low_u64_be(42) )), @@ -149,7 +155,9 @@ fn attestation_validity() { future_block_attestation.data.slot -= 1; assert_eq!( - harness.chain.process_attestation(future_block_attestation), + harness + .chain + .process_attestation(future_block_attestation, AttestationType::Aggregated), 
Ok(AttestationProcessingOutcome::AttestsToFutureBlock { block: current_slot, attestation: current_slot - 1 @@ -165,7 +173,9 @@ fn attestation_validity() { bad_head_attestation.data.beacon_block_root = Hash256::from_low_u64_be(42); assert_eq!( - harness.chain.process_attestation(bad_head_attestation), + harness + .chain + .process_attestation(bad_head_attestation, AttestationType::Aggregated), Ok(AttestationProcessingOutcome::UnknownHeadBlock { beacon_block_root: Hash256::from_low_u64_be(42) }), @@ -183,7 +193,9 @@ fn attestation_validity() { bad_signature_attestation.signature = agg_sig; assert_eq!( - harness.chain.process_attestation(bad_signature_attestation), + harness + .chain + .process_attestation(bad_signature_attestation, AttestationType::Aggregated), Ok(AttestationProcessingOutcome::InvalidSignature), "should not accept bad_signature attestation" ); @@ -199,7 +211,7 @@ fn attestation_validity() { assert_eq!( harness .chain - .process_attestation(empty_bitfield_attestation), + .process_attestation(empty_bitfield_attestation, AttestationType::Aggregated), Ok(AttestationProcessingOutcome::EmptyAggregationBitfield), "should not accept empty_bitfield attestation" ); @@ -247,7 +259,9 @@ fn attestation_that_skips_epochs() { .expect("should get at least one attestation"); assert_eq!( - harness.chain.process_attestation(attestation), + harness + .chain + .process_attestation(attestation, AttestationType::Aggregated), Ok(AttestationProcessingOutcome::Processed), "should process attestation that skips slots" ); diff --git a/beacon_node/beacon_chain/tests/import_chain_segment_tests.rs b/beacon_node/beacon_chain/tests/import_chain_segment_tests.rs new file mode 100644 index 000000000..bbefe5be3 --- /dev/null +++ b/beacon_node/beacon_chain/tests/import_chain_segment_tests.rs @@ -0,0 +1,588 @@ +#![cfg(not(debug_assertions))] + +#[macro_use] +extern crate lazy_static; + +use beacon_chain::{ + test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, 
HarnessType}, + BeaconSnapshot, BlockError, +}; +use types::{ + test_utils::generate_deterministic_keypair, AggregateSignature, AttestationData, + AttesterSlashing, Checkpoint, Deposit, DepositData, Epoch, EthSpec, Hash256, + IndexedAttestation, Keypair, MainnetEthSpec, ProposerSlashing, Signature, SignedBeaconBlock, + SignedBeaconBlockHeader, SignedVoluntaryExit, Slot, VoluntaryExit, DEPOSIT_TREE_DEPTH, +}; + +type E = MainnetEthSpec; + +// Should ideally be divisible by 3. +pub const VALIDATOR_COUNT: usize = 24; +pub const CHAIN_SEGMENT_LENGTH: usize = 64 * 5; + +lazy_static! { + /// A cached set of keys. + static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); + + /// A cached set of valid blocks + static ref CHAIN_SEGMENT: Vec> = get_chain_segment(); +} + +fn get_chain_segment() -> Vec> { + let harness = get_harness(VALIDATOR_COUNT); + + harness.extend_chain( + CHAIN_SEGMENT_LENGTH, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + harness + .chain + .chain_dump() + .expect("should dump chain") + .into_iter() + .skip(1) + .collect() +} + +fn get_harness(validator_count: usize) -> BeaconChainHarness> { + let harness = BeaconChainHarness::new(MainnetEthSpec, KEYPAIRS[0..validator_count].to_vec()); + + harness.advance_slot(); + + harness +} + +fn chain_segment_blocks() -> Vec> { + CHAIN_SEGMENT + .iter() + .map(|snapshot| snapshot.beacon_block.clone()) + .collect() +} + +fn junk_signature() -> Signature { + let kp = generate_deterministic_keypair(VALIDATOR_COUNT); + let message = &[42, 42]; + Signature::new(message, &kp.sk) +} + +fn junk_aggregate_signature() -> AggregateSignature { + let mut agg_sig = AggregateSignature::new(); + agg_sig.add(&junk_signature()); + agg_sig +} + +fn update_proposal_signatures( + snapshots: &mut [BeaconSnapshot], + harness: &BeaconChainHarness>, +) { + for snapshot in snapshots { + let spec = &harness.chain.spec; + let slot = snapshot.beacon_block.slot(); + let 
state = &snapshot.beacon_state; + let proposer_index = state + .get_beacon_proposer_index(slot, spec) + .expect("should find proposer index"); + let keypair = harness + .keypairs + .get(proposer_index) + .expect("proposer keypair should be available"); + + snapshot.beacon_block = snapshot.beacon_block.message.clone().sign( + &keypair.sk, + &state.fork, + state.genesis_validators_root, + spec, + ); + } +} + +fn update_parent_roots(snapshots: &mut [BeaconSnapshot]) { + for i in 0..snapshots.len() { + let root = snapshots[i].beacon_block.canonical_root(); + if let Some(child) = snapshots.get_mut(i + 1) { + child.beacon_block.message.parent_root = root + } + } +} + +#[test] +fn chain_segment_full_segment() { + let harness = get_harness(VALIDATOR_COUNT); + let blocks = chain_segment_blocks(); + + harness + .chain + .slot_clock + .set_slot(blocks.last().unwrap().slot().as_u64()); + + // Sneak in a little check to ensure we can process empty chain segments. + harness + .chain + .process_chain_segment(vec![]) + .to_block_error() + .expect("should import empty chain segment"); + + harness + .chain + .process_chain_segment(blocks.clone()) + .to_block_error() + .expect("should import chain segment"); + + harness.chain.fork_choice().expect("should run fork choice"); + + assert_eq!( + harness + .chain + .head_info() + .expect("should get harness b head") + .block_root, + blocks.last().unwrap().canonical_root(), + "harness should have last block as head" + ); +} + +#[test] +fn chain_segment_varying_chunk_size() { + for chunk_size in &[1, 2, 3, 5, 31, 32, 33, 42] { + let harness = get_harness(VALIDATOR_COUNT); + let blocks = chain_segment_blocks(); + + harness + .chain + .slot_clock + .set_slot(blocks.last().unwrap().slot().as_u64()); + + for chunk in blocks.chunks(*chunk_size) { + harness + .chain + .process_chain_segment(chunk.to_vec()) + .to_block_error() + .expect(&format!( + "should import chain segment of len {}", + chunk_size + )); + } + + 
harness.chain.fork_choice().expect("should run fork choice"); + + assert_eq!( + harness + .chain + .head_info() + .expect("should get harness b head") + .block_root, + blocks.last().unwrap().canonical_root(), + "harness should have last block as head" + ); + } +} + +#[test] +fn chain_segment_non_linear_parent_roots() { + let harness = get_harness(VALIDATOR_COUNT); + harness + .chain + .slot_clock + .set_slot(CHAIN_SEGMENT.last().unwrap().beacon_block.slot().as_u64()); + + /* + * Test with a block removed. + */ + let mut blocks = chain_segment_blocks(); + blocks.remove(2); + + assert_eq!( + harness + .chain + .process_chain_segment(blocks.clone()) + .to_block_error(), + Err(BlockError::NonLinearParentRoots), + "should not import chain with missing parent" + ); + + /* + * Test with a modified parent root. + */ + let mut blocks = chain_segment_blocks(); + blocks[3].message.parent_root = Hash256::zero(); + + assert_eq!( + harness + .chain + .process_chain_segment(blocks.clone()) + .to_block_error(), + Err(BlockError::NonLinearParentRoots), + "should not import chain with a broken parent root link" + ); +} + +#[test] +fn chain_segment_non_linear_slots() { + let harness = get_harness(VALIDATOR_COUNT); + harness + .chain + .slot_clock + .set_slot(CHAIN_SEGMENT.last().unwrap().beacon_block.slot().as_u64()); + + /* + * Test where a child is lower than the parent. + */ + + let mut blocks = chain_segment_blocks(); + blocks[3].message.slot = Slot::new(0); + + assert_eq!( + harness + .chain + .process_chain_segment(blocks.clone()) + .to_block_error(), + Err(BlockError::NonLinearSlots), + "should not import chain with a parent that has a lower slot than its child" + ); + + /* + * Test where a child is equal to the parent. 
+ */ + + let mut blocks = chain_segment_blocks(); + blocks[3].message.slot = blocks[2].message.slot; + + assert_eq!( + harness + .chain + .process_chain_segment(blocks.clone()) + .to_block_error(), + Err(BlockError::NonLinearSlots), + "should not import chain with a parent that has an equal slot to its child" + ); +} + +#[test] +fn invalid_signatures() { + let mut checked_attestation = false; + + for &block_index in &[0, 1, 32, 64, 68 + 1, 129, CHAIN_SEGMENT.len() - 1] { + let harness = get_harness(VALIDATOR_COUNT); + harness + .chain + .slot_clock + .set_slot(CHAIN_SEGMENT.last().unwrap().beacon_block.slot().as_u64()); + + // Import all the ancestors before the `block_index` block. + let ancestor_blocks = CHAIN_SEGMENT + .iter() + .take(block_index) + .map(|snapshot| snapshot.beacon_block.clone()) + .collect(); + harness + .chain + .process_chain_segment(ancestor_blocks) + .to_block_error() + .expect("should import all blocks prior to the one being tested"); + + // For the given snapshots, test the following: + // + // - The `process_chain_segment` function returns `InvalidSignature`. + // - The `process_block` function returns `InvalidSignature` when importing the + // `SignedBeaconBlock` directly. + // - The `verify_block_for_gossip` function does _not_ return an error. + // - The `process_block` function returns `InvalidSignature` when verifying the + // `GossipVerifiedBlock`. + let assert_invalid_signature = |snapshots: &[BeaconSnapshot], item: &str| { + let blocks = snapshots + .iter() + .map(|snapshot| snapshot.beacon_block.clone()) + .collect(); + + // Ensure the block will be rejected if imported in a chain segment. + assert_eq!( + harness.chain.process_chain_segment(blocks).to_block_error(), + Err(BlockError::InvalidSignature), + "should not import chain segment with an invalid {} signature", + item + ); + + // Ensure the block will be rejected if imported on its own (without gossip checking). 
+ assert_eq!( + harness + .chain + .process_block(snapshots[block_index].beacon_block.clone()), + Err(BlockError::InvalidSignature), + "should not import individual block with an invalid {} signature", + item + ); + + let gossip_verified = harness + .chain + .verify_block_for_gossip(snapshots[block_index].beacon_block.clone()) + .expect("should obtain gossip verified block"); + assert_eq!( + harness.chain.process_block(gossip_verified), + Err(BlockError::InvalidSignature), + "should not import gossip verified block with an invalid {} signature", + item + ); + }; + + /* + * Block proposal + */ + let mut snapshots = CHAIN_SEGMENT.clone(); + snapshots[block_index].beacon_block.signature = junk_signature(); + let blocks = snapshots + .iter() + .map(|snapshot| snapshot.beacon_block.clone()) + .collect(); + // Ensure the block will be rejected if imported in a chain segment. + assert_eq!( + harness.chain.process_chain_segment(blocks).to_block_error(), + Err(BlockError::InvalidSignature), + "should not import chain segment with an invalid gossip signature", + ); + // Ensure the block will be rejected if imported on its own (without gossip checking). 
+ assert_eq!( + harness + .chain + .process_block(snapshots[block_index].beacon_block.clone()), + Err(BlockError::InvalidSignature), + "should not import individual block with an invalid gossip signature", + ); + + /* + * Randao reveal + */ + let mut snapshots = CHAIN_SEGMENT.clone(); + snapshots[block_index] + .beacon_block + .message + .body + .randao_reveal = junk_signature(); + update_parent_roots(&mut snapshots); + update_proposal_signatures(&mut snapshots, &harness); + assert_invalid_signature(&snapshots, "randao"); + + /* + * Proposer slashing + */ + let mut snapshots = CHAIN_SEGMENT.clone(); + let proposer_slashing = ProposerSlashing { + signed_header_1: SignedBeaconBlockHeader { + message: snapshots[block_index].beacon_block.message.block_header(), + signature: junk_signature(), + }, + signed_header_2: SignedBeaconBlockHeader { + message: snapshots[block_index].beacon_block.message.block_header(), + signature: junk_signature(), + }, + }; + snapshots[block_index] + .beacon_block + .message + .body + .proposer_slashings + .push(proposer_slashing) + .expect("should update proposer slashing"); + update_parent_roots(&mut snapshots); + update_proposal_signatures(&mut snapshots, &harness); + assert_invalid_signature(&snapshots, "proposer slashing"); + + /* + * Attester slashing + */ + let mut snapshots = CHAIN_SEGMENT.clone(); + let indexed_attestation = IndexedAttestation { + attesting_indices: vec![0].into(), + data: AttestationData { + slot: Slot::new(0), + index: 0, + beacon_block_root: Hash256::zero(), + source: Checkpoint { + epoch: Epoch::new(0), + root: Hash256::zero(), + }, + target: Checkpoint { + epoch: Epoch::new(0), + root: Hash256::zero(), + }, + }, + signature: junk_aggregate_signature(), + }; + let attester_slashing = AttesterSlashing { + attestation_1: indexed_attestation.clone(), + attestation_2: indexed_attestation, + }; + snapshots[block_index] + .beacon_block + .message + .body + .attester_slashings + .push(attester_slashing) + 
.expect("should update attester slashing"); + update_parent_roots(&mut snapshots); + update_proposal_signatures(&mut snapshots, &harness); + assert_invalid_signature(&snapshots, "attester slashing"); + + /* + * Attestation + */ + let mut snapshots = CHAIN_SEGMENT.clone(); + if let Some(attestation) = snapshots[block_index] + .beacon_block + .message + .body + .attestations + .get_mut(0) + { + attestation.signature = junk_aggregate_signature(); + update_parent_roots(&mut snapshots); + update_proposal_signatures(&mut snapshots, &harness); + assert_invalid_signature(&snapshots, "attestation"); + checked_attestation = true; + } + + /* + * Deposit + * + * Note: an invalid deposit signature is permitted! + */ + let mut snapshots = CHAIN_SEGMENT.clone(); + let deposit = Deposit { + proof: vec![Hash256::zero(); DEPOSIT_TREE_DEPTH + 1].into(), + data: DepositData { + pubkey: Keypair::random().pk.into(), + withdrawal_credentials: Hash256::zero(), + amount: 0, + signature: junk_signature().into(), + }, + }; + snapshots[block_index] + .beacon_block + .message + .body + .deposits + .push(deposit) + .expect("should update deposit"); + update_parent_roots(&mut snapshots); + update_proposal_signatures(&mut snapshots, &harness); + let blocks = snapshots + .iter() + .map(|snapshot| snapshot.beacon_block.clone()) + .collect(); + assert!( + harness.chain.process_chain_segment(blocks).to_block_error() + != Err(BlockError::InvalidSignature), + "should not throw an invalid signature error for a bad deposit signature" + ); + + /* + * Voluntary exit + */ + let mut snapshots = CHAIN_SEGMENT.clone(); + let epoch = snapshots[block_index].beacon_state.current_epoch(); + snapshots[block_index] + .beacon_block + .message + .body + .voluntary_exits + .push(SignedVoluntaryExit { + message: VoluntaryExit { + epoch, + validator_index: 0, + }, + signature: junk_signature(), + }) + .expect("should update deposit"); + update_parent_roots(&mut snapshots); + update_proposal_signatures(&mut snapshots, 
&harness); + assert_invalid_signature(&snapshots, "voluntary exit"); + } + + assert!( + checked_attestation, + "the test should check an attestation signature" + ) +} + +fn unwrap_err(result: Result) -> E { + match result { + Ok(_) => panic!("called unwrap_err on Ok"), + Err(e) => e, + } +} + +#[test] +fn gossip_verification() { + let harness = get_harness(VALIDATOR_COUNT); + + let block_index = CHAIN_SEGMENT_LENGTH - 2; + + harness + .chain + .slot_clock + .set_slot(CHAIN_SEGMENT[block_index].beacon_block.slot().as_u64()); + + // Import the ancestors prior to the block we're testing. + for snapshot in &CHAIN_SEGMENT[0..block_index] { + let gossip_verified = harness + .chain + .verify_block_for_gossip(snapshot.beacon_block.clone()) + .expect("should obtain gossip verified block"); + + harness + .chain + .process_block(gossip_verified) + .expect("should import valid gossip verified block"); + } + + /* + * Block with invalid signature + */ + + let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone(); + block.signature = junk_signature(); + assert_eq!( + unwrap_err(harness.chain.verify_block_for_gossip(block)), + BlockError::ProposalSignatureInvalid, + "should not import a block with an invalid proposal signature" + ); + + /* + * Block from a future slot. + */ + + let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone(); + let block_slot = block.message.slot + 1; + block.message.slot = block_slot; + assert_eq!( + unwrap_err(harness.chain.verify_block_for_gossip(block)), + BlockError::FutureSlot { + present_slot: block_slot - 1, + block_slot + }, + "should not import a block with a future slot" + ); + + /* + * Block from a finalized slot. 
+ */ + + let mut block = CHAIN_SEGMENT[block_index].beacon_block.clone(); + let finalized_slot = harness + .chain + .head_info() + .expect("should get head info") + .finalized_checkpoint + .epoch + .start_slot(E::slots_per_epoch()); + block.message.slot = finalized_slot; + assert_eq!( + unwrap_err(harness.chain.verify_block_for_gossip(block)), + BlockError::WouldRevertFinalizedSlot { + block_slot: finalized_slot, + finalized_slot + }, + "should not import a block with a finalized slot" + ); +} diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index f7d3abe46..c5a855fc0 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -7,7 +7,7 @@ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, }; use beacon_chain::BeaconSnapshot; -use beacon_chain::{AttestationProcessingOutcome, StateSkipConfig}; +use beacon_chain::{AttestationProcessingOutcome, AttestationType, StateSkipConfig}; use rand::Rng; use sloggers::{null::NullLoggerBuilder, Build}; use std::collections::HashMap; @@ -312,7 +312,7 @@ fn epoch_boundary_state_attestation_processing() { .epoch; let res = harness .chain - .process_attestation_internal(attestation.clone()); + .process_attestation_internal(attestation.clone(), AttestationType::Aggregated); let current_epoch = harness.chain.epoch().expect("should get epoch"); let attestation_epoch = attestation.data.target.epoch; diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 6d4c5244f..aecddd2dc 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -3,13 +3,10 @@ #[macro_use] extern crate lazy_static; -use beacon_chain::AttestationProcessingOutcome; -use beacon_chain::{ - test_utils::{ - AttestationStrategy, BeaconChainHarness, BlockStrategy, HarnessType, OP_POOL_DB_KEY, - }, - BlockProcessingOutcome, 
+use beacon_chain::test_utils::{ + AttestationStrategy, BeaconChainHarness, BlockStrategy, HarnessType, OP_POOL_DB_KEY, }; +use beacon_chain::{AttestationProcessingOutcome, AttestationType}; use operation_pool::PersistedOperationPool; use state_processing::{ per_slot_processing, per_slot_processing::Error as SlotProcessingError, EpochProcessingError, @@ -452,7 +449,9 @@ fn attestations_with_increasing_slots() { for attestation in attestations { let attestation_epoch = attestation.data.target.epoch; - let res = harness.chain.process_attestation(attestation); + let res = harness + .chain + .process_attestation(attestation, AttestationType::Aggregated); if attestation_epoch + 1 < current_epoch { assert_eq!( @@ -562,15 +561,13 @@ fn run_skip_slot_test(skip_slots: u64) { .head() .expect("should get head") .beacon_block - .clone() + .clone(), ), - Ok(BlockProcessingOutcome::Processed { - block_root: harness_a - .chain - .head() - .expect("should get head") - .beacon_block_root - }) + Ok(harness_a + .chain + .head() + .expect("should get head") + .beacon_block_root) ); harness_b diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index ea119557e..a6a242161 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "client" -version = "0.1.0" +version = "0.2.0" authors = ["Age Manning "] edition = "2018" @@ -12,6 +12,7 @@ toml = "^0.5" beacon_chain = { path = "../beacon_chain" } store = { path = "../store" } network = { path = "../network" } +timer = { path = "../timer" } eth2-libp2p = { path = "../eth2-libp2p" } rest_api = { path = "../rest_api" } parking_lot = "0.9.0" @@ -29,7 +30,6 @@ slog = { version = "2.5.2", features = ["max_level_trace"] } slog-async = "2.3.0" tokio = "0.1.22" dirs = "2.0.2" -exit-future = "0.1.4" futures = "0.1.29" reqwest = "0.9.22" url = "2.1.0" diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index eac55d309..ad6443bd6 100644 --- 
a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -12,10 +12,10 @@ use beacon_chain::{ use environment::RuntimeContext; use eth1::{Config as Eth1Config, Service as Eth1Service}; use eth2_config::Eth2Config; -use exit_future::Signal; +use eth2_libp2p::NetworkGlobals; use futures::{future, Future, IntoFuture}; use genesis::{interop_genesis_state, Eth1GenesisService}; -use network::{NetworkConfig, NetworkMessage, Service as NetworkService}; +use network::{NetworkConfig, NetworkMessage, NetworkService}; use slog::info; use ssz::Decode; use std::net::SocketAddr; @@ -51,10 +51,10 @@ pub struct ClientBuilder { beacon_chain_builder: Option>, beacon_chain: Option>>, eth1_service: Option, - exit_signals: Vec, + exit_channels: Vec>, event_handler: Option, - libp2p_network: Option>>, - libp2p_network_send: Option>, + network_globals: Option>>, + network_send: Option>>, http_listen_addr: Option, websocket_listen_addr: Option, eth_spec_instance: T::EthSpec, @@ -85,10 +85,10 @@ where beacon_chain_builder: None, beacon_chain: None, eth1_service: None, - exit_signals: vec![], + exit_channels: vec![], event_handler: None, - libp2p_network: None, - libp2p_network_send: None, + network_globals: None, + network_send: None, http_listen_addr: None, websocket_listen_addr: None, eth_spec_instance, @@ -120,6 +120,7 @@ where let runtime_context = self.runtime_context.clone(); let eth_spec_instance = self.eth_spec_instance.clone(); let data_dir = config.data_dir.clone(); + let disabled_forks = config.disabled_forks.clone(); future::ok(()) .and_then(move |()| { @@ -141,7 +142,8 @@ where .store(store) .store_migrator(store_migrator) .data_dir(data_dir) - .custom_spec(spec.clone()); + .custom_spec(spec.clone()) + .disabled_forks(disabled_forks); Ok((builder, spec, context)) }) @@ -237,24 +239,55 @@ where }) } - /// Immediately starts the libp2p networking stack. 
- pub fn libp2p_network(mut self, config: &NetworkConfig) -> Result { + /// Immediately starts the networking stack. + pub fn network(mut self, config: &NetworkConfig) -> Result { let beacon_chain = self .beacon_chain .clone() - .ok_or_else(|| "libp2p_network requires a beacon chain")?; + .ok_or_else(|| "network requires a beacon chain")?; let context = self .runtime_context .as_ref() - .ok_or_else(|| "libp2p_network requires a runtime_context")? + .ok_or_else(|| "network requires a runtime_context")? .service_context("network".into()); - let (network, network_send) = - NetworkService::new(beacon_chain, config, &context.executor, context.log) - .map_err(|e| format!("Failed to start libp2p network: {:?}", e))?; + let (network_globals, network_send, network_exit) = + NetworkService::start(beacon_chain, config, &context.executor, context.log) + .map_err(|e| format!("Failed to start network: {:?}", e))?; - self.libp2p_network = Some(network); - self.libp2p_network_send = Some(network_send); + self.network_globals = Some(network_globals); + self.network_send = Some(network_send); + self.exit_channels.push(network_exit); + + Ok(self) + } + + /// Immediately starts the timer service. + fn timer(mut self) -> Result { + let context = self + .runtime_context + .as_ref() + .ok_or_else(|| "node timer requires a runtime_context")? + .service_context("node_timer".into()); + let beacon_chain = self + .beacon_chain + .clone() + .ok_or_else(|| "node timer requires a beacon chain")?; + let milliseconds_per_slot = self + .chain_spec + .as_ref() + .ok_or_else(|| "node timer requires a chain spec".to_string())? + .milliseconds_per_slot; + + let timer_exit = timer::spawn( + &context.executor, + beacon_chain, + milliseconds_per_slot, + context.log, + ) + .map_err(|e| format!("Unable to start node timer: {}", e))?; + + self.exit_channels.push(timer_exit); Ok(self) } @@ -274,21 +307,21 @@ where .as_ref() .ok_or_else(|| "http_server requires a runtime_context")? 
.service_context("http".into()); - let network = self - .libp2p_network + let network_globals = self + .network_globals .clone() .ok_or_else(|| "http_server requires a libp2p network")?; let network_send = self - .libp2p_network_send + .network_send .clone() .ok_or_else(|| "http_server requires a libp2p network sender")?; let network_info = rest_api::NetworkInfo { - network_service: network, + network_globals, network_chan: network_send, }; - let (exit_signal, listening_addr) = rest_api::start_server( + let (exit_channel, listening_addr) = rest_api::start_server( &client_config.rest_api, &context.executor, beacon_chain, @@ -304,7 +337,7 @@ where ) .map_err(|e| format!("Failed to start HTTP API: {:?}", e))?; - self.exit_signals.push(exit_signal); + self.exit_channels.push(exit_channel); self.http_listen_addr = Some(listening_addr); Ok(self) @@ -321,8 +354,8 @@ where .beacon_chain .clone() .ok_or_else(|| "slot_notifier requires a beacon chain")?; - let network = self - .libp2p_network + let network_globals = self + .network_globals .clone() .ok_or_else(|| "slot_notifier requires a libp2p network")?; let milliseconds_per_slot = self @@ -331,10 +364,15 @@ where .ok_or_else(|| "slot_notifier requires a chain spec".to_string())? 
.milliseconds_per_slot; - let exit_signal = spawn_notifier(context, beacon_chain, network, milliseconds_per_slot) - .map_err(|e| format!("Unable to start slot notifier: {}", e))?; + let exit_channel = spawn_notifier( + context, + beacon_chain, + network_globals, + milliseconds_per_slot, + ) + .map_err(|e| format!("Unable to start slot notifier: {}", e))?; - self.exit_signals.push(exit_signal); + self.exit_channels.push(exit_channel); Ok(self) } @@ -349,10 +387,10 @@ where { Client { beacon_chain: self.beacon_chain, - libp2p_network: self.libp2p_network, + network_globals: self.network_globals, http_listen_addr: self.http_listen_addr, websocket_listen_addr: self.websocket_listen_addr, - _exit_signals: self.exit_signals, + _exit_channels: self.exit_channels, } } } @@ -392,7 +430,8 @@ where self.beacon_chain_builder = None; self.event_handler = None; - Ok(self) + // a beacon chain requires a timer + self.timer() } } @@ -422,7 +461,7 @@ where .ok_or_else(|| "websocket_event_handler requires a runtime_context")? 
.service_context("ws".into()); - let (sender, exit_signal, listening_addr): ( + let (sender, exit_channel, listening_addr): ( WebSocketSender, Option<_>, Option<_>, @@ -434,8 +473,8 @@ where (WebSocketSender::dummy(), None, None) }; - if let Some(signal) = exit_signal { - self.exit_signals.push(signal); + if let Some(channel) = exit_channel { + self.exit_channels.push(channel); } self.event_handler = Some(sender); self.websocket_listen_addr = listening_addr; @@ -641,8 +680,8 @@ where self.eth1_service = None; let exit = { - let (tx, rx) = exit_future::signal(); - self.exit_signals.push(tx); + let (tx, rx) = tokio::sync::oneshot::channel(); + self.exit_channels.push(tx); rx }; diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 9f3bd598f..067005a4a 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -53,6 +53,8 @@ pub struct Config { /// This is the method used for the 2019 client interop in Canada. pub dummy_eth1_backend: bool, pub sync_eth1_chain: bool, + /// A list of hard-coded forks that will be disabled. + pub disabled_forks: Vec, #[serde(skip)] /// The `genesis` field is not serialized or deserialized by `serde` to ensure it is defined /// via the CLI at runtime, instead of from a configuration file saved to disk. 
@@ -81,6 +83,7 @@ impl Default for Config { dummy_eth1_backend: false, sync_eth1_chain: false, eth1: <_>::default(), + disabled_forks: Vec::new(), } } } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 0d5155c6f..7f665b9cb 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -8,9 +8,7 @@ pub mod builder; pub mod error; use beacon_chain::BeaconChain; -use eth2_libp2p::{Enr, Multiaddr}; -use exit_future::Signal; -use network::Service as NetworkService; +use eth2_libp2p::{Enr, Multiaddr, NetworkGlobals}; use std::net::SocketAddr; use std::sync::Arc; @@ -24,11 +22,11 @@ pub use eth2_config::Eth2Config; /// Holds references to running services, cleanly shutting them down when dropped. pub struct Client { beacon_chain: Option>>, - libp2p_network: Option>>, + network_globals: Option>>, http_listen_addr: Option, websocket_listen_addr: Option, - /// Exit signals will "fire" when dropped, causing each service to exit gracefully. - _exit_signals: Vec, + /// Exit channels will complete/error when dropped, causing each service to exit gracefully. + _exit_channels: Vec>, } impl Client { @@ -49,16 +47,16 @@ impl Client { /// Returns the port of the client's libp2p stack, if it was started. pub fn libp2p_listen_port(&self) -> Option { - self.libp2p_network.as_ref().map(|n| n.listen_port()) + self.network_globals.as_ref().map(|n| n.listen_port_tcp()) } /// Returns the list of libp2p addresses the client is listening to. pub fn libp2p_listen_addresses(&self) -> Option> { - self.libp2p_network.as_ref().map(|n| n.listen_multiaddrs()) + self.network_globals.as_ref().map(|n| n.listen_multiaddrs()) } /// Returns the local libp2p ENR of this node, for network discovery. 
pub fn enr(&self) -> Option { - self.libp2p_network.as_ref()?.local_enr() + self.network_globals.as_ref().map(|n| n.local_enr()) } } diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 85e15049e..9ce68bede 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -1,9 +1,8 @@ use crate::metrics; use beacon_chain::{BeaconChain, BeaconChainTypes}; use environment::RuntimeContext; -use exit_future::Signal; +use eth2_libp2p::NetworkGlobals; use futures::{Future, Stream}; -use network::Service as NetworkService; use parking_lot::Mutex; use slog::{debug, error, info, warn}; use slot_clock::SlotClock; @@ -30,9 +29,9 @@ const SPEEDO_OBSERVATIONS: usize = 4; pub fn spawn_notifier( context: RuntimeContext, beacon_chain: Arc>, - network: Arc>, + network: Arc>, milliseconds_per_slot: u64, -) -> Result { +) -> Result, String> { let log_1 = context.log.clone(); let log_2 = context.log.clone(); let log_3 = context.log.clone(); @@ -59,6 +58,7 @@ pub fn spawn_notifier( let log = log_2.clone(); let connected_peer_count = network.connected_peers(); + let sync_state = network.sync_state(); let head_info = beacon_chain.head_info() .map_err(|e| error!( @@ -68,7 +68,6 @@ pub fn spawn_notifier( ))?; let head_slot = head_info.slot; - let head_epoch = head_slot.epoch(T::EthSpec::slots_per_epoch()); let current_slot = beacon_chain.slot().map_err(|e| { error!( log, @@ -102,15 +101,17 @@ pub fn spawn_notifier( "head_block" => format!("{}", head_root), "head_slot" => head_slot, "current_slot" => current_slot, + "sync_state" =>format!("{}", sync_state) ); - if head_epoch + 1 < current_epoch { + + // Log if we are syncing + if sync_state.is_syncing() { let distance = format!( "{} slots ({})", head_distance.as_u64(), slot_distance_pretty(head_distance, slot_duration) ); - info!( log, "Syncing"; @@ -119,15 +120,21 @@ pub fn spawn_notifier( "speed" => sync_speed_pretty(speedo.slots_per_second()), "est_time" => 
estimated_time_pretty(speedo.estimated_time_till_slot(current_slot)), ); - - return Ok(()); - }; - - macro_rules! not_quite_synced_log { - ($message: expr) => { + } else { + if sync_state.is_synced() { info!( log_2, - $message; + "Synced"; + "peers" => peer_count_pretty(connected_peer_count), + "finalized_root" => format!("{}", finalized_root), + "finalized_epoch" => finalized_epoch, + "epoch" => current_epoch, + "slot" => current_slot, + ); + } else { + info!( + log_2, + "Searching for peers"; "peers" => peer_count_pretty(connected_peer_count), "finalized_root" => format!("{}", finalized_root), "finalized_epoch" => finalized_epoch, @@ -136,23 +143,6 @@ pub fn spawn_notifier( ); } } - - if head_epoch + 1 == current_epoch { - not_quite_synced_log!("Synced to previous epoch") - } else if head_slot != current_slot { - not_quite_synced_log!("Synced to current epoch") - } else { - info!( - log_2, - "Synced"; - "peers" => peer_count_pretty(connected_peer_count), - "finalized_root" => format!("{}", finalized_root), - "finalized_epoch" => finalized_epoch, - "epoch" => current_epoch, - "slot" => current_slot, - ); - }; - Ok(()) }) .then(move |result| { @@ -167,10 +157,11 @@ pub fn spawn_notifier( Ok(()) } } }); - let (exit_signal, exit) = exit_future::signal(); + let (exit_signal, exit) = tokio::sync::oneshot::channel(); + context .executor - .spawn(exit.until(interval_future).map(|_| ())); + .spawn(interval_future.select(exit).map(|_| ()).map_err(|_| ())); Ok(exit_signal) } diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 03e592940..a4bb72431 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "eth1" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner "] edition = "2018" @@ -8,7 +8,7 @@ edition = "2018" eth1_test_rig = { path = "../../tests/eth1_test_rig" } environment = { path = "../../lighthouse/environment" } toml = "^0.5" -web3 = "0.8.0" +web3 = "0.10.0" [dependencies] reqwest 
= "0.9" @@ -26,7 +26,6 @@ parking_lot = "0.7" slog = "^2.2.3" tokio = "0.1.22" state_processing = { path = "../../eth2/state_processing" } -exit-future = "0.1.4" libflate = "0.1" lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics"} lazy_static = "1.4.0" diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index be47d4627..1d54c09ea 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -6,7 +6,6 @@ use crate::{ inner::{DepositUpdater, Inner}, DepositLog, }; -use exit_future::Exit; use futures::{ future::{loop_fn, Loop}, stream, Future, Stream, @@ -314,7 +313,10 @@ impl Service { /// - Err(_) if there is an error. /// /// Emits logs for debugging and errors. - pub fn auto_update(&self, exit: Exit) -> impl Future { + pub fn auto_update( + &self, + exit: tokio::sync::oneshot::Receiver<()>, + ) -> impl Future { let service = self.clone(); let log = self.log.clone(); let update_interval = Duration::from_millis(self.config().auto_update_interval_millis); @@ -360,7 +362,7 @@ impl Service { }) }); - exit.until(loop_future).map(|_: Option<()>| ()) + loop_future.select(exit).map(|_| ()).map_err(|_| ()) } /// Contacts the remote eth1 node and attempts to import deposit logs up to the configured diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index edee55307..51802b3d2 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "eth2-libp2p" -version = "0.1.0" +version = "0.2.0" authors = ["Age Manning "] edition = "2018" @@ -8,10 +8,11 @@ edition = "2018" hex = "0.3" # rust-libp2p is presently being sourced from a Sigma Prime fork of the # `libp2p/rust-libp2p` repository. 
-libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "49c95c4c4242f1c9f08558a3daac5e9ecac290d5" } -enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "49c95c4c4242f1c9f08558a3daac5e9ecac290d5", features = ["serde"] } +libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "37b7e9349cf3e724da02bbd4b5dd6c054c2d56d3" } types = { path = "../../eth2/types" } -serde = "1.0.102" +hashmap_delay = { path = "../../eth2/utils/hashmap_delay" } +eth2_ssz_types = { path = "../../eth2/utils/ssz_types" } +serde = { version = "1.0.102", features = ["derive"] } serde_derive = "1.0.102" eth2_ssz = "0.1.2" eth2_ssz_derive = "0.1.0" @@ -31,6 +32,8 @@ lru = "0.4.3" parking_lot = "0.9.0" sha2 = "0.8.0" base64 = "0.11.0" +snap = "1" +void = "1.0.2" [dev-dependencies] slog-stdlog = "4.0.0" diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index 66cf6f0a7..4762e17dc 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -1,228 +1,207 @@ -use crate::discovery::Discovery; -use crate::rpc::{RPCEvent, RPCMessage, RPC}; -use crate::{error, GossipTopic, NetworkConfig, NetworkGlobals, Topic, TopicHash}; -use enr::Enr; +use crate::discovery::{enr::Eth2Enr, Discovery}; +use crate::peer_manager::{PeerManager, PeerManagerEvent}; +use crate::rpc::*; +use crate::types::{GossipEncoding, GossipKind, GossipTopic}; +use crate::{error, Enr, NetworkConfig, NetworkGlobals, PubsubMessage, TopicHash}; use futures::prelude::*; use libp2p::{ - core::identity::Keypair, + core::{identity::Keypair, ConnectedPoint}, discv5::Discv5Event, gossipsub::{Gossipsub, GossipsubEvent, MessageId}, identify::{Identify, IdentifyEvent}, - ping::{Ping, PingConfig, PingEvent}, swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess}, tokio_io::{AsyncRead, AsyncWrite}, NetworkBehaviour, PeerId, }; use lru::LruCache; -use slog::{debug, o}; -use std::num::NonZeroU32; +use slog::{crit, debug, o, warn}; +use 
std::marker::PhantomData; use std::sync::Arc; -use std::time::Duration; +use types::{EnrForkId, EthSpec, SubnetId}; -const MAX_IDENTIFY_ADDRESSES: usize = 20; +const MAX_IDENTIFY_ADDRESSES: usize = 10; /// Builds the network behaviour that manages the core protocols of eth2. /// This core behaviour is managed by `Behaviour` which adds peer management to all core /// behaviours. #[derive(NetworkBehaviour)] -#[behaviour(out_event = "BehaviourEvent", poll_method = "poll")] -pub struct Behaviour { +#[behaviour(out_event = "BehaviourEvent", poll_method = "poll")] +pub struct Behaviour { /// The routing pub-sub mechanism for eth2. gossipsub: Gossipsub, /// The Eth2 RPC specified in the wire-0 protocol. - eth2_rpc: RPC, + eth2_rpc: RPC, /// Keep regular connection to peers and disconnect if absent. - // TODO: Remove Libp2p ping in favour of discv5 ping. - ping: Ping, // TODO: Using id for initial interop. This will be removed by mainnet. /// Provides IP addresses and peer information. identify: Identify, /// Discovery behaviour. - discovery: Discovery, + discovery: Discovery, + /// The peer manager that keeps track of peer's reputation and status. + #[behaviour(ignore)] + peer_manager: PeerManager, /// The events generated by this behaviour to be consumed in the swarm poll. #[behaviour(ignore)] - events: Vec, + events: Vec>, + /// The current meta data of the node, so respond to pings and get metadata + #[behaviour(ignore)] + meta_data: MetaData, /// A cache of recently seen gossip messages. This is used to filter out any possible /// duplicates that may still be seen over gossipsub. #[behaviour(ignore)] + // TODO: Remove this seen_gossip_messages: LruCache, + /// A collections of variables accessible outside the network service. + #[behaviour(ignore)] + network_globals: Arc>, + #[behaviour(ignore)] + /// Keeps track of the current EnrForkId for upgrading gossipsub topics. + // NOTE: This can be accessed via the network_globals ENR. 
However we keep it here for quick + // lookups for every gossipsub message send. + enr_fork_id: EnrForkId, #[behaviour(ignore)] /// Logger for behaviour actions. log: slog::Logger, } -impl Behaviour { +/// Implements the combined behaviour for the libp2p service. +impl Behaviour { pub fn new( local_key: &Keypair, net_conf: &NetworkConfig, - network_globals: Arc, + network_globals: Arc>, log: &slog::Logger, ) -> error::Result { let local_peer_id = local_key.public().into_peer_id(); let behaviour_log = log.new(o!()); - let ping_config = PingConfig::new() - .with_timeout(Duration::from_secs(30)) - .with_interval(Duration::from_secs(20)) - .with_max_failures(NonZeroU32::new(2).expect("2 != 0")) - .with_keep_alive(false); - let identify = Identify::new( "lighthouse/libp2p".into(), version::version(), local_key.public(), ); + let enr_fork_id = network_globals + .local_enr + .read() + .eth2() + .expect("Local ENR must have a fork id"); + + let attnets = network_globals + .local_enr + .read() + .bitfield::() + .expect("Local ENR must have subnet bitfield"); + + let meta_data = MetaData { + seq_number: 1, + attnets, + }; + Ok(Behaviour { eth2_rpc: RPC::new(log.clone()), gossipsub: Gossipsub::new(local_peer_id, net_conf.gs_config.clone()), - discovery: Discovery::new(local_key, net_conf, network_globals, log)?, - ping: Ping::new(ping_config), + discovery: Discovery::new(local_key, net_conf, network_globals.clone(), log)?, identify, + peer_manager: PeerManager::new(network_globals.clone(), log), events: Vec::new(), seen_gossip_messages: LruCache::new(100_000), + meta_data, + network_globals, + enr_fork_id, log: behaviour_log, }) } - pub fn discovery(&self) -> &Discovery { + /// Obtain a reference to the discovery protocol. + pub fn discovery(&self) -> &Discovery { &self.discovery } + /// Obtain a reference to the gossipsub protocol. 
pub fn gs(&self) -> &Gossipsub { &self.gossipsub } -} -// Implement the NetworkBehaviourEventProcess trait so that we can derive NetworkBehaviour for Behaviour -impl NetworkBehaviourEventProcess - for Behaviour -{ - fn inject_event(&mut self, event: GossipsubEvent) { - match event { - GossipsubEvent::Message(propagation_source, id, gs_msg) => { - let msg = PubsubMessage::from_topics(&gs_msg.topics, gs_msg.data); - - // Note: We are keeping track here of the peer that sent us the message, not the - // peer that originally published the message. - if self.seen_gossip_messages.put(id.clone(), ()).is_none() { - // if this message isn't a duplicate, notify the network - self.events.push(BehaviourEvent::GossipMessage { - id, - source: propagation_source, - topics: gs_msg.topics, - message: msg, - }); - } else { - debug!(self.log, "A duplicate message was received"; "message" => format!("{:?}", msg)); - } - } - GossipsubEvent::Subscribed { peer_id, topic } => { - self.events - .push(BehaviourEvent::PeerSubscribed(peer_id, topic)); - } - GossipsubEvent::Unsubscribed { .. } => {} - } - } -} - -impl NetworkBehaviourEventProcess - for Behaviour -{ - fn inject_event(&mut self, event: RPCMessage) { - match event { - RPCMessage::PeerDialed(peer_id) => { - self.events.push(BehaviourEvent::PeerDialed(peer_id)) - } - RPCMessage::PeerDisconnected(peer_id) => { - self.events.push(BehaviourEvent::PeerDisconnected(peer_id)) - } - RPCMessage::RPC(peer_id, rpc_event) => { - self.events.push(BehaviourEvent::RPC(peer_id, rpc_event)) - } - } - } -} - -impl NetworkBehaviourEventProcess - for Behaviour -{ - fn inject_event(&mut self, _event: PingEvent) { - // not interested in ping responses at the moment. - } -} - -impl Behaviour { - /// Consumes the events list when polled. 
- fn poll( - &mut self, - ) -> Async> { - if !self.events.is_empty() { - return Async::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0))); - } - - Async::NotReady - } -} - -impl NetworkBehaviourEventProcess - for Behaviour -{ - fn inject_event(&mut self, event: IdentifyEvent) { - match event { - IdentifyEvent::Received { - peer_id, - mut info, - observed_addr, - } => { - if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES { - debug!( - self.log, - "More than 20 addresses have been identified, truncating" - ); - info.listen_addrs.truncate(MAX_IDENTIFY_ADDRESSES); - } - debug!(self.log, "Identified Peer"; "peer" => format!("{}", peer_id), - "protocol_version" => info.protocol_version, - "agent_version" => info.agent_version, - "listening_ addresses" => format!("{:?}", info.listen_addrs), - "observed_address" => format!("{:?}", observed_addr), - "protocols" => format!("{:?}", info.protocols) - ); - } - IdentifyEvent::Sent { .. } => {} - IdentifyEvent::Error { .. } => {} - } - } -} - -impl NetworkBehaviourEventProcess - for Behaviour -{ - fn inject_event(&mut self, _event: Discv5Event) { - // discv5 has no events to inject - } -} - -/// Implements the combined behaviour for the libp2p service. -impl Behaviour { /* Pubsub behaviour functions */ + /// Subscribes to a gossipsub topic kind, letting the network service determine the + /// encoding and fork version. + pub fn subscribe_kind(&mut self, kind: GossipKind) -> bool { + let gossip_topic = GossipTopic::new( + kind, + GossipEncoding::default(), + self.enr_fork_id.fork_digest, + ); + self.subscribe(gossip_topic) + } + + /// Unsubscribes from a gossipsub topic kind, letting the network service determine the + /// encoding and fork version. 
+ pub fn unsubscribe_kind(&mut self, kind: GossipKind) -> bool { + let gossip_topic = GossipTopic::new( + kind, + GossipEncoding::default(), + self.enr_fork_id.fork_digest, + ); + self.unsubscribe(gossip_topic) + } + + /// Subscribes to a specific subnet id; + pub fn subscribe_to_subnet(&mut self, subnet_id: SubnetId) -> bool { + let topic = GossipTopic::new( + subnet_id.into(), + GossipEncoding::default(), + self.enr_fork_id.fork_digest, + ); + self.subscribe(topic) + } + + /// Un-Subscribes from a specific subnet id; + pub fn unsubscribe_from_subnet(&mut self, subnet_id: SubnetId) -> bool { + let topic = GossipTopic::new( + subnet_id.into(), + GossipEncoding::default(), + self.enr_fork_id.fork_digest, + ); + self.unsubscribe(topic) + } + /// Subscribes to a gossipsub topic. - pub fn subscribe(&mut self, topic: Topic) -> bool { - self.gossipsub.subscribe(topic) + fn subscribe(&mut self, topic: GossipTopic) -> bool { + // update the network globals + self.network_globals + .gossipsub_subscriptions + .write() + .insert(topic.clone()); + + let topic_str: String = topic.clone().into(); + debug!(self.log, "Subscribed to topic"; "topic" => topic_str); + self.gossipsub.subscribe(topic.into()) } /// Unsubscribe from a gossipsub topic. - pub fn unsubscribe(&mut self, topic: Topic) -> bool { - self.gossipsub.unsubscribe(topic) + fn unsubscribe(&mut self, topic: GossipTopic) -> bool { + // update the network globals + self.network_globals + .gossipsub_subscriptions + .write() + .remove(&topic); + // unsubscribe from the topic + self.gossipsub.unsubscribe(topic.into()) } - /// Publishes a message on the pubsub (gossipsub) behaviour. - pub fn publish(&mut self, topics: &[Topic], message: PubsubMessage) { - let message_data = message.into_data(); - for topic in topics { - self.gossipsub.publish(topic, message_data.clone()); + /// Publishes a list of messages on the pubsub (gossipsub) behaviour, choosing the encoding. 
+ pub fn publish(&mut self, messages: Vec>) { + for message in messages { + for topic in message.topics(GossipEncoding::default(), self.enr_fork_id.fork_digest) { + match message.encode(GossipEncoding::default()) { + Ok(message_data) => { + self.gossipsub.publish(&topic.into(), message_data); + } + Err(e) => crit!(self.log, "Could not publish message"; "error" => e), + } + } } } @@ -236,15 +215,11 @@ impl Behaviour { /* Eth2 RPC behaviour functions */ /// Sends an RPC Request/Response via the RPC protocol. - pub fn send_rpc(&mut self, peer_id: PeerId, rpc_event: RPCEvent) { + pub fn send_rpc(&mut self, peer_id: PeerId, rpc_event: RPCEvent) { self.eth2_rpc.send_rpc(peer_id, rpc_event); } /* Discovery / Peer management functions */ - /// Return the list of currently connected peers. - pub fn connected_peers(&self) -> usize { - self.discovery.connected_peers() - } /// Notify discovery that the peer has been banned. pub fn peer_banned(&mut self, peer_id: PeerId) { @@ -265,18 +240,322 @@ impl Behaviour { pub fn add_enr(&mut self, enr: Enr) { self.discovery.add_enr(enr); } + + /// Updates a subnet value to the ENR bitfield. + /// + /// The `value` is `true` if a subnet is being added and false otherwise. + pub fn update_enr_subnet(&mut self, subnet_id: SubnetId, value: bool) { + if let Err(e) = self.discovery.update_enr_bitfield(subnet_id, value) { + crit!(self.log, "Could not update ENR bitfield"; "error" => e); + } + // update the local meta data which informs our peers of the update during PINGS + self.update_metadata(); + } + + /// A request to search for peers connected to a long-lived subnet. + pub fn peers_request(&mut self, subnet_id: SubnetId) { + self.discovery.peers_request(subnet_id); + } + + /// Updates the local ENR's "eth2" field with the latest EnrForkId. 
+ pub fn update_fork_version(&mut self, enr_fork_id: EnrForkId) { + self.discovery.update_eth2_enr(enr_fork_id.clone()); + + // unsubscribe from all gossip topics and re-subscribe to their new fork counterparts + let subscribed_topics = self + .network_globals + .gossipsub_subscriptions + .read() + .iter() + .cloned() + .collect::>(); + + // unsubscribe from all topics + for topic in &subscribed_topics { + self.unsubscribe(topic.clone()); + } + + // re-subscribe modifying the fork version + for mut topic in subscribed_topics { + *topic.digest() = enr_fork_id.fork_digest; + self.subscribe(topic); + } + + // update the local reference + self.enr_fork_id = enr_fork_id; + } + + /* Private internal functions */ + + /// Updates the current meta data of the node. + fn update_metadata(&mut self) { + self.meta_data.seq_number += 1; + self.meta_data.attnets = self + .discovery + .local_enr() + .bitfield::() + .expect("Local discovery must have bitfield"); + } + + /// Sends a PING/PONG request/response to a peer. + fn send_ping(&mut self, id: RequestId, peer_id: PeerId) { + let pong_response = RPCEvent::Response( + id, + RPCErrorResponse::Success(RPCResponse::Pong(crate::rpc::methods::Ping { + data: self.meta_data.seq_number, + })), + ); + self.send_rpc(peer_id, pong_response); + } + + /// Sends a METADATA request to a peer. + fn send_meta_data_request(&mut self, peer_id: PeerId) { + let metadata_request = + RPCEvent::Request(RequestId::from(0usize), RPCRequest::MetaData(PhantomData)); + self.send_rpc(peer_id, metadata_request); + } + + /// Sends a METADATA response to a peer. 
+ fn send_meta_data_response(&mut self, id: RequestId, peer_id: PeerId) { + let metadata_response = RPCEvent::Response( + id, + RPCErrorResponse::Success(RPCResponse::MetaData(self.meta_data.clone())), + ); + self.send_rpc(peer_id, metadata_response); + } +} + +// Implement the NetworkBehaviourEventProcess trait so that we can derive NetworkBehaviour for Behaviour +impl + NetworkBehaviourEventProcess for Behaviour +{ + fn inject_event(&mut self, event: GossipsubEvent) { + match event { + GossipsubEvent::Message(propagation_source, id, gs_msg) => { + // Note: We are keeping track here of the peer that sent us the message, not the + // peer that originally published the message. + if self.seen_gossip_messages.put(id.clone(), ()).is_none() { + match PubsubMessage::decode(&gs_msg.topics, &gs_msg.data) { + Err(e) => { + debug!(self.log, "Could not decode gossipsub message"; "error" => format!("{}", e)) + } + Ok(msg) => { + // if this message isn't a duplicate, notify the network + self.events.push(BehaviourEvent::PubsubMessage { + id, + source: propagation_source, + topics: gs_msg.topics, + message: msg, + }); + } + } + } else { + match PubsubMessage::::decode(&gs_msg.topics, &gs_msg.data) { + Err(e) => { + debug!(self.log, "Could not decode gossipsub message"; "error" => format!("{}", e)) + } + Ok(msg) => { + crit!(self.log, "A duplicate gossipsub message was received"; "message_source" => format!("{}", gs_msg.source), "propagated_peer" => format!("{}",propagation_source), "message" => format!("{}", msg)); + } + } + } + } + GossipsubEvent::Subscribed { peer_id, topic } => { + self.events + .push(BehaviourEvent::PeerSubscribed(peer_id, topic)); + } + GossipsubEvent::Unsubscribed { .. } => {} + } + } +} + +impl + NetworkBehaviourEventProcess> for Behaviour +{ + fn inject_event(&mut self, event: RPCMessage) { + match event { + // TODO: These are temporary methods to give access to injected behaviour + // events to the + // peer manager. 
After a behaviour re-write remove these: + RPCMessage::PeerConnectedHack(peer_id, connected_point) => { + match connected_point { + ConnectedPoint::Dialer { .. } => self.peer_manager.connect_outgoing(&peer_id), + ConnectedPoint::Listener { .. } => self.peer_manager.connect_ingoing(&peer_id), + }; + + // Find ENR info about a peer if possible. + if let Some(enr) = self.discovery.enr_of_peer(&peer_id) { + let bitfield = match enr.bitfield::() { + Ok(v) => v, + Err(e) => { + warn!(self.log, "Peer has invalid ENR bitfield"; + "peer_id" => format!("{}", peer_id), + "error" => format!("{:?}", e)); + return; + } + }; + + // use this as a baseline, until we get the actual meta-data + let meta_data = MetaData { + seq_number: 0, + attnets: bitfield, + }; + // TODO: Shift to the peer manager + self.network_globals + .peers + .write() + .add_metadata(&peer_id, meta_data); + } + } + RPCMessage::PeerDisconnectedHack(peer_id, _connected_point) => { + self.peer_manager.notify_disconnect(&peer_id) + } + + RPCMessage::PeerDialed(peer_id) => { + self.events.push(BehaviourEvent::PeerDialed(peer_id)) + } + RPCMessage::PeerDisconnected(peer_id) => { + self.events.push(BehaviourEvent::PeerDisconnected(peer_id)) + } + RPCMessage::RPC(peer_id, rpc_event) => { + // The METADATA and PING RPC responses are handled within the behaviour and not + // propagated + // TODO: Improve the RPC types to better handle this logic discrepancy + match rpc_event { + RPCEvent::Request(id, RPCRequest::Ping(ping)) => { + // inform the peer manager and send the response + self.peer_manager.ping_request(&peer_id, ping.data); + self.send_ping(id, peer_id); + } + RPCEvent::Request(id, RPCRequest::MetaData(_)) => { + // send the requested meta-data + self.send_meta_data_response(id, peer_id); + } + RPCEvent::Response(_, RPCErrorResponse::Success(RPCResponse::Pong(ping))) => { + self.peer_manager.pong_response(&peer_id, ping.data); + } + RPCEvent::Response( + _, + 
RPCErrorResponse::Success(RPCResponse::MetaData(meta_data)), + ) => { + self.peer_manager.meta_data_response(&peer_id, meta_data); + } + RPCEvent::Request(_, RPCRequest::Status(_)) + | RPCEvent::Response(_, RPCErrorResponse::Success(RPCResponse::Status(_))) => { + // inform the peer manager that we have received a status from a peer + self.peer_manager.peer_statusd(&peer_id); + // propagate the STATUS message upwards + self.events.push(BehaviourEvent::RPC(peer_id, rpc_event)); + } + _ => { + // propagate all other RPC messages upwards + self.events.push(BehaviourEvent::RPC(peer_id, rpc_event)) + } + } + } + } + } +} + +impl Behaviour { + /// Consumes the events list when polled. + fn poll( + &mut self, + ) -> Async>> { + // check the peer manager for events + loop { + match self.peer_manager.poll() { + Ok(Async::Ready(Some(event))) => match event { + PeerManagerEvent::Status(peer_id) => { + // it's time to status. We don't keep a beacon chain reference here, so we inform + // the network to send a status to this peer + return Async::Ready(NetworkBehaviourAction::GenerateEvent( + BehaviourEvent::StatusPeer(peer_id), + )); + } + PeerManagerEvent::Ping(peer_id) => { + // send a ping to this peer + self.send_ping(RequestId::from(0usize), peer_id); + } + PeerManagerEvent::MetaData(peer_id) => { + self.send_meta_data_request(peer_id); + } + PeerManagerEvent::_DisconnectPeer(_peer_id) => { + //TODO: Implement + } + PeerManagerEvent::_BanPeer(_peer_id) => { + //TODO: Implement + } + }, + Ok(Async::NotReady) => break, + Ok(Async::Ready(None)) | Err(_) => { + crit!(self.log, "Error polling peer manager"); + break; + } + } + } + + if !self.events.is_empty() { + return Async::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0))); + } + + Async::NotReady + } +} + +impl NetworkBehaviourEventProcess + for Behaviour +{ + fn inject_event(&mut self, event: IdentifyEvent) { + match event { + IdentifyEvent::Received { + peer_id, + mut info, + observed_addr, + } => { + if 
info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES { + debug!( + self.log, + "More than 10 addresses have been identified, truncating" + ); + info.listen_addrs.truncate(MAX_IDENTIFY_ADDRESSES); + } + // send peer info to the peer manager. + self.peer_manager.identify(&peer_id, &info); + + debug!(self.log, "Identified Peer"; "peer" => format!("{}", peer_id), + "protocol_version" => info.protocol_version, + "agent_version" => info.agent_version, + "listening_ addresses" => format!("{:?}", info.listen_addrs), + "observed_address" => format!("{:?}", observed_addr), + "protocols" => format!("{:?}", info.protocols) + ); + } + IdentifyEvent::Sent { .. } => {} + IdentifyEvent::Error { .. } => {} + } + } +} + +impl NetworkBehaviourEventProcess + for Behaviour +{ + fn inject_event(&mut self, _event: Discv5Event) { + // discv5 has no events to inject + } } /// The types of events than can be obtained from polling the behaviour. -pub enum BehaviourEvent { +#[derive(Debug)] +pub enum BehaviourEvent { /// A received RPC event and the peer that it was received from. - RPC(PeerId, RPCEvent), + RPC(PeerId, RPCEvent), /// We have completed an initial connection to a new peer. PeerDialed(PeerId), /// A peer has disconnected. PeerDisconnected(PeerId), /// A gossipsub message has been received. - GossipMessage { + PubsubMessage { /// The gossipsub message id. Used when propagating blocks after validation. id: MessageId, /// The peer from which we received this message, not the peer that published it. @@ -284,60 +563,10 @@ pub enum BehaviourEvent { /// The topics that this message was sent on. topics: Vec, /// The message itself. - message: PubsubMessage, + message: PubsubMessage, }, /// Subscribed to peer for given topic PeerSubscribed(PeerId, TopicHash), -} - -/// Messages that are passed to and from the pubsub (Gossipsub) behaviour. These are encoded and -/// decoded upstream. 
-#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub enum PubsubMessage { - /// Gossipsub message providing notification of a new block. - Block(Vec), - /// Gossipsub message providing notification of a new attestation. - Attestation(Vec), - /// Gossipsub message providing notification of a voluntary exit. - VoluntaryExit(Vec), - /// Gossipsub message providing notification of a new proposer slashing. - ProposerSlashing(Vec), - /// Gossipsub message providing notification of a new attester slashing. - AttesterSlashing(Vec), - /// Gossipsub message from an unknown topic. - Unknown(Vec), -} - -impl PubsubMessage { - /* Note: This is assuming we are not hashing topics. If we choose to hash topics, these will - * need to be modified. - * - * Also note that a message can be associated with many topics. As soon as one of the topics is - * known we match. If none of the topics are known we return an unknown state. - */ - fn from_topics(topics: &[TopicHash], data: Vec) -> Self { - for topic in topics { - match GossipTopic::from(topic.as_str()) { - GossipTopic::BeaconBlock => return PubsubMessage::Block(data), - GossipTopic::BeaconAttestation => return PubsubMessage::Attestation(data), - GossipTopic::VoluntaryExit => return PubsubMessage::VoluntaryExit(data), - GossipTopic::ProposerSlashing => return PubsubMessage::ProposerSlashing(data), - GossipTopic::AttesterSlashing => return PubsubMessage::AttesterSlashing(data), - GossipTopic::Shard => return PubsubMessage::Unknown(data), - GossipTopic::Unknown(_) => continue, - } - } - PubsubMessage::Unknown(data) - } - - fn into_data(self) -> Vec { - match self { - PubsubMessage::Block(data) - | PubsubMessage::Attestation(data) - | PubsubMessage::VoluntaryExit(data) - | PubsubMessage::ProposerSlashing(data) - | PubsubMessage::AttesterSlashing(data) - | PubsubMessage::Unknown(data) => data, - } - } + /// Inform the network to send a Status to this peer. 
+ StatusPeer(PeerId), } diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index d0ec62df2..be3b11d37 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -1,5 +1,6 @@ -use crate::topics::GossipTopic; -use enr::Enr; +use crate::types::GossipKind; +use crate::Enr; +use libp2p::discv5::{Discv5Config, Discv5ConfigBuilder}; use libp2p::gossipsub::{GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage, MessageId}; use libp2p::Multiaddr; use serde_derive::{Deserialize, Serialize}; @@ -7,6 +8,8 @@ use sha2::{Digest, Sha256}; use std::path::PathBuf; use std::time::Duration; +pub const GOSSIP_MAX_SIZE: usize = 1_048_576; + #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(default)] /// Network configuration for lighthouse. @@ -20,13 +23,19 @@ pub struct Config { /// The TCP port that libp2p listens on. pub libp2p_port: u16, - /// The address to broadcast to peers about which address we are listening on. None indicates - /// that no discovery address has been set in the CLI args. - pub discovery_address: Option, - /// UDP port that discovery listens on. pub discovery_port: u16, + /// The address to broadcast to peers about which address we are listening on. None indicates + /// that no discovery address has been set in the CLI args. + pub enr_address: Option, + + /// The udp port to broadcast to peers in order to reach back for discovery. + pub enr_udp_port: Option, + + /// The tcp port to broadcast to peers in order to reach back for libp2p services. + pub enr_tcp_port: Option, + /// Target number of connected peers. pub max_peers: usize, @@ -40,6 +49,10 @@ pub struct Config { #[serde(skip)] pub gs_config: GossipsubConfig, + /// Discv5 configuration parameters. + #[serde(skip)] + pub discv5_config: Discv5Config, + /// List of nodes to initially connect to. 
pub boot_nodes: Vec, @@ -50,7 +63,7 @@ pub struct Config { pub client_version: String, /// List of extra topics to initially subscribe to as strings. - pub topics: Vec, + pub topics: Vec, /// Introduces randomization in network propagation of messages. This should only be set for /// testing purposes and will likely be removed in future versions. @@ -67,11 +80,11 @@ impl Default for Config { // The default topics that we will initially subscribe to let topics = vec![ - GossipTopic::BeaconBlock, - GossipTopic::BeaconAttestation, - GossipTopic::VoluntaryExit, - GossipTopic::ProposerSlashing, - GossipTopic::AttesterSlashing, + GossipKind::BeaconBlock, + GossipKind::BeaconAggregateAndProof, + GossipKind::VoluntaryExit, + GossipKind::ProposerSlashing, + GossipKind::AttesterSlashing, ]; // The function used to generate a gossipsub message id @@ -83,23 +96,43 @@ impl Default for Config { )) }; + // gossipsub configuration + // Note: The topics by default are sent as plain strings. Hashes are an optional + // parameter. + let gs_config = GossipsubConfigBuilder::new() + .max_transmit_size(GOSSIP_MAX_SIZE) + .heartbeat_interval(Duration::from_secs(20)) // TODO: Reduce for mainnet + .manual_propagation() // require validation before propagation + .no_source_id() + .message_id_fn(gossip_message_id) + .build(); + + // discv5 configuration + let discv5_config = Discv5ConfigBuilder::new() + .request_timeout(Duration::from_secs(4)) + .request_retries(2) + .enr_update(true) // update IP based on PONG responses + .enr_peer_update_min(2) // prevents NAT's should be raised for mainnet + .query_parallelism(5) + .query_timeout(Duration::from_secs(60)) + .query_peer_timeout(Duration::from_secs(2)) + .ip_limit(false) // limits /24 IP's in buckets. Enable for mainnet + .ping_interval(Duration::from_secs(300)) + .build(); + + // NOTE: Some of these get overridden by the corresponding CLI default values. 
Config { network_dir, - listen_address: "127.0.0.1".parse().expect("valid ip address"), + listen_address: "0.0.0.0".parse().expect("valid ip address"), libp2p_port: 9000, - discovery_address: None, discovery_port: 9000, - max_peers: 10, + enr_address: None, + enr_udp_port: None, + enr_tcp_port: None, + max_peers: 50, secret_key_hex: None, - // Note: The topics by default are sent as plain strings. Hashes are an optional - // parameter. - gs_config: GossipsubConfigBuilder::new() - .max_transmit_size(1_048_576) - .heartbeat_interval(Duration::from_secs(20)) // TODO: Reduce for mainnet - .manual_propagation() // require validation before propagation - .no_source_id() - .message_id_fn(gossip_message_id) - .build(), + gs_config, + discv5_config, boot_nodes: vec![], libp2p_nodes: vec![], client_version: version::version(), diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs deleted file mode 100644 index 235fd7194..000000000 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ /dev/null @@ -1,412 +0,0 @@ -use crate::metrics; -use crate::{error, NetworkConfig, NetworkGlobals}; -/// This manages the discovery and management of peers. -/// -/// Currently using discv5 for peer discovery. -/// -use futures::prelude::*; -use libp2p::core::{identity::Keypair, ConnectedPoint, Multiaddr, PeerId}; -use libp2p::discv5::{Discv5, Discv5Event}; -use libp2p::enr::{Enr, EnrBuilder, NodeId}; -use libp2p::multiaddr::Protocol; -use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler}; -use slog::{debug, info, warn}; -use std::collections::HashSet; -use std::fs::File; -use std::io::prelude::*; -use std::path::Path; -use std::str::FromStr; -use std::sync::{atomic::Ordering, Arc}; -use std::time::{Duration, Instant}; -use tokio::io::{AsyncRead, AsyncWrite}; -use tokio::timer::Delay; - -/// Maximum seconds before searching for extra peers. 
-const MAX_TIME_BETWEEN_PEER_SEARCHES: u64 = 120; -/// Initial delay between peer searches. -const INITIAL_SEARCH_DELAY: u64 = 5; -/// Local ENR storage filename. -const ENR_FILENAME: &str = "enr.dat"; - -/// Lighthouse discovery behaviour. This provides peer management and discovery using the Discv5 -/// libp2p protocol. -pub struct Discovery { - /// The currently banned peers. - banned_peers: HashSet, - - /// The target number of connected peers on the libp2p interface. - max_peers: usize, - - /// The directory where the ENR is stored. - enr_dir: String, - - /// The delay between peer discovery searches. - peer_discovery_delay: Delay, - - /// Tracks the last discovery delay. The delay is doubled each round until the max - /// time is reached. - past_discovery_delay: u64, - - /// The TCP port for libp2p. Used to convert an updated IP address to a multiaddr. Note: This - /// assumes that the external TCP port is the same as the internal TCP port if behind a NAT. - //TODO: Improve NAT handling limit the above restriction - tcp_port: u16, - - /// The discovery behaviour used to discover new peers. - discovery: Discv5, - - /// A collection of network constants that can be read from other threads. - network_globals: Arc, - - /// Logger for the discovery behaviour. 
- log: slog::Logger, -} - -impl Discovery { - pub fn new( - local_key: &Keypair, - config: &NetworkConfig, - network_globals: Arc, - log: &slog::Logger, - ) -> error::Result { - let log = log.clone(); - - // checks if current ENR matches that found on disk - let local_enr = load_enr(local_key, config, &log)?; - - *network_globals.local_enr.write() = Some(local_enr.clone()); - - let enr_dir = match config.network_dir.to_str() { - Some(path) => String::from(path), - None => String::from(""), - }; - - info!(log, "ENR Initialised"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq()); - debug!(log, "Discv5 Node ID Initialised"; "node_id" => format!("{}",local_enr.node_id())); - - // the last parameter enables IP limiting. 2 Nodes on the same /24 subnet per bucket and 10 - // nodes on the same /24 subnet per table. - // TODO: IP filtering is currently disabled for the DHT. Enable for production - let mut discovery = Discv5::new(local_enr, local_key.clone(), config.listen_address, false) - .map_err(|e| format!("Discv5 service failed. Error: {:?}", e))?; - - // Add bootnodes to routing table - for bootnode_enr in config.boot_nodes.clone() { - debug!( - log, - "Adding node to routing table"; - "node_id" => format!("{}", - bootnode_enr.node_id()) - ); - discovery.add_enr(bootnode_enr); - } - - Ok(Self { - banned_peers: HashSet::new(), - max_peers: config.max_peers, - peer_discovery_delay: Delay::new(Instant::now()), - past_discovery_delay: INITIAL_SEARCH_DELAY, - tcp_port: config.libp2p_port, - discovery, - network_globals, - log, - enr_dir, - }) - } - - /// Return the nodes local ENR. - pub fn local_enr(&self) -> &Enr { - self.discovery.local_enr() - } - - /// Manually search for peers. This restarts the discovery round, sparking multiple rapid - /// queries. - pub fn discover_peers(&mut self) { - self.past_discovery_delay = INITIAL_SEARCH_DELAY; - self.find_peers(); - } - - /// Add an ENR to the routing table of the discovery mechanism. 
- pub fn add_enr(&mut self, enr: Enr) { - self.discovery.add_enr(enr); - } - - /// The current number of connected libp2p peers. - pub fn connected_peers(&self) -> usize { - self.network_globals.connected_peers.load(Ordering::Relaxed) - } - - /// The current number of connected libp2p peers. - pub fn connected_peer_set(&self) -> Vec { - self.network_globals - .connected_peer_set - .read() - .iter() - .cloned() - .collect::>() - } - - /// The peer has been banned. Add this peer to the banned list to prevent any future - /// re-connections. - // TODO: Remove the peer from the DHT if present - pub fn peer_banned(&mut self, peer_id: PeerId) { - self.banned_peers.insert(peer_id); - } - - pub fn peer_unbanned(&mut self, peer_id: &PeerId) { - self.banned_peers.remove(peer_id); - } - - /// Returns an iterator over all enr entries in the DHT. - pub fn enr_entries(&mut self) -> impl Iterator { - self.discovery.enr_entries() - } - - /// Search for new peers using the underlying discovery mechanism. - fn find_peers(&mut self) { - // pick a random NodeId - let random_node = NodeId::random(); - debug!(self.log, "Searching for peers"); - self.discovery.find_node(random_node); - } -} - -// Redirect all behaviour events to underlying discovery behaviour. -impl NetworkBehaviour for Discovery -where - TSubstream: AsyncRead + AsyncWrite, -{ - type ProtocolsHandler = as NetworkBehaviour>::ProtocolsHandler; - type OutEvent = as NetworkBehaviour>::OutEvent; - - fn new_handler(&mut self) -> Self::ProtocolsHandler { - NetworkBehaviour::new_handler(&mut self.discovery) - } - - fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { - // Let discovery track possible known peers. 
- self.discovery.addresses_of_peer(peer_id) - } - - fn inject_connected(&mut self, peer_id: PeerId, _endpoint: ConnectedPoint) { - self.network_globals - .connected_peer_set - .write() - .insert(peer_id); - self.network_globals.connected_peers.store( - self.network_globals.connected_peer_set.read().len(), - Ordering::Relaxed, - ); - // TODO: Drop peers if over max_peer limit - - metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT); - metrics::set_gauge(&metrics::PEERS_CONNECTED, self.connected_peers() as i64); - } - - fn inject_disconnected(&mut self, peer_id: &PeerId, _endpoint: ConnectedPoint) { - self.network_globals - .connected_peer_set - .write() - .remove(peer_id); - self.network_globals.connected_peers.store( - self.network_globals.connected_peer_set.read().len(), - Ordering::Relaxed, - ); - - metrics::inc_counter(&metrics::PEER_DISCONNECT_EVENT_COUNT); - metrics::set_gauge(&metrics::PEERS_CONNECTED, self.connected_peers() as i64); - } - - fn inject_replaced( - &mut self, - _peer_id: PeerId, - _closed: ConnectedPoint, - _opened: ConnectedPoint, - ) { - // discv5 doesn't implement - } - - fn inject_node_event( - &mut self, - _peer_id: PeerId, - _event: ::OutEvent, - ) { - // discv5 doesn't implement - } - - fn poll( - &mut self, - params: &mut impl PollParameters, - ) -> Async< - NetworkBehaviourAction< - ::InEvent, - Self::OutEvent, - >, - > { - // search for peers if it is time - loop { - match self.peer_discovery_delay.poll() { - Ok(Async::Ready(_)) => { - if self.network_globals.connected_peers.load(Ordering::Relaxed) < self.max_peers - { - self.find_peers(); - } - // Set to maximum, and update to earlier, once we get our results back. 
- self.peer_discovery_delay.reset( - Instant::now() + Duration::from_secs(MAX_TIME_BETWEEN_PEER_SEARCHES), - ); - } - Ok(Async::NotReady) => break, - Err(e) => { - warn!(self.log, "Discovery peer search failed"; "error" => format!("{:?}", e)); - } - } - } - - // Poll discovery - loop { - match self.discovery.poll(params) { - Async::Ready(NetworkBehaviourAction::GenerateEvent(event)) => { - match event { - Discv5Event::Discovered(_enr) => { - // not concerned about FINDNODE results, rather the result of an entire - // query. - } - Discv5Event::SocketUpdated(socket) => { - info!(self.log, "Address updated"; "ip" => format!("{}",socket.ip()), "udp_port" => format!("{}", socket.port())); - metrics::inc_counter(&metrics::ADDRESS_UPDATE_COUNT); - let mut address = Multiaddr::from(socket.ip()); - address.push(Protocol::Tcp(self.tcp_port)); - let enr = self.discovery.local_enr(); - save_enr_to_disc(Path::new(&self.enr_dir), enr, &self.log); - - return Async::Ready(NetworkBehaviourAction::ReportObservedAddr { - address, - }); - } - Discv5Event::FindNodeResult { closer_peers, .. 
} => { - debug!(self.log, "Discovery query completed"; "peers_found" => closer_peers.len()); - // update the time to the next query - if self.past_discovery_delay < MAX_TIME_BETWEEN_PEER_SEARCHES { - self.past_discovery_delay *= 2; - } - let delay = std::cmp::max( - self.past_discovery_delay, - MAX_TIME_BETWEEN_PEER_SEARCHES, - ); - self.peer_discovery_delay - .reset(Instant::now() + Duration::from_secs(delay)); - - if closer_peers.is_empty() { - debug!(self.log, "Discovery random query found no peers"); - } - for peer_id in closer_peers { - // if we need more peers, attempt a connection - - if self.network_globals.connected_peers.load(Ordering::Relaxed) - < self.max_peers - && self - .network_globals - .connected_peer_set - .read() - .get(&peer_id) - .is_none() - && !self.banned_peers.contains(&peer_id) - { - debug!(self.log, "Peer discovered"; "peer_id"=> format!("{:?}", peer_id)); - return Async::Ready(NetworkBehaviourAction::DialPeer { - peer_id, - }); - } - } - } - _ => {} - } - } - // discv5 does not output any other NetworkBehaviourAction - Async::Ready(_) => {} - Async::NotReady => break, - } - } - Async::NotReady - } -} - -/// Loads an ENR from file if it exists and matches the current NodeId and sequence number. If none -/// exists, generates a new one. -/// -/// If an ENR exists, with the same NodeId and IP address, we use the disk-generated one as its -/// ENR sequence will be equal or higher than a newly generated one. -fn load_enr( - local_key: &Keypair, - config: &NetworkConfig, - log: &slog::Logger, -) -> Result { - // Build the local ENR. - // Note: Discovery should update the ENR record's IP to the external IP as seen by the - // majority of our peers. 
- let mut local_enr = EnrBuilder::new("v4") - .ip(config - .discovery_address - .unwrap_or_else(|| "127.0.0.1".parse().expect("valid ip"))) - .tcp(config.libp2p_port) - .udp(config.discovery_port) - .build(&local_key) - .map_err(|e| format!("Could not build Local ENR: {:?}", e))?; - - let enr_f = config.network_dir.join(ENR_FILENAME); - if let Ok(mut enr_file) = File::open(enr_f.clone()) { - let mut enr_string = String::new(); - match enr_file.read_to_string(&mut enr_string) { - Err(_) => debug!(log, "Could not read ENR from file"), - Ok(_) => { - match Enr::from_str(&enr_string) { - Ok(enr) => { - if enr.node_id() == local_enr.node_id() { - if (config.discovery_address.is_none() - || enr.ip().map(Into::into) == config.discovery_address) - && enr.tcp() == Some(config.libp2p_port) - && enr.udp() == Some(config.discovery_port) - { - debug!(log, "ENR loaded from file"; "file" => format!("{:?}", enr_f)); - // the stored ENR has the same configuration, use it - return Ok(enr); - } - - // same node id, different configuration - update the sequence number - let new_seq_no = enr.seq().checked_add(1).ok_or_else(|| "ENR sequence number on file is too large. 
Remove it to generate a new NodeId")?; - local_enr.set_seq(new_seq_no, local_key).map_err(|e| { - format!("Could not update ENR sequence number: {:?}", e) - })?; - debug!(log, "ENR sequence number increased"; "seq" => new_seq_no); - } - } - Err(e) => { - warn!(log, "ENR from file could not be decoded"; "error" => format!("{:?}", e)); - } - } - } - } - } - - save_enr_to_disc(&config.network_dir, &local_enr, log); - - Ok(local_enr) -} - -fn save_enr_to_disc(dir: &Path, enr: &Enr, log: &slog::Logger) { - let _ = std::fs::create_dir_all(dir); - match File::create(dir.join(Path::new(ENR_FILENAME))) - .and_then(|mut f| f.write_all(&enr.to_base64().as_bytes())) - { - Ok(_) => { - debug!(log, "ENR written to disk"); - } - Err(e) => { - warn!( - log, - "Could not write ENR to file"; "file" => format!("{:?}{:?}",dir, ENR_FILENAME), "error" => format!("{}", e) - ); - } - } -} diff --git a/beacon_node/eth2-libp2p/src/discovery/enr.rs b/beacon_node/eth2-libp2p/src/discovery/enr.rs new file mode 100644 index 000000000..edd08bc9a --- /dev/null +++ b/beacon_node/eth2-libp2p/src/discovery/enr.rs @@ -0,0 +1,172 @@ +//! Helper functions and an extension trait for Ethereum 2 ENRs. + +pub use libp2p::{core::identity::Keypair, discv5::enr::CombinedKey}; + +use super::ENR_FILENAME; +use crate::types::{Enr, EnrBitfield}; +use crate::NetworkConfig; +use libp2p::discv5::enr::EnrBuilder; +use slog::{debug, warn}; +use ssz::{Decode, Encode}; +use ssz_types::BitVector; +use std::convert::TryInto; +use std::fs::File; +use std::io::prelude::*; +use std::path::Path; +use std::str::FromStr; +use types::{EnrForkId, EthSpec}; + +/// The ENR field specifying the fork id. +pub const ETH2_ENR_KEY: &'static str = "eth2"; +/// The ENR field specifying the subnet bitfield. +pub const BITFIELD_ENR_KEY: &'static str = "attnets"; + +/// Extension trait for ENR's within Eth2. +pub trait Eth2Enr { + /// The subnet bitfield associated with the ENR. 
+ fn bitfield(&self) -> Result, &'static str>; + + fn eth2(&self) -> Result; +} + +impl Eth2Enr for Enr { + fn bitfield(&self) -> Result, &'static str> { + let bitfield_bytes = self + .get(BITFIELD_ENR_KEY) + .ok_or_else(|| "ENR bitfield non-existent")?; + + BitVector::::from_ssz_bytes(bitfield_bytes) + .map_err(|_| "Could not decode the ENR SSZ bitfield") + } + + fn eth2(&self) -> Result { + let eth2_bytes = self + .get(ETH2_ENR_KEY) + .ok_or_else(|| "ENR has no eth2 field")?; + + EnrForkId::from_ssz_bytes(eth2_bytes).map_err(|_| "Could not decode EnrForkId") + } +} + +/// Loads an ENR from file if it exists and matches the current NodeId and sequence number. If none +/// exists, generates a new one. +/// +/// If an ENR exists, with the same NodeId, this function checks to see if the loaded ENR from +/// disk is suitable to use, otherwise we increment our newly generated ENR's sequence number. +pub fn build_or_load_enr( + local_key: Keypair, + config: &NetworkConfig, + enr_fork_id: EnrForkId, + log: &slog::Logger, +) -> Result { + // Build the local ENR. + // Note: Discovery should update the ENR record's IP to the external IP as seen by the + // majority of our peers, if the CLI doesn't expressly forbid it. 
+ let enr_key: CombinedKey = local_key + .try_into() + .map_err(|_| "Invalid key type for ENR records")?; + + let mut local_enr = build_enr::(&enr_key, config, enr_fork_id)?; + + let enr_f = config.network_dir.join(ENR_FILENAME); + if let Ok(mut enr_file) = File::open(enr_f.clone()) { + let mut enr_string = String::new(); + match enr_file.read_to_string(&mut enr_string) { + Err(_) => debug!(log, "Could not read ENR from file"), + Ok(_) => { + match Enr::from_str(&enr_string) { + Ok(disk_enr) => { + // if the same node id, then we may need to update our sequence number + if local_enr.node_id() == disk_enr.node_id() { + if compare_enr(&local_enr, &disk_enr) { + debug!(log, "ENR loaded from disk"; "file" => format!("{:?}", enr_f)); + // the stored ENR has the same configuration, use it + return Ok(disk_enr); + } + + // same node id, different configuration - update the sequence number + let new_seq_no = disk_enr.seq().checked_add(1).ok_or_else(|| "ENR sequence number on file is too large. Remove it to generate a new NodeId")?; + local_enr.set_seq(new_seq_no, &enr_key).map_err(|e| { + format!("Could not update ENR sequence number: {:?}", e) + })?; + debug!(log, "ENR sequence number increased"; "seq" => new_seq_no); + } + } + Err(e) => { + warn!(log, "ENR from file could not be decoded"; "error" => format!("{:?}", e)); + } + } + } + } + } + + save_enr_to_disk(&config.network_dir, &local_enr, log); + + Ok(local_enr) +} + +/// Builds a lighthouse ENR given a `NetworkConfig`. 
+pub fn build_enr( + enr_key: &CombinedKey, + config: &NetworkConfig, + enr_fork_id: EnrForkId, +) -> Result { + let mut builder = EnrBuilder::new("v4"); + if let Some(enr_address) = config.enr_address { + builder.ip(enr_address); + } + if let Some(udp_port) = config.enr_udp_port { + builder.udp(udp_port); + } + // we always give it our listening tcp port + // TODO: Add uPnP support to map udp and tcp ports + let tcp_port = config.enr_tcp_port.unwrap_or_else(|| config.libp2p_port); + builder.tcp(tcp_port); + + // set the `eth2` field on our ENR + builder.add_value(ETH2_ENR_KEY.into(), enr_fork_id.as_ssz_bytes()); + + // set the "attnets" field on our ENR + let bitfield = BitVector::::new(); + + builder.add_value(BITFIELD_ENR_KEY.into(), bitfield.as_ssz_bytes()); + + builder + .tcp(config.libp2p_port) + .build(enr_key) + .map_err(|e| format!("Could not build Local ENR: {:?}", e)) +} + +/// Defines the conditions under which we use the locally built ENR or the one stored on disk. +/// If this function returns true, we use the `disk_enr`. +fn compare_enr(local_enr: &Enr, disk_enr: &Enr) -> bool { + // take preference over disk_enr address if one is not specified + (local_enr.ip().is_none() || local_enr.ip() == disk_enr.ip()) + // tcp ports must match + && local_enr.tcp() == disk_enr.tcp() + // must match on the same fork + && local_enr.get(ETH2_ENR_KEY) == disk_enr.get(ETH2_ENR_KEY) + // take preference over disk udp port if one is not specified + && (local_enr.udp().is_none() || local_enr.udp() == disk_enr.udp()) + // we need the BITFIELD_ENR_KEY key to match, otherwise we use a new ENR. 
This will likely only + // be true for non-validating nodes + && local_enr.get(BITFIELD_ENR_KEY) == disk_enr.get(BITFIELD_ENR_KEY) +} + +/// Saves an ENR to disk +pub fn save_enr_to_disk(dir: &Path, enr: &Enr, log: &slog::Logger) { + let _ = std::fs::create_dir_all(dir); + match File::create(dir.join(Path::new(ENR_FILENAME))) + .and_then(|mut f| f.write_all(&enr.to_base64().as_bytes())) + { + Ok(_) => { + debug!(log, "ENR written to disk"); + } + Err(e) => { + warn!( + log, + "Could not write ENR to file"; "file" => format!("{:?}{:?}",dir, ENR_FILENAME), "error" => format!("{}", e) + ); + } + } +} diff --git a/beacon_node/eth2-libp2p/src/discovery/mod.rs b/beacon_node/eth2-libp2p/src/discovery/mod.rs new file mode 100644 index 000000000..2cd40b8a1 --- /dev/null +++ b/beacon_node/eth2-libp2p/src/discovery/mod.rs @@ -0,0 +1,489 @@ +///! This manages the discovery and management of peers. +pub(crate) mod enr; + +// Allow external use of the lighthouse ENR builder +pub use enr::{build_enr, CombinedKey, Keypair}; + +use crate::metrics; +use crate::{error, Enr, NetworkConfig, NetworkGlobals}; +use enr::{Eth2Enr, BITFIELD_ENR_KEY, ETH2_ENR_KEY}; +use futures::prelude::*; +use libp2p::core::{ConnectedPoint, Multiaddr, PeerId}; +use libp2p::discv5::enr::NodeId; +use libp2p::discv5::{Discv5, Discv5Event}; +use libp2p::multiaddr::Protocol; +use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler}; +use slog::{crit, debug, info, warn}; +use ssz::{Decode, Encode}; +use ssz_types::BitVector; +use std::collections::{HashSet, VecDeque}; +use std::net::SocketAddr; +use std::path::Path; +use std::sync::Arc; +use std::time::{Duration, Instant}; +use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::timer::Delay; +use types::{EnrForkId, EthSpec, SubnetId}; + +/// Maximum seconds before searching for extra peers. +const MAX_TIME_BETWEEN_PEER_SEARCHES: u64 = 120; +/// Initial delay between peer searches. 
+const INITIAL_SEARCH_DELAY: u64 = 5; +/// Local ENR storage filename. +pub const ENR_FILENAME: &str = "enr.dat"; +/// Number of peers we'd like to have connected to a given long-lived subnet. +const TARGET_SUBNET_PEERS: u64 = 3; + +/// Lighthouse discovery behaviour. This provides peer management and discovery using the Discv5 +/// libp2p protocol. +pub struct Discovery { + /// Events to be processed by the behaviour. + events: VecDeque>, + + /// The currently banned peers. + banned_peers: HashSet, + + /// The target number of connected peers on the libp2p interface. + max_peers: usize, + + /// The directory where the ENR is stored. + enr_dir: String, + + /// The delay between peer discovery searches. + peer_discovery_delay: Delay, + + /// Tracks the last discovery delay. The delay is doubled each round until the max + /// time is reached. + past_discovery_delay: u64, + + /// The TCP port for libp2p. Used to convert an updated IP address to a multiaddr. Note: This + /// assumes that the external TCP port is the same as the internal TCP port if behind a NAT. + //TODO: Improve NAT handling limit the above restriction + tcp_port: u16, + + /// The discovery behaviour used to discover new peers. + discovery: Discv5, + + /// A collection of network constants that can be read from other threads. + network_globals: Arc>, + + /// Logger for the discovery behaviour. 
+ log: slog::Logger, +} + +impl Discovery { + pub fn new( + local_key: &Keypair, + config: &NetworkConfig, + network_globals: Arc>, + log: &slog::Logger, + ) -> error::Result { + let log = log.clone(); + + let enr_dir = match config.network_dir.to_str() { + Some(path) => String::from(path), + None => String::from(""), + }; + + let local_enr = network_globals.local_enr.read().clone(); + + info!(log, "ENR Initialised"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> format!("{}",local_enr.node_id()), "ip" => format!("{:?}", local_enr.ip()), "udp"=> format!("{:?}", local_enr.udp()), "tcp" => format!("{:?}", local_enr.tcp())); + + let listen_socket = SocketAddr::new(config.listen_address, config.discovery_port); + + let mut discovery = Discv5::new( + local_enr, + local_key.clone(), + config.discv5_config.clone(), + listen_socket, + ) + .map_err(|e| format!("Discv5 service failed. Error: {:?}", e))?; + + // Add bootnodes to routing table + for bootnode_enr in config.boot_nodes.clone() { + debug!( + log, + "Adding node to routing table"; + "node_id" => format!("{}", bootnode_enr.node_id()), + "peer_id" => format!("{}", bootnode_enr.peer_id()), + "ip" => format!("{:?}", bootnode_enr.ip()), + "udp" => format!("{:?}", bootnode_enr.udp()), + "tcp" => format!("{:?}", bootnode_enr.tcp()) + ); + let _ = discovery.add_enr(bootnode_enr).map_err(|e| { + warn!( + log, + "Could not add peer to the local routing table"; + "error" => format!("{}", e) + ) + }); + } + + Ok(Self { + events: VecDeque::with_capacity(16), + banned_peers: HashSet::new(), + max_peers: config.max_peers, + peer_discovery_delay: Delay::new(Instant::now()), + past_discovery_delay: INITIAL_SEARCH_DELAY, + tcp_port: config.libp2p_port, + discovery, + network_globals, + log, + enr_dir, + }) + } + + /// Return the nodes local ENR. + pub fn local_enr(&self) -> &Enr { + self.discovery.local_enr() + } + + /// Manually search for peers. 
This restarts the discovery round, sparking multiple rapid + /// queries. + pub fn discover_peers(&mut self) { + self.past_discovery_delay = INITIAL_SEARCH_DELAY; + self.find_peers(); + } + + /// Add an ENR to the routing table of the discovery mechanism. + pub fn add_enr(&mut self, enr: Enr) { + let _ = self.discovery.add_enr(enr).map_err(|e| { + warn!( + self.log, + "Could not add peer to the local routing table"; + "error" => format!("{}", e) + ) + }); + } + + /// The peer has been banned. Add this peer to the banned list to prevent any future + /// re-connections. + // TODO: Remove the peer from the DHT if present + pub fn peer_banned(&mut self, peer_id: PeerId) { + self.banned_peers.insert(peer_id); + } + + pub fn peer_unbanned(&mut self, peer_id: &PeerId) { + self.banned_peers.remove(peer_id); + } + + /// Returns an iterator over all enr entries in the DHT. + pub fn enr_entries(&mut self) -> impl Iterator { + self.discovery.enr_entries() + } + + /// Returns the ENR of a known peer if it exists. + pub fn enr_of_peer(&mut self, peer_id: &PeerId) -> Option { + self.discovery.enr_of_peer(peer_id) + } + + /// Adds/Removes a subnet from the ENR Bitfield + pub fn update_enr_bitfield(&mut self, subnet_id: SubnetId, value: bool) -> Result<(), String> { + let id = *subnet_id as usize; + + let local_enr = self.discovery.local_enr(); + let mut current_bitfield = local_enr.bitfield::()?; + + if id >= current_bitfield.len() { + return Err(format!( + "Subnet id: {} is outside the ENR bitfield length: {}", + id, + current_bitfield.len() + )); + } + + if current_bitfield + .get(id) + .map_err(|_| String::from("Subnet ID out of bounds"))? 
+ == value
+ {
+ return Err(format!(
+ "Subnet id: {} in the local ENR already has value: {}",
+ id, value
+ ));
+ }
+
+ // set the subnet bitfield in the ENR
+ current_bitfield
+ .set(id, value)
+ .map_err(|_| String::from("Subnet ID out of bounds, could not set subnet ID"))?;
+
+ // insert the bitfield into the ENR record
+ let _ = self
+ .discovery
+ .enr_insert(BITFIELD_ENR_KEY, current_bitfield.as_ssz_bytes());
+
+ // replace the global version
+ *self.network_globals.local_enr.write() = self.discovery.local_enr().clone();
+ Ok(())
+ }
+
+ /// Updates the `eth2` field of our local ENR.
+ pub fn update_eth2_enr(&mut self, enr_fork_id: EnrForkId) {
+ // to avoid having a reference to the spec constant, for the logging we assume
+ // FAR_FUTURE_EPOCH is u64::max_value()
+ let next_fork_epoch_log = if enr_fork_id.next_fork_epoch == u64::max_value() {
+ String::from("No other fork")
+ } else {
+ format!("{:?}", enr_fork_id.next_fork_epoch)
+ };
+
+ info!(self.log, "Updating the ENR fork version";
+ "fork_digest" => format!("{:?}", enr_fork_id.fork_digest),
+ "next_fork_version" => format!("{:?}", enr_fork_id.next_fork_version),
+ "next_fork_epoch" => next_fork_epoch_log,
+ );
+
+ let _ = self
+ .discovery
+ .enr_insert(ETH2_ENR_KEY.into(), enr_fork_id.as_ssz_bytes())
+ .map_err(|e| {
+ warn!(
+ self.log,
+ "Could not update eth2 ENR field";
+ "error" => format!("{:?}", e)
+ )
+ });
+
+ // replace the global version with discovery version
+ *self.network_globals.local_enr.write() = self.discovery.local_enr().clone();
+ }
+
+ /// A request to find peers on a given subnet.
+ // TODO: This logic should be improved with added sophistication in peer management
+ // This currently checks for currently connected peers and if we don't have
+ // PEERS_WANTED_BEFORE_DISCOVERY connected to a given subnet we search for more.
+ pub fn peers_request(&mut self, subnet_id: SubnetId) { + let peers_on_subnet = self + .network_globals + .peers + .read() + .peers_on_subnet(&subnet_id) + .count() as u64; + + if peers_on_subnet < TARGET_SUBNET_PEERS { + let target_peers = TARGET_SUBNET_PEERS - peers_on_subnet; + debug!(self.log, "Searching for peers for subnet"; + "subnet_id" => *subnet_id, + "connected_peers_on_subnet" => peers_on_subnet, + "target_subnet_peers" => TARGET_SUBNET_PEERS, + "peers_to_find" => target_peers + ); + + let log_clone = self.log.clone(); + + let subnet_predicate = move |enr: &Enr| { + if let Some(bitfield_bytes) = enr.get(BITFIELD_ENR_KEY) { + let bitfield = match BitVector::::from_ssz_bytes( + bitfield_bytes, + ) { + Ok(v) => v, + Err(e) => { + warn!(log_clone, "Could not decode ENR bitfield for peer"; "peer_id" => format!("{}", enr.peer_id()), "error" => format!("{:?}", e)); + return false; + } + }; + + return bitfield.get(*subnet_id as usize).unwrap_or_else(|_| { + debug!(log_clone, "Peer found but not on desired subnet"; "peer_id" => format!("{}", enr.peer_id())); + false + }); + } + false + }; + + // start the query + self.start_query(subnet_predicate, target_peers as usize); + } else { + debug!(self.log, "Discovery ignored"; + "reason" => "Already connected to desired peers", + "connected_peers_on_subnet" => peers_on_subnet, + "target_subnet_peers" => TARGET_SUBNET_PEERS, + ); + } + } + + /* Internal Functions */ + + /// Run a standard query to search for more peers. + /// + /// This searches for the standard kademlia bucket size (16) peers. + fn find_peers(&mut self) { + debug!(self.log, "Searching for peers"); + self.start_query(|_| true, 16); + } + + /// Search for a specified number of new peers using the underlying discovery mechanism. + /// + /// This can optionally search for peers for a given predicate. Regardless of the predicate + /// given, this will only search for peers on the same enr_fork_id as specified in the local + /// ENR. 
+ fn start_query(&mut self, enr_predicate: F, num_nodes: usize) + where + F: Fn(&Enr) -> bool + Send + 'static + Clone, + { + // pick a random NodeId + let random_node = NodeId::random(); + + let enr_fork_id = match self.local_enr().eth2() { + Ok(v) => v, + Err(e) => { + crit!(self.log, "Local ENR has no fork id"; "error" => e); + return; + } + }; + // predicate for finding nodes with a matching fork + let eth2_fork_predicate = move |enr: &Enr| enr.eth2() == Ok(enr_fork_id.clone()); + let predicate = move |enr: &Enr| eth2_fork_predicate(enr) && enr_predicate(enr); + + // general predicate + self.discovery + .find_enr_predicate(random_node, predicate, num_nodes); + } +} + +// Redirect all behaviour events to underlying discovery behaviour. +impl NetworkBehaviour for Discovery +where + TSubstream: AsyncRead + AsyncWrite, +{ + type ProtocolsHandler = as NetworkBehaviour>::ProtocolsHandler; + type OutEvent = as NetworkBehaviour>::OutEvent; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + NetworkBehaviour::new_handler(&mut self.discovery) + } + + fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { + // Let discovery track possible known peers. 
+ self.discovery.addresses_of_peer(peer_id) + } + + fn inject_connected(&mut self, _peer_id: PeerId, _endpoint: ConnectedPoint) {} + + fn inject_disconnected(&mut self, _peer_id: &PeerId, _endpoint: ConnectedPoint) {} + + fn inject_replaced( + &mut self, + _peer_id: PeerId, + _closed: ConnectedPoint, + _opened: ConnectedPoint, + ) { + // discv5 doesn't implement + } + + fn inject_node_event( + &mut self, + _peer_id: PeerId, + _event: ::OutEvent, + ) { + // discv5 doesn't implement + } + + fn poll( + &mut self, + params: &mut impl PollParameters, + ) -> Async< + NetworkBehaviourAction< + ::InEvent, + Self::OutEvent, + >, + > { + // search for peers if it is time + loop { + match self.peer_discovery_delay.poll() { + Ok(Async::Ready(_)) => { + if self.network_globals.connected_peers() < self.max_peers { + self.find_peers(); + } + // Set to maximum, and update to earlier, once we get our results back. + self.peer_discovery_delay.reset( + Instant::now() + Duration::from_secs(MAX_TIME_BETWEEN_PEER_SEARCHES), + ); + } + Ok(Async::NotReady) => break, + Err(e) => { + warn!(self.log, "Discovery peer search failed"; "error" => format!("{:?}", e)); + } + } + } + + // Poll discovery + loop { + match self.discovery.poll(params) { + Async::Ready(NetworkBehaviourAction::GenerateEvent(event)) => { + match event { + Discv5Event::Discovered(_enr) => { + // peers that get discovered during a query but are not contactable or + // don't match a predicate can end up here. 
For debugging purposes we + // log these to see if we are unnecessarily dropping discovered peers + /* + if enr.eth2() == self.local_enr().eth2() { + trace!(self.log, "Peer found in process of query"; "peer_id" => format!("{}", enr.peer_id()), "tcp_socket" => enr.tcp_socket()); + } else { + // this is temporary warning for debugging the DHT + warn!(self.log, "Found peer during discovery not on correct fork"; "peer_id" => format!("{}", enr.peer_id()), "tcp_socket" => enr.tcp_socket()); + } + */ + } + Discv5Event::SocketUpdated(socket) => { + info!(self.log, "Address updated"; "ip" => format!("{}",socket.ip()), "udp_port" => format!("{}", socket.port())); + metrics::inc_counter(&metrics::ADDRESS_UPDATE_COUNT); + let mut address = Multiaddr::from(socket.ip()); + address.push(Protocol::Tcp(self.tcp_port)); + let enr = self.discovery.local_enr(); + enr::save_enr_to_disk(Path::new(&self.enr_dir), enr, &self.log); + + return Async::Ready(NetworkBehaviourAction::ReportObservedAddr { + address, + }); + } + Discv5Event::FindNodeResult { closer_peers, .. 
} => {
+ debug!(self.log, "Discovery query completed"; "peers_found" => closer_peers.len());
+ // update the time to the next query
+ if self.past_discovery_delay < MAX_TIME_BETWEEN_PEER_SEARCHES {
+ self.past_discovery_delay *= 2;
+ }
+ // NOTE(review): this must be `min`, not `max` — with `max` the delay is
+ // always clamped up to MAX_TIME_BETWEEN_PEER_SEARCHES and the doubling
+ // back-off above is dead code.
+ let delay = std::cmp::min(
+ self.past_discovery_delay,
+ MAX_TIME_BETWEEN_PEER_SEARCHES,
+ );
+ self.peer_discovery_delay
+ .reset(Instant::now() + Duration::from_secs(delay));
+
+ for peer_id in closer_peers {
+ // if we need more peers, attempt a connection
+
+ if self.network_globals.connected_or_dialing_peers()
+ < self.max_peers
+ && !self
+ .network_globals
+ .peers
+ .read()
+ .is_connected_or_dialing(&peer_id)
+ && !self.banned_peers.contains(&peer_id)
+ {
+ debug!(self.log, "Connecting to discovered peer"; "peer_id"=> format!("{:?}", peer_id));
+ self.network_globals.peers.write().dialing_peer(&peer_id);
+ self.events
+ .push_back(NetworkBehaviourAction::DialPeer { peer_id });
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+ // discv5 does not output any other NetworkBehaviourAction
+ Async::Ready(_) => {}
+ Async::NotReady => break,
+ }
+ }
+
+ // process any queued events
+ if let Some(event) = self.events.pop_front() {
+ return Async::Ready(event);
+ }
+
+ Async::NotReady
+ }
+}
diff --git a/beacon_node/eth2-libp2p/src/globals.rs b/beacon_node/eth2-libp2p/src/globals.rs
deleted file mode 100644
index 901550034..000000000
--- a/beacon_node/eth2-libp2p/src/globals.rs
+++ /dev/null
@@ -1,30 +0,0 @@
-//! A collection of variables that are accessible outside of the network thread itself.
-use crate::{Enr, Multiaddr, PeerId};
-use parking_lot::RwLock;
-use std::collections::HashSet;
-use std::sync::atomic::AtomicUsize;
-
-pub struct NetworkGlobals {
- /// The current local ENR.
- pub local_enr: RwLock>,
- /// The local peer_id.
- pub peer_id: RwLock,
- /// Listening multiaddrs.
- pub listen_multiaddrs: RwLock>,
- /// Current number of connected libp2p peers.
- pub connected_peers: AtomicUsize, - /// The collection of currently connected peers. - pub connected_peer_set: RwLock>, -} - -impl NetworkGlobals { - pub fn new(peer_id: PeerId) -> Self { - NetworkGlobals { - local_enr: RwLock::new(None), - peer_id: RwLock::new(peer_id), - listen_multiaddrs: RwLock::new(Vec::new()), - connected_peers: AtomicUsize::new(0), - connected_peer_set: RwLock::new(HashSet::new()), - } - } -} diff --git a/beacon_node/eth2-libp2p/src/lib.rs b/beacon_node/eth2-libp2p/src/lib.rs index d09921b2d..9230a4afb 100644 --- a/beacon_node/eth2-libp2p/src/lib.rs +++ b/beacon_node/eth2-libp2p/src/lib.rs @@ -7,26 +7,19 @@ extern crate lazy_static; pub mod behaviour; mod config; -mod discovery; -pub mod error; -mod globals; +pub mod discovery; mod metrics; +mod peer_manager; pub mod rpc; mod service; -mod topics; +pub mod types; -pub use behaviour::PubsubMessage; +pub use crate::types::{error, Enr, GossipTopic, NetworkGlobals, PubsubMessage}; +pub use behaviour::BehaviourEvent; pub use config::Config as NetworkConfig; -pub use globals::NetworkGlobals; -pub use libp2p::enr::Enr; pub use libp2p::gossipsub::{MessageId, Topic, TopicHash}; -pub use libp2p::multiaddr; -pub use libp2p::Multiaddr; -pub use libp2p::{ - gossipsub::{GossipsubConfig, GossipsubConfigBuilder}, - PeerId, Swarm, -}; +pub use libp2p::{multiaddr, Multiaddr}; +pub use libp2p::{PeerId, Swarm}; +pub use peer_manager::{PeerDB, PeerInfo, PeerSyncStatus, SyncInfo}; pub use rpc::RPCEvent; -pub use service::Libp2pEvent; -pub use service::Service; -pub use topics::GossipTopic; +pub use service::{Service, NETWORK_KEY_FILENAME}; diff --git a/beacon_node/eth2-libp2p/src/peer_manager/client.rs b/beacon_node/eth2-libp2p/src/peer_manager/client.rs new file mode 100644 index 000000000..3ba68faaa --- /dev/null +++ b/beacon_node/eth2-libp2p/src/peer_manager/client.rs @@ -0,0 +1,139 @@ +//! Known Ethereum 2.0 clients and their fingerprints. +//! +//! Currently using identify to fingerprint. 
+ +use libp2p::identify::IdentifyInfo; +use serde::Serialize; + +/// Various client and protocol information related to a node. +#[derive(Clone, Debug, Serialize)] +pub struct Client { + /// The client's name (Ex: lighthouse, prism, nimbus, etc) + pub kind: ClientKind, + /// The client's version. + pub version: String, + /// The OS version of the client. + pub os_version: String, + /// The libp2p protocol version. + pub protocol_version: String, + /// Identify agent string + pub agent_string: Option, +} + +#[derive(Clone, Debug, Serialize)] +pub enum ClientKind { + /// A lighthouse node (the best kind). + Lighthouse, + /// A Nimbus node. + Nimbus, + /// A Teku node. + Teku, + /// A Prysm node. + Prysm, + /// An unknown client. + Unknown, +} + +impl Default for Client { + fn default() -> Self { + Client { + kind: ClientKind::Unknown, + version: "unknown".into(), + os_version: "unknown".into(), + protocol_version: "unknown".into(), + agent_string: None, + } + } +} + +impl Client { + /// Builds a `Client` from `IdentifyInfo`. 
+ pub fn from_identify_info(info: &IdentifyInfo) -> Self { + let (kind, version, os_version) = client_from_agent_version(&info.agent_version); + + Client { + kind, + version, + os_version, + protocol_version: info.protocol_version.clone(), + agent_string: Some(info.agent_version.clone()), + } + } +} + +impl std::fmt::Display for Client { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self.kind { + ClientKind::Lighthouse => write!( + f, + "Lighthouse: version: {}, os_version: {}", + self.version, self.os_version + ), + ClientKind::Teku => write!( + f, + "Teku: version: {}, os_version: {}", + self.version, self.os_version + ), + ClientKind::Nimbus => write!( + f, + "Nimbus: version: {}, os_version: {}", + self.version, self.os_version + ), + ClientKind::Prysm => write!( + f, + "Prysm: version: {}, os_version: {}", + self.version, self.os_version + ), + ClientKind::Unknown => { + if let Some(agent_string) = &self.agent_string { + write!(f, "Unknown: {}", agent_string) + } else { + write!(f, "Unknown") + } + } + } + } +} + +// helper function to identify clients from their agent_version. Returns the client +// kind and it's associated version and the OS kind. 
+fn client_from_agent_version(agent_version: &str) -> (ClientKind, String, String) { + let mut agent_split = agent_version.split("/"); + match agent_split.next() { + Some("Lighthouse") => { + let kind = ClientKind::Lighthouse; + let mut version = String::from("unknown"); + let mut os_version = version.clone(); + if let Some(agent_version) = agent_split.next() { + version = agent_version.into(); + if let Some(agent_os_version) = agent_split.next() { + os_version = agent_os_version.into(); + } + } + (kind, version, os_version) + } + Some("teku") => { + let kind = ClientKind::Teku; + let mut version = String::from("unknown"); + let mut os_version = version.clone(); + if let Some(_) = agent_split.next() { + if let Some(agent_version) = agent_split.next() { + version = agent_version.into(); + if let Some(agent_os_version) = agent_split.next() { + os_version = agent_os_version.into(); + } + } + } + (kind, version, os_version) + } + Some("github.com") => { + let kind = ClientKind::Prysm; + let unknown = String::from("unknown"); + (kind, unknown.clone(), unknown) + } + _ => { + let unknown = String::from("unknown"); + (ClientKind::Unknown, unknown.clone(), unknown) + } + } +} diff --git a/beacon_node/eth2-libp2p/src/peer_manager/mod.rs b/beacon_node/eth2-libp2p/src/peer_manager/mod.rs new file mode 100644 index 000000000..0b9d1b5c4 --- /dev/null +++ b/beacon_node/eth2-libp2p/src/peer_manager/mod.rs @@ -0,0 +1,341 @@ +//! Implementation of a Lighthouse's peer management system. 
+ +pub use self::peerdb::*; +use crate::metrics; +use crate::rpc::MetaData; +use crate::{NetworkGlobals, PeerId}; +use futures::prelude::*; +use futures::Stream; +use hashmap_delay::HashSetDelay; +use libp2p::identify::IdentifyInfo; +use slog::{crit, debug, error, warn}; +use smallvec::SmallVec; +use std::sync::Arc; +use std::time::{Duration, Instant}; +use types::EthSpec; + +mod client; +mod peer_info; +mod peer_sync_status; +mod peerdb; + +pub use peer_info::PeerInfo; +pub use peer_sync_status::{PeerSyncStatus, SyncInfo}; +/// The minimum reputation before a peer is disconnected. +// Most likely this needs tweaking +const _MINIMUM_REPUTATION_BEFORE_BAN: Rep = 20; +/// The time in seconds between re-status's peers. +const STATUS_INTERVAL: u64 = 300; +/// The time in seconds between PING events. We do not send a ping if the other peer as PING'd us within +/// this time frame (Seconds) +const PING_INTERVAL: u64 = 30; + +/// The main struct that handles peer's reputation and connection status. +pub struct PeerManager { + /// Storage of network globals to access the PeerDB. + network_globals: Arc>, + /// A queue of events that the `PeerManager` is waiting to produce. + events: SmallVec<[PeerManagerEvent; 5]>, + /// A collection of peers awaiting to be Ping'd. + ping_peers: HashSetDelay, + /// A collection of peers awaiting to be Status'd. + status_peers: HashSetDelay, + /// Last updated moment. + last_updated: Instant, + /// The logger associated with the `PeerManager`. + log: slog::Logger, +} + +/// A collection of actions a peer can perform which will adjust its reputation +/// Each variant has an associated reputation change. +pub enum PeerAction { + /// The peer timed out on an RPC request/response. + _TimedOut = -10, + /// The peer sent and invalid request/response or encoding. + _InvalidMessage = -20, + /// The peer sent something objectively malicious. + _Malicious = -50, + /// Received an expected message. + _ValidMessage = 20, + /// Peer disconnected. 
+ Disconnected = -30, +} + +/// The events that the PeerManager outputs (requests). +pub enum PeerManagerEvent { + /// Sends a STATUS to a peer. + Status(PeerId), + /// Sends a PING to a peer. + Ping(PeerId), + /// Request METADATA from a peer. + MetaData(PeerId), + /// The peer should be disconnected. + _DisconnectPeer(PeerId), + /// The peer should be disconnected and banned. + _BanPeer(PeerId), +} + +impl PeerManager { + pub fn new(network_globals: Arc>, log: &slog::Logger) -> Self { + PeerManager { + network_globals, + events: SmallVec::new(), + last_updated: Instant::now(), + ping_peers: HashSetDelay::new(Duration::from_secs(PING_INTERVAL)), + status_peers: HashSetDelay::new(Duration::from_secs(STATUS_INTERVAL)), + log: log.clone(), + } + } + + /* Public accessible functions */ + + /// A ping request has been received. + // NOTE: The behaviour responds with a PONG automatically + // TODO: Update last seen + pub fn ping_request(&mut self, peer_id: &PeerId, seq: u64) { + if let Some(peer_info) = self.network_globals.peers.read().peer_info(peer_id) { + // received a ping + // reset the to-ping timer for this peer + self.ping_peers.insert(peer_id.clone()); + + // if the sequence number is unknown send update the meta data of the peer. + if let Some(meta_data) = &peer_info.meta_data { + if meta_data.seq_number < seq { + debug!(self.log, "Requesting new metadata from peer"; "peer_id" => format!("{}", peer_id), "known_seq_no" => meta_data.seq_number, "ping_seq_no" => seq); + self.events + .push(PeerManagerEvent::MetaData(peer_id.clone())); + } + } else { + // if we don't know the meta-data, request it + debug!(self.log, "Requesting first metadata from peer"; "peer_id" => format!("{}", peer_id)); + self.events + .push(PeerManagerEvent::MetaData(peer_id.clone())); + } + } else { + crit!(self.log, "Received a PING from an unknown peer"; "peer_id" => format!("{}", peer_id)); + } + } + + /// A PONG has been returned from a peer. 
+ // TODO: Update last seen + pub fn pong_response(&mut self, peer_id: &PeerId, seq: u64) { + if let Some(peer_info) = self.network_globals.peers.read().peer_info(peer_id) { + // received a pong + + // if the sequence number is unknown send update the meta data of the peer. + if let Some(meta_data) = &peer_info.meta_data { + if meta_data.seq_number < seq { + debug!(self.log, "Requesting new metadata from peer"; "peer_id" => format!("{}", peer_id), "known_seq_no" => meta_data.seq_number, "pong_seq_no" => seq); + self.events + .push(PeerManagerEvent::MetaData(peer_id.clone())); + } + } else { + // if we don't know the meta-data, request it + debug!(self.log, "Requesting first metadata from peer"; "peer_id" => format!("{}", peer_id)); + self.events + .push(PeerManagerEvent::MetaData(peer_id.clone())); + } + } else { + crit!(self.log, "Received a PONG from an unknown peer"; "peer_id" => format!("{}", peer_id)); + } + } + + /// Received a metadata response from a peer. + // TODO: Update last seen + pub fn meta_data_response(&mut self, peer_id: &PeerId, meta_data: MetaData) { + if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) { + if let Some(known_meta_data) = &peer_info.meta_data { + if known_meta_data.seq_number < meta_data.seq_number { + debug!(self.log, "Updating peer's metadata"; "peer_id" => format!("{}", peer_id), "known_seq_no" => known_meta_data.seq_number, "new_seq_no" => meta_data.seq_number); + } else { + warn!(self.log, "Received old metadata"; "peer_id" => format!("{}", peer_id), "known_seq_no" => known_meta_data.seq_number, "new_seq_no" => meta_data.seq_number); + } + } else { + // we have no meta-data for this peer, update + debug!(self.log, "Obtained peer's metadata"; "peer_id" => format!("{}", peer_id), "new_seq_no" => meta_data.seq_number); + peer_info.meta_data = Some(meta_data); + } + } else { + crit!(self.log, "Received METADATA from an unknown peer"; "peer_id" => format!("{}", peer_id)); + } + } + + /// A STATUS 
message has been received from a peer. This resets the status timer. + pub fn peer_statusd(&mut self, peer_id: &PeerId) { + self.status_peers.insert(peer_id.clone()); + } + + /// Checks the reputation of a peer and if it is too low, bans it and + /// sends the corresponding event. Informs if it got banned + fn _gets_banned(&mut self, peer_id: &PeerId) -> bool { + // if the peer was already banned don't inform again + let mut peerdb = self.network_globals.peers.write(); + + if let Some(connection_status) = peerdb.connection_status(peer_id) { + if peerdb.reputation(peer_id) < _MINIMUM_REPUTATION_BEFORE_BAN + && !connection_status.is_banned() + { + peerdb.ban(peer_id); + self.events + .push(PeerManagerEvent::_BanPeer(peer_id.clone())); + return true; + } + } + false + } + + /// Requests that a peer get disconnected. + pub fn _disconnect_peer(&mut self, peer_id: &PeerId) { + self.events + .push(PeerManagerEvent::_DisconnectPeer(peer_id.clone())); + } + + /// Updates the state of the peer as disconnected. 
+ pub fn notify_disconnect(&mut self, peer_id: &PeerId) { + self.update_reputations(); + { + let mut peerdb = self.network_globals.peers.write(); + peerdb.disconnect(peer_id); + peerdb.add_reputation(peer_id, PeerAction::Disconnected as Rep); + } + + // remove the ping and status timer for the peer + self.ping_peers.remove(peer_id); + self.status_peers.remove(peer_id); + metrics::inc_counter(&metrics::PEER_DISCONNECT_EVENT_COUNT); + metrics::set_gauge( + &metrics::PEERS_CONNECTED, + self.network_globals.connected_peers() as i64, + ); + } + + /// Sets a peer as connected as long as their reputation allows it + /// Informs if the peer was accepted + pub fn connect_ingoing(&mut self, peer_id: &PeerId) -> bool { + self.connect_peer(peer_id, false) + } + + /// Sets a peer as connected as long as their reputation allows it + /// Informs if the peer was accepted + pub fn connect_outgoing(&mut self, peer_id: &PeerId) -> bool { + self.connect_peer(peer_id, true) + } + + /// Provides a given peer's reputation if it exists. + pub fn _get_peer_rep(&self, peer_id: &PeerId) -> Rep { + self.network_globals.peers.read().reputation(peer_id) + } + + /// Updates the reputation of known peers according to their connection + /// status and the time that has passed. + pub fn update_reputations(&mut self) { + let now = Instant::now(); + let elapsed = (now - self.last_updated).as_secs(); + // 0 seconds means now - last_updated < 0, but (most likely) not = 0. + // In this case, do nothing (updating last_updated would propagate + // rounding errors) + if elapsed > 0 { + self.last_updated = now; + // TODO decide how reputations change with time. If they get too low + // set the peers as banned + } + } + + /// Reports a peer for some action. + /// + /// If the peer doesn't exist, log a warning and insert defaults. 
+ pub fn _report_peer(&mut self, peer_id: &PeerId, action: PeerAction) {
+ self.update_reputations();
+ self.network_globals
+ .peers
+ .write()
+ .add_reputation(peer_id, action as Rep);
+ self.update_reputations();
+ }
+
+ /// Updates `PeerInfo` with `identify` information.
+ pub fn identify(&mut self, peer_id: &PeerId, info: &IdentifyInfo) {
+ if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) {
+ peer_info.client = client::Client::from_identify_info(info);
+ peer_info.listening_addresses = info.listen_addrs.clone();
+ } else {
+ crit!(self.log, "Received an Identify response from an unknown peer"; "peer_id" => format!("{}", peer_id));
+ }
+ }
+
+ /* Internal functions */
+
+ /// Registers a peer as connected. The `ingoing` parameter determines if the peer is being
+ /// dialed or connecting to us.
+ ///
+ /// This is called by `connect_ingoing` and `connect_outgoing`.
+ ///
+ /// This informs if the peer was accepted in to the db or not.
+ // TODO: Drop peers if over max_peer limit
+ fn connect_peer(&mut self, peer_id: &PeerId, outgoing: bool) -> bool {
+ // TODO: Call this on a timer
+ self.update_reputations();
+
+ {
+ let mut peerdb = self.network_globals.peers.write();
+ if peerdb.connection_status(peer_id).map(|c| c.is_banned()) == Some(true) {
+ // don't connect if the peer is banned
+ return false;
+ }
+
+ if outgoing {
+ peerdb.connect_outgoing(peer_id);
+ } else {
+ // FIX(review): an incoming connection must be recorded as ingoing;
+ // previously both branches called connect_outgoing.
+ peerdb.connect_ingoing(peer_id);
+ }
+ }
+
+ // start a ping and status timer for the peer
+ self.ping_peers.insert(peer_id.clone());
+ self.status_peers.insert(peer_id.clone());
+
+ // increment prometheus metrics
+ metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT);
+ metrics::set_gauge(
+ &metrics::PEERS_CONNECTED,
+ self.network_globals.connected_peers() as i64,
+ );
+
+ true
+ }
+
+ /// Notifies the peer manager that this peer is being dialed.
+ pub fn _dialing_peer(&mut self, peer_id: &PeerId) { + self.network_globals.peers.write().dialing_peer(peer_id); + } +} + +impl Stream for PeerManager { + type Item = PeerManagerEvent; + type Error = (); + + fn poll(&mut self) -> Poll, Self::Error> { + // poll the timeouts for pings and status' + while let Async::Ready(Some(peer_id)) = self.ping_peers.poll().map_err(|e| { + error!(self.log, "Failed to check for peers to ping"; "error" => format!("{}",e)); + })? { + debug!(self.log, "Pinging peer"; "peer_id" => format!("{}", peer_id)); + self.events.push(PeerManagerEvent::Ping(peer_id)); + } + + while let Async::Ready(Some(peer_id)) = self.status_peers.poll().map_err(|e| { + error!(self.log, "Failed to check for peers to status"; "error" => format!("{}",e)); + })? { + debug!(self.log, "Sending Status to peer"; "peer_id" => format!("{}", peer_id)); + self.events.push(PeerManagerEvent::Status(peer_id)); + } + + if !self.events.is_empty() { + return Ok(Async::Ready(Some(self.events.remove(0)))); + } else { + self.events.shrink_to_fit(); + } + + Ok(Async::NotReady) + } +} diff --git a/beacon_node/eth2-libp2p/src/peer_manager/peer_info.rs b/beacon_node/eth2-libp2p/src/peer_manager/peer_info.rs new file mode 100644 index 000000000..611766a16 --- /dev/null +++ b/beacon_node/eth2-libp2p/src/peer_manager/peer_info.rs @@ -0,0 +1,217 @@ +use super::client::Client; +use super::peerdb::{Rep, DEFAULT_REPUTATION}; +use super::PeerSyncStatus; +use crate::rpc::MetaData; +use crate::Multiaddr; +use serde::{ + ser::{SerializeStructVariant, Serializer}, + Serialize, +}; +use std::time::Instant; +use types::{EthSpec, SubnetId}; +use PeerConnectionStatus::*; + +/// Information about a given connected peer. 
+#[derive(Clone, Debug, Serialize)] +#[serde(bound = "T: EthSpec")] +pub struct PeerInfo { + /// The connection status of the peer + _status: PeerStatus, + /// The peers reputation + pub reputation: Rep, + /// Client managing this peer + pub client: Client, + /// Connection status of this peer + pub connection_status: PeerConnectionStatus, + /// The known listening addresses of this peer. + pub listening_addresses: Vec, + /// The current syncing state of the peer. The state may be determined after it's initial + /// connection. + pub sync_status: PeerSyncStatus, + /// The ENR subnet bitfield of the peer. This may be determined after it's initial + /// connection. + pub meta_data: Option>, +} + +impl Default for PeerInfo { + fn default() -> PeerInfo { + PeerInfo { + _status: Default::default(), + reputation: DEFAULT_REPUTATION, + client: Client::default(), + connection_status: Default::default(), + listening_addresses: vec![], + sync_status: PeerSyncStatus::Unknown, + meta_data: None, + } + } +} + +impl PeerInfo { + /// Returns if the peer is subscribed to a given `SubnetId` + pub fn on_subnet(&self, subnet_id: SubnetId) -> bool { + if let Some(meta_data) = &self.meta_data { + return meta_data + .attnets + .get(*subnet_id as usize) + .unwrap_or_else(|_| false); + } + false + } +} + +#[derive(Clone, Debug, Serialize)] +/// The current health status of the peer. +pub enum PeerStatus { + /// The peer is healthy. + Healthy, + /// The peer is clogged. It has not been responding to requests on time. + _Clogged, +} + +impl Default for PeerStatus { + fn default() -> Self { + PeerStatus::Healthy + } +} + +/// Connection Status of the peer. +#[derive(Debug, Clone)] +pub enum PeerConnectionStatus { + /// The peer is connected. + Connected { + /// number of ingoing connections. + n_in: u8, + /// number of outgoing connections. + n_out: u8, + }, + /// The peer has disconnected. + Disconnected { + /// last time the peer was connected or discovered. 
+ since: Instant, + }, + /// The peer has been banned and is disconnected. + Banned { + /// moment when the peer was banned. + since: Instant, + }, + /// We are currently dialing this peer. + Dialing { + /// time since we last communicated with the peer. + since: Instant, + }, +} + +/// Serialization for http requests. +impl Serialize for PeerConnectionStatus { + fn serialize(&self, serializer: S) -> Result { + match self { + Connected { n_in, n_out } => { + let mut s = serializer.serialize_struct_variant("", 0, "Connected", 2)?; + s.serialize_field("in", n_in)?; + s.serialize_field("out", n_out)?; + s.end() + } + Disconnected { since } => { + let mut s = serializer.serialize_struct_variant("", 1, "Disconnected", 1)?; + s.serialize_field("since", &since.elapsed().as_secs())?; + s.end() + } + Banned { since } => { + let mut s = serializer.serialize_struct_variant("", 2, "Banned", 1)?; + s.serialize_field("since", &since.elapsed().as_secs())?; + s.end() + } + Dialing { since } => { + let mut s = serializer.serialize_struct_variant("", 3, "Dialing", 1)?; + s.serialize_field("since", &since.elapsed().as_secs())?; + s.end() + } + } + } +} + +impl Default for PeerConnectionStatus { + fn default() -> Self { + PeerConnectionStatus::Dialing { + since: Instant::now(), + } + } +} + +impl PeerConnectionStatus { + /// Checks if the status is connected + pub fn is_connected(&self) -> bool { + match self { + PeerConnectionStatus::Connected { .. } => true, + _ => false, + } + } + + /// Checks if the status is connected + pub fn is_dialing(&self) -> bool { + match self { + PeerConnectionStatus::Dialing { .. } => true, + _ => false, + } + } + + /// Checks if the status is banned + pub fn is_banned(&self) -> bool { + match self { + PeerConnectionStatus::Banned { .. } => true, + _ => false, + } + } + + /// Checks if the status is disconnected + pub fn is_disconnected(&self) -> bool { + match self { + Disconnected { .. 
} => true, + _ => false, + } + } + + /// Modifies the status to Connected and increases the number of ingoing + /// connections by one + pub fn connect_ingoing(&mut self) { + match self { + Connected { n_in, .. } => *n_in += 1, + Disconnected { .. } | Banned { .. } | Dialing { .. } => { + *self = Connected { n_in: 1, n_out: 0 } + } + } + } + + /// Modifies the status to Connected and increases the number of outgoing + /// connections by one + pub fn connect_outgoing(&mut self) { + match self { + Connected { n_out, .. } => *n_out += 1, + Disconnected { .. } | Banned { .. } | Dialing { .. } => { + *self = Connected { n_in: 0, n_out: 1 } + } + } + } + + /// Modifies the status to Disconnected and sets the last seen instant to now + pub fn disconnect(&mut self) { + *self = Disconnected { + since: Instant::now(), + }; + } + + /// Modifies the status to Banned + pub fn ban(&mut self) { + *self = Banned { + since: Instant::now(), + }; + } + + pub fn connections(&self) -> (u8, u8) { + match self { + Connected { n_in, n_out } => (*n_in, *n_out), + _ => (0, 0), + } + } +} diff --git a/beacon_node/eth2-libp2p/src/peer_manager/peer_sync_status.rs b/beacon_node/eth2-libp2p/src/peer_manager/peer_sync_status.rs new file mode 100644 index 000000000..0ce4f6ce9 --- /dev/null +++ b/beacon_node/eth2-libp2p/src/peer_manager/peer_sync_status.rs @@ -0,0 +1,104 @@ +//! Handles individual sync status for peers. + +use serde::Serialize; +use types::{Epoch, Hash256, Slot}; + +#[derive(Clone, Debug, Serialize)] +/// The current sync status of the peer. +pub enum PeerSyncStatus { + /// At the current state as our node or ahead of us. + Synced { info: SyncInfo }, + /// The peer has greater knowledge about the canonical chain than we do. + Advanced { info: SyncInfo }, + /// Is behind our current head and not useful for block downloads. + Behind { info: SyncInfo }, + /// Not currently known as a STATUS handshake has not occurred. 
+ Unknown, +} + +/// This is stored inside the PeerSyncStatus and is very similar to `PeerSyncInfo` in the +/// `Network` crate. +#[derive(Clone, Debug, Serialize)] +pub struct SyncInfo { + pub status_head_slot: Slot, + pub status_head_root: Hash256, + pub status_finalized_epoch: Epoch, + pub status_finalized_root: Hash256, +} + +impl PeerSyncStatus { + /// Returns true if the peer has advanced knowledge of the chain. + pub fn is_advanced(&self) -> bool { + match self { + PeerSyncStatus::Advanced { .. } => true, + _ => false, + } + } + + /// Returns true if the peer is up to date with the current chain. + pub fn is_synced(&self) -> bool { + match self { + PeerSyncStatus::Synced { .. } => true, + _ => false, + } + } + + /// Returns true if the peer is behind the current chain. + pub fn is_behind(&self) -> bool { + match self { + PeerSyncStatus::Behind { .. } => true, + _ => false, + } + } + + /// Updates the sync state given a fully synced peer. + /// Returns true if the state has changed. + pub fn update_synced(&mut self, info: SyncInfo) -> bool { + let new_state = PeerSyncStatus::Synced { info }; + + match self { + PeerSyncStatus::Synced { .. } => { + *self = new_state; + false // state was not updated + } + _ => { + *self = new_state; + true + } + } + } + + /// Updates the sync state given a peer that is further ahead in the chain than us. + /// Returns true if the state has changed. + pub fn update_ahead(&mut self, info: SyncInfo) -> bool { + let new_state = PeerSyncStatus::Advanced { info }; + + match self { + PeerSyncStatus::Advanced { .. } => { + *self = new_state; + false // state was not updated + } + _ => { + *self = new_state; + true + } + } + } + + /// Updates the sync state given a peer that is behind us in the chain. + /// Returns true if the state has changed. + pub fn update_behind(&mut self, info: SyncInfo) -> bool { + let new_state = PeerSyncStatus::Behind { info }; + + match self { + PeerSyncStatus::Behind { .. 
} => { + *self = new_state; + false // state was not updated + } + _ => { + *self = new_state; + true + } + } + } +} diff --git a/beacon_node/eth2-libp2p/src/peer_manager/peerdb.rs b/beacon_node/eth2-libp2p/src/peer_manager/peerdb.rs new file mode 100644 index 000000000..066fa3736 --- /dev/null +++ b/beacon_node/eth2-libp2p/src/peer_manager/peerdb.rs @@ -0,0 +1,499 @@ +use super::peer_info::{PeerConnectionStatus, PeerInfo}; +use super::peer_sync_status::PeerSyncStatus; +use crate::rpc::methods::MetaData; +use crate::PeerId; +use slog::{crit, warn}; +use std::collections::HashMap; +use std::time::Instant; +use types::{EthSpec, SubnetId}; + +/// A peer's reputation. +pub type Rep = i32; + +/// Max number of disconnected nodes to remember +const MAX_DC_PEERS: usize = 30; +/// The default starting reputation for an unknown peer. +pub const DEFAULT_REPUTATION: Rep = 50; + +/// Storage of known peers, their reputation and information +pub struct PeerDB { + /// The collection of known connected peers, their status and reputation + peers: HashMap>, + /// Tracking of number of disconnected nodes + n_dc: usize, + /// PeerDB's logger + log: slog::Logger, +} + +impl PeerDB { + pub fn new(log: &slog::Logger) -> Self { + Self { + log: log.clone(), + n_dc: 0, + peers: HashMap::new(), + } + } + + /* Getters */ + + /// Gives the reputation of a peer, or DEFAULT_REPUTATION if it is unknown. + pub fn reputation(&self, peer_id: &PeerId) -> Rep { + self.peers + .get(peer_id) + .map_or(DEFAULT_REPUTATION, |info| info.reputation) + } + + /// Returns an iterator over all peers in the db. + pub fn peers(&self) -> impl Iterator)> { + self.peers.iter() + } + + /// Gives the ids of all known peers. + pub fn peer_ids(&self) -> impl Iterator { + self.peers.keys() + } + + /// Returns a peer's info, if known. + pub fn peer_info(&self, peer_id: &PeerId) -> Option<&PeerInfo> { + self.peers.get(peer_id) + } + + /// Returns a mutable reference to a peer's info if known. 
+ pub fn peer_info_mut(&mut self, peer_id: &PeerId) -> Option<&mut PeerInfo> { + self.peers.get_mut(peer_id) + } + + /// Returns true if the peer is synced at least to our current head. + pub fn peer_synced(&self, peer_id: &PeerId) -> bool { + match self.peers.get(peer_id).map(|info| &info.sync_status) { + Some(PeerSyncStatus::Synced { .. }) => true, + Some(_) => false, + None => false, + } + } + + /// Gives the ids of all known connected peers. + pub fn connected_peers(&self) -> impl Iterator)> { + self.peers + .iter() + .filter(|(_, info)| info.connection_status.is_connected()) + } + + /// Gives the ids of all known connected peers. + pub fn connected_peer_ids(&self) -> impl Iterator { + self.peers + .iter() + .filter(|(_, info)| info.connection_status.is_connected()) + .map(|(peer_id, _)| peer_id) + } + + /// Connected or dialing peers + pub fn connected_or_dialing_peers(&self) -> impl Iterator { + self.peers + .iter() + .filter(|(_, info)| { + info.connection_status.is_connected() || info.connection_status.is_dialing() + }) + .map(|(peer_id, _)| peer_id) + } + + /// Gives the `peer_id` of all known connected and synced peers. + pub fn synced_peers(&self) -> impl Iterator { + self.peers + .iter() + .filter(|(_, info)| { + if info.sync_status.is_synced() || info.sync_status.is_advanced() { + return info.connection_status.is_connected(); + } + false + }) + .map(|(peer_id, _)| peer_id) + } + + /// Gives an iterator of all peers on a given subnet. + pub fn peers_on_subnet(&self, subnet_id: &SubnetId) -> impl Iterator { + let subnet_id_filter = subnet_id.clone(); + self.peers + .iter() + .filter(move |(_, info)| { + info.connection_status.is_connected() && info.on_subnet(subnet_id_filter) + }) + .map(|(peer_id, _)| peer_id) + } + + /// Gives the ids of all known disconnected peers. 
+ pub fn disconnected_peers(&self) -> impl Iterator { + self.peers + .iter() + .filter(|(_, info)| info.connection_status.is_disconnected()) + .map(|(peer_id, _)| peer_id) + } + + /// Gives the ids of all known banned peers. + pub fn banned_peers(&self) -> impl Iterator { + self.peers + .iter() + .filter(|(_, info)| info.connection_status.is_banned()) + .map(|(peer_id, _)| peer_id) + } + + /// Returns a vector containing peers (their ids and info), sorted by + /// reputation from highest to lowest, and filtered using `is_status` + pub fn best_peers_by_status(&self, is_status: F) -> Vec<(&PeerId, &PeerInfo)> + where + F: Fn(&PeerConnectionStatus) -> bool, + { + let mut by_status = self + .peers + .iter() + .filter(|(_, info)| is_status(&info.connection_status)) + .collect::>(); + by_status.sort_by_key(|(_, info)| Rep::max_value() - info.reputation); + by_status + } + + /// Returns the peer with highest reputation that satisfies `is_status` + pub fn best_by_status(&self, is_status: F) -> Option<&PeerId> + where + F: Fn(&PeerConnectionStatus) -> bool, + { + self.peers + .iter() + .filter(|(_, info)| is_status(&info.connection_status)) + .max_by_key(|(_, info)| info.reputation) + .map(|(id, _)| id) + } + + /// Returns the peer's connection status. Returns unknown if the peer is not in the DB. + pub fn connection_status(&self, peer_id: &PeerId) -> Option { + self.peer_info(peer_id) + .map(|info| info.connection_status.clone()) + } + + /// Returns if the peer is already connected. + pub fn is_connected(&self, peer_id: &PeerId) -> bool { + if let Some(PeerConnectionStatus::Connected { .. }) = self.connection_status(peer_id) { + true + } else { + false + } + } + + /// If we are connected or currently dialing the peer returns true. + pub fn is_connected_or_dialing(&self, peer_id: &PeerId) -> bool { + match self.connection_status(peer_id) { + Some(PeerConnectionStatus::Connected { .. }) + | Some(PeerConnectionStatus::Dialing { .. 
}) => true, + _ => false, + } + } + + /* Setters */ + + /// A peer is being dialed. + pub fn dialing_peer(&mut self, peer_id: &PeerId) { + let info = self + .peers + .entry(peer_id.clone()) + .or_insert_with(|| Default::default()); + + if info.connection_status.is_disconnected() { + self.n_dc -= 1; + } + info.connection_status = PeerConnectionStatus::Dialing { + since: Instant::now(), + }; + } + + /// Sets a peer as connected with an ingoing connection. + pub fn connect_ingoing(&mut self, peer_id: &PeerId) { + let info = self + .peers + .entry(peer_id.clone()) + .or_insert_with(|| Default::default()); + + if info.connection_status.is_disconnected() { + self.n_dc -= 1; + } + info.connection_status.connect_ingoing(); + } + + /// Sets a peer as connected with an outgoing connection. + pub fn connect_outgoing(&mut self, peer_id: &PeerId) { + let info = self + .peers + .entry(peer_id.clone()) + .or_insert_with(|| Default::default()); + + if info.connection_status.is_disconnected() { + self.n_dc -= 1; + } + info.connection_status.connect_outgoing(); + } + + /// Sets the peer as disconnected. 
+ pub fn disconnect(&mut self, peer_id: &PeerId) { + let log_ref = &self.log; + let info = self.peers.entry(peer_id.clone()).or_insert_with(|| { + warn!(log_ref, "Disconnecting unknown peer"; + "peer_id" => format!("{:?}",peer_id)); + PeerInfo::default() + }); + + if !info.connection_status.is_disconnected() { + info.connection_status.disconnect(); + self.n_dc += 1; + } + self.shrink_to_fit(); + } + + /// Drops the peers with the lowest reputation so that the number of + /// disconnected peers is less than MAX_DC_PEERS + pub fn shrink_to_fit(&mut self) { + // for caution, but the difference should never be > 1 + while self.n_dc > MAX_DC_PEERS { + let to_drop = self + .peers + .iter() + .filter(|(_, info)| info.connection_status.is_disconnected()) + .min_by_key(|(_, info)| info.reputation) + .map(|(id, _)| id.clone()) + .unwrap(); // should be safe since n_dc > MAX_DC_PEERS > 0 + self.peers.remove(&to_drop); + self.n_dc -= 1; + } + } + + /// Sets a peer as banned + pub fn ban(&mut self, peer_id: &PeerId) { + let log_ref = &self.log; + let info = self.peers.entry(peer_id.clone()).or_insert_with(|| { + warn!(log_ref, "Banning unknown peer"; + "peer_id" => format!("{:?}",peer_id)); + PeerInfo::default() + }); + if info.connection_status.is_disconnected() { + self.n_dc -= 1; + } + info.connection_status.ban(); + } + + /// Add the meta data of a peer. + pub fn add_metadata(&mut self, peer_id: &PeerId, meta_data: MetaData) { + if let Some(peer_info) = self.peers.get_mut(peer_id) { + peer_info.meta_data = Some(meta_data); + } else { + warn!(self.log, "Tried to add meta data for a non-existant peer"; "peer_id" => format!("{}", peer_id)); + } + } + + /// Sets the reputation of peer. 
+ pub fn set_reputation(&mut self, peer_id: &PeerId, rep: Rep) { + if let Some(peer_info) = self.peers.get_mut(peer_id) { + peer_info.reputation = rep; + } else { + crit!(self.log, "Tried to modify reputation for an unknown peer"; "peer_id" => format!("{}",peer_id)); + } + } + + /// Sets the syncing status of a peer. + pub fn set_sync_status(&mut self, peer_id: &PeerId, sync_status: PeerSyncStatus) { + if let Some(peer_info) = self.peers.get_mut(peer_id) { + peer_info.sync_status = sync_status; + } else { + crit!(self.log, "Tried to the sync status for an unknown peer"; "peer_id" => format!("{}",peer_id)); + } + } + + /// Adds to a peer's reputation by `change`. If the reputation exceeds Rep's + /// upper (lower) bounds, it stays at the maximum (minimum) value. + pub fn add_reputation(&mut self, peer_id: &PeerId, change: Rep) { + let log_ref = &self.log; + let info = self.peers.entry(peer_id.clone()).or_insert_with(|| { + warn!(log_ref, "Adding to the reputation of an unknown peer"; + "peer_id" => format!("{:?}",peer_id)); + PeerInfo::default() + }); + info.reputation = info.reputation.saturating_add(change); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use slog::{o, Drain}; + use types::MinimalEthSpec; + type M = MinimalEthSpec; + + pub fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { + let decorator = slog_term::TermDecorator::new().build(); + let drain = slog_term::FullFormat::new(decorator).build().fuse(); + let drain = slog_async::Async::new(drain).build().fuse(); + + if enabled { + slog::Logger::root(drain.filter_level(level).fuse(), o!()) + } else { + slog::Logger::root(drain.filter(|_| false).fuse(), o!()) + } + } + + fn get_db() -> PeerDB { + let log = build_log(slog::Level::Debug, true); + PeerDB::new(&log) + } + + #[test] + fn test_peer_connected_successfully() { + let mut pdb = get_db(); + let random_peer = PeerId::random(); + + let (n_in, n_out) = (10, 20); + for _ in 0..n_in { + pdb.connect_ingoing(&random_peer); + } + for _ 
in 0..n_out { + pdb.connect_outgoing(&random_peer); + } + + // the peer is known + let peer_info = pdb.peer_info(&random_peer); + assert!(peer_info.is_some()); + // this is the only peer + assert_eq!(pdb.peers().count(), 1); + // the peer has the default reputation + assert_eq!(pdb.reputation(&random_peer), DEFAULT_REPUTATION); + // it should be connected, and therefore not counted as disconnected + assert_eq!(pdb.n_dc, 0); + assert!(peer_info.unwrap().connection_status.is_connected()); + assert_eq!( + peer_info.unwrap().connection_status.connections(), + (n_in, n_out) + ); + } + + #[test] + fn test_set_reputation() { + let mut pdb = get_db(); + let random_peer = PeerId::random(); + pdb.connect_ingoing(&random_peer); + + let mut rep = Rep::min_value(); + pdb.set_reputation(&random_peer, rep); + assert_eq!(pdb.reputation(&random_peer), rep); + + rep = Rep::max_value(); + pdb.set_reputation(&random_peer, rep); + assert_eq!(pdb.reputation(&random_peer), rep); + + rep = Rep::max_value() / 100; + pdb.set_reputation(&random_peer, rep); + assert_eq!(pdb.reputation(&random_peer), rep); + } + + #[test] + fn test_reputation_change() { + let mut pdb = get_db(); + + // 0 change does not change de reputation + let random_peer = PeerId::random(); + let change: Rep = 0; + pdb.connect_ingoing(&random_peer); + pdb.add_reputation(&random_peer, change); + assert_eq!(pdb.reputation(&random_peer), DEFAULT_REPUTATION); + + // overflowing change is capped + let random_peer = PeerId::random(); + let change = Rep::max_value(); + pdb.connect_ingoing(&random_peer); + pdb.add_reputation(&random_peer, change); + assert_eq!(pdb.reputation(&random_peer), Rep::max_value()); + } + + #[test] + fn test_disconnected_are_bounded() { + let mut pdb = get_db(); + + for _ in 0..MAX_DC_PEERS + 1 { + let p = PeerId::random(); + pdb.connect_ingoing(&p); + } + assert_eq!(pdb.n_dc, 0); + + for p in pdb.connected_peer_ids().cloned().collect::>() { + pdb.disconnect(&p); + } + + assert_eq!(pdb.n_dc, 
MAX_DC_PEERS); + } + + #[test] + fn test_best_peers() { + let mut pdb = get_db(); + + let p0 = PeerId::random(); + let p1 = PeerId::random(); + let p2 = PeerId::random(); + pdb.connect_ingoing(&p0); + pdb.connect_ingoing(&p1); + pdb.connect_ingoing(&p2); + pdb.set_reputation(&p0, 70); + pdb.set_reputation(&p1, 100); + pdb.set_reputation(&p2, 50); + + let best_peers = pdb.best_peers_by_status(PeerConnectionStatus::is_connected); + assert!(vec![&p1, &p0, &p2] + .into_iter() + .eq(best_peers.into_iter().map(|p| p.0))); + } + + #[test] + fn test_the_best_peer() { + let mut pdb = get_db(); + + let p0 = PeerId::random(); + let p1 = PeerId::random(); + let p2 = PeerId::random(); + pdb.connect_ingoing(&p0); + pdb.connect_ingoing(&p1); + pdb.connect_ingoing(&p2); + pdb.set_reputation(&p0, 70); + pdb.set_reputation(&p1, 100); + pdb.set_reputation(&p2, 50); + + let the_best = pdb.best_by_status(PeerConnectionStatus::is_connected); + assert!(the_best.is_some()); + // Consistency check + let best_peers = pdb.best_peers_by_status(PeerConnectionStatus::is_connected); + assert_eq!(the_best, best_peers.into_iter().map(|p| p.0).next()); + } + + #[test] + fn test_disconnected_consistency() { + let mut pdb = get_db(); + + let random_peer = PeerId::random(); + + pdb.connect_ingoing(&random_peer); + assert_eq!(pdb.n_dc, pdb.disconnected_peers().count()); + + pdb.connect_ingoing(&random_peer); + assert_eq!(pdb.n_dc, pdb.disconnected_peers().count()); + pdb.disconnect(&random_peer); + assert_eq!(pdb.n_dc, pdb.disconnected_peers().count()); + + pdb.connect_outgoing(&random_peer); + assert_eq!(pdb.n_dc, pdb.disconnected_peers().count()); + pdb.disconnect(&random_peer); + assert_eq!(pdb.n_dc, pdb.disconnected_peers().count()); + + pdb.ban(&random_peer); + assert_eq!(pdb.n_dc, pdb.disconnected_peers().count()); + pdb.disconnect(&random_peer); + assert_eq!(pdb.n_dc, pdb.disconnected_peers().count()); + + pdb.disconnect(&random_peer); + assert_eq!(pdb.n_dc, pdb.disconnected_peers().count()); + 
pdb.disconnect(&random_peer); + assert_eq!(pdb.n_dc, pdb.disconnected_peers().count()); + } +} diff --git a/beacon_node/eth2-libp2p/src/rpc/codec/base.rs b/beacon_node/eth2-libp2p/src/rpc/codec/base.rs index cc247911f..48b537c7b 100644 --- a/beacon_node/eth2-libp2p/src/rpc/codec/base.rs +++ b/beacon_node/eth2-libp2p/src/rpc/codec/base.rs @@ -3,7 +3,9 @@ use crate::rpc::{ErrorMessage, RPCErrorResponse, RPCRequest, RPCResponse}; use libp2p::bytes::BufMut; use libp2p::bytes::BytesMut; +use std::marker::PhantomData; use tokio::codec::{Decoder, Encoder}; +use types::EthSpec; pub trait OutboundCodec: Encoder + Decoder { type ErrorType; @@ -17,43 +19,53 @@ pub trait OutboundCodec: Encoder + Decoder { /* Global Inbound Codec */ // This deals with Decoding RPC Requests from other peers and encoding our responses -pub struct BaseInboundCodec +pub struct BaseInboundCodec where TCodec: Encoder + Decoder, + TSpec: EthSpec, { /// Inner codec for handling various encodings inner: TCodec, + phantom: PhantomData, } -impl BaseInboundCodec +impl BaseInboundCodec where TCodec: Encoder + Decoder, + TSpec: EthSpec, { pub fn new(codec: TCodec) -> Self { - BaseInboundCodec { inner: codec } + BaseInboundCodec { + inner: codec, + phantom: PhantomData, + } } } /* Global Outbound Codec */ // This deals with Decoding RPC Responses from other peers and encoding our requests -pub struct BaseOutboundCodec +pub struct BaseOutboundCodec where TOutboundCodec: OutboundCodec, + TSpec: EthSpec, { /// Inner codec for handling various encodings. inner: TOutboundCodec, /// Keeps track of the current response code for a chunk. 
current_response_code: Option, + phantom: PhantomData, } -impl BaseOutboundCodec +impl BaseOutboundCodec where + TSpec: EthSpec, TOutboundCodec: OutboundCodec, { pub fn new(codec: TOutboundCodec) -> Self { BaseOutboundCodec { inner: codec, current_response_code: None, + phantom: PhantomData, } } } @@ -63,11 +75,12 @@ where /* Base Inbound Codec */ // This Encodes RPC Responses sent to external peers -impl Encoder for BaseInboundCodec +impl Encoder for BaseInboundCodec where - TCodec: Decoder + Encoder, + TSpec: EthSpec, + TCodec: Decoder + Encoder>, { - type Item = RPCErrorResponse; + type Item = RPCErrorResponse; type Error = ::Error; fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { @@ -82,11 +95,12 @@ where } // This Decodes RPC Requests from external peers -impl Decoder for BaseInboundCodec +impl Decoder for BaseInboundCodec where - TCodec: Encoder + Decoder, + TSpec: EthSpec, + TCodec: Encoder + Decoder>, { - type Item = RPCRequest; + type Item = RPCRequest; type Error = ::Error; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { @@ -97,11 +111,12 @@ where /* Base Outbound Codec */ // This Encodes RPC Requests sent to external peers -impl Encoder for BaseOutboundCodec +impl Encoder for BaseOutboundCodec where - TCodec: OutboundCodec + Encoder, + TSpec: EthSpec, + TCodec: OutboundCodec + Encoder>, { - type Item = RPCRequest; + type Item = RPCRequest; type Error = ::Error; fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { @@ -110,11 +125,12 @@ where } // This decodes RPC Responses received from external peers -impl Decoder for BaseOutboundCodec +impl Decoder for BaseOutboundCodec where - TCodec: OutboundCodec + Decoder, + TSpec: EthSpec, + TCodec: OutboundCodec + Decoder>, { - type Item = RPCErrorResponse; + type Item = RPCErrorResponse; type Error = ::Error; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { @@ -130,7 +146,7 @@ where }); let inner_result 
= { - if RPCErrorResponse::is_response(response_code) { + if RPCErrorResponse::::is_response(response_code) { // decode an actual response and mutates the buffer if enough bytes have been read // returning the result. self.inner diff --git a/beacon_node/eth2-libp2p/src/rpc/codec/mod.rs b/beacon_node/eth2-libp2p/src/rpc/codec/mod.rs index b9993d4b3..f1b7f74da 100644 --- a/beacon_node/eth2-libp2p/src/rpc/codec/mod.rs +++ b/beacon_node/eth2-libp2p/src/rpc/codec/mod.rs @@ -1,62 +1,71 @@ pub(crate) mod base; pub(crate) mod ssz; +pub(crate) mod ssz_snappy; use self::base::{BaseInboundCodec, BaseOutboundCodec}; use self::ssz::{SSZInboundCodec, SSZOutboundCodec}; +use self::ssz_snappy::{SSZSnappyInboundCodec, SSZSnappyOutboundCodec}; use crate::rpc::protocol::RPCError; use crate::rpc::{RPCErrorResponse, RPCRequest}; use libp2p::bytes::BytesMut; use tokio::codec::{Decoder, Encoder}; +use types::EthSpec; // Known types of codecs -pub enum InboundCodec { - SSZ(BaseInboundCodec), +pub enum InboundCodec { + SSZSnappy(BaseInboundCodec, TSpec>), + SSZ(BaseInboundCodec, TSpec>), } -pub enum OutboundCodec { - SSZ(BaseOutboundCodec), +pub enum OutboundCodec { + SSZSnappy(BaseOutboundCodec, TSpec>), + SSZ(BaseOutboundCodec, TSpec>), } -impl Encoder for InboundCodec { - type Item = RPCErrorResponse; +impl Encoder for InboundCodec { + type Item = RPCErrorResponse; type Error = RPCError; fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { match self { InboundCodec::SSZ(codec) => codec.encode(item, dst), + InboundCodec::SSZSnappy(codec) => codec.encode(item, dst), } } } -impl Decoder for InboundCodec { - type Item = RPCRequest; +impl Decoder for InboundCodec { + type Item = RPCRequest; type Error = RPCError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { match self { InboundCodec::SSZ(codec) => codec.decode(src), + InboundCodec::SSZSnappy(codec) => codec.decode(src), } } } -impl Encoder for OutboundCodec { - type Item = RPCRequest; 
+impl Encoder for OutboundCodec { + type Item = RPCRequest; type Error = RPCError; fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { match self { OutboundCodec::SSZ(codec) => codec.encode(item, dst), + OutboundCodec::SSZSnappy(codec) => codec.encode(item, dst), } } } -impl Decoder for OutboundCodec { - type Item = RPCErrorResponse; +impl Decoder for OutboundCodec { + type Item = RPCErrorResponse; type Error = RPCError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { match self { OutboundCodec::SSZ(codec) => codec.decode(src), + OutboundCodec::SSZSnappy(codec) => codec.decode(src), } } } diff --git a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs index b377f5972..bd8cbfd9f 100644 --- a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs +++ b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs @@ -1,52 +1,54 @@ use crate::rpc::methods::*; use crate::rpc::{ codec::base::OutboundCodec, - protocol::{ - ProtocolId, RPCError, RPC_BLOCKS_BY_RANGE, RPC_BLOCKS_BY_ROOT, RPC_GOODBYE, RPC_STATUS, - }, + protocol::{Encoding, Protocol, ProtocolId, RPCError, Version}, }; use crate::rpc::{ErrorMessage, RPCErrorResponse, RPCRequest, RPCResponse}; use libp2p::bytes::{BufMut, Bytes, BytesMut}; use ssz::{Decode, Encode}; +use std::marker::PhantomData; use tokio::codec::{Decoder, Encoder}; +use types::{EthSpec, SignedBeaconBlock}; use unsigned_varint::codec::UviBytes; /* Inbound Codec */ -pub struct SSZInboundCodec { +pub struct SSZInboundCodec { inner: UviBytes, protocol: ProtocolId, + phantom: PhantomData, } -impl SSZInboundCodec { +impl SSZInboundCodec { pub fn new(protocol: ProtocolId, max_packet_size: usize) -> Self { let mut uvi_codec = UviBytes::default(); uvi_codec.set_max_len(max_packet_size); // this encoding only applies to ssz. 
- debug_assert!(protocol.encoding.as_str() == "ssz"); + debug_assert_eq!(protocol.encoding, Encoding::SSZ); SSZInboundCodec { inner: uvi_codec, protocol, + phantom: PhantomData, } } } // Encoder for inbound streams: Encodes RPC Responses sent to peers. -impl Encoder for SSZInboundCodec { - type Item = RPCErrorResponse; +impl Encoder for SSZInboundCodec { + type Item = RPCErrorResponse; type Error = RPCError; fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { let bytes = match item { - RPCErrorResponse::Success(resp) => { - match resp { - RPCResponse::Status(res) => res.as_ssz_bytes(), - RPCResponse::BlocksByRange(res) => res, // already raw bytes - RPCResponse::BlocksByRoot(res) => res, // already raw bytes - } - } + RPCErrorResponse::Success(resp) => match resp { + RPCResponse::Status(res) => res.as_ssz_bytes(), + RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(), + RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(), + RPCResponse::Pong(res) => res.data.as_ssz_bytes(), + RPCResponse::MetaData(res) => res.as_ssz_bytes(), + }, RPCErrorResponse::InvalidRequest(err) => err.as_ssz_bytes(), RPCErrorResponse::ServerError(err) => err.as_ssz_bytes(), RPCErrorResponse::Unknown(err) => err.as_ssz_bytes(), @@ -70,38 +72,49 @@ impl Encoder for SSZInboundCodec { } // Decoder for inbound streams: Decodes RPC requests from peers -impl Decoder for SSZInboundCodec { - type Item = RPCRequest; +impl Decoder for SSZInboundCodec { + type Item = RPCRequest; type Error = RPCError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { match self.inner.decode(src).map_err(RPCError::from) { - Ok(Some(packet)) => match self.protocol.message_name.as_str() { - RPC_STATUS => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCRequest::Status(StatusMessage::from_ssz_bytes( + Ok(Some(packet)) => match self.protocol.message_name { + Protocol::Status => match self.protocol.version { + Version::V1 => 
Ok(Some(RPCRequest::Status(StatusMessage::from_ssz_bytes( &packet, )?))), - _ => unreachable!("Cannot negotiate an unknown version"), }, - RPC_GOODBYE => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes( + Protocol::Goodbye => match self.protocol.version { + Version::V1 => Ok(Some(RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes( &packet, )?))), - _ => unreachable!("Cannot negotiate an unknown version"), }, - RPC_BLOCKS_BY_RANGE => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCRequest::BlocksByRange( + Protocol::BlocksByRange => match self.protocol.version { + Version::V1 => Ok(Some(RPCRequest::BlocksByRange( BlocksByRangeRequest::from_ssz_bytes(&packet)?, ))), - _ => unreachable!("Cannot negotiate an unknown version"), }, - RPC_BLOCKS_BY_ROOT => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCRequest::BlocksByRoot(BlocksByRootRequest { + Protocol::BlocksByRoot => match self.protocol.version { + Version::V1 => Ok(Some(RPCRequest::BlocksByRoot(BlocksByRootRequest { block_roots: Vec::from_ssz_bytes(&packet)?, }))), - _ => unreachable!("Cannot negotiate an unknown version"), }, - _ => unreachable!("Cannot negotiate an unknown protocol"), + Protocol::Ping => match self.protocol.version { + Version::V1 => Ok(Some(RPCRequest::Ping(Ping { + data: u64::from_ssz_bytes(&packet)?, + }))), + }, + Protocol::MetaData => match self.protocol.version { + Version::V1 => { + if packet.len() > 0 { + Err(RPCError::Custom( + "Get metadata request should be empty".into(), + )) + } else { + Ok(Some(RPCRequest::MetaData(PhantomData))) + } + } + }, }, Ok(None) => Ok(None), Err(e) => Err(e), @@ -111,29 +124,31 @@ impl Decoder for SSZInboundCodec { /* Outbound Codec: Codec for initiating RPC requests */ -pub struct SSZOutboundCodec { +pub struct SSZOutboundCodec { inner: UviBytes, protocol: ProtocolId, + phantom: PhantomData, } -impl SSZOutboundCodec { +impl SSZOutboundCodec { pub fn new(protocol: ProtocolId, 
max_packet_size: usize) -> Self { let mut uvi_codec = UviBytes::default(); uvi_codec.set_max_len(max_packet_size); // this encoding only applies to ssz. - debug_assert!(protocol.encoding.as_str() == "ssz"); + debug_assert_eq!(protocol.encoding, Encoding::SSZ); SSZOutboundCodec { inner: uvi_codec, protocol, + phantom: PhantomData, } } } // Encoder for outbound streams: Encodes RPC Requests to peers -impl Encoder for SSZOutboundCodec { - type Item = RPCRequest; +impl Encoder for SSZOutboundCodec { + type Item = RPCRequest; type Error = RPCError; fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { @@ -142,6 +157,8 @@ impl Encoder for SSZOutboundCodec { RPCRequest::Goodbye(req) => req.as_ssz_bytes(), RPCRequest::BlocksByRange(req) => req.as_ssz_bytes(), RPCRequest::BlocksByRoot(req) => req.block_roots.as_ssz_bytes(), + RPCRequest::Ping(req) => req.as_ssz_bytes(), + RPCRequest::MetaData(_) => return Ok(()), // no metadata to encode }; // length-prefix self.inner @@ -155,8 +172,8 @@ impl Encoder for SSZOutboundCodec { // The majority of the decoding has now been pushed upstream due to the changing specification. // We prefer to decode blocks and attestations with extra knowledge about the chain to perform // faster verification checks before decoding entire blocks/attestations. -impl Decoder for SSZOutboundCodec { - type Item = RPCResponse; +impl Decoder for SSZOutboundCodec { + type Item = RPCResponse; type Error = RPCError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { @@ -164,23 +181,35 @@ impl Decoder for SSZOutboundCodec { // the object is empty. 
We return the empty object if this is the case // clear the buffer and return an empty object src.clear(); - match self.protocol.message_name.as_str() { - RPC_STATUS => match self.protocol.version.as_str() { - "1" => Err(RPCError::Custom( + match self.protocol.message_name { + Protocol::Status => match self.protocol.version { + Version::V1 => Err(RPCError::Custom( "Status stream terminated unexpectedly".into(), )), // cannot have an empty HELLO message. The stream has terminated unexpectedly - _ => unreachable!("Cannot negotiate an unknown version"), }, - RPC_GOODBYE => Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response")), - RPC_BLOCKS_BY_RANGE => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCResponse::BlocksByRange(Vec::new()))), - _ => unreachable!("Cannot negotiate an unknown version"), + Protocol::Goodbye => { + Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response")) + } + Protocol::BlocksByRange => match self.protocol.version { + Version::V1 => Err(RPCError::Custom( + "Status stream terminated unexpectedly, empty block".into(), + )), // cannot have an empty block message. }, - RPC_BLOCKS_BY_ROOT => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCResponse::BlocksByRoot(Vec::new()))), - _ => unreachable!("Cannot negotiate an unknown version"), + Protocol::BlocksByRoot => match self.protocol.version { + Version::V1 => Err(RPCError::Custom( + "Status stream terminated unexpectedly, empty block".into(), + )), // cannot have an empty block message. + }, + Protocol::Ping => match self.protocol.version { + Version::V1 => Err(RPCError::Custom( + "PING stream terminated unexpectedly".into(), + )), // cannot have an empty PING message. + }, + Protocol::MetaData => match self.protocol.version { + Version::V1 => Err(RPCError::Custom( + "Metadata stream terminated unexpectedly".into(), + )), // cannot have an empty METADATA response. 
}, - _ => unreachable!("Cannot negotiate an unknown protocol"), } } else { match self.inner.decode(src).map_err(RPCError::from) { @@ -188,25 +217,35 @@ impl Decoder for SSZOutboundCodec { // take the bytes from the buffer let raw_bytes = packet.take(); - match self.protocol.message_name.as_str() { - RPC_STATUS => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCResponse::Status(StatusMessage::from_ssz_bytes( - &raw_bytes, - )?))), - _ => unreachable!("Cannot negotiate an unknown version"), + match self.protocol.message_name { + Protocol::Status => match self.protocol.version { + Version::V1 => Ok(Some(RPCResponse::Status( + StatusMessage::from_ssz_bytes(&raw_bytes)?, + ))), }, - RPC_GOODBYE => { + Protocol::Goodbye => { Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response")) } - RPC_BLOCKS_BY_RANGE => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCResponse::BlocksByRange(raw_bytes.to_vec()))), - _ => unreachable!("Cannot negotiate an unknown version"), + Protocol::BlocksByRange => match self.protocol.version { + Version::V1 => Ok(Some(RPCResponse::BlocksByRange(Box::new( + SignedBeaconBlock::from_ssz_bytes(&raw_bytes)?, + )))), }, - RPC_BLOCKS_BY_ROOT => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCResponse::BlocksByRoot(raw_bytes.to_vec()))), - _ => unreachable!("Cannot negotiate an unknown version"), + Protocol::BlocksByRoot => match self.protocol.version { + Version::V1 => Ok(Some(RPCResponse::BlocksByRoot(Box::new( + SignedBeaconBlock::from_ssz_bytes(&raw_bytes)?, + )))), + }, + Protocol::Ping => match self.protocol.version { + Version::V1 => Ok(Some(RPCResponse::Pong(Ping { + data: u64::from_ssz_bytes(&raw_bytes)?, + }))), + }, + Protocol::MetaData => match self.protocol.version { + Version::V1 => Ok(Some(RPCResponse::MetaData( + MetaData::from_ssz_bytes(&raw_bytes)?, + ))), }, - _ => unreachable!("Cannot negotiate an unknown protocol"), } } Ok(None) => Ok(None), // waiting for more bytes @@ -216,7 +255,7 @@ impl 
Decoder for SSZOutboundCodec { } } -impl OutboundCodec for SSZOutboundCodec { +impl OutboundCodec for SSZOutboundCodec { type ErrorType = ErrorMessage; fn decode_error(&mut self, src: &mut BytesMut) -> Result, RPCError> { diff --git a/beacon_node/eth2-libp2p/src/rpc/codec/ssz_snappy.rs b/beacon_node/eth2-libp2p/src/rpc/codec/ssz_snappy.rs new file mode 100644 index 000000000..e2f0db1ff --- /dev/null +++ b/beacon_node/eth2-libp2p/src/rpc/codec/ssz_snappy.rs @@ -0,0 +1,357 @@ +use crate::rpc::methods::*; +use crate::rpc::{ + codec::base::OutboundCodec, + protocol::{Encoding, Protocol, ProtocolId, RPCError, Version}, +}; +use crate::rpc::{ErrorMessage, RPCErrorResponse, RPCRequest, RPCResponse}; +use libp2p::bytes::BytesMut; +use snap::read::FrameDecoder; +use snap::write::FrameEncoder; +use ssz::{Decode, Encode}; +use std::io::Cursor; +use std::io::ErrorKind; +use std::io::{Read, Write}; +use std::marker::PhantomData; +use tokio::codec::{Decoder, Encoder}; +use types::{EthSpec, SignedBeaconBlock}; +use unsigned_varint::codec::Uvi; + +/* Inbound Codec */ + +pub struct SSZSnappyInboundCodec { + protocol: ProtocolId, + inner: Uvi, + len: Option, + /// Maximum bytes that can be sent in one req/resp chunked responses. + max_packet_size: usize, + phantom: PhantomData, +} + +impl SSZSnappyInboundCodec { + pub fn new(protocol: ProtocolId, max_packet_size: usize) -> Self { + let uvi_codec = Uvi::default(); + // this encoding only applies to ssz_snappy. + debug_assert_eq!(protocol.encoding, Encoding::SSZSnappy); + + SSZSnappyInboundCodec { + inner: uvi_codec, + protocol, + len: None, + phantom: PhantomData, + max_packet_size, + } + } +} + +// Encoder for inbound streams: Encodes RPC Responses sent to peers. 
+impl Encoder for SSZSnappyInboundCodec { + type Item = RPCErrorResponse; + type Error = RPCError; + + fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { + let bytes = match item { + RPCErrorResponse::Success(resp) => match resp { + RPCResponse::Status(res) => res.as_ssz_bytes(), + RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(), + RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(), + RPCResponse::Pong(res) => res.data.as_ssz_bytes(), + RPCResponse::MetaData(res) => res.as_ssz_bytes(), + }, + RPCErrorResponse::InvalidRequest(err) => err.as_ssz_bytes(), + RPCErrorResponse::ServerError(err) => err.as_ssz_bytes(), + RPCErrorResponse::Unknown(err) => err.as_ssz_bytes(), + RPCErrorResponse::StreamTermination(_) => { + unreachable!("Code error - attempting to encode a stream termination") + } + }; + // SSZ encoded bytes should be within `max_packet_size` + if bytes.len() > self.max_packet_size { + return Err(RPCError::Custom( + "attempting to encode data > max_packet_size".into(), + )); + } + // Inserts the length prefix of the uncompressed bytes into dst + // encoded as a unsigned varint + self.inner + .encode(bytes.len(), dst) + .map_err(RPCError::from)?; + + let mut writer = FrameEncoder::new(Vec::new()); + writer.write_all(&bytes).map_err(RPCError::from)?; + writer.flush().map_err(RPCError::from)?; + + // Write compressed bytes to `dst` + dst.extend_from_slice(writer.get_ref()); + Ok(()) + } +} + +// Decoder for inbound streams: Decodes RPC requests from peers +impl Decoder for SSZSnappyInboundCodec { + type Item = RPCRequest; + type Error = RPCError; + + fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { + if self.len.is_none() { + // Decode the length of the uncompressed bytes from an unsigned varint + match self.inner.decode(src).map_err(RPCError::from)? 
{ + Some(length) => { + self.len = Some(length); + } + None => return Ok(None), // need more bytes to decode length + } + }; + + let length = self.len.expect("length should be Some"); + + // Should not attempt to decode rpc chunks with length > max_packet_size + if length > self.max_packet_size { + return Err(RPCError::Custom( + "attempting to decode data > max_packet_size".into(), + )); + } + let mut reader = FrameDecoder::new(Cursor::new(&src)); + let mut decoded_buffer = vec![0; length]; + + match reader.read_exact(&mut decoded_buffer) { + Ok(()) => { + // `n` is how many bytes the reader read in the compressed stream + let n = reader.get_ref().position(); + self.len = None; + src.split_to(n as usize); + match self.protocol.message_name { + Protocol::Status => match self.protocol.version { + Version::V1 => Ok(Some(RPCRequest::Status(StatusMessage::from_ssz_bytes( + &decoded_buffer, + )?))), + }, + Protocol::Goodbye => match self.protocol.version { + Version::V1 => Ok(Some(RPCRequest::Goodbye( + GoodbyeReason::from_ssz_bytes(&decoded_buffer)?, + ))), + }, + Protocol::BlocksByRange => match self.protocol.version { + Version::V1 => Ok(Some(RPCRequest::BlocksByRange( + BlocksByRangeRequest::from_ssz_bytes(&decoded_buffer)?, + ))), + }, + Protocol::BlocksByRoot => match self.protocol.version { + Version::V1 => Ok(Some(RPCRequest::BlocksByRoot(BlocksByRootRequest { + block_roots: Vec::from_ssz_bytes(&decoded_buffer)?, + }))), + }, + Protocol::Ping => match self.protocol.version { + Version::V1 => Ok(Some(RPCRequest::Ping(Ping::from_ssz_bytes( + &decoded_buffer, + )?))), + }, + Protocol::MetaData => match self.protocol.version { + Version::V1 => { + if decoded_buffer.len() > 0 { + Err(RPCError::Custom( + "Get metadata request should be empty".into(), + )) + } else { + Ok(Some(RPCRequest::MetaData(PhantomData))) + } + } + }, + } + } + Err(e) => match e.kind() { + // Haven't received enough bytes to decode yet + // TODO: check if this is the only Error variant where we 
return `Ok(None)` + ErrorKind::UnexpectedEof => { + return Ok(None); + } + _ => return Err(e).map_err(RPCError::from), + }, + } + } +} + +/* Outbound Codec: Codec for initiating RPC requests */ +pub struct SSZSnappyOutboundCodec { + inner: Uvi, + len: Option, + protocol: ProtocolId, + /// Maximum bytes that can be sent in one req/resp chunked responses. + max_packet_size: usize, + phantom: PhantomData, +} + +impl SSZSnappyOutboundCodec { + pub fn new(protocol: ProtocolId, max_packet_size: usize) -> Self { + let uvi_codec = Uvi::default(); + // this encoding only applies to ssz_snappy. + debug_assert_eq!(protocol.encoding, Encoding::SSZSnappy); + + SSZSnappyOutboundCodec { + inner: uvi_codec, + protocol, + max_packet_size, + len: None, + phantom: PhantomData, + } + } +} + +// Encoder for outbound streams: Encodes RPC Requests to peers +impl Encoder for SSZSnappyOutboundCodec { + type Item = RPCRequest; + type Error = RPCError; + + fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> { + let bytes = match item { + RPCRequest::Status(req) => req.as_ssz_bytes(), + RPCRequest::Goodbye(req) => req.as_ssz_bytes(), + RPCRequest::BlocksByRange(req) => req.as_ssz_bytes(), + RPCRequest::BlocksByRoot(req) => req.block_roots.as_ssz_bytes(), + RPCRequest::Ping(req) => req.as_ssz_bytes(), + RPCRequest::MetaData(_) => return Ok(()), // no metadata to encode + }; + // SSZ encoded bytes should be within `max_packet_size` + if bytes.len() > self.max_packet_size { + return Err(RPCError::Custom( + "attempting to encode data > max_packet_size".into(), + )); + } + + // Inserts the length prefix of the uncompressed bytes into dst + // encoded as a unsigned varint + self.inner + .encode(bytes.len(), dst) + .map_err(RPCError::from)?; + + let mut writer = FrameEncoder::new(Vec::new()); + writer.write_all(&bytes).map_err(RPCError::from)?; + writer.flush().map_err(RPCError::from)?; + + // Write compressed bytes to `dst` + 
dst.extend_from_slice(writer.get_ref()); + Ok(()) + } +} + +// Decoder for outbound streams: Decodes RPC responses from peers. +// +// The majority of the decoding has now been pushed upstream due to the changing specification. +// We prefer to decode blocks and attestations with extra knowledge about the chain to perform +// faster verification checks before decoding entire blocks/attestations. +impl Decoder for SSZSnappyOutboundCodec { + type Item = RPCResponse; + type Error = RPCError; + + fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { + if self.len.is_none() { + // Decode the length of the uncompressed bytes from an unsigned varint + match self.inner.decode(src).map_err(RPCError::from)? { + Some(length) => { + self.len = Some(length as usize); + } + None => return Ok(None), // need more bytes to decode length + } + }; + + let length = self.len.expect("length should be Some"); + + // Should not attempt to decode rpc chunks with length > max_packet_size + if length > self.max_packet_size { + return Err(RPCError::Custom( + "attempting to decode data > max_packet_size".into(), + )); + } + let mut reader = FrameDecoder::new(Cursor::new(&src)); + let mut decoded_buffer = vec![0; length]; + match reader.read_exact(&mut decoded_buffer) { + Ok(()) => { + // `n` is how many bytes the reader read in the compressed stream + let n = reader.get_ref().position(); + self.len = None; + src.split_to(n as usize); + match self.protocol.message_name { + Protocol::Status => match self.protocol.version { + Version::V1 => Ok(Some(RPCResponse::Status( + StatusMessage::from_ssz_bytes(&decoded_buffer)?, + ))), + }, + Protocol::Goodbye => { + Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response")) + } + Protocol::BlocksByRange => match self.protocol.version { + Version::V1 => Ok(Some(RPCResponse::BlocksByRange(Box::new( + SignedBeaconBlock::from_ssz_bytes(&decoded_buffer)?, + )))), + }, + Protocol::BlocksByRoot => match self.protocol.version { + Version::V1 
=> Ok(Some(RPCResponse::BlocksByRoot(Box::new( + SignedBeaconBlock::from_ssz_bytes(&decoded_buffer)?, + )))), + }, + Protocol::Ping => match self.protocol.version { + Version::V1 => Ok(Some(RPCResponse::Pong(Ping { + data: u64::from_ssz_bytes(&decoded_buffer)?, + }))), + }, + Protocol::MetaData => match self.protocol.version { + Version::V1 => Ok(Some(RPCResponse::MetaData(MetaData::from_ssz_bytes( + &decoded_buffer, + )?))), + }, + } + } + Err(e) => match e.kind() { + // Haven't received enough bytes to decode yet + // TODO: check if this is the only Error variant where we return `Ok(None)` + ErrorKind::UnexpectedEof => { + return Ok(None); + } + _ => return Err(e).map_err(RPCError::from), + }, + } + } +} + +impl OutboundCodec for SSZSnappyOutboundCodec { + type ErrorType = ErrorMessage; + + fn decode_error(&mut self, src: &mut BytesMut) -> Result, RPCError> { + if self.len.is_none() { + // Decode the length of the uncompressed bytes from an unsigned varint + match self.inner.decode(src).map_err(RPCError::from)? 
{ + Some(length) => { + self.len = Some(length as usize); + } + None => return Ok(None), // need more bytes to decode length + } + }; + + let length = self.len.expect("length should be Some"); + + // Should not attempt to decode rpc chunks with length > max_packet_size + if length > self.max_packet_size { + return Err(RPCError::Custom( + "attempting to decode data > max_packet_size".into(), + )); + } + let mut reader = FrameDecoder::new(Cursor::new(&src)); + let mut decoded_buffer = vec![0; length]; + match reader.read_exact(&mut decoded_buffer) { + Ok(()) => { + // `n` is how many bytes the reader read in the compressed stream + let n = reader.get_ref().position(); + self.len = None; + src.split_to(n as usize); + Ok(Some(ErrorMessage::from_ssz_bytes(&decoded_buffer)?)) + } + Err(e) => match e.kind() { + // Haven't received enough bytes to decode yet + // TODO: check if this is the only Error variant where we return `Ok(None)` + ErrorKind::UnexpectedEof => { + return Ok(None); + } + _ => return Err(e).map_err(RPCError::from), + }, + } + } +} diff --git a/beacon_node/eth2-libp2p/src/rpc/handler.rs b/beacon_node/eth2-libp2p/src/rpc/handler.rs index 72c0379d4..d8ff541c9 100644 --- a/beacon_node/eth2-libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2-libp2p/src/rpc/handler.rs @@ -12,12 +12,13 @@ use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeError}; use libp2p::swarm::protocols_handler::{ KeepAlive, ProtocolsHandler, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol, }; -use slog::{crit, debug, error, warn}; +use slog::{crit, debug, error, trace, warn}; use smallvec::SmallVec; use std::collections::hash_map::Entry; use std::time::{Duration, Instant}; use tokio::io::{AsyncRead, AsyncWrite}; use tokio::timer::{delay_queue, DelayQueue}; +use types::EthSpec; //TODO: Implement close() on the substream types to improve the poll code. 
//TODO: Implement check_timeout() on the substream types @@ -36,42 +37,50 @@ type InboundRequestId = RequestId; type OutboundRequestId = RequestId; /// Implementation of `ProtocolsHandler` for the RPC protocol. -pub struct RPCHandler +pub struct RPCHandler where TSubstream: AsyncRead + AsyncWrite, + TSpec: EthSpec, { /// The upgrade for inbound substreams. - listen_protocol: SubstreamProtocol, + listen_protocol: SubstreamProtocol>, /// If something bad happened and we should shut down the handler with an error. pending_error: Vec<(RequestId, ProtocolsHandlerUpgrErr)>, /// Queue of events to produce in `poll()`. - events_out: SmallVec<[RPCEvent; 4]>, + events_out: SmallVec<[RPCEvent; 4]>, /// Queue of outbound substreams to open. - dial_queue: SmallVec<[RPCEvent; 4]>, + dial_queue: SmallVec<[RPCEvent; 4]>, /// Current number of concurrent outbound substreams being opened. dial_negotiated: u32, /// Current inbound substreams awaiting processing. - inbound_substreams: - FnvHashMap, Option)>, + inbound_substreams: FnvHashMap< + InboundRequestId, + ( + InboundSubstreamState, + Option, + ), + >, /// Inbound substream `DelayQueue` which keeps track of when an inbound substream will timeout. inbound_substreams_delay: DelayQueue, /// Map of outbound substreams that need to be driven to completion. The `RequestId` is /// maintained by the application sending the request. - outbound_substreams: - FnvHashMap, delay_queue::Key)>, + outbound_substreams: FnvHashMap< + OutboundRequestId, + (OutboundSubstreamState, delay_queue::Key), + >, /// Inbound substream `DelayQueue` which keeps track of when an inbound substream will timeout. outbound_substreams_delay: DelayQueue, /// Map of outbound items that are queued as the stream processes them. - queued_outbound_items: FnvHashMap>, + queued_outbound_items: FnvHashMap>>, /// Sequential ID for waiting substreams. For inbound substreams, this is also the inbound request ID. 
current_inbound_substream_id: RequestId, @@ -97,14 +106,15 @@ where } /// State of an outbound substream. Either waiting for a response, or in the process of sending. -pub enum InboundSubstreamState +pub enum InboundSubstreamState where TSubstream: AsyncRead + AsyncWrite, + TSpec: EthSpec, { /// A response has been sent, pending writing and flush. ResponsePendingSend { /// The substream used to send the response - substream: futures::sink::Send>, + substream: futures::sink::Send>, /// Whether a stream termination is requested. If true the stream will be closed after /// this send. Otherwise it will transition to an idle state until a stream termination is /// requested or a timeout is reached. @@ -112,40 +122,41 @@ where }, /// The response stream is idle and awaiting input from the application to send more chunked /// responses. - ResponseIdle(InboundFramed), + ResponseIdle(InboundFramed), /// The substream is attempting to shutdown. - Closing(InboundFramed), + Closing(InboundFramed), /// Temporary state during processing Poisoned, } -pub enum OutboundSubstreamState { +pub enum OutboundSubstreamState { /// A request has been sent, and we are awaiting a response. This future is driven in the /// handler because GOODBYE requests can be handled and responses dropped instantly. RequestPendingResponse { /// The framed negotiated substream. - substream: OutboundFramed, + substream: OutboundFramed, /// Keeps track of the actual request sent. - request: RPCRequest, + request: RPCRequest, }, /// Closing an outbound substream> - Closing(OutboundFramed), + Closing(OutboundFramed), /// Temporary state during processing Poisoned, } -impl InboundSubstreamState +impl InboundSubstreamState where TSubstream: AsyncRead + AsyncWrite, + TSpec: EthSpec, { /// Moves the substream state to closing and informs the connected peer. The /// `queued_outbound_items` must be given as a parameter to add stream termination messages to /// the outbound queue. 
- pub fn close(&mut self, outbound_queue: &mut Vec) { + pub fn close(&mut self, outbound_queue: &mut Vec>) { // When terminating a stream, report the stream termination to the requesting user via // an RPC error let error = RPCErrorResponse::ServerError(ErrorMessage { - error_message: b"Request timed out".to_vec(), + error_message: "Request timed out".as_bytes().to_vec(), }); // The stream termination type is irrelevant, this will terminate the @@ -163,16 +174,11 @@ where *self = InboundSubstreamState::ResponsePendingSend { substream, closing } } - InboundSubstreamState::ResponseIdle(mut substream) => { - // check if the stream is already closed - if let Ok(Async::Ready(None)) = substream.poll() { - *self = InboundSubstreamState::Closing(substream); - } else { - *self = InboundSubstreamState::ResponsePendingSend { - substream: substream.send(error), - closing: true, - }; - } + InboundSubstreamState::ResponseIdle(substream) => { + *self = InboundSubstreamState::ResponsePendingSend { + substream: substream.send(error), + closing: true, + }; } InboundSubstreamState::Closing(substream) => { // let the stream close @@ -185,12 +191,13 @@ where } } -impl RPCHandler +impl RPCHandler where TSubstream: AsyncRead + AsyncWrite, + TSpec: EthSpec, { pub fn new( - listen_protocol: SubstreamProtocol, + listen_protocol: SubstreamProtocol>, inactive_timeout: Duration, log: &slog::Logger, ) -> Self { @@ -224,7 +231,7 @@ where /// /// > **Note**: If you modify the protocol, modifications will only applies to future inbound /// > substreams, not the ones already being negotiated. - pub fn listen_protocol_ref(&self) -> &SubstreamProtocol { + pub fn listen_protocol_ref(&self) -> &SubstreamProtocol> { &self.listen_protocol } @@ -232,29 +239,30 @@ where /// /// > **Note**: If you modify the protocol, modifications will only applies to future inbound /// > substreams, not the ones already being negotiated. 
- pub fn listen_protocol_mut(&mut self) -> &mut SubstreamProtocol { + pub fn listen_protocol_mut(&mut self) -> &mut SubstreamProtocol> { &mut self.listen_protocol } /// Opens an outbound substream with a request. - pub fn send_request(&mut self, rpc_event: RPCEvent) { + pub fn send_request(&mut self, rpc_event: RPCEvent) { self.keep_alive = KeepAlive::Yes; self.dial_queue.push(rpc_event); } } -impl ProtocolsHandler for RPCHandler +impl ProtocolsHandler for RPCHandler where TSubstream: AsyncRead + AsyncWrite, + TSpec: EthSpec, { - type InEvent = RPCEvent; - type OutEvent = RPCEvent; + type InEvent = RPCEvent; + type OutEvent = RPCEvent; type Error = ProtocolsHandlerUpgrErr; type Substream = TSubstream; - type InboundProtocol = RPCProtocol; - type OutboundProtocol = RPCRequest; - type OutboundOpenInfo = RPCEvent; // Keep track of the id and the request + type InboundProtocol = RPCProtocol; + type OutboundProtocol = RPCRequest; + type OutboundOpenInfo = RPCEvent; // Keep track of the id and the request fn listen_protocol(&self) -> SubstreamProtocol { self.listen_protocol.clone() @@ -262,7 +270,7 @@ where fn inject_fully_negotiated_inbound( &mut self, - out: >::Output, + out: as InboundUpgrade>::Output, ) { // update the keep alive timeout if there are no more remaining outbound streams if let KeepAlive::Until(_) = self.keep_alive { @@ -294,7 +302,7 @@ where fn inject_fully_negotiated_outbound( &mut self, - out: >::Output, + out: as OutboundUpgrade>::Output, rpc_event: Self::OutboundOpenInfo, ) { self.dial_negotiated -= 1; @@ -310,7 +318,22 @@ where // add the stream to substreams if we expect a response, otherwise drop the stream. match rpc_event { - RPCEvent::Request(id, request) if request.expect_response() => { + RPCEvent::Request(mut id, request) if request.expect_response() => { + // outbound requests can be sent from various aspects of lighthouse which don't + // track request ids. 
In the future these will be flagged as None, currently they + // are flagged as 0. These can overlap. In this case, we pick the highest request + // Id available + if id == 0 && self.outbound_substreams.get(&id).is_some() { + // have duplicate outbound request with no id. Pick one that will not collide + let mut new_id = std::usize::MAX; + while self.outbound_substreams.get(&new_id).is_some() { + // panic all outbound substreams are full + new_id -= 1; + } + trace!(self.log, "New outbound stream id created"; "id" => new_id); + id = RequestId::from(new_id); + } + // new outbound request. Store the stream and tag the output. let delay_key = self .outbound_substreams_delay @@ -323,7 +346,7 @@ where .outbound_substreams .insert(id, (awaiting_stream, delay_key)) { - warn!(self.log, "Duplicate outbound substream id"; "id" => format!("{:?}", id)); + crit!(self.log, "Duplicate outbound substream id"; "id" => format!("{:?}", id)); } } _ => { // a response is not expected, drop the stream for all other requests @@ -393,7 +416,7 @@ where } } None => { - debug!(self.log, "Stream has expired. Response not sent"; "response" => format!("{}",response)); + warn!(self.log, "Stream has expired. 
Response not sent"; "response" => format!("{}",response)); } }; } @@ -465,12 +488,14 @@ where } ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)) => { // IO/Decode/Custom Error, report to the application + debug!(self.log, "Upgrade Error"; "error" => format!("{}",err)); return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( RPCEvent::Error(request_id, err), ))); } ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) => { // Error during negotiation + debug!(self.log, "Upgrade Error"; "error" => format!("{}",err)); return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( RPCEvent::Error(request_id, RPCError::Custom(format!("{}", err))), ))); @@ -488,10 +513,11 @@ where } // purge expired inbound substreams and send an error - while let Async::Ready(Some(stream_id)) = self - .inbound_substreams_delay - .poll() - .map_err(|_| ProtocolsHandlerUpgrErr::Timer)? + while let Async::Ready(Some(stream_id)) = + self.inbound_substreams_delay.poll().map_err(|e| { + warn!(self.log, "Inbound substream poll failed"; "error" => format!("{:?}", e)); + ProtocolsHandlerUpgrErr::Timer + })? { let rpc_id = stream_id.get_ref(); @@ -509,10 +535,11 @@ where } // purge expired outbound substreams - if let Async::Ready(Some(stream_id)) = self - .outbound_substreams_delay - .poll() - .map_err(|_| ProtocolsHandlerUpgrErr::Timer)? + if let Async::Ready(Some(stream_id)) = + self.outbound_substreams_delay.poll().map_err(|e| { + warn!(self.log, "Outbound substream poll failed"; "error" => format!("{:?}", e)); + ProtocolsHandlerUpgrErr::Timer + })? 
{ self.outbound_substreams.remove(stream_id.get_ref()); // notify the user @@ -748,11 +775,11 @@ where } // Check for new items to send to the peer and update the underlying stream -fn apply_queued_responses( - raw_substream: InboundFramed, - queued_outbound_items: &mut Option<&mut Vec>, +fn apply_queued_responses( + raw_substream: InboundFramed, + queued_outbound_items: &mut Option<&mut Vec>>, new_items_to_send: &mut bool, -) -> InboundSubstreamState { +) -> InboundSubstreamState { match queued_outbound_items { Some(ref mut queue) if !queue.is_empty() => { *new_items_to_send = true; diff --git a/beacon_node/eth2-libp2p/src/rpc/methods.rs b/beacon_node/eth2-libp2p/src/rpc/methods.rs index 0c16e99cc..c9e86d3ec 100644 --- a/beacon_node/eth2-libp2p/src/rpc/methods.rs +++ b/beacon_node/eth2-libp2p/src/rpc/methods.rs @@ -1,7 +1,9 @@ //! Available RPC methods types and ids. +use crate::types::EnrBitfield; +use serde::Serialize; use ssz_derive::{Decode, Encode}; -use types::{Epoch, Hash256, Slot}; +use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; /* Request/Response data structures for RPC methods */ @@ -13,7 +15,7 @@ pub type RequestId = usize; #[derive(Encode, Decode, Clone, Debug, PartialEq)] pub struct StatusMessage { /// The fork version of the chain we are broadcasting. - pub fork_version: [u8; 4], + pub fork_digest: [u8; 4], /// Latest finalized root. pub finalized_root: Hash256, @@ -28,6 +30,23 @@ pub struct StatusMessage { pub head_slot: Slot, } +/// The PING request/response message. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct Ping { + /// The metadata sequence number. + pub data: u64, +} + +/// The METADATA response structure. +#[derive(Encode, Decode, Clone, Debug, PartialEq, Serialize)] +#[serde(bound = "T: EthSpec")] +pub struct MetaData { + /// A sequential counter indicating when data gets modified. + pub seq_number: u64, + /// The persistent subnet bitfield. 
+ pub attnets: EnrBitfield, +} + /// The reason given for a `Goodbye` message. /// /// Note: any unknown `u64::into(n)` will resolve to `Goodbye::Unknown` for any unknown `n`, @@ -101,9 +120,6 @@ impl ssz::Decode for GoodbyeReason { /// Request a number of beacon block roots from a peer. #[derive(Encode, Decode, Clone, Debug, PartialEq)] pub struct BlocksByRangeRequest { - /// The hash tree root of a block on the requested chain. - pub head_block_root: Hash256, - /// The starting slot to request blocks. pub start_slot: u64, @@ -129,16 +145,22 @@ pub struct BlocksByRootRequest { // Collection of enums and structs used by the Codecs to encode/decode RPC messages #[derive(Debug, Clone, PartialEq)] -pub enum RPCResponse { +pub enum RPCResponse { /// A HELLO message. Status(StatusMessage), /// A response to a get BLOCKS_BY_RANGE request. A None response signifies the end of the /// batch. - BlocksByRange(Vec), + BlocksByRange(Box>), /// A response to a get BLOCKS_BY_ROOT request. - BlocksByRoot(Vec), + BlocksByRoot(Box>), + + /// A PONG response to a PING request. + Pong(Ping), + + /// A response to a META_DATA request. + MetaData(MetaData), } /// Indicates which response is being terminated by a stream termination response. @@ -152,9 +174,9 @@ pub enum ResponseTermination { } #[derive(Debug)] -pub enum RPCErrorResponse { +pub enum RPCErrorResponse { /// The response is a successful. - Success(RPCResponse), + Success(RPCResponse), /// The response was invalid. InvalidRequest(ErrorMessage), @@ -169,7 +191,7 @@ pub enum RPCErrorResponse { StreamTermination(ResponseTermination), } -impl RPCErrorResponse { +impl RPCErrorResponse { /// Used to encode the response in the codec. 
pub fn as_u8(&self) -> Option { match self { @@ -205,6 +227,8 @@ impl RPCErrorResponse { RPCResponse::Status(_) => false, RPCResponse::BlocksByRange(_) => true, RPCResponse::BlocksByRoot(_) => true, + RPCResponse::Pong(_) => false, + RPCResponse::MetaData(_) => false, }, RPCErrorResponse::InvalidRequest(_) => true, RPCErrorResponse::ServerError(_) => true, @@ -238,21 +262,27 @@ impl ErrorMessage { impl std::fmt::Display for StatusMessage { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Status Message: Fork Version: {:?}, Finalized Root: {}, Finalized Epoch: {}, Head Root: {}, Head Slot: {}", self.fork_version, self.finalized_root, self.finalized_epoch, self.head_root, self.head_slot) + write!(f, "Status Message: Fork Digest: {:?}, Finalized Root: {}, Finalized Epoch: {}, Head Root: {}, Head Slot: {}", self.fork_digest, self.finalized_root, self.finalized_epoch, self.head_root, self.head_slot) } } -impl std::fmt::Display for RPCResponse { +impl std::fmt::Display for RPCResponse { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { RPCResponse::Status(status) => write!(f, "{}", status), - RPCResponse::BlocksByRange(_) => write!(f, ""), - RPCResponse::BlocksByRoot(_) => write!(f, ""), + RPCResponse::BlocksByRange(block) => { + write!(f, "BlocksByRange: Block slot: {}", block.message.slot) + } + RPCResponse::BlocksByRoot(block) => { + write!(f, "BlocksByRoot: BLock slot: {}", block.message.slot) + } + RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), + RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number), } } } -impl std::fmt::Display for RPCErrorResponse { +impl std::fmt::Display for RPCErrorResponse { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { RPCErrorResponse::Success(res) => write!(f, "{}", res), @@ -279,8 +309,8 @@ impl std::fmt::Display for BlocksByRangeRequest { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> 
std::fmt::Result { write!( f, - "Head Block Root: {}, Start Slot: {}, Count: {}, Step: {}", - self.head_block_root, self.start_slot, self.count, self.step + "Start Slot: {}, Count: {}, Step: {}", + self.start_slot, self.count, self.step ) } } diff --git a/beacon_node/eth2-libp2p/src/rpc/mod.rs b/beacon_node/eth2-libp2p/src/rpc/mod.rs index bfd87b9dd..b3a250818 100644 --- a/beacon_node/eth2-libp2p/src/rpc/mod.rs +++ b/beacon_node/eth2-libp2p/src/rpc/mod.rs @@ -13,13 +13,15 @@ use libp2p::swarm::{ }; use libp2p::{Multiaddr, PeerId}; pub use methods::{ - ErrorMessage, RPCErrorResponse, RPCResponse, RequestId, ResponseTermination, StatusMessage, + ErrorMessage, MetaData, RPCErrorResponse, RPCResponse, RequestId, ResponseTermination, + StatusMessage, }; pub use protocol::{RPCError, RPCProtocol, RPCRequest}; -use slog::o; +use slog::{debug, o}; use std::marker::PhantomData; use std::time::Duration; use tokio::io::{AsyncRead, AsyncWrite}; +use types::EthSpec; pub(crate) mod codec; mod handler; @@ -28,19 +30,19 @@ mod protocol; /// The return type used in the behaviour and the resultant event from the protocols handler. #[derive(Debug)] -pub enum RPCEvent { +pub enum RPCEvent { /// An inbound/outbound request for RPC protocol. The first parameter is a sequential /// id which tracks an awaiting substream for the response. - Request(RequestId, RPCRequest), + Request(RequestId, RPCRequest), /// A response that is being sent or has been received from the RPC protocol. The first parameter returns /// that which was sent with the corresponding request, the second is a single chunk of a /// response. - Response(RequestId, RPCErrorResponse), + Response(RequestId, RPCErrorResponse), /// An Error occurred. 
Error(RequestId, RPCError), } -impl RPCEvent { +impl RPCEvent { pub fn id(&self) -> usize { match *self { RPCEvent::Request(id, _) => id, @@ -50,7 +52,7 @@ impl RPCEvent { } } -impl std::fmt::Display for RPCEvent { +impl std::fmt::Display for RPCEvent { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { RPCEvent::Request(id, req) => write!(f, "RPC Request(id: {}, {})", id, req), @@ -62,16 +64,16 @@ impl std::fmt::Display for RPCEvent { /// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level /// logic. -pub struct RPC { +pub struct RPC { /// Queue of events to processed. - events: Vec>, + events: Vec, RPCMessage>>, /// Pins the generic substream. marker: PhantomData, /// Slog logger for RPC behaviour. log: slog::Logger, } -impl RPC { +impl RPC { pub fn new(log: slog::Logger) -> Self { let log = log.new(o!("service" => "libp2p_rpc")); RPC { @@ -84,7 +86,7 @@ impl RPC { /// Submits an RPC request. /// /// The peer must be connected for this to succeed. 
- pub fn send_rpc(&mut self, peer_id: PeerId, rpc_event: RPCEvent) { + pub fn send_rpc(&mut self, peer_id: PeerId, rpc_event: RPCEvent) { self.events.push(NetworkBehaviourAction::SendEvent { peer_id, event: rpc_event, @@ -92,16 +94,19 @@ impl RPC { } } -impl NetworkBehaviour for RPC +impl NetworkBehaviour for RPC where TSubstream: AsyncRead + AsyncWrite, + TSpec: EthSpec, { - type ProtocolsHandler = RPCHandler; - type OutEvent = RPCMessage; + type ProtocolsHandler = RPCHandler; + type OutEvent = RPCMessage; fn new_handler(&mut self) -> Self::ProtocolsHandler { RPCHandler::new( - SubstreamProtocol::new(RPCProtocol), + SubstreamProtocol::new(RPCProtocol { + phantom: PhantomData, + }), Duration::from_secs(30), &self.log, ) @@ -113,15 +118,33 @@ where } fn inject_connected(&mut self, peer_id: PeerId, connected_point: ConnectedPoint) { + // TODO: Remove this on proper peer discovery + self.events.push(NetworkBehaviourAction::GenerateEvent( + RPCMessage::PeerConnectedHack(peer_id.clone(), connected_point.clone()), + )); // if initialised the connection, report this upwards to send the HELLO request if let ConnectedPoint::Dialer { .. 
} = connected_point { self.events.push(NetworkBehaviourAction::GenerateEvent( - RPCMessage::PeerDialed(peer_id), + RPCMessage::PeerDialed(peer_id.clone()), )); } + + // find the peer's meta-data + debug!(self.log, "Requesting new peer's metadata"; "peer_id" => format!("{}",peer_id)); + let rpc_event = + RPCEvent::Request(RequestId::from(0usize), RPCRequest::MetaData(PhantomData)); + self.events.push(NetworkBehaviourAction::SendEvent { + peer_id, + event: rpc_event, + }); } - fn inject_disconnected(&mut self, peer_id: &PeerId, _: ConnectedPoint) { + fn inject_disconnected(&mut self, peer_id: &PeerId, connected_point: ConnectedPoint) { + // TODO: Remove this on proper peer discovery + self.events.push(NetworkBehaviourAction::GenerateEvent( + RPCMessage::PeerDisconnectedHack(peer_id.clone(), connected_point.clone()), + )); + // inform the rpc handler that the peer has disconnected self.events.push(NetworkBehaviourAction::GenerateEvent( RPCMessage::PeerDisconnected(peer_id.clone()), @@ -157,8 +180,12 @@ where } /// Messages sent to the user from the RPC protocol. -pub enum RPCMessage { - RPC(PeerId, RPCEvent), +pub enum RPCMessage { + RPC(PeerId, RPCEvent), PeerDialed(PeerId), PeerDisconnected(PeerId), + // TODO: This is a hack to give access to connections to peer manager. 
Remove this once + // behaviour is re-written + PeerConnectedHack(PeerId, ConnectedPoint), + PeerDisconnectedHack(PeerId, ConnectedPoint), } diff --git a/beacon_node/eth2-libp2p/src/rpc/protocol.rs b/beacon_node/eth2-libp2p/src/rpc/protocol.rs index 003770af9..76567cf46 100644 --- a/beacon_node/eth2-libp2p/src/rpc/protocol.rs +++ b/beacon_node/eth2-libp2p/src/rpc/protocol.rs @@ -5,26 +5,26 @@ use crate::rpc::{ codec::{ base::{BaseInboundCodec, BaseOutboundCodec}, ssz::{SSZInboundCodec, SSZOutboundCodec}, + ssz_snappy::{SSZSnappyInboundCodec, SSZSnappyOutboundCodec}, InboundCodec, OutboundCodec, }, methods::ResponseTermination, }; -use futures::{ - future::{self, FutureResult}, - sink, stream, Sink, Stream, -}; +use futures::future::*; +use futures::{future, sink, stream, Sink, Stream}; use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, ProtocolName, UpgradeInfo}; use std::io; +use std::marker::PhantomData; use std::time::Duration; use tokio::codec::Framed; use tokio::io::{AsyncRead, AsyncWrite}; -use tokio::prelude::*; use tokio::timer::timeout; use tokio::util::FutureExt; use tokio_io_timeout::TimeoutStream; +use types::EthSpec; /// The maximum bytes that can be sent across the RPC. -const MAX_RPC_SIZE: usize = 4_194_304; // 4M +const MAX_RPC_SIZE: usize = 1_048_576; // 1M /// The protocol prefix the RPC protocol id. const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; /// Time allowed for the first byte of a request to arrive before we time out (Time To First Byte). @@ -34,43 +34,108 @@ const TTFB_TIMEOUT: u64 = 5; const REQUEST_TIMEOUT: u64 = 15; /// Protocol names to be used. -/// The Status protocol name. -pub const RPC_STATUS: &str = "status"; -/// The Goodbye protocol name. -pub const RPC_GOODBYE: &str = "goodbye"; -/// The `BlocksByRange` protocol name. -pub const RPC_BLOCKS_BY_RANGE: &str = "beacon_blocks_by_range"; -/// The `BlocksByRoot` protocol name. 
-pub const RPC_BLOCKS_BY_ROOT: &str = "beacon_blocks_by_root"; +#[derive(Debug, Clone)] +pub enum Protocol { + /// The Status protocol name. + Status, + /// The Goodbye protocol name. + Goodbye, + /// The `BlocksByRange` protocol name. + BlocksByRange, + /// The `BlocksByRoot` protocol name. + BlocksByRoot, + /// The `Ping` protocol name. + Ping, + /// The `MetaData` protocol name. + MetaData, +} + +/// RPC Versions +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Version { + /// Version 1 of RPC + V1, +} + +/// RPC Encondings supported. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Encoding { + SSZ, + SSZSnappy, +} + +impl std::fmt::Display for Protocol { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let repr = match self { + Protocol::Status => "status", + Protocol::Goodbye => "goodbye", + Protocol::BlocksByRange => "beacon_blocks_by_range", + Protocol::BlocksByRoot => "beacon_blocks_by_root", + Protocol::Ping => "ping", + Protocol::MetaData => "metadata", + }; + f.write_str(repr) + } +} + +impl std::fmt::Display for Encoding { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let repr = match self { + Encoding::SSZ => "ssz", + Encoding::SSZSnappy => "ssz_snappy", + }; + f.write_str(repr) + } +} + +impl std::fmt::Display for Version { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let repr = match self { + Version::V1 => "1", + }; + f.write_str(repr) + } +} #[derive(Debug, Clone)] -pub struct RPCProtocol; +pub struct RPCProtocol { + pub phantom: PhantomData, +} -impl UpgradeInfo for RPCProtocol { +impl UpgradeInfo for RPCProtocol { type Info = ProtocolId; type InfoIter = Vec; + /// The list of supported RPC protocols for Lighthouse. 
fn protocol_info(&self) -> Self::InfoIter { vec![ - ProtocolId::new(RPC_STATUS, "1", "ssz"), - ProtocolId::new(RPC_GOODBYE, "1", "ssz"), - ProtocolId::new(RPC_BLOCKS_BY_RANGE, "1", "ssz"), - ProtocolId::new(RPC_BLOCKS_BY_ROOT, "1", "ssz"), + ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZ), + ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZ), + ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZ), + ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZ), + ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZ), + ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZ), ] } } /// Tracks the types in a protocol id. -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct ProtocolId { /// The rpc message type/name. - pub message_name: String, + pub message_name: Protocol, /// The version of the RPC. - pub version: String, + pub version: Version, /// The encoding of the RPC. - pub encoding: String, + pub encoding: Encoding, /// The protocol id that is formed from the above fields. protocol_id: String, @@ -78,16 +143,16 @@ pub struct ProtocolId { /// An RPC protocol ID. 
impl ProtocolId { - pub fn new(message_name: &str, version: &str, encoding: &str) -> Self { + pub fn new(message_name: Protocol, version: Version, encoding: Encoding) -> Self { let protocol_id = format!( "{}/{}/{}/{}", PROTOCOL_PREFIX, message_name, version, encoding ); ProtocolId { - message_name: message_name.into(), - version: version.into(), - encoding: encoding.into(), + message_name, + version: version, + encoding, protocol_id, } } @@ -104,27 +169,33 @@ impl ProtocolName for ProtocolId { // The inbound protocol reads the request, decodes it and returns the stream to the protocol // handler to respond to once ready. -pub type InboundOutput = (RPCRequest, InboundFramed); -pub type InboundFramed = Framed>, InboundCodec>; -type FnAndThen = fn( - (Option, InboundFramed), -) -> FutureResult, RPCError>; -type FnMapErr = fn(timeout::Error<(RPCError, InboundFramed)>) -> RPCError; +pub type InboundOutput = (RPCRequest, InboundFramed); +pub type InboundFramed = + Framed>, InboundCodec>; +type FnAndThen = fn( + (Option>, InboundFramed), +) -> FutureResult, RPCError>; +type FnMapErr = + fn(timeout::Error<(RPCError, InboundFramed)>) -> RPCError; -impl InboundUpgrade for RPCProtocol +impl InboundUpgrade for RPCProtocol where TSocket: AsyncRead + AsyncWrite, + TSpec: EthSpec, { - type Output = InboundOutput; + type Output = InboundOutput; type Error = RPCError; - type Future = future::AndThen< - future::MapErr< - timeout::Timeout>>, - FnMapErr, + type Future = future::Either< + FutureResult, RPCError>, + future::AndThen< + future::MapErr< + timeout::Timeout>>, + FnMapErr, + >, + FutureResult, RPCError>, + FnAndThen, >, - FutureResult, RPCError>, - FnAndThen, >; fn upgrade_inbound( @@ -132,25 +203,44 @@ where socket: upgrade::Negotiated, protocol: ProtocolId, ) -> Self::Future { - match protocol.encoding.as_str() { - "ssz" | _ => { + let protocol_name = protocol.message_name.clone(); + let codec = match protocol.encoding { + Encoding::SSZSnappy => { + let ssz_snappy_codec = + 
BaseInboundCodec::new(SSZSnappyInboundCodec::new(protocol, MAX_RPC_SIZE)); + InboundCodec::SSZSnappy(ssz_snappy_codec) + } + Encoding::SSZ => { let ssz_codec = BaseInboundCodec::new(SSZInboundCodec::new(protocol, MAX_RPC_SIZE)); - let codec = InboundCodec::SSZ(ssz_codec); - let mut timed_socket = TimeoutStream::new(socket); - timed_socket.set_read_timeout(Some(Duration::from_secs(TTFB_TIMEOUT))); - Framed::new(timed_socket, codec) + InboundCodec::SSZ(ssz_codec) + } + }; + let mut timed_socket = TimeoutStream::new(socket); + timed_socket.set_read_timeout(Some(Duration::from_secs(TTFB_TIMEOUT))); + + let socket = Framed::new(timed_socket, codec); + + // MetaData requests should be empty, return the stream + match protocol_name { + Protocol::MetaData => futures::future::Either::A(futures::future::ok(( + RPCRequest::MetaData(PhantomData), + socket, + ))), + + _ => futures::future::Either::B( + socket .into_future() .timeout(Duration::from_secs(REQUEST_TIMEOUT)) - .map_err(RPCError::from as FnMapErr) + .map_err(RPCError::from as FnMapErr) .and_then({ |(req, stream)| match req { - Some(req) => futures::future::ok((req, stream)), + Some(request) => futures::future::ok((request, stream)), None => futures::future::err(RPCError::Custom( "Stream terminated early".into(), )), } - } as FnAndThen) - } + } as FnAndThen), + ), } } } @@ -161,14 +251,16 @@ where // `OutboundUpgrade` #[derive(Debug, Clone, PartialEq)] -pub enum RPCRequest { +pub enum RPCRequest { Status(StatusMessage), Goodbye(GoodbyeReason), BlocksByRange(BlocksByRangeRequest), BlocksByRoot(BlocksByRootRequest), + Ping(Ping), + MetaData(PhantomData), } -impl UpgradeInfo for RPCRequest { +impl UpgradeInfo for RPCRequest { type Info = ProtocolId; type InfoIter = Vec; @@ -179,14 +271,34 @@ impl UpgradeInfo for RPCRequest { } /// Implements the encoding per supported protocol for RPCRequest. 
-impl RPCRequest { +impl RPCRequest { pub fn supported_protocols(&self) -> Vec { match self { // add more protocols when versions/encodings are supported - RPCRequest::Status(_) => vec![ProtocolId::new(RPC_STATUS, "1", "ssz")], - RPCRequest::Goodbye(_) => vec![ProtocolId::new(RPC_GOODBYE, "1", "ssz")], - RPCRequest::BlocksByRange(_) => vec![ProtocolId::new(RPC_BLOCKS_BY_RANGE, "1", "ssz")], - RPCRequest::BlocksByRoot(_) => vec![ProtocolId::new(RPC_BLOCKS_BY_ROOT, "1", "ssz")], + RPCRequest::Status(_) => vec![ + ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZ), + ], + RPCRequest::Goodbye(_) => vec![ + ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZ), + ], + RPCRequest::BlocksByRange(_) => vec![ + ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZ), + ], + RPCRequest::BlocksByRoot(_) => vec![ + ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZ), + ], + RPCRequest::Ping(_) => vec![ + ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZ), + ], + RPCRequest::MetaData(_) => vec![ + ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy), + ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZ), + ], } } @@ -200,6 +312,8 @@ impl RPCRequest { RPCRequest::Goodbye(_) => false, RPCRequest::BlocksByRange(_) => true, RPCRequest::BlocksByRoot(_) => true, + RPCRequest::Ping(_) => true, + RPCRequest::MetaData(_) => true, } } @@ -211,6 +325,8 @@ impl RPCRequest { RPCRequest::Goodbye(_) => false, RPCRequest::BlocksByRange(_) => true, RPCRequest::BlocksByRoot(_) => true, + RPCRequest::Ping(_) => false, + RPCRequest::MetaData(_) => 
false, } } @@ -224,6 +340,8 @@ impl RPCRequest { RPCRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, RPCRequest::Status(_) => unreachable!(), RPCRequest::Goodbye(_) => unreachable!(), + RPCRequest::Ping(_) => unreachable!(), + RPCRequest::MetaData(_) => unreachable!(), } } } @@ -232,28 +350,35 @@ impl RPCRequest { /* Outbound upgrades */ -pub type OutboundFramed = Framed, OutboundCodec>; +pub type OutboundFramed = + Framed, OutboundCodec>; -impl OutboundUpgrade for RPCRequest +impl OutboundUpgrade for RPCRequest where + TSpec: EthSpec, TSocket: AsyncRead + AsyncWrite, { - type Output = OutboundFramed; + type Output = OutboundFramed; type Error = RPCError; - type Future = sink::Send>; + type Future = sink::Send>; fn upgrade_outbound( self, socket: upgrade::Negotiated, protocol: Self::Info, ) -> Self::Future { - match protocol.encoding.as_str() { - "ssz" | _ => { + let codec = match protocol.encoding { + Encoding::SSZSnappy => { + let ssz_snappy_codec = + BaseOutboundCodec::new(SSZSnappyOutboundCodec::new(protocol, MAX_RPC_SIZE)); + OutboundCodec::SSZSnappy(ssz_snappy_codec) + } + Encoding::SSZ => { let ssz_codec = BaseOutboundCodec::new(SSZOutboundCodec::new(protocol, MAX_RPC_SIZE)); - let codec = OutboundCodec::SSZ(ssz_codec); - Framed::new(socket, codec).send(self) + OutboundCodec::SSZ(ssz_codec) } - } + }; + Framed::new(socket, codec).send(self) } } @@ -264,6 +389,8 @@ pub enum RPCError { ReadError(upgrade::ReadOneError), /// Error when decoding the raw buffer from ssz. SSZDecodeError(ssz::DecodeError), + /// Snappy error + SnappyError(snap::Error), /// Invalid Protocol ID. InvalidProtocol(&'static str), /// IO Error. 
@@ -311,6 +438,12 @@ impl From for RPCError { } } +impl From for RPCError { + fn from(err: snap::Error) -> Self { + RPCError::SnappyError(err) + } +} + // Error trait is required for `ProtocolsHandler` impl std::fmt::Display for RPCError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { @@ -321,6 +454,7 @@ impl std::fmt::Display for RPCError { RPCError::IoError(ref err) => write!(f, "IO Error: {}", err), RPCError::RPCErrorResponse => write!(f, "RPC Response Error"), RPCError::StreamTimeout => write!(f, "Stream Timeout"), + RPCError::SnappyError(ref err) => write!(f, "Snappy error: {}", err), RPCError::Custom(ref err) => write!(f, "{}", err), } } @@ -331,6 +465,7 @@ impl std::error::Error for RPCError { match *self { RPCError::ReadError(ref err) => Some(err), RPCError::SSZDecodeError(_) => None, + RPCError::SnappyError(ref err) => Some(err), RPCError::InvalidProtocol(_) => None, RPCError::IoError(ref err) => Some(err), RPCError::StreamTimeout => None, @@ -340,13 +475,15 @@ impl std::error::Error for RPCError { } } -impl std::fmt::Display for RPCRequest { +impl std::fmt::Display for RPCRequest { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { RPCRequest::Status(status) => write!(f, "Status Message: {}", status), RPCRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason), RPCRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req), RPCRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), + RPCRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), + RPCRequest::MetaData(_) => write!(f, "MetaData request"), } } } diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index f9d5f7210..9b5e4e473 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -1,9 +1,8 @@ -use crate::behaviour::{Behaviour, BehaviourEvent, PubsubMessage}; -use crate::error; +use crate::behaviour::{Behaviour, BehaviourEvent}; +use 
crate::discovery::enr; use crate::multiaddr::Protocol; -use crate::rpc::RPCEvent; -use crate::NetworkConfig; -use crate::{NetworkGlobals, Topic, TopicHash}; +use crate::types::{error, GossipKind}; +use crate::{NetworkConfig, NetworkGlobals}; use futures::prelude::*; use futures::Stream; use libp2p::core::{ @@ -15,7 +14,6 @@ use libp2p::core::{ upgrade::{InboundUpgradeExt, OutboundUpgradeExt}, ConnectedPoint, }; -use libp2p::gossipsub::MessageId; use libp2p::{core, noise, secio, swarm::NetworkBehaviour, PeerId, Swarm, Transport}; use slog::{crit, debug, error, info, trace, warn}; use std::fs::File; @@ -24,20 +22,21 @@ use std::io::{Error, ErrorKind}; use std::sync::Arc; use std::time::Duration; use tokio::timer::DelayQueue; +use types::{EnrForkId, EthSpec}; type Libp2pStream = Boxed<(PeerId, StreamMuxerBox), Error>; -type Libp2pBehaviour = Behaviour>; +type Libp2pBehaviour = Behaviour, TSpec>; -const NETWORK_KEY_FILENAME: &str = "key"; +pub const NETWORK_KEY_FILENAME: &str = "key"; /// The time in milliseconds to wait before banning a peer. This allows for any Goodbye messages to be /// flushed and protocols to be negotiated. const BAN_PEER_WAIT_TIMEOUT: u64 = 200; /// The configuration and state of the libp2p components for the beacon node. -pub struct Service { +pub struct Service { /// The libp2p Swarm handler. //TODO: Make this private - pub swarm: Swarm, + pub swarm: Swarm>, /// This node's PeerId. pub local_peer_id: PeerId, @@ -52,25 +51,36 @@ pub struct Service { pub log: slog::Logger, } -impl Service { +impl Service { pub fn new( config: &NetworkConfig, + enr_fork_id: EnrForkId, log: slog::Logger, - ) -> error::Result<(Arc, Self)> { + ) -> error::Result<(Arc>, Self)> { trace!(log, "Libp2p Service starting"); + // initialise the node's ID let local_keypair = if let Some(hex_bytes) = &config.secret_key_hex { keypair_from_hex(hex_bytes)? 
} else { load_private_key(config, &log) }; - // load the private key from CLI flag, disk or generate a new one - let local_peer_id = PeerId::from(local_keypair.public()); - info!(log, "Libp2p Service"; "peer_id" => format!("{:?}", local_peer_id)); + // Create an ENR or load from disk if appropriate + let enr = + enr::build_or_load_enr::(local_keypair.clone(), config, enr_fork_id, &log)?; + let local_peer_id = enr.peer_id(); // set up a collection of variables accessible outside of the network crate - let network_globals = Arc::new(NetworkGlobals::new(local_peer_id.clone())); + let network_globals = Arc::new(NetworkGlobals::new( + enr.clone(), + config.libp2p_port, + config.discovery_port, + &log, + )); + + info!(log, "Libp2p Service"; "peer_id" => format!("{:?}", enr.peer_id())); + debug!(log, "Attempting to open listening ports"; "address" => format!("{}", config.listen_address), "tcp_port" => config.libp2p_port, "udp_port" => config.discovery_port); let mut swarm = { // Set up the transport - tcp/ws with noise/secio and mplex/yamux @@ -128,19 +138,21 @@ impl Service { if let Protocol::Udp(_) = components[1] { continue; } + // inform the peer manager that we are currently dialing this peer + network_globals + .peers + .write() + .dialing_peer(&bootnode_enr.peer_id()); dial_addr(multiaddr); } } - let mut subscribed_topics: Vec = vec![]; - for topic in config.topics.clone() { - let raw_topic: Topic = topic.into(); - let topic_string = raw_topic.no_hash(); - if swarm.subscribe(raw_topic.clone()) { - trace!(log, "Subscribed to topic"; "topic" => format!("{}", topic_string)); - subscribed_topics.push(topic_string.as_str().into()); + let mut subscribed_topics: Vec = vec![]; + for topic_kind in &config.topics { + if swarm.subscribe_kind(topic_kind.clone()) { + subscribed_topics.push(topic_kind.clone()); } else { - warn!(log, "Could not subscribe to topic"; "topic" => format!("{}",topic_string)); + warn!(log, "Could not subscribe to topic"; "topic" => 
format!("{}",topic_kind)); } } info!(log, "Subscribed to topics"; "topics" => format!("{:?}", subscribed_topics)); @@ -167,43 +179,16 @@ impl Service { } } -impl Stream for Service { - type Item = Libp2pEvent; - type Error = crate::error::Error; +impl Stream for Service { + type Item = BehaviourEvent; + type Error = error::Error; fn poll(&mut self) -> Poll, Self::Error> { loop { match self.swarm.poll() { - Ok(Async::Ready(Some(event))) => match event { - BehaviourEvent::GossipMessage { - id, - source, - topics, - message, - } => { - trace!(self.log, "Gossipsub message received"; "service" => "Swarm"); - return Ok(Async::Ready(Some(Libp2pEvent::PubsubMessage { - id, - source, - topics, - message, - }))); - } - BehaviourEvent::RPC(peer_id, event) => { - return Ok(Async::Ready(Some(Libp2pEvent::RPC(peer_id, event)))); - } - BehaviourEvent::PeerDialed(peer_id) => { - return Ok(Async::Ready(Some(Libp2pEvent::PeerDialed(peer_id)))); - } - BehaviourEvent::PeerDisconnected(peer_id) => { - return Ok(Async::Ready(Some(Libp2pEvent::PeerDisconnected(peer_id)))); - } - BehaviourEvent::PeerSubscribed(peer_id, topic) => { - return Ok(Async::Ready(Some(Libp2pEvent::PeerSubscribed( - peer_id, topic, - )))); - } - }, + Ok(Async::Ready(Some(event))) => { + return Ok(Async::Ready(Some(event))); + } Ok(Async::Ready(None)) => unreachable!("Swarm stream shouldn't end"), Ok(Async::NotReady) => break, _ => break, @@ -311,26 +296,6 @@ fn build_transport(local_private_key: Keypair) -> Boxed<(PeerId, StreamMuxerBox) transport } -#[derive(Debug)] -/// Events that can be obtained from polling the Libp2p Service. -pub enum Libp2pEvent { - /// An RPC response request has been received on the swarm. - RPC(PeerId, RPCEvent), - /// Initiated the connection to a new peer. - PeerDialed(PeerId), - /// A peer has disconnected. - PeerDisconnected(PeerId), - /// Received pubsub message. 
- PubsubMessage { - id: MessageId, - source: PeerId, - topics: Vec, - message: PubsubMessage, - }, - /// Subscribed to peer for a topic hash. - PeerSubscribed(PeerId, TopicHash), -} - fn keypair_from_hex(hex_bytes: &str) -> error::Result { let hex_bytes = if hex_bytes.starts_with("0x") { hex_bytes[2..].to_string() diff --git a/beacon_node/eth2-libp2p/src/topics.rs b/beacon_node/eth2-libp2p/src/topics.rs deleted file mode 100644 index 6a7ed5881..000000000 --- a/beacon_node/eth2-libp2p/src/topics.rs +++ /dev/null @@ -1,71 +0,0 @@ -use libp2p::gossipsub::Topic; -use serde_derive::{Deserialize, Serialize}; - -/// The gossipsub topic names. -// These constants form a topic name of the form /TOPIC_PREFIX/TOPIC/ENCODING_POSTFIX -// For example /eth2/beacon_block/ssz -pub const TOPIC_PREFIX: &str = "eth2"; -pub const TOPIC_ENCODING_POSTFIX: &str = "ssz"; -pub const BEACON_BLOCK_TOPIC: &str = "beacon_block"; -pub const BEACON_ATTESTATION_TOPIC: &str = "beacon_attestation"; -pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit"; -pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing"; -pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing"; -pub const SHARD_TOPIC_PREFIX: &str = "shard"; - -/// Enum that brings these topics into the rust type system. 
-#[derive(Clone, Debug, Serialize, Deserialize)] -pub enum GossipTopic { - BeaconBlock, - BeaconAttestation, - VoluntaryExit, - ProposerSlashing, - AttesterSlashing, - Shard, - Unknown(String), -} - -impl From<&str> for GossipTopic { - fn from(topic: &str) -> GossipTopic { - let topic_parts: Vec<&str> = topic.split('/').collect(); - if topic_parts.len() == 4 - && topic_parts[1] == TOPIC_PREFIX - && topic_parts[3] == TOPIC_ENCODING_POSTFIX - { - match topic_parts[2] { - BEACON_BLOCK_TOPIC => GossipTopic::BeaconBlock, - BEACON_ATTESTATION_TOPIC => GossipTopic::BeaconAttestation, - VOLUNTARY_EXIT_TOPIC => GossipTopic::VoluntaryExit, - PROPOSER_SLASHING_TOPIC => GossipTopic::ProposerSlashing, - ATTESTER_SLASHING_TOPIC => GossipTopic::AttesterSlashing, - unknown_topic => GossipTopic::Unknown(unknown_topic.into()), - } - } else { - GossipTopic::Unknown(topic.into()) - } - } -} - -impl Into for GossipTopic { - fn into(self) -> Topic { - Topic::new(self.into()) - } -} - -impl Into for GossipTopic { - fn into(self) -> String { - match self { - GossipTopic::BeaconBlock => topic_builder(BEACON_BLOCK_TOPIC), - GossipTopic::BeaconAttestation => topic_builder(BEACON_ATTESTATION_TOPIC), - GossipTopic::VoluntaryExit => topic_builder(VOLUNTARY_EXIT_TOPIC), - GossipTopic::ProposerSlashing => topic_builder(PROPOSER_SLASHING_TOPIC), - GossipTopic::AttesterSlashing => topic_builder(ATTESTER_SLASHING_TOPIC), - GossipTopic::Shard => topic_builder(SHARD_TOPIC_PREFIX), - GossipTopic::Unknown(topic) => topic, - } - } -} - -fn topic_builder(topic: &'static str) -> String { - format!("/{}/{}/{}", TOPIC_PREFIX, topic, TOPIC_ENCODING_POSTFIX,) -} diff --git a/beacon_node/eth2-libp2p/src/error.rs b/beacon_node/eth2-libp2p/src/types/error.rs similarity index 100% rename from beacon_node/eth2-libp2p/src/error.rs rename to beacon_node/eth2-libp2p/src/types/error.rs diff --git a/beacon_node/eth2-libp2p/src/types/globals.rs b/beacon_node/eth2-libp2p/src/types/globals.rs new file mode 100644 index 
000000000..3912bf86c --- /dev/null +++ b/beacon_node/eth2-libp2p/src/types/globals.rs @@ -0,0 +1,124 @@ +//! A collection of variables that are accessible outside of the network thread itself. +use crate::peer_manager::PeerDB; +use crate::rpc::methods::MetaData; +use crate::types::SyncState; +use crate::{discovery::enr::Eth2Enr, Enr, GossipTopic, Multiaddr, PeerId}; +use parking_lot::RwLock; +use std::collections::HashSet; +use std::sync::atomic::{AtomicU16, Ordering}; +use types::EthSpec; + +pub struct NetworkGlobals { + /// The current local ENR. + pub local_enr: RwLock, + /// The current node's meta-data. + pub meta_data: RwLock>, + /// The local peer_id. + pub peer_id: RwLock, + /// Listening multiaddrs. + pub listen_multiaddrs: RwLock>, + /// The TCP port that the libp2p service is listening on + pub listen_port_tcp: AtomicU16, + /// The UDP port that the discovery service is listening on + pub listen_port_udp: AtomicU16, + /// The collection of known peers. + pub peers: RwLock>, + /// The current gossipsub topic subscriptions. + pub gossipsub_subscriptions: RwLock>, + /// The current sync status of the node. + pub sync_state: RwLock, +} + +impl NetworkGlobals { + pub fn new(enr: Enr, tcp_port: u16, udp_port: u16, log: &slog::Logger) -> Self { + // set up the local meta data of the node + let meta_data = RwLock::new(MetaData { + seq_number: 0, + attnets: enr + .bitfield::() + .expect("Local ENR must have a bitfield specified"), + }); + + NetworkGlobals { + local_enr: RwLock::new(enr.clone()), + meta_data, + peer_id: RwLock::new(enr.peer_id()), + listen_multiaddrs: RwLock::new(Vec::new()), + listen_port_tcp: AtomicU16::new(tcp_port), + listen_port_udp: AtomicU16::new(udp_port), + peers: RwLock::new(PeerDB::new(log)), + gossipsub_subscriptions: RwLock::new(HashSet::new()), + sync_state: RwLock::new(SyncState::Stalled), + } + } + + /// Returns the local ENR from the underlying Discv5 behaviour that external peers may connect + /// to. 
+ pub fn local_enr(&self) -> Enr { + self.local_enr.read().clone() + } + + /// Returns the local libp2p PeerID. + pub fn local_peer_id(&self) -> PeerId { + self.peer_id.read().clone() + } + + /// Returns the list of `Multiaddr` that the underlying libp2p instance is listening on. + pub fn listen_multiaddrs(&self) -> Vec { + self.listen_multiaddrs.read().clone() + } + + /// Returns the libp2p TCP port that this node has been configured to listen on. + pub fn listen_port_tcp(&self) -> u16 { + self.listen_port_tcp.load(Ordering::Relaxed) + } + + /// Returns the UDP discovery port that this node has been configured to listen on. + pub fn listen_port_udp(&self) -> u16 { + self.listen_port_udp.load(Ordering::Relaxed) + } + + /// Returns the number of libp2p connected peers. + pub fn connected_peers(&self) -> usize { + self.peers.read().connected_peer_ids().count() + } + + /// Returns the number of libp2p peers that are either connected or being dialed. + pub fn connected_or_dialing_peers(&self) -> usize { + self.peers.read().connected_or_dialing_peers().count() + } + + /// Returns in the node is syncing. + pub fn is_syncing(&self) -> bool { + self.sync_state.read().is_syncing() + } + + /// Returns the current sync state of the peer. + pub fn sync_state(&self) -> SyncState { + self.sync_state.read().clone() + } + + /// Updates the syncing state of the node. + /// + /// If there is a new state, the old state and the new states are returned. + pub fn update_sync_state(&self) -> Option<(SyncState, SyncState)> { + let mut result = None; + // if we are in a range sync, nothing changes. Range sync will update this. 
+ if !self.is_syncing() { + let new_state = self + .peers + .read() + .synced_peers() + .next() + .map(|_| SyncState::Synced) + .unwrap_or_else(|| SyncState::Stalled); + + let mut peer_state = self.sync_state.write(); + if new_state != *peer_state { + result = Some((peer_state.clone(), new_state.clone())); + } + *peer_state = new_state; + } + result + } +} diff --git a/beacon_node/eth2-libp2p/src/types/mod.rs b/beacon_node/eth2-libp2p/src/types/mod.rs new file mode 100644 index 000000000..94d24bad6 --- /dev/null +++ b/beacon_node/eth2-libp2p/src/types/mod.rs @@ -0,0 +1,17 @@ +pub mod error; +mod globals; +mod pubsub; +mod sync_state; +mod topics; + +use types::{BitVector, EthSpec}; + +#[allow(type_alias_bounds)] +pub type EnrBitfield = BitVector; + +pub type Enr = libp2p::discv5::enr::Enr; + +pub use globals::NetworkGlobals; +pub use pubsub::PubsubMessage; +pub use sync_state::SyncState; +pub use topics::{GossipEncoding, GossipKind, GossipTopic}; diff --git a/beacon_node/eth2-libp2p/src/types/pubsub.rs b/beacon_node/eth2-libp2p/src/types/pubsub.rs new file mode 100644 index 000000000..279dde28b --- /dev/null +++ b/beacon_node/eth2-libp2p/src/types/pubsub.rs @@ -0,0 +1,201 @@ +//! Handles the encoding and decoding of pubsub messages. + +use crate::config::GOSSIP_MAX_SIZE; +use crate::types::{GossipEncoding, GossipKind, GossipTopic}; +use crate::TopicHash; +use snap::raw::{decompress_len, Decoder, Encoder}; +use ssz::{Decode, Encode}; +use std::boxed::Box; +use types::SubnetId; +use types::{ + Attestation, AttesterSlashing, EthSpec, ProposerSlashing, SignedAggregateAndProof, + SignedBeaconBlock, VoluntaryExit, +}; + +#[derive(Debug, Clone, PartialEq)] +pub enum PubsubMessage { + /// Gossipsub message providing notification of a new block. + BeaconBlock(Box>), + /// Gossipsub message providing notification of a Aggregate attestation and associated proof. 
+ AggregateAndProofAttestation(Box>), + /// Gossipsub message providing notification of a raw un-aggregated attestation with its shard id. + Attestation(Box<(SubnetId, Attestation)>), + /// Gossipsub message providing notification of a voluntary exit. + VoluntaryExit(Box), + /// Gossipsub message providing notification of a new proposer slashing. + ProposerSlashing(Box), + /// Gossipsub message providing notification of a new attester slashing. + AttesterSlashing(Box>), +} + +impl PubsubMessage { + /// Returns the topics that each pubsub message will be sent across, given a supported + /// gossipsub encoding and fork version. + pub fn topics(&self, encoding: GossipEncoding, fork_version: [u8; 4]) -> Vec { + vec![GossipTopic::new(self.kind(), encoding, fork_version)] + } + + /// Returns the kind of gossipsub topic associated with the message. + pub fn kind(&self) -> GossipKind { + match self { + PubsubMessage::BeaconBlock(_) => GossipKind::BeaconBlock, + PubsubMessage::AggregateAndProofAttestation(_) => GossipKind::BeaconAggregateAndProof, + PubsubMessage::Attestation(attestation_data) => { + GossipKind::CommitteeIndex(attestation_data.0) + } + PubsubMessage::VoluntaryExit(_) => GossipKind::VoluntaryExit, + PubsubMessage::ProposerSlashing(_) => GossipKind::ProposerSlashing, + PubsubMessage::AttesterSlashing(_) => GossipKind::AttesterSlashing, + } + } + + /// This decodes `data` into a `PubsubMessage` given a list of topics. + /// + /// The topics are checked + /// in order and as soon as one topic matches the decoded data, we return the data. + /* Note: This is assuming we are not hashing topics. If we choose to hash topics, these will + * need to be modified. + * + * Also note that a message can be associated with many topics. As soon as one of the topics is + * known we match. If none of the topics are known we return an unknown state. 
+ */ + pub fn decode(topics: &[TopicHash], data: &[u8]) -> Result { + let mut unknown_topics = Vec::new(); + for topic in topics { + match GossipTopic::decode(topic.as_str()) { + Err(_) => { + unknown_topics.push(topic); + continue; + } + Ok(gossip_topic) => { + let mut decompressed_data: Vec = Vec::new(); + let data = match gossip_topic.encoding() { + // group each part by encoding type + GossipEncoding::SSZSnappy => { + match decompress_len(data) { + Ok(n) if n > GOSSIP_MAX_SIZE => { + return Err("ssz_snappy decoded data > GOSSIP_MAX_SIZE".into()); + } + Ok(n) => decompressed_data.resize(n, 0), + Err(e) => { + return Err(format!("{}", e)); + } + }; + let mut decoder = Decoder::new(); + match decoder.decompress(data, &mut decompressed_data) { + Ok(n) => { + decompressed_data.truncate(n); + &decompressed_data + } + Err(e) => return Err(format!("{}", e)), + } + } + GossipEncoding::SSZ => data, + }; + // the ssz decoders + match gossip_topic.kind() { + GossipKind::BeaconAggregateAndProof => { + let agg_and_proof = SignedAggregateAndProof::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?; + return Ok(PubsubMessage::AggregateAndProofAttestation(Box::new( + agg_and_proof, + ))); + } + GossipKind::CommitteeIndex(subnet_id) => { + let attestation = Attestation::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?; + return Ok(PubsubMessage::Attestation(Box::new(( + *subnet_id, + attestation, + )))); + } + GossipKind::BeaconBlock => { + let beacon_block = SignedBeaconBlock::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?; + return Ok(PubsubMessage::BeaconBlock(Box::new(beacon_block))); + } + GossipKind::VoluntaryExit => { + let voluntary_exit = VoluntaryExit::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?; + return Ok(PubsubMessage::VoluntaryExit(Box::new(voluntary_exit))); + } + GossipKind::ProposerSlashing => { + let proposer_slashing = ProposerSlashing::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?; + return 
Ok(PubsubMessage::ProposerSlashing(Box::new( + proposer_slashing, + ))); + } + GossipKind::AttesterSlashing => { + let attester_slashing = AttesterSlashing::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?; + return Ok(PubsubMessage::AttesterSlashing(Box::new( + attester_slashing, + ))); + } + } + } + } + } + Err(format!("Unknown gossipsub topics: {:?}", unknown_topics)) + } + + /// Encodes a `PubsubMessage` based on the topic encodings. The first known encoding is used. If + /// no encoding is known, and error is returned. + pub fn encode(&self, encoding: GossipEncoding) -> Result, String> { + let data = match &self { + PubsubMessage::BeaconBlock(data) => data.as_ssz_bytes(), + PubsubMessage::AggregateAndProofAttestation(data) => data.as_ssz_bytes(), + PubsubMessage::VoluntaryExit(data) => data.as_ssz_bytes(), + PubsubMessage::ProposerSlashing(data) => data.as_ssz_bytes(), + PubsubMessage::AttesterSlashing(data) => data.as_ssz_bytes(), + PubsubMessage::Attestation(data) => data.1.as_ssz_bytes(), + }; + match encoding { + GossipEncoding::SSZ => { + if data.len() > GOSSIP_MAX_SIZE { + return Err("ssz encoded data > GOSSIP_MAX_SIZE".into()); + } else { + Ok(data) + } + } + GossipEncoding::SSZSnappy => { + let mut encoder = Encoder::new(); + match encoder.compress_vec(&data) { + Ok(compressed) if compressed.len() > GOSSIP_MAX_SIZE => { + Err("ssz_snappy Encoded data > GOSSIP_MAX_SIZE".into()) + } + Ok(compressed) => Ok(compressed), + Err(e) => Err(format!("{}", e)), + } + } + } + } +} + +impl std::fmt::Display for PubsubMessage { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + PubsubMessage::BeaconBlock(block) => write!( + f, + "Beacon Block: slot: {}, proposer_index: {}", + block.message.slot, block.message.proposer_index + ), + PubsubMessage::AggregateAndProofAttestation(att) => write!( + f, + "Aggregate and Proof: slot: {}, index: {}, aggregator_index: {}", + att.message.aggregate.data.slot, + 
att.message.aggregate.data.index, + att.message.aggregator_index, + ), + PubsubMessage::Attestation(data) => write!( + f, + "Attestation: subnet_id: {}, attestation_slot: {}, attestation_index: {}", + *data.0, data.1.data.slot, data.1.data.index, + ), + PubsubMessage::VoluntaryExit(_data) => write!(f, "Voluntary Exit"), + PubsubMessage::ProposerSlashing(_data) => write!(f, "Proposer Slashing"), + PubsubMessage::AttesterSlashing(_data) => write!(f, "Attester Slashing"), + } + } +} diff --git a/beacon_node/eth2-libp2p/src/types/sync_state.rs b/beacon_node/eth2-libp2p/src/types/sync_state.rs new file mode 100644 index 000000000..572d33a31 --- /dev/null +++ b/beacon_node/eth2-libp2p/src/types/sync_state.rs @@ -0,0 +1,66 @@ +use serde::{Deserialize, Serialize}; +use types::{Hash256, Slot}; + +/// The current state of the node. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum SyncState { + /// The node is performing a long-range (batch) sync over a finalized chain. + /// In this state, parent lookups are disabled. + SyncingFinalized { + start_slot: Slot, + head_slot: Slot, + head_root: Hash256, + }, + /// The node is performing a long-range (batch) sync over one or many head chains. + /// In this state parent lookups are disabled. + SyncingHead { start_slot: Slot, head_slot: Slot }, + /// The node is up to date with all known peers and is connected to at least one + /// fully synced peer. In this state, parent lookups are enabled. + Synced, + /// No useful peers are connected. Long-range sync's cannot proceed and we have no useful + /// peers to download parents for. More peers need to be connected before we can proceed. + Stalled, +} + +impl PartialEq for SyncState { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (SyncState::SyncingFinalized { .. }, SyncState::SyncingFinalized { .. }) => true, + (SyncState::SyncingHead { .. }, SyncState::SyncingHead { .. 
}) => true, + (SyncState::Synced, SyncState::Synced) => true, + (SyncState::Stalled, SyncState::Stalled) => true, + _ => false, + } + } +} + +impl SyncState { + /// Returns a boolean indicating the node is currently performing a long-range sync. + pub fn is_syncing(&self) -> bool { + match self { + SyncState::SyncingFinalized { .. } => true, + SyncState::SyncingHead { .. } => true, + SyncState::Synced => false, + SyncState::Stalled => false, + } + } + + /// Returns true if the node is synced. + pub fn is_synced(&self) -> bool { + match self { + SyncState::Synced => true, + _ => false, + } + } +} + +impl std::fmt::Display for SyncState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + SyncState::SyncingFinalized { .. } => write!(f, "Syncing Finalized Chain"), + SyncState::SyncingHead { .. } => write!(f, "Syncing Head Chain"), + SyncState::Synced { .. } => write!(f, "Synced"), + SyncState::Stalled { .. } => write!(f, "Stalled"), + } + } +} diff --git a/beacon_node/eth2-libp2p/src/types/topics.rs b/beacon_node/eth2-libp2p/src/types/topics.rs new file mode 100644 index 000000000..ebb067e6b --- /dev/null +++ b/beacon_node/eth2-libp2p/src/types/topics.rs @@ -0,0 +1,205 @@ +use libp2p::gossipsub::Topic; +use serde_derive::{Deserialize, Serialize}; +use types::SubnetId; + +/// The gossipsub topic names. +// These constants form a topic name of the form /TOPIC_PREFIX/TOPIC/ENCODING_POSTFIX +// For example /eth2/beacon_block/ssz +pub const TOPIC_PREFIX: &str = "eth2"; +pub const SSZ_ENCODING_POSTFIX: &str = "ssz"; +pub const SSZ_SNAPPY_ENCODING_POSTFIX: &str = "ssz_snappy"; +pub const BEACON_BLOCK_TOPIC: &str = "beacon_block"; +pub const BEACON_AGGREGATE_AND_PROOF_TOPIC: &str = "beacon_aggregate_and_proof"; +// for speed and easier string manipulation, committee topic index is split into a prefix and a +// postfix. The topic is committee_index{}_beacon_attestation where {} is an integer. 
+pub const COMMITEE_INDEX_TOPIC_PREFIX: &str = "committee_index"; +pub const COMMITEE_INDEX_TOPIC_POSTFIX: &str = "_beacon_attestation"; +pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit"; +pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing"; +pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing"; + +/// A gossipsub topic which encapsulates the type of messages that should be sent and received over +/// the pubsub protocol and the way the messages should be encoded. +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] +pub struct GossipTopic { + /// The encoding of the topic. + encoding: GossipEncoding, + /// The fork digest of the topic, + fork_digest: [u8; 4], + /// The kind of topic. + kind: GossipKind, +} + +/// Enum that brings these topics into the rust type system. +// NOTE: There is intentionally no unknown type here. We only allow known gossipsub topics. +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] +pub enum GossipKind { + /// Topic for publishing beacon blocks. + BeaconBlock, + /// Topic for publishing aggregate attestations and proofs. + BeaconAggregateAndProof, + /// Topic for publishing raw attestations on a particular subnet. + CommitteeIndex(SubnetId), + /// Topic for publishing voluntary exits. + VoluntaryExit, + /// Topic for publishing block proposer slashings. + ProposerSlashing, + /// Topic for publishing attester slashings. 
+ AttesterSlashing, +} + +impl std::fmt::Display for GossipKind { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + GossipKind::BeaconBlock => write!(f, "beacon_block"), + GossipKind::BeaconAggregateAndProof => write!(f, "beacon_aggregate_and_proof"), + GossipKind::CommitteeIndex(subnet_id) => write!(f, "committee_index_{}", **subnet_id), + GossipKind::VoluntaryExit => write!(f, "voluntary_exit"), + GossipKind::ProposerSlashing => write!(f, "proposer_slashing"), + GossipKind::AttesterSlashing => write!(f, "attester_slashing"), + } + } +} + +/// The known encoding types for gossipsub messages. +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] +pub enum GossipEncoding { + /// Messages are encoded with SSZ. + SSZ, + /// Messages are encoded with SSZSnappy. + SSZSnappy, +} + +impl Default for GossipEncoding { + fn default() -> Self { + GossipEncoding::SSZSnappy + } +} + +impl GossipTopic { + pub fn new(kind: GossipKind, encoding: GossipEncoding, fork_digest: [u8; 4]) -> Self { + GossipTopic { + encoding, + kind, + fork_digest, + } + } + + /// Returns the encoding type for the gossipsub topic. + pub fn encoding(&self) -> &GossipEncoding { + &self.encoding + } + + /// Returns a mutable reference to the fork digest of the gossipsub topic. + pub fn digest(&mut self) -> &mut [u8; 4] { + &mut self.fork_digest + } + + /// Returns the kind of message expected on the gossipsub topic. 
+ pub fn kind(&self) -> &GossipKind { + &self.kind + } + + pub fn decode(topic: &str) -> Result { + let topic_parts: Vec<&str> = topic.split('/').collect(); + if topic_parts.len() == 5 && topic_parts[1] == TOPIC_PREFIX { + let digest_bytes = hex::decode(topic_parts[2]) + .map_err(|e| format!("Could not decode fork_digest hex: {}", e))?; + + if digest_bytes.len() != 4 { + return Err(format!( + "Invalid gossipsub fork digest size: {}", + digest_bytes.len() + )); + } + + let mut fork_digest = [0; 4]; + fork_digest.copy_from_slice(&digest_bytes); + + let encoding = match topic_parts[4] { + SSZ_ENCODING_POSTFIX => GossipEncoding::SSZ, + SSZ_SNAPPY_ENCODING_POSTFIX => GossipEncoding::SSZSnappy, + _ => return Err(format!("Unknown encoding: {}", topic)), + }; + let kind = match topic_parts[3] { + BEACON_BLOCK_TOPIC => GossipKind::BeaconBlock, + BEACON_AGGREGATE_AND_PROOF_TOPIC => GossipKind::BeaconAggregateAndProof, + VOLUNTARY_EXIT_TOPIC => GossipKind::VoluntaryExit, + PROPOSER_SLASHING_TOPIC => GossipKind::ProposerSlashing, + ATTESTER_SLASHING_TOPIC => GossipKind::AttesterSlashing, + topic => match committee_topic_index(topic) { + Some(subnet_id) => GossipKind::CommitteeIndex(subnet_id), + None => return Err(format!("Unknown topic: {}", topic)), + }, + }; + + return Ok(GossipTopic { + encoding, + kind, + fork_digest, + }); + } + + Err(format!("Unknown topic: {}", topic)) + } +} + +impl Into for GossipTopic { + fn into(self) -> Topic { + Topic::new(self.into()) + } +} + +impl Into for GossipTopic { + fn into(self) -> String { + let encoding = match self.encoding { + GossipEncoding::SSZ => SSZ_ENCODING_POSTFIX, + GossipEncoding::SSZSnappy => SSZ_SNAPPY_ENCODING_POSTFIX, + }; + + let kind = match self.kind { + GossipKind::BeaconBlock => BEACON_BLOCK_TOPIC.into(), + GossipKind::BeaconAggregateAndProof => BEACON_AGGREGATE_AND_PROOF_TOPIC.into(), + GossipKind::VoluntaryExit => VOLUNTARY_EXIT_TOPIC.into(), + GossipKind::ProposerSlashing => PROPOSER_SLASHING_TOPIC.into(), + 
GossipKind::AttesterSlashing => ATTESTER_SLASHING_TOPIC.into(), + GossipKind::CommitteeIndex(index) => format!( + "{}{}{}", + COMMITEE_INDEX_TOPIC_PREFIX, *index, COMMITEE_INDEX_TOPIC_POSTFIX + ), + }; + format!( + "/{}/{}/{}/{}", + TOPIC_PREFIX, + hex::encode(self.fork_digest), + kind, + encoding + ) + } +} + +impl From for GossipKind { + fn from(subnet_id: SubnetId) -> Self { + GossipKind::CommitteeIndex(subnet_id) + } +} + +// helper functions + +// Determines if a string is a committee topic. +fn committee_topic_index(topic: &str) -> Option { + if topic.starts_with(COMMITEE_INDEX_TOPIC_PREFIX) + && topic.ends_with(COMMITEE_INDEX_TOPIC_POSTFIX) + { + return Some(SubnetId::new( + u64::from_str_radix( + topic + .trim_start_matches(COMMITEE_INDEX_TOPIC_PREFIX) + .trim_end_matches(COMMITEE_INDEX_TOPIC_POSTFIX), + 10, + ) + .ok()?, + )); + } + None +} diff --git a/beacon_node/eth2-libp2p/tests/common/mod.rs b/beacon_node/eth2-libp2p/tests/common/mod.rs index fd982ac02..45168c2e6 100644 --- a/beacon_node/eth2-libp2p/tests/common/mod.rs +++ b/beacon_node/eth2-libp2p/tests/common/mod.rs @@ -1,11 +1,14 @@ #![cfg(test)] -use enr::Enr; +use eth2_libp2p::Enr; use eth2_libp2p::Multiaddr; use eth2_libp2p::NetworkConfig; use eth2_libp2p::Service as LibP2PService; use slog::{debug, error, o, Drain}; use std::net::{TcpListener, UdpSocket}; use std::time::Duration; +use types::{EnrForkId, MinimalEthSpec}; + +type E = MinimalEthSpec; use tempdir::TempDir; pub fn build_log(level: slog::Level, enabled: bool) -> slog::Logger { @@ -62,6 +65,9 @@ pub fn build_config( config.libp2p_port = port; // tcp port config.discovery_port = port; // udp port + config.enr_tcp_port = Some(port); + config.enr_udp_port = Some(port); + config.enr_address = Some("127.0.0.1".parse().unwrap()); config.boot_nodes.append(&mut boot_nodes); config.secret_key_hex = secret_key; config.network_dir = path.into_path(); @@ -72,29 +78,30 @@ pub fn build_config( } pub fn build_libp2p_instance( - port: u16, 
boot_nodes: Vec, secret_key: Option, log: slog::Logger, -) -> LibP2PService { +) -> LibP2PService { + let port = unused_port("tcp").unwrap(); let config = build_config(port, boot_nodes, secret_key); // launch libp2p service - LibP2PService::new(&config, log.clone()) + LibP2PService::new(&config, EnrForkId::default(), log.clone()) .expect("should build libp2p instance") .1 } #[allow(dead_code)] -pub fn get_enr(node: &LibP2PService) -> Enr { - node.swarm.discovery().local_enr().clone() +pub fn get_enr(node: &LibP2PService) -> Enr { + let enr = node.swarm.discovery().local_enr().clone(); + dbg!(enr.multiaddr()); + enr } // Returns `n` libp2p peers in fully connected topology. #[allow(dead_code)] -pub fn build_full_mesh(log: slog::Logger, n: usize, start_port: Option) -> Vec { - let base_port = start_port.unwrap_or(9000); - let mut nodes: Vec = (base_port..base_port + n as u16) - .map(|p| build_libp2p_instance(p, vec![], None, log.clone())) +pub fn build_full_mesh(log: slog::Logger, n: usize) -> Vec> { + let mut nodes: Vec> = (0..n) + .map(|_| build_libp2p_instance(vec![], None, log.clone())) .collect(); let multiaddrs: Vec = nodes .iter() @@ -117,12 +124,12 @@ pub fn build_full_mesh(log: slog::Logger, n: usize, start_port: Option) -> // Constructs a pair of nodes with seperate loggers. The sender dials the receiver. // This returns a (sender, receiver) pair. 
#[allow(dead_code)] -pub fn build_node_pair(log: &slog::Logger, start_port: u16) -> (LibP2PService, LibP2PService) { +pub fn build_node_pair(log: &slog::Logger) -> (LibP2PService, LibP2PService) { let sender_log = log.new(o!("who" => "sender")); let receiver_log = log.new(o!("who" => "receiver")); - let mut sender = build_libp2p_instance(start_port, vec![], None, sender_log); - let receiver = build_libp2p_instance(start_port + 1, vec![], None, receiver_log); + let mut sender = build_libp2p_instance(vec![], None, sender_log); + let receiver = build_libp2p_instance(vec![], None, receiver_log); let receiver_multiaddr = receiver.swarm.discovery().local_enr().clone().multiaddr()[1].clone(); match libp2p::Swarm::dial_addr(&mut sender.swarm, receiver_multiaddr) { @@ -134,10 +141,9 @@ pub fn build_node_pair(log: &slog::Logger, start_port: u16) -> (LibP2PService, L // Returns `n` peers in a linear topology #[allow(dead_code)] -pub fn build_linear(log: slog::Logger, n: usize, start_port: Option) -> Vec { - let base_port = start_port.unwrap_or(9000); - let mut nodes: Vec = (base_port..base_port + n as u16) - .map(|p| build_libp2p_instance(p, vec![], None, log.clone())) +pub fn build_linear(log: slog::Logger, n: usize) -> Vec> { + let mut nodes: Vec> = (0..n) + .map(|_| build_libp2p_instance(vec![], None, log.clone())) .collect(); let multiaddrs: Vec = nodes .iter() diff --git a/beacon_node/eth2-libp2p/tests/gossipsub_tests.rs b/beacon_node/eth2-libp2p/tests/gossipsub_tests.rs index dd96465c2..aac538744 100644 --- a/beacon_node/eth2-libp2p/tests/gossipsub_tests.rs +++ b/beacon_node/eth2-libp2p/tests/gossipsub_tests.rs @@ -1,8 +1,12 @@ #![cfg(test)] +use crate::types::GossipEncoding; +use ::types::{BeaconBlock, EthSpec, MinimalEthSpec, Signature, SignedBeaconBlock}; use eth2_libp2p::*; use futures::prelude::*; use slog::{debug, Level}; +type E = MinimalEthSpec; + mod common; /* Gossipsub tests */ @@ -21,17 +25,27 @@ fn test_gossipsub_forward() { let log = 
common::build_log(Level::Info, false); let num_nodes = 20; - let base_port = common::unused_port("tcp").unwrap(); - let mut nodes = common::build_linear(log.clone(), num_nodes, Some(base_port)); + let mut nodes = common::build_linear(log.clone(), num_nodes); let mut received_count = 0; - let pubsub_message = PubsubMessage::Block(vec![0; 4]); - let publishing_topic: String = "/eth2/beacon_block/ssz".into(); + let spec = E::default_spec(); + let empty_block = BeaconBlock::empty(&spec); + let signed_block = SignedBeaconBlock { + message: empty_block, + signature: Signature::empty_signature(), + }; + let pubsub_message = PubsubMessage::BeaconBlock(Box::new(signed_block)); + let publishing_topic: String = pubsub_message + .topics(GossipEncoding::default(), [0, 0, 0, 0]) + .first() + .unwrap() + .clone() + .into(); let mut subscribed_count = 0; tokio::run(futures::future::poll_fn(move || -> Result<_, ()> { for node in nodes.iter_mut() { loop { match node.poll().unwrap() { - Async::Ready(Some(Libp2pEvent::PubsubMessage { + Async::Ready(Some(BehaviourEvent::PubsubMessage { topics, message, source, @@ -54,18 +68,13 @@ fn test_gossipsub_forward() { return Ok(Async::Ready(())); } } - Async::Ready(Some(Libp2pEvent::PeerSubscribed(_, topic))) => { - // Received topics is one of subscribed eth2 topics - assert!(topic.clone().into_string().starts_with("/eth2/")); + Async::Ready(Some(BehaviourEvent::PeerSubscribed(_, topic))) => { // Publish on beacon block topic - if topic == TopicHash::from_raw("/eth2/beacon_block/ssz") { + if topic == TopicHash::from_raw(publishing_topic.clone()) { subscribed_count += 1; // Every node except the corner nodes are connected to 2 nodes. if subscribed_count == (num_nodes * 2) - 2 { - node.swarm.publish( - &[Topic::new(topic.into_string())], - pubsub_message.clone(), - ); + node.swarm.publish(vec![pubsub_message.clone()]); } } } @@ -82,23 +91,33 @@ fn test_gossipsub_forward() { #[test] fn test_gossipsub_full_mesh_publish() { // set up the logging. 
The level and enabled or not - let log = common::build_log(Level::Info, false); + let log = common::build_log(Level::Debug, false); // Note: This test does not propagate gossipsub messages. // Having `num_nodes` > `mesh_n_high` may give inconsistent results // as nodes may get pruned out of the mesh before the gossipsub message // is published to them. let num_nodes = 12; - let base_port = common::unused_port("tcp").unwrap(); - let mut nodes = common::build_full_mesh(log, num_nodes, Some(base_port)); + let mut nodes = common::build_full_mesh(log, num_nodes); let mut publishing_node = nodes.pop().unwrap(); - let pubsub_message = PubsubMessage::Block(vec![0; 4]); - let publishing_topic: String = "/eth2/beacon_block/ssz".into(); + let spec = E::default_spec(); + let empty_block = BeaconBlock::empty(&spec); + let signed_block = SignedBeaconBlock { + message: empty_block, + signature: Signature::empty_signature(), + }; + let pubsub_message = PubsubMessage::BeaconBlock(Box::new(signed_block)); + let publishing_topic: String = pubsub_message + .topics(GossipEncoding::default(), [0, 0, 0, 0]) + .first() + .unwrap() + .clone() + .into(); let mut subscribed_count = 0; let mut received_count = 0; tokio::run(futures::future::poll_fn(move || -> Result<_, ()> { for node in nodes.iter_mut() { - while let Async::Ready(Some(Libp2pEvent::PubsubMessage { + while let Async::Ready(Some(BehaviourEvent::PubsubMessage { topics, message, .. 
})) = node.poll().unwrap() { @@ -116,18 +135,14 @@ fn test_gossipsub_full_mesh_publish() { } } } - while let Async::Ready(Some(Libp2pEvent::PeerSubscribed(_, topic))) = + while let Async::Ready(Some(BehaviourEvent::PeerSubscribed(_, topic))) = publishing_node.poll().unwrap() { - // Received topics is one of subscribed eth2 topics - assert!(topic.clone().into_string().starts_with("/eth2/")); // Publish on beacon block topic - if topic == TopicHash::from_raw("/eth2/beacon_block/ssz") { + if topic == TopicHash::from_raw(publishing_topic.clone()) { subscribed_count += 1; if subscribed_count == num_nodes - 1 { - publishing_node - .swarm - .publish(&[Topic::new(topic.into_string())], pubsub_message.clone()); + publishing_node.swarm.publish(vec![pubsub_message.clone()]); } } } diff --git a/beacon_node/eth2-libp2p/tests/noise.rs b/beacon_node/eth2-libp2p/tests/noise.rs index 70d3faa84..236150b63 100644 --- a/beacon_node/eth2-libp2p/tests/noise.rs +++ b/beacon_node/eth2-libp2p/tests/noise.rs @@ -1,6 +1,8 @@ #![cfg(test)] use crate::behaviour::{Behaviour, BehaviourEvent}; use crate::multiaddr::Protocol; +use ::types::{EnrForkId, MinimalEthSpec}; +use eth2_libp2p::discovery::build_enr; use eth2_libp2p::*; use futures::prelude::*; use libp2p::core::identity::Keypair; @@ -10,16 +12,19 @@ use libp2p::{ secio, PeerId, Swarm, Transport, }; use slog::{crit, debug, info, Level}; +use std::convert::TryInto; use std::io::{Error, ErrorKind}; use std::sync::atomic::{AtomicBool, Ordering::Relaxed}; use std::sync::Arc; use std::time::Duration; use tokio::prelude::*; +type TSpec = MinimalEthSpec; + mod common; type Libp2pStream = Boxed<(PeerId, StreamMuxerBox), Error>; -type Libp2pBehaviour = Behaviour>; +type Libp2pBehaviour = Behaviour, TSpec>; /// Build and return a eth2_libp2p Swarm with only secio support. 
fn build_secio_swarm( @@ -28,8 +33,14 @@ fn build_secio_swarm( ) -> error::Result> { let local_keypair = Keypair::generate_secp256k1(); let local_peer_id = PeerId::from(local_keypair.public()); - - let network_globals = Arc::new(NetworkGlobals::new(local_peer_id.clone())); + let enr_key: libp2p::discv5::enr::CombinedKey = local_keypair.clone().try_into().unwrap(); + let enr = build_enr::(&enr_key, config, EnrForkId::default()).unwrap(); + let network_globals = Arc::new(NetworkGlobals::new( + enr, + config.libp2p_port, + config.discovery_port, + &log, + )); let mut swarm = { // Set up the transport - tcp/ws with secio and mplex/yamux @@ -110,13 +121,13 @@ fn build_secio_transport(local_private_key: Keypair) -> Boxed<(PeerId, StreamMux fn test_secio_noise_fallback() { // set up the logging. The level and enabled logging or not let log_level = Level::Trace; - let enable_logging = true; + let enable_logging = false; let log = common::build_log(log_level, enable_logging); let port = common::unused_port("tcp").unwrap(); let noisy_config = common::build_config(port, vec![], None); - let mut noisy_node = Service::new(&noisy_config, log.clone()) + let mut noisy_node = Service::new(&noisy_config, EnrForkId::default(), log.clone()) .expect("should build a libp2p instance") .1; diff --git a/beacon_node/eth2-libp2p/tests/rpc_tests.rs b/beacon_node/eth2-libp2p/tests/rpc_tests.rs index 231f89ad6..74a0a7a1b 100644 --- a/beacon_node/eth2-libp2p/tests/rpc_tests.rs +++ b/beacon_node/eth2-libp2p/tests/rpc_tests.rs @@ -1,16 +1,20 @@ #![cfg(test)] use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::*; -use eth2_libp2p::{Libp2pEvent, RPCEvent}; +use eth2_libp2p::{BehaviourEvent, RPCEvent}; use slog::{warn, Level}; use std::sync::atomic::{AtomicBool, Ordering::Relaxed}; use std::sync::{Arc, Mutex}; use std::time::Duration; use tokio::prelude::*; -use types::{Epoch, Hash256, Slot}; +use types::{ + BeaconBlock, Epoch, EthSpec, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, Slot, 
+}; mod common; +type E = MinimalEthSpec; + #[test] // Tests the STATUS RPC message fn test_status_rpc() { @@ -21,12 +25,11 @@ fn test_status_rpc() { let log = common::build_log(log_level, enable_logging); // get sender/receiver - let port = common::unused_port("tcp").unwrap(); - let (mut sender, mut receiver) = common::build_node_pair(&log, port); + let (mut sender, mut receiver) = common::build_node_pair(&log); // Dummy STATUS RPC message let rpc_request = RPCRequest::Status(StatusMessage { - fork_version: [0; 4], + fork_digest: [0; 4], finalized_root: Hash256::from_low_u64_be(0), finalized_epoch: Epoch::new(1), head_root: Hash256::from_low_u64_be(0), @@ -35,7 +38,7 @@ fn test_status_rpc() { // Dummy STATUS RPC message let rpc_response = RPCResponse::Status(StatusMessage { - fork_version: [0; 4], + fork_digest: [0; 4], finalized_root: Hash256::from_low_u64_be(0), finalized_epoch: Epoch::new(1), head_root: Hash256::from_low_u64_be(0), @@ -50,31 +53,31 @@ fn test_status_rpc() { let sender_future = future::poll_fn(move || -> Poll { loop { match sender.poll().unwrap() { - Async::Ready(Some(Libp2pEvent::PeerDialed(peer_id))) => { + Async::Ready(Some(BehaviourEvent::PeerDialed(peer_id))) => { // Send a STATUS message warn!(sender_log, "Sending RPC"); sender .swarm .send_rpc(peer_id, RPCEvent::Request(1, sender_request.clone())); } - Async::Ready(Some(Libp2pEvent::RPC(_, event))) => match event { + Async::Ready(Some(BehaviourEvent::RPC(_, event))) => match event { // Should receive the RPC response RPCEvent::Response(id, response @ RPCErrorResponse::Success(_)) => { - warn!(sender_log, "Sender Received"); - assert_eq!(id, 1); + if id == 1 { + warn!(sender_log, "Sender Received"); + let response = { + match response { + RPCErrorResponse::Success(r) => r, + _ => unreachable!(), + } + }; + assert_eq!(response, sender_response.clone()); - let response = { - match response { - RPCErrorResponse::Success(r) => r, - _ => unreachable!(), - } - }; - assert_eq!(response, 
sender_response.clone()); - - warn!(sender_log, "Sender Completed"); - return Ok(Async::Ready(true)); + warn!(sender_log, "Sender Completed"); + return Ok(Async::Ready(true)); + } } - _ => panic!("Received invalid RPC message"), + e => panic!("Received invalid RPC message {}", e), }, Async::Ready(Some(_)) => (), Async::Ready(None) | Async::NotReady => return Ok(Async::NotReady), @@ -86,20 +89,22 @@ fn test_status_rpc() { let receiver_future = future::poll_fn(move || -> Poll { loop { match receiver.poll().unwrap() { - Async::Ready(Some(Libp2pEvent::RPC(peer_id, event))) => match event { + Async::Ready(Some(BehaviourEvent::RPC(peer_id, event))) => match event { // Should receive sent RPC request RPCEvent::Request(id, request) => { - assert_eq!(id, 1); - assert_eq!(rpc_request.clone(), request); - - // send the response - warn!(log, "Receiver Received"); - receiver.swarm.send_rpc( - peer_id, - RPCEvent::Response(id, RPCErrorResponse::Success(rpc_response.clone())), - ); + if request == rpc_request { + // send the response + warn!(log, "Receiver Received"); + receiver.swarm.send_rpc( + peer_id, + RPCEvent::Response( + id, + RPCErrorResponse::Success(rpc_response.clone()), + ), + ); + } } - _ => panic!("Received invalid RPC message"), + e => panic!("Received invalid RPC message {}", e), }, Async::Ready(Some(_)) => (), Async::Ready(None) | Async::NotReady => return Ok(Async::NotReady), @@ -135,19 +140,23 @@ fn test_blocks_by_range_chunked_rpc() { let log = common::build_log(log_level, enable_logging); // get sender/receiver - let port = common::unused_port("tcp").unwrap(); - let (mut sender, mut receiver) = common::build_node_pair(&log, port); + let (mut sender, mut receiver) = common::build_node_pair(&log); // BlocksByRange Request let rpc_request = RPCRequest::BlocksByRange(BlocksByRangeRequest { - head_block_root: Hash256::from_low_u64_be(0), start_slot: 0, count: messages_to_send, step: 0, }); // BlocksByRange Response - let rpc_response = 
RPCResponse::BlocksByRange(vec![13, 13, 13]); + let spec = E::default_spec(); + let empty_block = BeaconBlock::empty(&spec); + let empty_signed = SignedBeaconBlock { + message: empty_block, + signature: Signature::empty_signature(), + }; + let rpc_response = RPCResponse::BlocksByRange(Box::new(empty_signed)); let sender_request = rpc_request.clone(); let sender_log = log.clone(); @@ -159,33 +168,37 @@ fn test_blocks_by_range_chunked_rpc() { let sender_future = future::poll_fn(move || -> Poll { loop { match sender.poll().unwrap() { - Async::Ready(Some(Libp2pEvent::PeerDialed(peer_id))) => { + Async::Ready(Some(BehaviourEvent::PeerDialed(peer_id))) => { // Send a BlocksByRange request warn!(sender_log, "Sender sending RPC request"); sender .swarm .send_rpc(peer_id, RPCEvent::Request(1, sender_request.clone())); } - Async::Ready(Some(Libp2pEvent::RPC(_, event))) => match event { + Async::Ready(Some(BehaviourEvent::RPC(_, event))) => match event { // Should receive the RPC response RPCEvent::Response(id, response) => { - warn!(sender_log, "Sender received a response"); - assert_eq!(id, 1); - match response { - RPCErrorResponse::Success(res) => { - assert_eq!(res, sender_response.clone()); - *messages_received.lock().unwrap() += 1; - warn!(sender_log, "Chunk received"); + if id == 1 { + warn!(sender_log, "Sender received a response"); + match response { + RPCErrorResponse::Success(res) => { + assert_eq!(res, sender_response.clone()); + *messages_received.lock().unwrap() += 1; + warn!(sender_log, "Chunk received"); + } + RPCErrorResponse::StreamTermination( + ResponseTermination::BlocksByRange, + ) => { + // should be exactly 10 messages before terminating + assert_eq!( + *messages_received.lock().unwrap(), + messages_to_send + ); + // end the test + return Ok(Async::Ready(true)); + } + _ => panic!("Invalid RPC received"), } - RPCErrorResponse::StreamTermination( - ResponseTermination::BlocksByRange, - ) => { - // should be exactly 10 messages before terminating - 
assert_eq!(*messages_received.lock().unwrap(), messages_to_send); - // end the test - return Ok(Async::Ready(true)); - } - _ => panic!("Invalid RPC received"), } } _ => panic!("Received invalid RPC message"), @@ -200,34 +213,33 @@ fn test_blocks_by_range_chunked_rpc() { let receiver_future = future::poll_fn(move || -> Poll { loop { match receiver.poll().unwrap() { - Async::Ready(Some(Libp2pEvent::RPC(peer_id, event))) => match event { + Async::Ready(Some(BehaviourEvent::RPC(peer_id, event))) => match event { // Should receive the sent RPC request RPCEvent::Request(id, request) => { - assert_eq!(id, 1); - assert_eq!(rpc_request.clone(), request); + if request == rpc_request { + // send the response + warn!(log, "Receiver got request"); - // send the response - warn!(log, "Receiver got request"); - - for _ in 1..=messages_to_send { + for _ in 1..=messages_to_send { + receiver.swarm.send_rpc( + peer_id.clone(), + RPCEvent::Response( + id, + RPCErrorResponse::Success(rpc_response.clone()), + ), + ); + } + // send the stream termination receiver.swarm.send_rpc( - peer_id.clone(), + peer_id, RPCEvent::Response( id, - RPCErrorResponse::Success(rpc_response.clone()), + RPCErrorResponse::StreamTermination( + ResponseTermination::BlocksByRange, + ), ), ); } - // send the stream termination - receiver.swarm.send_rpc( - peer_id, - RPCEvent::Response( - id, - RPCErrorResponse::StreamTermination( - ResponseTermination::BlocksByRange, - ), - ), - ); } _ => panic!("Received invalid RPC message"), }, @@ -263,19 +275,23 @@ fn test_blocks_by_range_single_empty_rpc() { let log = common::build_log(log_level, enable_logging); // get sender/receiver - let port = common::unused_port("tcp").unwrap(); - let (mut sender, mut receiver) = common::build_node_pair(&log, port); + let (mut sender, mut receiver) = common::build_node_pair(&log); // BlocksByRange Request let rpc_request = RPCRequest::BlocksByRange(BlocksByRangeRequest { - head_block_root: Hash256::from_low_u64_be(0), start_slot: 0, 
count: 10, step: 0, }); // BlocksByRange Response - let rpc_response = RPCResponse::BlocksByRange(vec![]); + let spec = E::default_spec(); + let empty_block = BeaconBlock::empty(&spec); + let empty_signed = SignedBeaconBlock { + message: empty_block, + signature: Signature::empty_signature(), + }; + let rpc_response = RPCResponse::BlocksByRange(Box::new(empty_signed)); let sender_request = rpc_request.clone(); let sender_log = log.clone(); @@ -287,33 +303,34 @@ fn test_blocks_by_range_single_empty_rpc() { let sender_future = future::poll_fn(move || -> Poll { loop { match sender.poll().unwrap() { - Async::Ready(Some(Libp2pEvent::PeerDialed(peer_id))) => { + Async::Ready(Some(BehaviourEvent::PeerDialed(peer_id))) => { // Send a BlocksByRange request warn!(sender_log, "Sender sending RPC request"); sender .swarm .send_rpc(peer_id, RPCEvent::Request(1, sender_request.clone())); } - Async::Ready(Some(Libp2pEvent::RPC(_, event))) => match event { + Async::Ready(Some(BehaviourEvent::RPC(_, event))) => match event { // Should receive the RPC response RPCEvent::Response(id, response) => { - warn!(sender_log, "Sender received a response"); - assert_eq!(id, 1); - match response { - RPCErrorResponse::Success(res) => { - assert_eq!(res, sender_response.clone()); - *messages_received.lock().unwrap() += 1; - warn!(sender_log, "Chunk received"); + if id == 1 { + warn!(sender_log, "Sender received a response"); + match response { + RPCErrorResponse::Success(res) => { + assert_eq!(res, sender_response.clone()); + *messages_received.lock().unwrap() += 1; + warn!(sender_log, "Chunk received"); + } + RPCErrorResponse::StreamTermination( + ResponseTermination::BlocksByRange, + ) => { + // should be exactly 1 messages before terminating + assert_eq!(*messages_received.lock().unwrap(), 1); + // end the test + return Ok(Async::Ready(true)); + } + _ => panic!("Invalid RPC received"), } - RPCErrorResponse::StreamTermination( - ResponseTermination::BlocksByRange, - ) => { - // should be 
exactly 1 messages before terminating - assert_eq!(*messages_received.lock().unwrap(), 1); - // end the test - return Ok(Async::Ready(true)); - } - _ => panic!("Invalid RPC received"), } } m => panic!("Received invalid RPC message: {}", m), @@ -328,29 +345,31 @@ fn test_blocks_by_range_single_empty_rpc() { let receiver_future = future::poll_fn(move || -> Poll { loop { match receiver.poll().unwrap() { - Async::Ready(Some(Libp2pEvent::RPC(peer_id, event))) => match event { + Async::Ready(Some(BehaviourEvent::RPC(peer_id, event))) => match event { // Should receive the sent RPC request RPCEvent::Request(id, request) => { - assert_eq!(id, 1); - assert_eq!(rpc_request.clone(), request); + if request == rpc_request { + // send the response + warn!(log, "Receiver got request"); - // send the response - warn!(log, "Receiver got request"); - - receiver.swarm.send_rpc( - peer_id.clone(), - RPCEvent::Response(id, RPCErrorResponse::Success(rpc_response.clone())), - ); - // send the stream termination - receiver.swarm.send_rpc( - peer_id, - RPCEvent::Response( - id, - RPCErrorResponse::StreamTermination( - ResponseTermination::BlocksByRange, + receiver.swarm.send_rpc( + peer_id.clone(), + RPCEvent::Response( + id, + RPCErrorResponse::Success(rpc_response.clone()), ), - ), - ); + ); + // send the stream termination + receiver.swarm.send_rpc( + peer_id, + RPCEvent::Response( + id, + RPCErrorResponse::StreamTermination( + ResponseTermination::BlocksByRange, + ), + ), + ); + } } _ => panic!("Received invalid RPC message"), }, @@ -378,6 +397,9 @@ fn test_blocks_by_range_single_empty_rpc() { #[test] // Tests a streamed, chunked BlocksByRoot RPC Message +// The size of the response is a full `BeaconBlock` +// which is greater than the Snappy frame size. Hence, this test +// serves to test the snappy framing format as well. fn test_blocks_by_root_chunked_rpc() { // set up the logging.
The level and enabled logging or not let log_level = Level::Trace; @@ -386,10 +408,10 @@ fn test_blocks_by_root_chunked_rpc() { let messages_to_send = 3; let log = common::build_log(log_level, enable_logging); + let spec = E::default_spec(); // get sender/receiver - let port = common::unused_port("tcp").unwrap(); - let (mut sender, mut receiver) = common::build_node_pair(&log, port); + let (mut sender, mut receiver) = common::build_node_pair(&log); // BlocksByRoot Request let rpc_request = RPCRequest::BlocksByRoot(BlocksByRootRequest { @@ -397,7 +419,12 @@ fn test_blocks_by_root_chunked_rpc() { }); // BlocksByRoot Response - let rpc_response = RPCResponse::BlocksByRoot(vec![13, 13, 13]); + let full_block = BeaconBlock::full(&spec); + let signed_full_block = SignedBeaconBlock { + message: full_block, + signature: Signature::empty_signature(), + }; + let rpc_response = RPCResponse::BlocksByRoot(Box::new(signed_full_block)); let sender_request = rpc_request.clone(); let sender_log = log.clone(); @@ -409,14 +436,14 @@ fn test_blocks_by_root_chunked_rpc() { let sender_future = future::poll_fn(move || -> Poll { loop { match sender.poll().unwrap() { - Async::Ready(Some(Libp2pEvent::PeerDialed(peer_id))) => { + Async::Ready(Some(BehaviourEvent::PeerDialed(peer_id))) => { // Send a BlocksByRoot request warn!(sender_log, "Sender sending RPC request"); sender .swarm .send_rpc(peer_id, RPCEvent::Request(1, sender_request.clone())); } - Async::Ready(Some(Libp2pEvent::RPC(_, event))) => match event { + Async::Ready(Some(BehaviourEvent::RPC(_, event))) => match event { // Should receive the RPC response RPCEvent::Response(id, response) => { warn!(sender_log, "Sender received a response"); @@ -450,34 +477,33 @@ fn test_blocks_by_root_chunked_rpc() { let receiver_future = future::poll_fn(move || -> Poll { loop { match receiver.poll().unwrap() { - Async::Ready(Some(Libp2pEvent::RPC(peer_id, event))) => match event { + Async::Ready(Some(BehaviourEvent::RPC(peer_id, event))) => match 
event { // Should receive the sent RPC request RPCEvent::Request(id, request) => { - assert_eq!(id, 1); - assert_eq!(rpc_request.clone(), request); + if request == rpc_request { + // send the response + warn!(log, "Receiver got request"); - // send the response - warn!(log, "Receiver got request"); - - for _ in 1..=messages_to_send { + for _ in 1..=messages_to_send { + receiver.swarm.send_rpc( + peer_id.clone(), + RPCEvent::Response( + id, + RPCErrorResponse::Success(rpc_response.clone()), + ), + ); + } + // send the stream termination receiver.swarm.send_rpc( - peer_id.clone(), + peer_id, RPCEvent::Response( id, - RPCErrorResponse::Success(rpc_response.clone()), + RPCErrorResponse::StreamTermination( + ResponseTermination::BlocksByRange, + ), ), ); } - // send the stream termination - receiver.swarm.send_rpc( - peer_id, - RPCEvent::Response( - id, - RPCErrorResponse::StreamTermination( - ResponseTermination::BlocksByRoot, - ), - ), - ); } _ => panic!("Received invalid RPC message"), }, @@ -513,8 +539,7 @@ fn test_goodbye_rpc() { let log = common::build_log(log_level, enable_logging); // get sender/receiver - let port = common::unused_port("tcp").unwrap(); - let (mut sender, mut receiver) = common::build_node_pair(&log, port); + let (mut sender, mut receiver) = common::build_node_pair(&log); // Goodbye Request let rpc_request = RPCRequest::Goodbye(GoodbyeReason::ClientShutdown); @@ -526,7 +551,7 @@ fn test_goodbye_rpc() { let sender_future = future::poll_fn(move || -> Poll { loop { match sender.poll().unwrap() { - Async::Ready(Some(Libp2pEvent::PeerDialed(peer_id))) => { + Async::Ready(Some(BehaviourEvent::PeerDialed(peer_id))) => { // Send a Goodbye request warn!(sender_log, "Sender sending RPC request"); sender @@ -543,13 +568,15 @@ fn test_goodbye_rpc() { let receiver_future = future::poll_fn(move || -> Poll { loop { match receiver.poll().unwrap() { - Async::Ready(Some(Libp2pEvent::RPC(_, event))) => match event { + Async::Ready(Some(BehaviourEvent::RPC(_, 
event))) => match event { // Should receive the sent RPC request RPCEvent::Request(id, request) => { - assert_eq!(id, 0); - assert_eq!(rpc_request.clone(), request); - // receives the goodbye. Nothing left to do - return Ok(Async::Ready(true)); + if request == rpc_request { + assert_eq!(id, 0); + assert_eq!(rpc_request.clone(), request); + // receives the goodbye. Nothing left to do + return Ok(Async::Ready(true)); + } } _ => panic!("Received invalid RPC message"), }, diff --git a/beacon_node/genesis/Cargo.toml b/beacon_node/genesis/Cargo.toml index 7ab56a689..a5723ee6e 100644 --- a/beacon_node/genesis/Cargo.toml +++ b/beacon_node/genesis/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "genesis" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner "] edition = "2018" diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index b299c4a27..88b1b2560 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "network" -version = "0.1.0" +version = "0.2.0" authors = ["Age Manning "] edition = "2018" @@ -13,7 +13,10 @@ tempdir = "0.3" beacon_chain = { path = "../beacon_chain" } store = { path = "../store" } eth2-libp2p = { path = "../eth2-libp2p" } +hashmap_delay = { path = "../../eth2/utils/hashmap_delay" } +rest_types = { path = "../../eth2/utils/rest_types" } types = { path = "../../eth2/types" } +slot_clock = { path = "../../eth2/utils/slot_clock" } slog = { version = "2.5.2", features = ["max_level_trace"] } hex = "0.3" eth2_ssz = "0.1.2" diff --git a/beacon_node/network/src/attestation_service/mod.rs b/beacon_node/network/src/attestation_service/mod.rs new file mode 100644 index 000000000..e4c319512 --- /dev/null +++ b/beacon_node/network/src/attestation_service/mod.rs @@ -0,0 +1,638 @@ +//! This service keeps track of which shard subnet the beacon node should be subscribed to at any +//! given time. It schedules subscriptions to shard subnets, requests peer discoveries and +//! 
determines whether attestations should be aggregated and/or passed to the beacon node. + +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2_libp2p::{types::GossipKind, MessageId, NetworkGlobals, PeerId}; +use futures::prelude::*; +use hashmap_delay::HashSetDelay; +use rand::seq::SliceRandom; +use rest_types::ValidatorSubscription; +use slog::{crit, debug, error, o, warn}; +use slot_clock::SlotClock; +use std::collections::{HashMap, VecDeque}; +use std::sync::Arc; +use std::time::{Duration, Instant}; +use types::{Attestation, EthSpec, Slot, SubnetId}; + +/// The minimum number of slots ahead that we attempt to discover peers for a subscription. If the +/// slot is less than this number, skip the peer discovery process. +const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 1; +/// The number of slots ahead that we attempt to discover peers for a subscription. If the slot to +/// attest to is greater than this, we queue a discovery request for this many slots prior to +/// subscribing. +const TARGET_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 6; +/// The time (in slots) before a last seen validator is considered absent and we unsubscribe from the random +/// gossip topics that we subscribed to due to the validator connection. +const LAST_SEEN_VALIDATOR_TIMEOUT: u32 = 150; // 30 mins at a 12s slot time +/// The fraction of a slot that we subscribe to a subnet before the required slot. +/// +/// Note: The time is calculated as `time = milliseconds_per_slot / ADVANCE_SUBSCRIPTION_TIME`. +const ADVANCE_SUBSCRIBE_TIME: u32 = 3; +/// The default number of slots before items in hash delay sets used by this class should expire. +const DEFAULT_EXPIRATION_TIMEOUT: u32 = 3; // 36s at 12s slot time + +#[derive(Debug, PartialEq)] +pub enum AttServiceMessage { + /// Subscribe to the specified subnet id. + Subscribe(SubnetId), + /// Unsubscribe to the specified subnet id. + Unsubscribe(SubnetId), + /// Add the `SubnetId` to the ENR bitfield. 
+ EnrAdd(SubnetId), + /// Remove the `SubnetId` from the ENR bitfield. + EnrRemove(SubnetId), + /// Discover peers for a particular subnet. + DiscoverPeers(SubnetId), +} + +/// A particular subnet at a given slot. +#[derive(PartialEq, Eq, Hash, Clone)] +struct ExactSubnet { + /// The `SubnetId` associated with this subnet. + pub subnet_id: SubnetId, + /// The `Slot` associated with this subnet. + pub slot: Slot, +} + +pub struct AttestationService { + /// Queued events to return to the driving service. + events: VecDeque, + + /// A collection of public network variables. + network_globals: Arc>, + + /// A reference to the beacon chain to process received attestations. + beacon_chain: Arc>, + + /// The collection of currently subscribed random subnets mapped to their expiry deadline. + random_subnets: HashSetDelay, + + /// A collection of timeouts for when to start searching for peers for a particular shard. + discover_peers: HashSetDelay, + + /// A collection of timeouts for when to subscribe to a shard subnet. + subscriptions: HashSetDelay, + + /// A collection of timeouts for when to unsubscribe from a shard subnet. + unsubscriptions: HashSetDelay, + + /// A mapping indicating the number of known aggregate validators for a given `ExactSubnet`. + _aggregate_validators_on_subnet: HashMap, + + /// A collection of seen validators. These dictate how many random subnets we should be + /// subscribed to. As these time out, we unsubscribe for the required random subnets and update + /// our ENR. + /// This is a set of validator indices. + known_validators: HashSetDelay, + + /// The logger for the attestation service. 
+ log: slog::Logger, +} + +impl AttestationService { + /* Public functions */ + + pub fn new( + beacon_chain: Arc>, + network_globals: Arc>, + log: &slog::Logger, + ) -> Self { + let log = log.new(o!("service" => "attestation_service")); + + // calculate the random subnet duration from the spec constants + let spec = &beacon_chain.spec; + let slot_duration = beacon_chain.slot_clock.slot_duration(); + let random_subnet_duration_millis = spec + .epochs_per_random_subnet_subscription + .saturating_mul(T::EthSpec::slots_per_epoch()) + .saturating_mul(slot_duration.as_millis() as u64); + + // Panics on overflow. Ensure LAST_SEEN_VALIDATOR_TIMEOUT is not too large. + let last_seen_val_timeout = slot_duration + .checked_mul(LAST_SEEN_VALIDATOR_TIMEOUT) + .expect("LAST_SEEN_VALIDATOR_TIMEOUT must not be ridiculously large"); + let default_timeout = slot_duration + .checked_mul(DEFAULT_EXPIRATION_TIMEOUT) + .expect("DEFAULT_EXPIRATION_TIMEOUT must not be ridiculously large"); + + AttestationService { + events: VecDeque::with_capacity(10), + network_globals, + beacon_chain, + random_subnets: HashSetDelay::new(Duration::from_millis(random_subnet_duration_millis)), + discover_peers: HashSetDelay::new(default_timeout), + subscriptions: HashSetDelay::new(default_timeout), + unsubscriptions: HashSetDelay::new(default_timeout), + _aggregate_validators_on_subnet: HashMap::new(), + known_validators: HashSetDelay::new(last_seen_val_timeout), + log, + } + } + + /// Processes a list of validator subscriptions. + /// + /// This will: + /// - Register new validators as being known. + /// - Subscribe to the required number of random subnets. + /// - Update the local ENR for new random subnets due to seeing new validators. + /// - Search for peers for required subnets. + /// - Request subscriptions for subnets on specific slots when required. + /// - Build the timeouts for each of these events. + /// + /// This returns a result simply for the ergonomics of using ?.
The result can be + /// safely dropped. + pub fn validator_subscriptions( + &mut self, + subscriptions: Vec, + ) -> Result<(), ()> { + for subscription in subscriptions { + //NOTE: We assume all subscriptions have been verified before reaching this service + + // Registers the validator with the attestation service. + // This will subscribe to long-lived random subnets if required. + self.add_known_validator(subscription.validator_index); + + let subnet_id = SubnetId::new( + subscription.attestation_committee_index + % self.beacon_chain.spec.attestation_subnet_count, + ); + + let exact_subnet = ExactSubnet { + subnet_id, + slot: subscription.slot, + }; + // determine if we should run a discovery lookup request and request it if required + if let Err(e) = self.discover_peers_request(exact_subnet.clone()) { + warn!(self.log, "Discovery lookup request error"; "error" => e); + } + + // determine if the validator is an aggregator. If so, we subscribe to the subnet and + // if successful add the validator to a mapping of known aggregators for that exact + // subnet. + // NOTE: There is a chance that a fork occurs between now and when the validator needs + // to aggregate attestations. If this happens, the signature will no longer be valid + // and it could be likely the validator no longer needs to aggregate. More + // sophisticated logic should be added using known future forks. + // TODO: Implement + + // set the subscription timer to subscribe to the next subnet if required + if let Err(e) = self.subscribe_to_subnet(exact_subnet) { + warn!(self.log, "Subscription to subnet error"; "error" => e); + return Err(()); + } + } + Ok(()) + } + + /// Checks if we have subscribed aggregate validators for the subnet. If not, checks the gossip + /// verification, re-propagates and returns false. 
+ pub fn should_process_attestation( + &mut self, + _message_id: &MessageId, + _peer_id: &PeerId, + _subnet: &SubnetId, + _attestation: &Attestation, + ) -> bool { + // TODO: Correctly handle validation aggregator checks + true + } + + /* Internal private functions */ + + /// Checks if there are currently queued discovery requests and the time required to make the + /// request. + /// + /// If there is sufficient time and no other request exists, queues a peer discovery request + /// for the required subnet. + fn discover_peers_request(&mut self, exact_subnet: ExactSubnet) -> Result<(), &'static str> { + let current_slot = self + .beacon_chain + .slot_clock + .now() + .ok_or_else(|| "Could not get the current slot")?; + let slot_duration = self.beacon_chain.slot_clock.slot_duration(); + + // if there is enough time to perform a discovery lookup + if exact_subnet.slot >= current_slot.saturating_add(MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD) { + // check if a discovery request already exists + if self.discover_peers.get(&exact_subnet).is_some() { + // already a request queued, end + return Ok(()); + } + + // check current event log to see if there is a discovery event queued + if self + .events + .iter() + .find(|event| event == &&AttServiceMessage::DiscoverPeers(exact_subnet.subnet_id)) + .is_some() + { + // already queued a discovery event + return Ok(()); + } + + // if the slot is more than epoch away, add an event to start looking for peers + if exact_subnet.slot + < current_slot.saturating_add(TARGET_PEER_DISCOVERY_SLOT_LOOK_AHEAD) + { + // then instantly add a discovery request + self.events + .push_back(AttServiceMessage::DiscoverPeers(exact_subnet.subnet_id)); + } else { + // Queue the discovery event to be executed for + // TARGET_PEER_DISCOVERY_SLOT_LOOK_AHEAD + + let duration_to_discover = { + let duration_to_next_slot = self + .beacon_chain + .slot_clock + .duration_to_next_slot() + .ok_or_else(|| "Unable to determine duration to next slot")?; + // The -1 is 
done here to exclude the current slot duration, as we will use + // `duration_to_next_slot`. + let slots_until_discover = exact_subnet + .slot + .saturating_sub(current_slot) + .saturating_sub(1u64) + .saturating_sub(TARGET_PEER_DISCOVERY_SLOT_LOOK_AHEAD); + + duration_to_next_slot + slot_duration * (slots_until_discover.as_u64() as u32) + }; + + self.discover_peers + .insert_at(exact_subnet, duration_to_discover); + } + } else { + // TODO: Send the time frame needed to have a peer connected, so that we can + // maintain peers for at least this duration. + // We may want to check the global PeerInfo to see estimated timeouts for each + // peer before they can be removed. + return Err("Not enough time for a discovery search"); + } + Ok(()) + } + + /// Checks the current random subnets and subscriptions to determine if a new subscription for this + /// subnet is required for the given slot. + /// + /// If required, adds a subscription event and an associated unsubscription event. + fn subscribe_to_subnet(&mut self, exact_subnet: ExactSubnet) -> Result<(), &'static str> { + // initialise timing variables + let current_slot = self + .beacon_chain + .slot_clock + .now() + .ok_or_else(|| "Could not get the current slot")?; + + // Calculate the duration to the subscription event and the duration to the end event. + // There are two main cases. Attempting to subscribe to the current slot and all others.
+ let (duration_to_subscribe, expected_end_subscription_duration) = { + let duration_to_next_slot = self + .beacon_chain + .slot_clock + .duration_to_next_slot() + .ok_or_else(|| "Unable to determine duration to next slot")?; + + if current_slot >= exact_subnet.slot { + (Duration::from_secs(0), duration_to_next_slot) + } else { + let slot_duration = self.beacon_chain.slot_clock.slot_duration(); + let advance_subscription_duration = slot_duration + .checked_div(ADVANCE_SUBSCRIBE_TIME) + .expect("ADVANCE_SUBSCRIPTION_TIME cannot be too large"); + + // calculate the time to subscribe to the subnet + let duration_to_subscribe = self + .beacon_chain + .slot_clock + .duration_to_slot(exact_subnet.slot) + .ok_or_else(|| "Unable to determine duration to subscription slot")? + .checked_sub(advance_subscription_duration) + .unwrap_or_else(|| Duration::from_secs(0)); + + // the duration until we no longer need this subscription. We assume a single slot is + // sufficient. + let expected_end_subscription_duration = duration_to_subscribe + + slot_duration + + std::cmp::min(advance_subscription_duration, duration_to_next_slot); + + (duration_to_subscribe, expected_end_subscription_duration) + } + }; + + // Checks on current subscriptions + // Note: We may be connected to a long-lived random subnet. In this case we still add the + // subscription timeout and check this case when the timeout fires. This is because a + // long-lived random subnet can be unsubscribed at any time when a validator becomes + // in-active. This case is checked on the subscription event (see `handle_subscriptions`). 
+ + // Return if we already have a subscription for this subnet_id and slot + if self.subscriptions.contains(&exact_subnet) { + return Ok(()); + } + + // We are not currently subscribed and have no waiting subscription, create one + self.subscriptions + .insert_at(exact_subnet.clone(), duration_to_subscribe); + + // if there is an unsubscription event for the slot prior, we remove it to prevent + // unsubscriptions immediately after the subscription. We also want to minimize + // subscription churn and maintain a consecutive subnet subscriptions. + let to_remove_subnet = ExactSubnet { + subnet_id: exact_subnet.subnet_id, + slot: exact_subnet.slot.saturating_sub(1u64), + }; + self.unsubscriptions.remove(&to_remove_subnet); + // add an unsubscription event to remove ourselves from the subnet once completed + self.unsubscriptions + .insert_at(exact_subnet, expected_end_subscription_duration); + Ok(()) + } + + /// Updates the `known_validators` mapping and subscribes to a set of random subnets if required. + /// + /// This also updates the ENR to indicate our long-lived subscription to the subnet + fn add_known_validator(&mut self, validator_index: u64) { + if self.known_validators.get(&validator_index).is_none() { + // New validator has subscribed + // Subscribe to random topics and update the ENR if needed. + + let spec = &self.beacon_chain.spec; + + if self.random_subnets.len() < spec.attestation_subnet_count as usize { + // Still room for subscriptions + self.subscribe_to_random_subnets( + self.beacon_chain.spec.random_subnets_per_validator as usize, + ); + } + } + // add the new validator or update the current timeout for a known validator + self.known_validators.insert(validator_index); + } + + /// Subscribe to long-lived random subnets and update the local ENR bitfield. 
+ fn subscribe_to_random_subnets(&mut self, no_subnets_to_subscribe: usize) { + let subnet_count = self.beacon_chain.spec.attestation_subnet_count; + + // Build a list of random subnets that we are not currently subscribed to. + let available_subnets = (0..subnet_count) + .map(SubnetId::new) + .filter(|subnet_id| self.random_subnets.get(subnet_id).is_none()) + .collect::>(); + + let to_subscribe_subnets = { + if available_subnets.len() < no_subnets_to_subscribe { + debug!(self.log, "Reached maximum random subnet subscriptions"); + available_subnets + } else { + // select a random sample of available subnets + available_subnets + .choose_multiple(&mut rand::thread_rng(), no_subnets_to_subscribe) + .cloned() + .collect::>() + } + }; + + for subnet_id in to_subscribe_subnets { + // remove this subnet from any immediate subscription/un-subscription events + self.subscriptions + .retain(|exact_subnet| exact_subnet.subnet_id != subnet_id); + self.unsubscriptions + .retain(|exact_subnet| exact_subnet.subnet_id != subnet_id); + + // insert a new random subnet + self.random_subnets.insert(subnet_id); + + // if we are not already subscribed, then subscribe + let topic_kind = &GossipKind::CommitteeIndex(subnet_id); + + if let None = self + .network_globals + .gossipsub_subscriptions + .read() + .iter() + .find(|topic| topic.kind() == topic_kind) + { + // not already subscribed to the topic + + // send a discovery request and a subscription + self.events + .push_back(AttServiceMessage::DiscoverPeers(subnet_id)); + self.events + .push_back(AttServiceMessage::Subscribe(subnet_id)); + } + // add the subnet to the ENR bitfield + self.events.push_back(AttServiceMessage::EnrAdd(subnet_id)); + } + } + + /* A collection of functions that handle the various timeouts */ + + /// Request a discovery query to find peers for a particular subnet. 
+ fn handle_discover_peers(&mut self, exact_subnet: ExactSubnet) { + debug!(self.log, "Searching for peers for subnet"; "subnet" => *exact_subnet.subnet_id, "target_slot" => exact_subnet.slot); + self.events + .push_back(AttServiceMessage::DiscoverPeers(exact_subnet.subnet_id)); + } + + /// A queued subscription is ready. + /// + /// We add subscriptions events even if we are already subscribed to a random subnet (as these + /// can be unsubscribed at any time by inactive validators). If we are + /// still subscribed at the time the event fires, we don't re-subscribe. + fn handle_subscriptions(&mut self, exact_subnet: ExactSubnet) { + // Check if the subnet currently exists as a long-lasting random subnet + if let Some(expiry) = self.random_subnets.get(&exact_subnet.subnet_id) { + // we are subscribed via a random subnet, if this is to expire during the time we need + // to be subscribed, just extend the expiry + let slot_duration = self.beacon_chain.slot_clock.slot_duration(); + let advance_subscription_duration = slot_duration + .checked_div(ADVANCE_SUBSCRIBE_TIME) + .expect("ADVANCE_SUBSCRIPTION_TIME cannot be too large"); + // we require the subnet subscription for at least a slot on top of the initial + // subscription time + let expected_end_subscription_duration = slot_duration + advance_subscription_duration; + + if expiry < &(Instant::now() + expected_end_subscription_duration) { + self.random_subnets + .update_timeout(&exact_subnet.subnet_id, expected_end_subscription_duration); + } + } else { + // we are also not un-subscribing from a subnet if the next slot requires us to be + // subscribed. Therefore there could be the case that we are already still subscribed + // to the required subnet. In which case we do not issue another subscription request. 
+ let topic_kind = &GossipKind::CommitteeIndex(exact_subnet.subnet_id); + if self + .network_globals + .gossipsub_subscriptions + .read() + .iter() + .find(|topic| topic.kind() == topic_kind) + .is_none() + { + // we are not already subscribed + debug!(self.log, "Subscribing to subnet"; "subnet" => *exact_subnet.subnet_id, "target_slot" => exact_subnet.slot.as_u64()); + self.events + .push_back(AttServiceMessage::Subscribe(exact_subnet.subnet_id)); + } + } + } + + /// A queued unsubscription is ready. + /// + /// Unsubscription events are added, even if we are subscribed to long-lived random subnets. If + /// a random subnet is present, we do not unsubscribe from it. + fn handle_unsubscriptions(&mut self, exact_subnet: ExactSubnet) { + // Check if the subnet currently exists as a long-lasting random subnet + if self.random_subnets.contains(&exact_subnet.subnet_id) { + return; + } + + debug!(self.log, "Unsubscribing from subnet"; "subnet" => *exact_subnet.subnet_id, "processed_slot" => exact_subnet.slot.as_u64()); + + // various logic checks + if self.subscriptions.contains(&exact_subnet) { + crit!(self.log, "Unsubscribing from a subnet in subscriptions"); + } + self.events + .push_back(AttServiceMessage::Unsubscribe(exact_subnet.subnet_id)); + } + + /// A random subnet has expired. + /// + /// This function selects a new subnet to join, or extends the expiry if there are no more + /// available subnets to choose from. 
+ fn handle_random_subnet_expiry(&mut self, subnet_id: SubnetId) { + let subnet_count = self.beacon_chain.spec.attestation_subnet_count; + if self.random_subnets.len() == (subnet_count - 1) as usize { + // We are at capacity, simply increase the timeout of the current subnet + self.random_subnets.insert(subnet_id); + return; + } + + // we are not at capacity, unsubscribe from the current subnet, remove the ENR bitfield bit and choose a new random one + // from the available subnets + // Note: This should not occur during a required subnet as subscriptions update the timeout + // to last as long as they are needed. + + debug!(self.log, "Unsubscribing from random subnet"; "subnet_id" => *subnet_id); + self.events + .push_back(AttServiceMessage::Unsubscribe(subnet_id)); + self.events + .push_back(AttServiceMessage::EnrRemove(subnet_id)); + self.subscribe_to_random_subnets(1); + } + + /// A known validator has not sent a subscription in a while. They are considered offline and the + /// beacon node no longer needs to be subscribed to the allocated random subnets. + /// + /// We don't keep track of a specific validator to random subnet, rather the ratio of active + /// validators to random subnets. So when a validator goes offline, we can simply remove the + /// allocated amount of random subnets. 
+ fn handle_known_validator_expiry(&mut self) -> Result<(), ()> { + let spec = &self.beacon_chain.spec; + let subnet_count = spec.attestation_subnet_count; + let random_subnets_per_validator = spec.random_subnets_per_validator; + if self.known_validators.len() as u64 * random_subnets_per_validator >= subnet_count { + // have too many validators, ignore + return Ok(()); + } + + let subscribed_subnets = self.random_subnets.keys_vec(); + let to_remove_subnets = subscribed_subnets.choose_multiple( + &mut rand::thread_rng(), + random_subnets_per_validator as usize, + ); + let current_slot = self.beacon_chain.slot_clock.now().ok_or_else(|| { + warn!(self.log, "Could not get the current slot"); + })?; + + for subnet_id in to_remove_subnets { + // If a subscription is queued for two slots in the future, it's associated unsubscription + // will unsubscribe from the expired subnet. + // If there is no subscription for this subnet,slot it is safe to add one, without + // unsubscribing early from a required subnet + let subnet = ExactSubnet { + subnet_id: **subnet_id, + slot: current_slot + 2, + }; + if self.subscriptions.get(&subnet).is_none() { + // set an unsubscribe event + let duration_to_next_slot = self + .beacon_chain + .slot_clock + .duration_to_next_slot() + .ok_or_else(|| { + warn!(self.log, "Unable to determine duration to next slot"); + })?; + let slot_duration = self.beacon_chain.slot_clock.slot_duration(); + // Set the unsubscription timeout + let unsubscription_duration = duration_to_next_slot + slot_duration * 2; + self.unsubscriptions + .insert_at(subnet, unsubscription_duration); + } + + // as the long lasting subnet subscription is being removed, remove the subnet_id from + // the ENR bitfield + self.events + .push_back(AttServiceMessage::EnrRemove(**subnet_id)); + } + Ok(()) + } +} + +impl Stream for AttestationService { + type Item = AttServiceMessage; + type Error = (); + + fn poll(&mut self) -> Poll, Self::Error> { + // process any peer discovery events 
+ while let Async::Ready(Some(exact_subnet)) = + self.discover_peers.poll().map_err(|e| { + error!(self.log, "Failed to check for peer discovery requests"; "error"=> format!("{}", e)); + })? + { + self.handle_discover_peers(exact_subnet); + } + + // process any subscription events + while let Async::Ready(Some(exact_subnet)) = self.subscriptions.poll().map_err(|e| { + error!(self.log, "Failed to check for subnet subscription times"; "error"=> format!("{}", e)); + })? + { + self.handle_subscriptions(exact_subnet); + } + + // process any un-subscription events + while let Async::Ready(Some(exact_subnet)) = self.unsubscriptions.poll().map_err(|e| { + error!(self.log, "Failed to check for subnet unsubscription times"; "error"=> format!("{}", e)); + })? + { + self.handle_unsubscriptions(exact_subnet); + } + + // process any random subnet expiries + while let Async::Ready(Some(subnet)) = self.random_subnets.poll().map_err(|e| { + error!(self.log, "Failed to check for random subnet cycles"; "error"=> format!("{}", e)); + })? + { + self.handle_random_subnet_expiry(subnet); + } + + // process any known validator expiries + while let Async::Ready(Some(_validator_index)) = self.known_validators.poll().map_err(|e| { + error!(self.log, "Failed to check for random subnet cycles"; "error"=> format!("{}", e)); + })? + { + let _ = self.handle_known_validator_expiry(); + } + + // process any generated events + if let Some(event) = self.events.pop_front() { + return Ok(Async::Ready(Some(event))); + } + + Ok(Async::NotReady) + } +} diff --git a/beacon_node/network/src/error.rs b/beacon_node/network/src/error.rs index fc061ff44..e30ce4f43 100644 --- a/beacon_node/network/src/error.rs +++ b/beacon_node/network/src/error.rs @@ -1,6 +1,4 @@ // generates error types -use eth2_libp2p; - use error_chain::error_chain; error_chain! 
{ diff --git a/beacon_node/network/src/lib.rs b/beacon_node/network/src/lib.rs index fddca767b..040ade2b2 100644 --- a/beacon_node/network/src/lib.rs +++ b/beacon_node/network/src/lib.rs @@ -1,12 +1,11 @@ /// This crate provides the network server for Lighthouse. pub mod error; -pub mod message_handler; -pub mod message_processor; -pub mod persisted_dht; pub mod service; -pub mod sync; + +mod attestation_service; +mod persisted_dht; +mod router; +mod sync; pub use eth2_libp2p::NetworkConfig; -pub use message_processor::MessageProcessor; -pub use service::NetworkMessage; -pub use service::Service; +pub use service::{NetworkMessage, NetworkService}; diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs deleted file mode 100644 index 97212a55e..000000000 --- a/beacon_node/network/src/message_handler.rs +++ /dev/null @@ -1,367 +0,0 @@ -#![allow(clippy::unit_arg)] -use crate::error; -use crate::service::NetworkMessage; -use crate::MessageProcessor; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use eth2_libp2p::{ - behaviour::PubsubMessage, - rpc::{RPCError, RPCErrorResponse, RPCRequest, RPCResponse, RequestId, ResponseTermination}, - MessageId, PeerId, RPCEvent, -}; -use futures::future::Future; -use futures::stream::Stream; -use slog::{debug, o, trace, warn}; -use ssz::{Decode, DecodeError}; -use std::sync::Arc; -use tokio::sync::mpsc; -use types::{Attestation, AttesterSlashing, ProposerSlashing, SignedBeaconBlock, VoluntaryExit}; - -/// Handles messages received from the network and client and organises syncing. This -/// functionality of this struct is to validate an decode messages from the network before -/// passing them to the internal message processor. The message processor spawns a syncing thread -/// which manages which blocks need to be requested and processed. -pub struct MessageHandler { - /// A channel to the network service to allow for gossip propagation. 
- network_send: mpsc::UnboundedSender, - /// Processes validated and decoded messages from the network. Has direct access to the - /// sync manager. - message_processor: MessageProcessor, - /// The `MessageHandler` logger. - log: slog::Logger, -} - -/// Types of messages the handler can receive. -#[derive(Debug)] -pub enum HandlerMessage { - /// We have initiated a connection to a new peer. - PeerDialed(PeerId), - /// Peer has disconnected, - PeerDisconnected(PeerId), - /// An RPC response/request has been received. - RPC(PeerId, RPCEvent), - /// A gossip message has been received. The fields are: message id, the peer that sent us this - /// message and the message itself. - PubsubMessage(MessageId, PeerId, PubsubMessage), -} - -impl MessageHandler { - /// Initializes and runs the MessageHandler. - pub fn spawn( - beacon_chain: Arc>, - network_send: mpsc::UnboundedSender, - executor: &tokio::runtime::TaskExecutor, - log: slog::Logger, - ) -> error::Result> { - let message_handler_log = log.new(o!("service"=> "msg_handler")); - trace!(message_handler_log, "Service starting"); - - let (handler_send, handler_recv) = mpsc::unbounded_channel(); - - // Initialise a message instance, which itself spawns the syncing thread. - let message_processor = - MessageProcessor::new(executor, beacon_chain, network_send.clone(), &log); - - // generate the Message handler - let mut handler = MessageHandler { - network_send, - message_processor, - log: message_handler_log, - }; - - // spawn handler task and move the message handler instance into the spawned thread - executor.spawn( - handler_recv - .for_each(move |msg| Ok(handler.handle_message(msg))) - .map_err(move |_| { - debug!(log, "Network message handler terminated."); - }), - ); - - Ok(handler_send) - } - - /// Handle all messages incoming from the network service. 
- fn handle_message(&mut self, message: HandlerMessage) { - match message { - // we have initiated a connection to a peer - HandlerMessage::PeerDialed(peer_id) => { - self.message_processor.on_connect(peer_id); - } - // A peer has disconnected - HandlerMessage::PeerDisconnected(peer_id) => { - self.message_processor.on_disconnect(peer_id); - } - // An RPC message request/response has been received - HandlerMessage::RPC(peer_id, rpc_event) => { - self.handle_rpc_message(peer_id, rpc_event); - } - // An RPC message request/response has been received - HandlerMessage::PubsubMessage(id, peer_id, gossip) => { - self.handle_gossip(id, peer_id, gossip); - } - } - } - - /* RPC - Related functionality */ - - /// Handle RPC messages - fn handle_rpc_message(&mut self, peer_id: PeerId, rpc_message: RPCEvent) { - match rpc_message { - RPCEvent::Request(id, req) => self.handle_rpc_request(peer_id, id, req), - RPCEvent::Response(id, resp) => self.handle_rpc_response(peer_id, id, resp), - RPCEvent::Error(id, error) => self.handle_rpc_error(peer_id, id, error), - } - } - - /// A new RPC request has been received from the network. - fn handle_rpc_request(&mut self, peer_id: PeerId, request_id: RequestId, request: RPCRequest) { - match request { - RPCRequest::Status(status_message) => { - self.message_processor - .on_status_request(peer_id, request_id, status_message) - } - RPCRequest::Goodbye(goodbye_reason) => { - debug!( - self.log, "PeerGoodbye"; - "peer" => format!("{:?}", peer_id), - "reason" => format!("{:?}", goodbye_reason), - ); - self.message_processor.on_disconnect(peer_id); - } - RPCRequest::BlocksByRange(request) => self - .message_processor - .on_blocks_by_range_request(peer_id, request_id, request), - RPCRequest::BlocksByRoot(request) => self - .message_processor - .on_blocks_by_root_request(peer_id, request_id, request), - } - } - - /// An RPC response has been received from the network. - // we match on id and ignore responses past the timeout. 
- fn handle_rpc_response( - &mut self, - peer_id: PeerId, - request_id: RequestId, - error_response: RPCErrorResponse, - ) { - // an error could have occurred. - match error_response { - RPCErrorResponse::InvalidRequest(error) => { - warn!(self.log, "Peer indicated invalid request";"peer_id" => format!("{:?}", peer_id), "error" => error.as_string()); - self.handle_rpc_error(peer_id, request_id, RPCError::RPCErrorResponse); - } - RPCErrorResponse::ServerError(error) => { - warn!(self.log, "Peer internal server error";"peer_id" => format!("{:?}", peer_id), "error" => error.as_string()); - self.handle_rpc_error(peer_id, request_id, RPCError::RPCErrorResponse); - } - RPCErrorResponse::Unknown(error) => { - warn!(self.log, "Unknown peer error";"peer" => format!("{:?}", peer_id), "error" => error.as_string()); - self.handle_rpc_error(peer_id, request_id, RPCError::RPCErrorResponse); - } - RPCErrorResponse::Success(response) => { - match response { - RPCResponse::Status(status_message) => { - self.message_processor - .on_status_response(peer_id, status_message); - } - RPCResponse::BlocksByRange(response) => { - match self.decode_beacon_block(response) { - Ok(beacon_block) => { - self.message_processor.on_blocks_by_range_response( - peer_id, - request_id, - Some(beacon_block), - ); - } - Err(e) => { - // TODO: Down-vote Peer - warn!(self.log, "Peer sent invalid BEACON_BLOCKS response";"peer" => format!("{:?}", peer_id), "error" => format!("{:?}", e)); - } - } - } - RPCResponse::BlocksByRoot(response) => { - match self.decode_beacon_block(response) { - Ok(beacon_block) => { - self.message_processor.on_blocks_by_root_response( - peer_id, - request_id, - Some(beacon_block), - ); - } - Err(e) => { - // TODO: Down-vote Peer - warn!(self.log, "Peer sent invalid BEACON_BLOCKS response";"peer" => format!("{:?}", peer_id), "error" => format!("{:?}", e)); - } - } - } - } - } - RPCErrorResponse::StreamTermination(response_type) => { - // have received a stream termination, notify the 
processing functions - match response_type { - ResponseTermination::BlocksByRange => { - self.message_processor - .on_blocks_by_range_response(peer_id, request_id, None); - } - ResponseTermination::BlocksByRoot => { - self.message_processor - .on_blocks_by_root_response(peer_id, request_id, None); - } - } - } - } - } - - /// Handle various RPC errors - fn handle_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) { - warn!(self.log, "RPC Error"; "Peer" => format!("{:?}", peer_id), "request_id" => format!("{}", request_id), "Error" => format!("{:?}", error)); - self.message_processor.on_rpc_error(peer_id, request_id); - } - - /// Handle RPC messages - fn handle_gossip(&mut self, id: MessageId, peer_id: PeerId, gossip_message: PubsubMessage) { - match gossip_message { - PubsubMessage::Block(message) => match self.decode_gossip_block(message) { - Ok(block) => { - let should_forward_on = self - .message_processor - .on_block_gossip(peer_id.clone(), block); - // TODO: Apply more sophisticated validation and decoding logic - if should_forward_on { - self.propagate_message(id, peer_id); - } - } - Err(e) => { - debug!(self.log, "Invalid gossiped beacon block"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); - } - }, - PubsubMessage::Attestation(message) => match self.decode_gossip_attestation(message) { - Ok(attestation) => { - // TODO: Apply more sophisticated validation and decoding logic - self.propagate_message(id, peer_id.clone()); - self.message_processor - .on_attestation_gossip(peer_id, attestation); - } - Err(e) => { - debug!(self.log, "Invalid gossiped attestation"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); - } - }, - PubsubMessage::VoluntaryExit(message) => match self.decode_gossip_exit(message) { - Ok(_exit) => { - // TODO: Apply more sophisticated validation and decoding logic - self.propagate_message(id, peer_id.clone()); - // TODO: Handle exits - debug!(self.log, "Received a voluntary 
exit"; "peer_id" => format!("{}", peer_id) ); - } - Err(e) => { - debug!(self.log, "Invalid gossiped exit"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); - } - }, - PubsubMessage::ProposerSlashing(message) => { - match self.decode_gossip_proposer_slashing(message) { - Ok(_slashing) => { - // TODO: Apply more sophisticated validation and decoding logic - self.propagate_message(id, peer_id.clone()); - // TODO: Handle proposer slashings - debug!(self.log, "Received a proposer slashing"; "peer_id" => format!("{}", peer_id) ); - } - Err(e) => { - debug!(self.log, "Invalid gossiped proposer slashing"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); - } - } - } - PubsubMessage::AttesterSlashing(message) => { - match self.decode_gossip_attestation_slashing(message) { - Ok(_slashing) => { - // TODO: Apply more sophisticated validation and decoding logic - self.propagate_message(id, peer_id.clone()); - // TODO: Handle attester slashings - debug!(self.log, "Received an attester slashing"; "peer_id" => format!("{}", peer_id) ); - } - Err(e) => { - debug!(self.log, "Invalid gossiped attester slashing"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); - } - } - } - PubsubMessage::Unknown(message) => { - // Received a message from an unknown topic. Ignore for now - debug!(self.log, "Unknown Gossip Message"; "peer_id" => format!("{}", peer_id), "Message" => format!("{:?}", message)); - } - } - } - - /// Informs the network service that the message should be forwarded to other peers. - fn propagate_message(&mut self, message_id: MessageId, propagation_source: PeerId) { - self.network_send - .try_send(NetworkMessage::Propagate { - propagation_source, - message_id, - }) - .unwrap_or_else(|_| { - warn!( - self.log, - "Could not send propagation request to the network service" - ) - }); - } - - /* Decoding of gossipsub objects from the network. 
- * - * The decoding is done in the message handler as it has access to to a `BeaconChain` and can - * therefore apply more efficient logic in decoding and verification. - * - * TODO: Apply efficient decoding/verification of these objects - */ - - /* Gossipsub Domain Decoding */ - // Note: These are not generics as type-specific verification will need to be applied. - fn decode_gossip_block( - &self, - beacon_block: Vec, - ) -> Result, DecodeError> { - //TODO: Apply verification before decoding. - SignedBeaconBlock::from_ssz_bytes(&beacon_block) - } - - fn decode_gossip_attestation( - &self, - beacon_block: Vec, - ) -> Result, DecodeError> { - //TODO: Apply verification before decoding. - Attestation::from_ssz_bytes(&beacon_block) - } - - fn decode_gossip_exit(&self, voluntary_exit: Vec) -> Result { - //TODO: Apply verification before decoding. - VoluntaryExit::from_ssz_bytes(&voluntary_exit) - } - - fn decode_gossip_proposer_slashing( - &self, - proposer_slashing: Vec, - ) -> Result { - //TODO: Apply verification before decoding. - ProposerSlashing::from_ssz_bytes(&proposer_slashing) - } - - fn decode_gossip_attestation_slashing( - &self, - attester_slashing: Vec, - ) -> Result, DecodeError> { - //TODO: Apply verification before decoding. - AttesterSlashing::from_ssz_bytes(&attester_slashing) - } - - /* Req/Resp Domain Decoding */ - - /// Verifies and decodes an ssz-encoded `SignedBeaconBlock`. If `None` is passed, this represents a - /// stream termination. - fn decode_beacon_block( - &self, - beacon_block: Vec, - ) -> Result, DecodeError> { - //TODO: Implement faster block verification before decoding entirely - SignedBeaconBlock::from_ssz_bytes(&beacon_block) - } -} diff --git a/beacon_node/network/src/router/mod.rs b/beacon_node/network/src/router/mod.rs new file mode 100644 index 000000000..466c70363 --- /dev/null +++ b/beacon_node/network/src/router/mod.rs @@ -0,0 +1,314 @@ +//! This module handles incoming network messages. +//! +//! 
It routes the messages to appropriate services, such as the Sync +//! and processes those that are +#![allow(clippy::unit_arg)] + +pub mod processor; + +use crate::error; +use crate::service::NetworkMessage; +use beacon_chain::{AttestationType, BeaconChain, BeaconChainTypes, BlockError}; +use eth2_libp2p::{ + rpc::{RPCError, RPCErrorResponse, RPCRequest, RPCResponse, RequestId, ResponseTermination}, + MessageId, NetworkGlobals, PeerId, PubsubMessage, RPCEvent, +}; +use futures::future::Future; +use futures::stream::Stream; +use processor::Processor; +use slog::{debug, o, trace, warn}; +use std::sync::Arc; +use tokio::sync::mpsc; +use types::EthSpec; + +/// Handles messages received from the network and client and organises syncing. This +/// functionality of this struct is to validate an decode messages from the network before +/// passing them to the internal message processor. The message processor spawns a syncing thread +/// which manages which blocks need to be requested and processed. +pub struct Router { + /// A channel to the network service to allow for gossip propagation. + network_send: mpsc::UnboundedSender>, + /// Processes validated and decoded messages from the network. Has direct access to the + /// sync manager. + processor: Processor, + /// The `Router` logger. + log: slog::Logger, +} + +/// Types of messages the handler can receive. +#[derive(Debug)] +pub enum RouterMessage { + /// We have initiated a connection to a new peer. + PeerDialed(PeerId), + /// Peer has disconnected, + PeerDisconnected(PeerId), + /// An RPC response/request has been received. + RPC(PeerId, RPCEvent), + /// A gossip message has been received. The fields are: message id, the peer that sent us this + /// message and the message itself. + PubsubMessage(MessageId, PeerId, PubsubMessage), + /// The peer manager has requested we re-status a peer. + StatusPeer(PeerId), +} + +impl Router { + /// Initializes and runs the Router. 
+ pub fn spawn( + beacon_chain: Arc>, + network_globals: Arc>, + network_send: mpsc::UnboundedSender>, + executor: &tokio::runtime::TaskExecutor, + log: slog::Logger, + ) -> error::Result>> { + let message_handler_log = log.new(o!("service"=> "router")); + trace!(message_handler_log, "Service starting"); + + let (handler_send, handler_recv) = mpsc::unbounded_channel(); + + // Initialise a message instance, which itself spawns the syncing thread. + let processor = Processor::new( + executor, + beacon_chain, + network_globals, + network_send.clone(), + &log, + ); + + // generate the Message handler + let mut handler = Router { + network_send, + processor, + log: message_handler_log, + }; + + // spawn handler task and move the message handler instance into the spawned thread + executor.spawn( + handler_recv + .for_each(move |msg| Ok(handler.handle_message(msg))) + .map_err(move |_| { + debug!(log, "Network message handler terminated."); + }), + ); + + Ok(handler_send) + } + + /// Handle all messages incoming from the network service. 
+ fn handle_message(&mut self, message: RouterMessage) { + match message { + // we have initiated a connection to a peer or the peer manager has requested a + // re-status + RouterMessage::PeerDialed(peer_id) | RouterMessage::StatusPeer(peer_id) => { + self.processor.send_status(peer_id); + } + // A peer has disconnected + RouterMessage::PeerDisconnected(peer_id) => { + self.processor.on_disconnect(peer_id); + } + // An RPC message request/response has been received + RouterMessage::RPC(peer_id, rpc_event) => { + self.handle_rpc_message(peer_id, rpc_event); + } + // An RPC message request/response has been received + RouterMessage::PubsubMessage(id, peer_id, gossip) => { + self.handle_gossip(id, peer_id, gossip); + } + } + } + + /* RPC - Related functionality */ + + /// Handle RPC messages + fn handle_rpc_message(&mut self, peer_id: PeerId, rpc_message: RPCEvent) { + match rpc_message { + RPCEvent::Request(id, req) => self.handle_rpc_request(peer_id, id, req), + RPCEvent::Response(id, resp) => self.handle_rpc_response(peer_id, id, resp), + RPCEvent::Error(id, error) => self.handle_rpc_error(peer_id, id, error), + } + } + + /// A new RPC request has been received from the network. 
+ fn handle_rpc_request( + &mut self, + peer_id: PeerId, + request_id: RequestId, + request: RPCRequest, + ) { + match request { + RPCRequest::Status(status_message) => { + self.processor + .on_status_request(peer_id, request_id, status_message) + } + RPCRequest::Goodbye(goodbye_reason) => { + debug!( + self.log, "PeerGoodbye"; + "peer" => format!("{:?}", peer_id), + "reason" => format!("{:?}", goodbye_reason), + ); + self.processor.on_disconnect(peer_id); + } + RPCRequest::BlocksByRange(request) => self + .processor + .on_blocks_by_range_request(peer_id, request_id, request), + RPCRequest::BlocksByRoot(request) => self + .processor + .on_blocks_by_root_request(peer_id, request_id, request), + RPCRequest::Ping(_) => unreachable!("Ping MUST be handled in the behaviour"), + RPCRequest::MetaData(_) => unreachable!("MetaData MUST be handled in the behaviour"), + } + } + + /// An RPC response has been received from the network. + // we match on id and ignore responses past the timeout. + fn handle_rpc_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + error_response: RPCErrorResponse, + ) { + // an error could have occurred. 
+ match error_response { + RPCErrorResponse::InvalidRequest(error) => { + warn!(self.log, "Peer indicated invalid request";"peer_id" => format!("{:?}", peer_id), "error" => error.as_string()); + self.handle_rpc_error(peer_id, request_id, RPCError::RPCErrorResponse); + } + RPCErrorResponse::ServerError(error) => { + warn!(self.log, "Peer internal server error";"peer_id" => format!("{:?}", peer_id), "error" => error.as_string()); + self.handle_rpc_error(peer_id, request_id, RPCError::RPCErrorResponse); + } + RPCErrorResponse::Unknown(error) => { + warn!(self.log, "Unknown peer error";"peer" => format!("{:?}", peer_id), "error" => error.as_string()); + self.handle_rpc_error(peer_id, request_id, RPCError::RPCErrorResponse); + } + RPCErrorResponse::Success(response) => match response { + RPCResponse::Status(status_message) => { + self.processor.on_status_response(peer_id, status_message); + } + RPCResponse::BlocksByRange(beacon_block) => { + self.processor.on_blocks_by_range_response( + peer_id, + request_id, + Some(beacon_block), + ); + } + RPCResponse::BlocksByRoot(beacon_block) => { + self.processor.on_blocks_by_root_response( + peer_id, + request_id, + Some(beacon_block), + ); + } + RPCResponse::Pong(_) => { + unreachable!("Ping must be handled in the behaviour"); + } + RPCResponse::MetaData(_) => { + unreachable!("Meta data must be handled in the behaviour"); + } + }, + RPCErrorResponse::StreamTermination(response_type) => { + // have received a stream termination, notify the processing functions + match response_type { + ResponseTermination::BlocksByRange => { + self.processor + .on_blocks_by_range_response(peer_id, request_id, None); + } + ResponseTermination::BlocksByRoot => { + self.processor + .on_blocks_by_root_response(peer_id, request_id, None); + } + } + } + } + } + + /// Handle various RPC errors + fn handle_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) { + warn!(self.log, "RPC Error"; "Peer" => format!("{:?}", peer_id), 
"request_id" => format!("{}", request_id), "Error" => format!("{:?}", error)); + self.processor.on_rpc_error(peer_id, request_id); + } + + /// Handle RPC messages + fn handle_gossip( + &mut self, + id: MessageId, + peer_id: PeerId, + gossip_message: PubsubMessage, + ) { + match gossip_message { + // Attestations should never reach the router. + PubsubMessage::AggregateAndProofAttestation(aggregate_and_proof) => { + if self + .processor + .should_forward_aggregate_attestation(&aggregate_and_proof) + { + self.propagate_message(id, peer_id.clone()); + } + self.processor.process_attestation_gossip( + peer_id, + aggregate_and_proof.message.aggregate, + AttestationType::Aggregated, + ); + } + PubsubMessage::Attestation(subnet_attestation) => { + if self + .processor + .should_forward_attestation(&subnet_attestation.1) + { + self.propagate_message(id, peer_id.clone()); + } + self.processor.process_attestation_gossip( + peer_id, + subnet_attestation.1, + AttestationType::Unaggregated { should_store: true }, + ); + } + PubsubMessage::BeaconBlock(block) => { + match self.processor.should_forward_block(&peer_id, block) { + Ok(verified_block) => { + self.propagate_message(id, peer_id.clone()); + self.processor.on_block_gossip(peer_id, verified_block); + } + Err(BlockError::ParentUnknown { .. 
}) => {} // performing a parent lookup + Err(e) => { + // performing a parent lookup + warn!(self.log, "Could not verify block for gossip"; + "error" => format!("{:?}", e)); + } + } + } + PubsubMessage::VoluntaryExit(_exit) => { + // TODO: Apply more sophisticated validation + self.propagate_message(id, peer_id.clone()); + // TODO: Handle exits + debug!(self.log, "Received a voluntary exit"; "peer_id" => format!("{}", peer_id) ); + } + PubsubMessage::ProposerSlashing(_proposer_slashing) => { + // TODO: Apply more sophisticated validation + self.propagate_message(id, peer_id.clone()); + // TODO: Handle proposer slashings + debug!(self.log, "Received a proposer slashing"; "peer_id" => format!("{}", peer_id) ); + } + PubsubMessage::AttesterSlashing(_attester_slashing) => { + // TODO: Apply more sophisticated validation + self.propagate_message(id, peer_id.clone()); + // TODO: Handle attester slashings + debug!(self.log, "Received an attester slashing"; "peer_id" => format!("{}", peer_id) ); + } + } + } + + /// Informs the network service that the message should be forwarded to other peers. 
+ fn propagate_message(&mut self, message_id: MessageId, propagation_source: PeerId) { + self.network_send + .try_send(NetworkMessage::Propagate { + propagation_source, + message_id, + }) + .unwrap_or_else(|_| { + warn!( + self.log, + "Could not send propagation request to the network service" + ) + }); + } +} diff --git a/beacon_node/network/src/message_processor.rs b/beacon_node/network/src/router/processor.rs similarity index 82% rename from beacon_node/network/src/message_processor.rs rename to beacon_node/network/src/router/processor.rs index 99b1a9fce..d3c3d98cc 100644 --- a/beacon_node/network/src/message_processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -1,17 +1,21 @@ use crate::service::NetworkMessage; -use crate::sync::SyncMessage; +use crate::sync::{PeerSyncInfo, SyncMessage}; use beacon_chain::{ - AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BlockProcessingOutcome, + AttestationProcessingOutcome, AttestationType, BeaconChain, BeaconChainTypes, BlockError, + BlockProcessingOutcome, GossipVerifiedBlock, }; use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::{RPCEvent, RPCRequest, RPCResponse, RequestId}; -use eth2_libp2p::PeerId; +use eth2_libp2p::{NetworkGlobals, PeerId}; use slog::{debug, error, o, trace, warn}; use ssz::Encode; use std::sync::Arc; use store::Store; use tokio::sync::{mpsc, oneshot}; -use types::{Attestation, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{ + Attestation, ChainSpec, Epoch, EthSpec, Hash256, SignedAggregateAndProof, SignedBeaconBlock, + Slot, +}; //TODO: Rate limit requests @@ -19,40 +23,9 @@ use types::{Attestation, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot}; /// Otherwise we queue it. pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1; -const SHOULD_FORWARD_GOSSIP_BLOCK: bool = true; -const SHOULD_NOT_FORWARD_GOSSIP_BLOCK: bool = false; - -/// Keeps track of syncing information for known connected peers. 
-#[derive(Clone, Copy, Debug)] -pub struct PeerSyncInfo { - fork_version: [u8; 4], - pub finalized_root: Hash256, - pub finalized_epoch: Epoch, - pub head_root: Hash256, - pub head_slot: Slot, -} - -impl From for PeerSyncInfo { - fn from(status: StatusMessage) -> PeerSyncInfo { - PeerSyncInfo { - fork_version: status.fork_version, - finalized_root: status.finalized_root, - finalized_epoch: status.finalized_epoch, - head_root: status.head_root, - head_slot: status.head_slot, - } - } -} - -impl PeerSyncInfo { - pub fn from_chain(chain: &Arc>) -> Option { - Some(Self::from(status_message(chain)?)) - } -} - /// Processes validated messages from the network. It relays necessary data to the syncing thread /// and processes blocks from the pubsub network. -pub struct MessageProcessor { +pub struct Processor { /// A reference to the underlying beacon chain. chain: Arc>, /// A channel to the syncing thread. @@ -60,17 +33,18 @@ pub struct MessageProcessor { /// A oneshot channel for destroying the sync thread. _sync_exit: oneshot::Sender<()>, /// A network context to return and handle RPC requests. - network: HandlerNetworkContext, + network: HandlerNetworkContext, /// The `RPCHandler` logger. 
log: slog::Logger, } -impl MessageProcessor { - /// Instantiate a `MessageProcessor` instance +impl Processor { + /// Instantiate a `Processor` instance pub fn new( executor: &tokio::runtime::TaskExecutor, beacon_chain: Arc>, - network_send: mpsc::UnboundedSender, + network_globals: Arc>, + network_send: mpsc::UnboundedSender>, log: &slog::Logger, ) -> Self { let sync_logger = log.new(o!("service"=> "sync")); @@ -78,12 +52,13 @@ impl MessageProcessor { // spawn the sync thread let (sync_send, _sync_exit) = crate::sync::manager::spawn( executor, - Arc::downgrade(&beacon_chain), + beacon_chain.clone(), + network_globals, network_send.clone(), sync_logger, ); - MessageProcessor { + Processor { chain: beacon_chain, sync_send, _sync_exit, @@ -114,16 +89,17 @@ impl MessageProcessor { self.send_to_sync(SyncMessage::RPCError(peer_id, request_id)); } - /// Handle the connection of a new peer. - /// /// Sends a `Status` message to the peer. - pub fn on_connect(&mut self, peer_id: PeerId) { + /// + /// Called when we first connect to a peer, or when the PeerManager determines we need to + /// re-status. 
+ pub fn send_status(&mut self, peer_id: PeerId) { if let Some(status_message) = status_message(&self.chain) { debug!( self.log, "Sending Status Request"; "peer" => format!("{:?}", peer_id), - "fork_version" => format!("{:?}", status_message.fork_version), + "fork_digest" => format!("{:?}", status_message.fork_digest), "finalized_root" => format!("{:?}", status_message.finalized_root), "finalized_epoch" => format!("{:?}", status_message.finalized_epoch), "head_root" => format!("{}", status_message.head_root), @@ -147,7 +123,7 @@ impl MessageProcessor { self.log, "Received Status Request"; "peer" => format!("{:?}", peer_id), - "fork_version" => format!("{:?}", status.fork_version), + "fork_digest" => format!("{:?}", status.fork_digest), "finalized_root" => format!("{:?}", status.finalized_root), "finalized_epoch" => format!("{:?}", status.finalized_epoch), "head_root" => format!("{}", status.head_root), @@ -169,7 +145,16 @@ impl MessageProcessor { /// Process a `Status` response from a peer. pub fn on_status_response(&mut self, peer_id: PeerId, status: StatusMessage) { - trace!(self.log, "StatusResponse"; "peer" => format!("{:?}", peer_id)); + trace!( + self.log, + "Received Status Response"; + "peer" => format!("{:?}", peer_id), + "fork_digest" => format!("{:?}", status.fork_digest), + "finalized_root" => format!("{:?}", status.finalized_root), + "finalized_epoch" => format!("{:?}", status.finalized_epoch), + "head_root" => format!("{}", status.head_root), + "head_slot" => format!("{}", status.head_slot), + ); // Process the status message, without sending back another status. self.process_status(peer_id, status); @@ -193,12 +178,14 @@ impl MessageProcessor { let start_slot = |epoch: Epoch| epoch.start_slot(T::EthSpec::slots_per_epoch()); - if local.fork_version != remote.fork_version { + if local.fork_digest != remote.fork_digest { // The node is on a different network/fork, disconnect them. 
debug!( self.log, "Handshake Failure"; "peer" => format!("{:?}", peer_id), - "reason" => "network_id" + "reason" => "incompatible forks", + "our_fork" => hex::encode(local.fork_digest), + "their_fork" => hex::encode(remote.fork_digest) ); self.network @@ -265,7 +252,7 @@ impl MessageProcessor { .exists::>(&remote.head_root) .unwrap_or_else(|_| false) { - trace!( + debug!( self.log, "Peer with known chain found"; "peer" => format!("{:?}", peer_id), "remote_head_slot" => remote.head_slot, @@ -303,7 +290,7 @@ impl MessageProcessor { self.network.send_rpc_response( peer_id.clone(), request_id, - RPCResponse::BlocksByRoot(block.as_ssz_bytes()), + RPCResponse::BlocksByRoot(Box::new(block)), ); send_block_count += 1; } else { @@ -389,7 +376,7 @@ impl MessageProcessor { self.network.send_rpc_response( peer_id.clone(), request_id, - RPCResponse::BlocksByRange(block.as_ssz_bytes()), + RPCResponse::BlocksByRange(Box::new(block)), ); } } else { @@ -436,9 +423,8 @@ impl MessageProcessor { &mut self, peer_id: PeerId, request_id: RequestId, - beacon_block: Option>, + beacon_block: Option>>, ) { - let beacon_block = beacon_block.map(Box::new); trace!( self.log, "Received BlocksByRange Response"; @@ -457,9 +443,8 @@ impl MessageProcessor { &mut self, peer_id: PeerId, request_id: RequestId, - beacon_block: Option>, + beacon_block: Option>>, ) { - let beacon_block = beacon_block.map(Box::new); trace!( self.log, "Received BlocksByRoot Response"; @@ -473,6 +458,24 @@ impl MessageProcessor { }); } + /// Template function to be called on a block to determine if the block should be propagated + /// across the network. + pub fn should_forward_block( + &mut self, + peer_id: &PeerId, + block: Box>, + ) -> Result, BlockError> { + let result = self.chain.verify_block_for_gossip(*block.clone()); + + if let Err(BlockError::ParentUnknown(block_hash)) = result { + // if we don't know the parent, start a parent lookup + // TODO: Modify the return to avoid the block clone. 
+ debug!(self.log, "Unknown block received. Starting a parent lookup"; "block_slot" => block.message.slot, "block_hash" => format!("{}", block_hash)); + self.send_to_sync(SyncMessage::UnknownBlock(peer_id.clone(), block)); + } + result + } + /// Process a gossip message declaring a new block. /// /// Attempts to apply to block to the beacon chain. May queue the block for later processing. @@ -481,9 +484,10 @@ impl MessageProcessor { pub fn on_block_gossip( &mut self, peer_id: PeerId, - block: SignedBeaconBlock, + verified_block: GossipVerifiedBlock, ) -> bool { - match self.chain.process_block(block.clone()) { + let block = Box::new(verified_block.block.clone()); + match BlockProcessingOutcome::shim(self.chain.process_block(verified_block)) { Ok(outcome) => match outcome { BlockProcessingOutcome::Processed { .. } => { trace!(self.log, "Gossipsub block processed"; @@ -508,24 +512,14 @@ impl MessageProcessor { "location" => "block gossip" ), } - - SHOULD_FORWARD_GOSSIP_BLOCK } BlockProcessingOutcome::ParentUnknown { .. } => { // Inform the sync manager to find parents for this block - trace!(self.log, "Block with unknown parent received"; + // This should not occur. 
It should be checked by `should_forward_block` + error!(self.log, "Block with unknown parent attempted to be processed"; "peer_id" => format!("{:?}",peer_id)); - self.send_to_sync(SyncMessage::UnknownBlock(peer_id, Box::new(block))); - SHOULD_FORWARD_GOSSIP_BLOCK + self.send_to_sync(SyncMessage::UnknownBlock(peer_id, block)); } - BlockProcessingOutcome::FutureSlot { - present_slot, - block_slot, - } if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot => { - //TODO: Decide the logic here - SHOULD_FORWARD_GOSSIP_BLOCK - } - BlockProcessingOutcome::BlockIsAlreadyKnown => SHOULD_FORWARD_GOSSIP_BLOCK, other => { warn!( self.log, @@ -539,7 +533,6 @@ impl MessageProcessor { "Invalid gossip beacon block ssz"; "ssz" => format!("0x{}", hex::encode(block.as_ssz_bytes())), ); - SHOULD_NOT_FORWARD_GOSSIP_BLOCK //TODO: Decide if we want to forward these } }, Err(_) => { @@ -549,16 +542,38 @@ impl MessageProcessor { "Erroneous gossip beacon block ssz"; "ssz" => format!("0x{}", hex::encode(block.as_ssz_bytes())), ); - SHOULD_NOT_FORWARD_GOSSIP_BLOCK } } + // TODO: Update with correct block gossip checking + true } - /// Process a gossip message declaring a new attestation. - /// - /// Not currently implemented. - pub fn on_attestation_gossip(&mut self, peer_id: PeerId, msg: Attestation) { - match self.chain.process_attestation(msg.clone()) { + /// Verifies the Aggregate attestation before propagating. + pub fn should_forward_aggregate_attestation( + &self, + _aggregate_and_proof: &Box>, + ) -> bool { + // TODO: Implement + true + } + + /// Verifies the attestation before propagating. + pub fn should_forward_attestation(&self, _aggregate: &Attestation) -> bool { + // TODO: Implement + true + } + + /// Process a new attestation received from gossipsub. 
+ pub fn process_attestation_gossip( + &mut self, + peer_id: PeerId, + msg: Attestation, + attestation_type: AttestationType, + ) { + match self + .chain + .process_attestation(msg.clone(), attestation_type) + { Ok(outcome) => match outcome { AttestationProcessingOutcome::Processed => { debug!( @@ -572,7 +587,7 @@ impl MessageProcessor { } AttestationProcessingOutcome::UnknownHeadBlock { beacon_block_root } => { // TODO: Maintain this attestation and re-process once sync completes - trace!( + debug!( self.log, "Attestation for unknown block"; "peer_id" => format!("{:?}", peer_id), @@ -603,7 +618,7 @@ impl MessageProcessor { "ssz" => format!("0x{}", hex::encode(msg.as_ssz_bytes())), ); } - } + }; } } @@ -612,9 +627,13 @@ pub(crate) fn status_message( beacon_chain: &BeaconChain, ) -> Option { let head_info = beacon_chain.head_info().ok()?; + let genesis_validators_root = beacon_chain.genesis_validators_root; + + let fork_digest = + ChainSpec::compute_fork_digest(head_info.fork.current_version, genesis_validators_root); Some(StatusMessage { - fork_version: head_info.fork.current_version, + fork_digest, finalized_root: head_info.finalized_checkpoint.root, finalized_epoch: head_info.finalized_checkpoint.epoch, head_root: head_info.block_root, @@ -622,18 +641,19 @@ pub(crate) fn status_message( }) } -/// Wraps a Network Channel to employ various RPC related network functionality for the message -/// handler. The handler doesn't manage it's own request Id's and can therefore only send +/// Wraps a Network Channel to employ various RPC related network functionality for the +/// processor. +/// The Processor doesn't manage it's own request Id's and can therefore only send /// responses or requests with 0 request Ids. -pub struct HandlerNetworkContext { +pub struct HandlerNetworkContext { /// The network channel to relay messages to the Network service. - network_send: mpsc::UnboundedSender, + network_send: mpsc::UnboundedSender>, /// Logger for the `NetworkContext`. 
log: slog::Logger, } -impl HandlerNetworkContext { - pub fn new(network_send: mpsc::UnboundedSender, log: slog::Logger) -> Self { +impl HandlerNetworkContext { + pub fn new(network_send: mpsc::UnboundedSender>, log: slog::Logger) -> Self { Self { network_send, log } } @@ -655,7 +675,7 @@ impl HandlerNetworkContext { }); } - pub fn send_rpc_request(&mut self, peer_id: PeerId, rpc_request: RPCRequest) { + pub fn send_rpc_request(&mut self, peer_id: PeerId, rpc_request: RPCRequest) { // the message handler cannot send requests with ids. Id's are managed by the sync // manager. let request_id = 0; @@ -667,7 +687,7 @@ impl HandlerNetworkContext { &mut self, peer_id: PeerId, request_id: RequestId, - rpc_response: RPCResponse, + rpc_response: RPCResponse, ) { self.send_rpc_event( peer_id, @@ -680,12 +700,12 @@ impl HandlerNetworkContext { &mut self, peer_id: PeerId, request_id: RequestId, - rpc_error_response: RPCErrorResponse, + rpc_error_response: RPCErrorResponse, ) { self.send_rpc_event(peer_id, RPCEvent::Response(request_id, rpc_error_response)); } - fn send_rpc_event(&mut self, peer_id: PeerId, rpc_event: RPCEvent) { + fn send_rpc_event(&mut self, peer_id: PeerId, rpc_event: RPCEvent) { self.network_send .try_send(NetworkMessage::RPC(peer_id, rpc_event)) .unwrap_or_else(|_| { diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 0439e39c8..5eff3654e 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -1,23 +1,24 @@ use crate::error; -use crate::message_handler::{HandlerMessage, MessageHandler}; use crate::persisted_dht::{load_dht, persist_dht}; -use crate::NetworkConfig; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use core::marker::PhantomData; -use eth2_libp2p::Service as LibP2PService; -use eth2_libp2p::{ - rpc::RPCRequest, Enr, Libp2pEvent, MessageId, Multiaddr, NetworkGlobals, PeerId, Swarm, Topic, +use crate::router::{Router, RouterMessage}; +use crate::{ + 
attestation_service::{AttServiceMessage, AttestationService}, + NetworkConfig, }; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2_libp2p::Service as LibP2PService; +use eth2_libp2p::{rpc::RPCRequest, BehaviourEvent, Enr, MessageId, NetworkGlobals, PeerId, Swarm}; use eth2_libp2p::{PubsubMessage, RPCEvent}; use futures::prelude::*; use futures::Stream; +use rest_types::ValidatorSubscription; use slog::{debug, error, info, trace}; -use std::collections::HashSet; -use std::sync::{atomic::Ordering, Arc}; +use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::runtime::TaskExecutor; use tokio::sync::{mpsc, oneshot}; use tokio::timer::Delay; +use types::EthSpec; mod tests; @@ -25,110 +26,107 @@ mod tests; const BAN_PEER_TIMEOUT: u64 = 30; /// Service that handles communication between internal services and the `eth2_libp2p` network service. -pub struct Service { - libp2p_port: u16, - network_globals: Arc, - _libp2p_exit: oneshot::Sender<()>, - _network_send: mpsc::UnboundedSender, - _phantom: PhantomData, +pub struct NetworkService { + /// A reference to the underlying beacon chain. + beacon_chain: Arc>, + /// The underlying libp2p service that drives all the network interactions. + libp2p: LibP2PService, + /// An attestation and subnet manager service. + attestation_service: AttestationService, + /// The receiver channel for lighthouse to communicate with the network service. + network_recv: mpsc::UnboundedReceiver>, + /// The sending channel for the network service to send messages to be routed throughout + /// lighthouse. + router_send: mpsc::UnboundedSender>, + /// A reference to lighthouse's database to persist the DHT. + store: Arc, + /// A collection of global variables, accessible outside of the network service. + network_globals: Arc>, + /// An initial delay to update variables after the libp2p service has started. + initial_delay: Delay, + /// A delay that expires when a new fork takes place. 
+ next_fork_update: Option, + /// The logger for the network service. + log: slog::Logger, + /// A probability of propagation. + propagation_percentage: Option, } -impl Service { - pub fn new( +impl NetworkService { + pub fn start( beacon_chain: Arc>, config: &NetworkConfig, executor: &TaskExecutor, network_log: slog::Logger, - ) -> error::Result<(Arc, mpsc::UnboundedSender)> { + ) -> error::Result<( + Arc>, + mpsc::UnboundedSender>, + oneshot::Sender<()>, + )> { // build the network channel - let (network_send, network_recv) = mpsc::unbounded_channel::(); - // launch message handler thread + let (network_send, network_recv) = mpsc::unbounded_channel::>(); + // get a reference to the beacon chain store let store = beacon_chain.store.clone(); - let message_handler_send = MessageHandler::spawn( - beacon_chain, - network_send.clone(), - executor, - network_log.clone(), - )?; let propagation_percentage = config.propagation_percentage; + + // build the current enr_fork_id for adding to our local ENR + let enr_fork_id = beacon_chain.enr_fork_id(); + + // keep track of when our fork_id needs to be updated + let next_fork_update = next_fork_delay(&beacon_chain); + // launch libp2p service - let (network_globals, mut libp2p_service) = - LibP2PService::new(config, network_log.clone())?; + let (network_globals, mut libp2p) = + LibP2PService::new(config, enr_fork_id, network_log.clone())?; for enr in load_dht::(store.clone()) { - libp2p_service.swarm.add_enr(enr); + libp2p.swarm.add_enr(enr); } // A delay used to initialise code after the network has started // This is currently used to obtain the listening addresses from the libp2p service. 
let initial_delay = Delay::new(Instant::now() + Duration::from_secs(1)); - let libp2p_exit = spawn_service::( - libp2p_service, - network_recv, - message_handler_send, - executor, - store, + // launch derived network services + + // router task + let router_send = Router::spawn( + beacon_chain.clone(), network_globals.clone(), - initial_delay, + network_send.clone(), + executor, network_log.clone(), - propagation_percentage, )?; - let network_service = Service { - libp2p_port: config.libp2p_port, - network_globals, - _libp2p_exit: libp2p_exit, - _network_send: network_send.clone(), - _phantom: PhantomData, + // attestation service + let attestation_service = + AttestationService::new(beacon_chain.clone(), network_globals.clone(), &network_log); + + // create the network service and spawn the task + let network_service = NetworkService { + beacon_chain, + libp2p, + attestation_service, + network_recv, + router_send, + store, + network_globals: network_globals.clone(), + initial_delay, + next_fork_update, + log: network_log, + propagation_percentage, }; - Ok((Arc::new(network_service), network_send)) - } + let network_exit = spawn_service(network_service, &executor)?; - /// Returns the local ENR from the underlying Discv5 behaviour that external peers may connect - /// to. - pub fn local_enr(&self) -> Option { - self.network_globals.local_enr.read().clone() - } - - /// Returns the local libp2p PeerID. - pub fn local_peer_id(&self) -> PeerId { - self.network_globals.peer_id.read().clone() - } - - /// Returns the list of `Multiaddr` that the underlying libp2p instance is listening on. - pub fn listen_multiaddrs(&self) -> Vec { - self.network_globals.listen_multiaddrs.read().clone() - } - - /// Returns the libp2p port that this node has been configured to listen using. - pub fn listen_port(&self) -> u16 { - self.libp2p_port - } - - /// Returns the number of libp2p connected peers. 
- pub fn connected_peers(&self) -> usize { - self.network_globals.connected_peers.load(Ordering::Relaxed) - } - - /// Returns the set of `PeerId` that are connected via libp2p. - pub fn connected_peer_set(&self) -> HashSet { - self.network_globals.connected_peer_set.read().clone() + Ok((network_globals, network_send, network_exit)) } } fn spawn_service( - mut libp2p_service: LibP2PService, - mut network_recv: mpsc::UnboundedReceiver, - mut message_handler_send: mpsc::UnboundedSender, + mut service: NetworkService, executor: &TaskExecutor, - store: Arc, - network_globals: Arc, - mut initial_delay: Delay, - log: slog::Logger, - propagation_percentage: Option, ) -> error::Result> { let (network_exit, mut exit_rx) = tokio::sync::oneshot::channel(); @@ -136,25 +134,27 @@ fn spawn_service( executor.spawn( futures::future::poll_fn(move || -> Result<_, ()> { + let log = &service.log; - if !initial_delay.is_elapsed() { - if let Ok(Async::Ready(_)) = initial_delay.poll() { - let multi_addrs = Swarm::listeners(&libp2p_service.swarm).cloned().collect(); - *network_globals.listen_multiaddrs.write() = multi_addrs; + // handles any logic which requires an initial delay + if !service.initial_delay.is_elapsed() { + if let Ok(Async::Ready(_)) = service.initial_delay.poll() { + let multi_addrs = Swarm::listeners(&service.libp2p.swarm).cloned().collect(); + *service.network_globals.listen_multiaddrs.write() = multi_addrs; } } // perform termination tasks when the network is being shutdown if let Ok(Async::Ready(_)) | Err(_) = exit_rx.poll() { // network thread is terminating - let enrs: Vec = libp2p_service.swarm.enr_entries().cloned().collect(); + let enrs: Vec = service.libp2p.swarm.enr_entries().cloned().collect(); debug!( log, "Persisting DHT to store"; "Number of peers" => format!("{}", enrs.len()), ); - match persist_dht::(store.clone(), enrs) { + match persist_dht::(service.store.clone(), enrs) { Err(e) => error!( log, "Failed to persist DHT on drop"; @@ -173,11 +173,11 @@ fn 
spawn_service( // processes the network channel before processing the libp2p swarm loop { // poll the network channel - match network_recv.poll() { + match service.network_recv.poll() { Ok(Async::Ready(Some(message))) => match message { NetworkMessage::RPC(peer_id, rpc_event) => { trace!(log, "Sending RPC"; "rpc" => format!("{}", rpc_event)); - libp2p_service.swarm.send_rpc(peer_id, rpc_event); + service.libp2p.swarm.send_rpc(peer_id, rpc_event); } NetworkMessage::Propagate { propagation_source, @@ -186,7 +186,7 @@ fn spawn_service( // TODO: Remove this for mainnet // randomly prevents propagation let mut should_send = true; - if let Some(percentage) = propagation_percentage { + if let Some(percentage) = service.propagation_percentage { // not exact percentage but close enough let rand = rand::random::() % 100; if rand > percentage { @@ -201,16 +201,16 @@ fn spawn_service( "propagation_peer" => format!("{:?}", propagation_source), "message_id" => message_id.to_string(), ); - libp2p_service + service.libp2p .swarm .propagate_message(&propagation_source, message_id); } } - NetworkMessage::Publish { topics, message } => { + NetworkMessage::Publish { messages } => { // TODO: Remove this for mainnet // randomly prevents propagation let mut should_send = true; - if let Some(percentage) = propagation_percentage { + if let Some(percentage) = service.propagation_percentage { // not exact percentage but close enough let rand = rand::random::() % 100; if rand > percentage { @@ -219,18 +219,29 @@ fn spawn_service( } } if !should_send { - info!(log, "Random filter did not publish message"); + info!(log, "Random filter did not publish messages"); } else { - debug!(log, "Sending pubsub message"; "topics" => format!("{:?}",topics)); - libp2p_service.swarm.publish(&topics, message); + let mut topic_kinds = Vec::new(); + for message in &messages { + if !topic_kinds.contains(&message.kind()) { + topic_kinds.push(message.kind()); + } + } + debug!(log, "Sending pubsub messages"; 
"count" => messages.len(), "topics" => format!("{:?}", topic_kinds)); + service.libp2p.swarm.publish(messages); } } NetworkMessage::Disconnect { peer_id } => { - libp2p_service.disconnect_and_ban_peer( + service.libp2p.disconnect_and_ban_peer( peer_id, std::time::Duration::from_secs(BAN_PEER_TIMEOUT), ); } + NetworkMessage::Subscribe { subscriptions } => + { + // the result is dropped as it used solely for ergonomics + let _ = service.attestation_service.validator_subscriptions(subscriptions); + } }, Ok(Async::NotReady) => break, Ok(Async::Ready(None)) => { @@ -244,45 +255,90 @@ fn spawn_service( } } + // process any attestation service events + // NOTE: This must come after the network message processing as that may trigger events in + // the attestation service. + while let Ok(Async::Ready(Some(attestation_service_message))) = service.attestation_service.poll() { + match attestation_service_message { + // TODO: Implement + AttServiceMessage::Subscribe(subnet_id) => { + service.libp2p.swarm.subscribe_to_subnet(subnet_id); + }, + AttServiceMessage::Unsubscribe(subnet_id) => { + service.libp2p.swarm.subscribe_to_subnet(subnet_id); + }, + AttServiceMessage::EnrAdd(subnet_id) => { + service.libp2p.swarm.update_enr_subnet(subnet_id, true); + }, + AttServiceMessage::EnrRemove(subnet_id) => { + service.libp2p.swarm.update_enr_subnet(subnet_id, false); + }, + AttServiceMessage::DiscoverPeers(subnet_id) => { + service.libp2p.swarm.peers_request(subnet_id); + }, + } + } + let mut peers_to_ban = Vec::new(); // poll the swarm loop { - match libp2p_service.poll() { + match service.libp2p.poll() { Ok(Async::Ready(Some(event))) => match event { - Libp2pEvent::RPC(peer_id, rpc_event) => { - // trace!(log, "Received RPC"; "rpc" => format!("{}", rpc_event)); - + BehaviourEvent::RPC(peer_id, rpc_event) => { // if we received a Goodbye message, drop and ban the peer if let RPCEvent::Request(_, RPCRequest::Goodbye(_)) = rpc_event { peers_to_ban.push(peer_id.clone()); }; - 
message_handler_send - .try_send(HandlerMessage::RPC(peer_id, rpc_event)) - .map_err(|_| { debug!(log, "Failed to send RPC to handler");} )?; + service.router_send + .try_send(RouterMessage::RPC(peer_id, rpc_event)) + .map_err(|_| { debug!(log, "Failed to send RPC to router");} )?; } - Libp2pEvent::PeerDialed(peer_id) => { - debug!(log, "Peer Dialed"; "peer_id" => format!("{:?}", peer_id)); - message_handler_send - .try_send(HandlerMessage::PeerDialed(peer_id)) - .map_err(|_| { debug!(log, "Failed to send peer dialed to handler");})?; + BehaviourEvent::PeerDialed(peer_id) => { + debug!(log, "Peer Dialed"; "peer_id" => format!("{}", peer_id)); + service.router_send + .try_send(RouterMessage::PeerDialed(peer_id)) + .map_err(|_| { debug!(log, "Failed to send peer dialed to router");})?; } - Libp2pEvent::PeerDisconnected(peer_id) => { - debug!(log, "Peer Disconnected"; "peer_id" => format!("{:?}", peer_id)); - message_handler_send - .try_send(HandlerMessage::PeerDisconnected(peer_id)) - .map_err(|_| { debug!(log, "Failed to send peer disconnect to handler");})?; + BehaviourEvent::PeerDisconnected(peer_id) => { + debug!(log, "Peer Disconnected"; "peer_id" => format!("{}", peer_id)); + service.router_send + .try_send(RouterMessage::PeerDisconnected(peer_id)) + .map_err(|_| { debug!(log, "Failed to send peer disconnect to router");})?; } - Libp2pEvent::PubsubMessage { + BehaviourEvent::StatusPeer(peer_id) => { + service.router_send + .try_send(RouterMessage::StatusPeer(peer_id)) + .map_err(|_| { debug!(log, "Failed to send re-status peer to router");})?; + } + BehaviourEvent::PubsubMessage { id, source, message, .. 
} => { - message_handler_send - .try_send(HandlerMessage::PubsubMessage(id, source, message)) - .map_err(|_| { debug!(log, "Failed to send pubsub message to handler");})?; + + match message { + // attestation information gets processed in the attestation service + PubsubMessage::Attestation(ref subnet_and_attestation) => { + let subnet = &subnet_and_attestation.0; + let attestation = &subnet_and_attestation.1; + // checks if we have an aggregator for the slot. If so, we process + // the attestation + if service.attestation_service.should_process_attestation(&id, &source, subnet, attestation) { + service.router_send + .try_send(RouterMessage::PubsubMessage(id, source, message)) + .map_err(|_| { debug!(log, "Failed to send pubsub message to router");})?; + } + } + _ => { + // all else is sent to the router + service.router_send + .try_send(RouterMessage::PubsubMessage(id, source, message)) + .map_err(|_| { debug!(log, "Failed to send pubsub message to router");})?; + } + } } - Libp2pEvent::PeerSubscribed(_, _) => {} + BehaviourEvent::PeerSubscribed(_, _) => {} }, Ok(Async::Ready(None)) => unreachable!("Stream never ends"), Ok(Async::NotReady) => break, @@ -292,12 +348,22 @@ fn spawn_service( // ban and disconnect any peers that sent Goodbye requests while let Some(peer_id) = peers_to_ban.pop() { - libp2p_service.disconnect_and_ban_peer( + service.libp2p.disconnect_and_ban_peer( peer_id.clone(), std::time::Duration::from_secs(BAN_PEER_TIMEOUT), ); } + // if we have just forked, update inform the libp2p layer + if let Some(mut update_fork_delay) = service.next_fork_update.take() { + if !update_fork_delay.is_elapsed() { + if let Ok(Async::Ready(_)) = update_fork_delay.poll() { + service.libp2p.swarm.update_fork_version(service.beacon_chain.enr_fork_id()); + service.next_fork_update = next_fork_delay(&service.beacon_chain); + } + } + } + Ok(Async::NotReady) }) @@ -306,16 +372,29 @@ fn spawn_service( Ok(network_exit) } +/// Returns a `Delay` that triggers shortly after 
the next change in the beacon chain fork version. +/// If there is no scheduled fork, `None` is returned. +fn next_fork_delay( + beacon_chain: &BeaconChain, +) -> Option { + beacon_chain.duration_to_next_fork().map(|until_fork| { + // Add a short time-out to start within the new fork period. + let delay = Duration::from_millis(200); + tokio::timer::Delay::new(Instant::now() + until_fork + delay) + }) +} + /// Types of messages that the network service can receive. #[derive(Debug)] -pub enum NetworkMessage { - /// Send an RPC message to the libp2p service. - RPC(PeerId, RPCEvent), - /// Publish a message to gossipsub. - Publish { - topics: Vec, - message: PubsubMessage, +pub enum NetworkMessage { + /// Subscribes a list of validators to specific slots for attestation duties. + Subscribe { + subscriptions: Vec, }, + /// Send an RPC message to the libp2p service. + RPC(PeerId, RPCEvent), + /// Publish a list of messages to the gossipsub protocol. + Publish { messages: Vec> }, /// Propagate a received gossipsub message. Propagate { propagation_source: PeerId, diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs index c77cd064b..90a6170db 100644 --- a/beacon_node/network/src/service/tests.rs +++ b/beacon_node/network/src/service/tests.rs @@ -2,7 +2,7 @@ #[cfg(test)] mod tests { use crate::persisted_dht::load_dht; - use crate::{NetworkConfig, Service}; + use crate::{NetworkConfig, NetworkService}; use beacon_chain::test_utils::BeaconChainHarness; use eth2_libp2p::Enr; use futures::{Future, IntoFuture}; @@ -10,7 +10,6 @@ mod tests { use sloggers::{null::NullLoggerBuilder, Build}; use std::str::FromStr; use std::sync::Arc; - use store::MemoryStore; use tokio::runtime::Runtime; use types::{test_utils::generate_deterministic_keypairs, MinimalEthSpec}; @@ -44,14 +43,14 @@ mod tests { .block_on_all( // Create a new network service which implicitly gets dropped at the // end of the block. 
- Service::new(beacon_chain.clone(), &config, &executor, log.clone()) + NetworkService::start(beacon_chain.clone(), &config, &executor, log.clone()) .into_future() - .and_then(move |(_service, _)| Ok(())), + .and_then(move |(_globals, _service, _exit)| Ok(())), ) .unwrap(); // Load the persisted dht from the store - let persisted_enrs = load_dht::, MinimalEthSpec>(store); + let persisted_enrs = load_dht(store); assert!( persisted_enrs.contains(&enrs[0]), "should have persisted the first ENR to store" diff --git a/beacon_node/network/src/sync/block_processor.rs b/beacon_node/network/src/sync/block_processor.rs index cfb82eb8f..8c53869e4 100644 --- a/beacon_node/network/src/sync/block_processor.rs +++ b/beacon_node/network/src/sync/block_processor.rs @@ -1,7 +1,7 @@ -use crate::message_processor::FUTURE_SLOT_TOLERANCE; +use crate::router::processor::FUTURE_SLOT_TOLERANCE; use crate::sync::manager::SyncMessage; -use crate::sync::range_sync::BatchId; -use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; +use crate::sync::range_sync::{BatchId, ChainId}; +use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, ChainSegmentResult}; use eth2_libp2p::PeerId; use slog::{debug, error, trace, warn}; use std::sync::{Arc, Weak}; @@ -12,7 +12,7 @@ use types::SignedBeaconBlock; #[derive(Clone, Debug, PartialEq)] pub enum ProcessId { /// Processing Id of a range syncing batch. - RangeBatchId(BatchId), + RangeBatchId(ChainId, BatchId), /// Processing Id of the parent lookup of a block ParentLookup(PeerId), } @@ -25,6 +25,8 @@ pub enum BatchProcessResult { Success, /// The batch processing failed. Failed, + /// The batch processing failed but managed to import at least one block. + Partial, } /// Spawns a thread handling the block processing of a request: range syncing or parent lookup. 
@@ -38,22 +40,28 @@ pub fn spawn_block_processor( std::thread::spawn(move || { match process_id { // this a request from the range sync - ProcessId::RangeBatchId(batch_id) => { + ProcessId::RangeBatchId(chain_id, batch_id) => { debug!(log, "Processing batch"; "id" => *batch_id, "blocks" => downloaded_blocks.len()); let result = match process_blocks(chain, downloaded_blocks.iter(), &log) { - Ok(_) => { + (_, Ok(_)) => { debug!(log, "Batch processed"; "id" => *batch_id ); BatchProcessResult::Success } - Err(e) => { + (imported_blocks, Err(e)) if imported_blocks > 0 => { + debug!(log, "Batch processing failed but imported some blocks"; + "id" => *batch_id, "error" => e, "imported_blocks"=> imported_blocks); + BatchProcessResult::Partial + } + (_, Err(e)) => { debug!(log, "Batch processing failed"; "id" => *batch_id, "error" => e); BatchProcessResult::Failed } }; let msg = SyncMessage::BatchProcessed { - batch_id: batch_id, - downloaded_blocks: downloaded_blocks, + chain_id, + batch_id, + downloaded_blocks, result, }; sync_send.try_send(msg).unwrap_or_else(|_| { @@ -65,11 +73,15 @@ pub fn spawn_block_processor( } // this a parent lookup request from the sync manager ProcessId::ParentLookup(peer_id) => { - debug!(log, "Processing parent lookup"; "last_peer_id" => format!("{}", peer_id), "blocks" => downloaded_blocks.len()); + debug!( + log, "Processing parent lookup"; + "last_peer_id" => format!("{}", peer_id), + "blocks" => downloaded_blocks.len() + ); // parent blocks are ordered from highest slot to lowest, so we need to process in // reverse match process_blocks(chain, downloaded_blocks.iter().rev(), &log) { - Err(e) => { + (_, Err(e)) => { warn!(log, "Parent lookup failed"; "last_peer_id" => format!("{}", peer_id), "error" => e); sync_send .try_send(SyncMessage::ParentLookupFailed(peer_id)) @@ -81,7 +93,7 @@ pub fn spawn_block_processor( ); }); } - Ok(_) => { + (_, Ok(_)) => { debug!(log, "Parent lookup processed successfully"); } } @@ -99,126 +111,40 @@ fn 
process_blocks< chain: Weak>, downloaded_blocks: I, log: &slog::Logger, -) -> Result<(), String> { - let mut successful_block_import = false; - for block in downloaded_blocks { - if let Some(chain) = chain.upgrade() { - let processing_result = chain.process_block(block.clone()); - - if let Ok(outcome) = processing_result { - match outcome { - BlockProcessingOutcome::Processed { block_root } => { - // The block was valid and we processed it successfully. - trace!( - log, "Imported block from network"; - "slot" => block.slot(), - "block_root" => format!("{}", block_root), - ); - successful_block_import = true; - } - BlockProcessingOutcome::ParentUnknown { parent, .. } => { - // blocks should be sequential and all parents should exist - // this is a failure if blocks do not have parents - warn!( - log, "Parent block is unknown"; - "parent_root" => format!("{}", parent), - "baby_block_slot" => block.slot(), - ); - if successful_block_import { - run_fork_choice(chain, log); - } - return Err(format!( - "Block at slot {} has an unknown parent.", - block.slot() - )); - } - BlockProcessingOutcome::BlockIsAlreadyKnown => { - // this block is already known to us, move to the next - debug!( - log, "Imported a block that is already known"; - "block_slot" => block.slot(), - ); - } - BlockProcessingOutcome::FutureSlot { - present_slot, - block_slot, - } => { - if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { - // The block is too far in the future, drop it. - warn!( - log, "Block is ahead of our slot clock"; - "msg" => "block for future slot rejected, check your time", - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, - ); - if successful_block_import { - run_fork_choice(chain, log); - } - return Err(format!( - "Block at slot {} is too far in the future", - block.slot() - )); - } else { - // The block is in the future, but not too far. 
- debug!( - log, "Block is slightly ahead of our slot clock, ignoring."; - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, - ); - } - } - BlockProcessingOutcome::WouldRevertFinalizedSlot { .. } => { - debug!( - log, "Finalized or earlier block processed"; - "outcome" => format!("{:?}", outcome), - ); - // block reached our finalized slot or was earlier, move to the next block - } - BlockProcessingOutcome::GenesisBlock => { - debug!( - log, "Genesis block was processed"; - "outcome" => format!("{:?}", outcome), - ); - } - _ => { - warn!( - log, "Invalid block received"; - "msg" => "peer sent invalid block", - "outcome" => format!("{:?}", outcome), - ); - if successful_block_import { - run_fork_choice(chain, log); - } - return Err(format!("Invalid block at slot {}", block.slot())); - } - } - } else { - warn!( - log, "BlockProcessingFailure"; - "msg" => "unexpected condition in processing block.", - "outcome" => format!("{:?}", processing_result) - ); - if successful_block_import { +) -> (usize, Result<(), String>) { + if let Some(chain) = chain.upgrade() { + let blocks = downloaded_blocks.cloned().collect::>(); + let (imported_blocks, r) = match chain.process_chain_segment(blocks) { + ChainSegmentResult::Successful { imported_blocks } => { + if imported_blocks == 0 { + debug!(log, "All blocks already known"); + } else { + debug!( + log, "Imported blocks from network"; + "count" => imported_blocks, + ); + // Batch completed successfully with at least one block, run fork choice. 
run_fork_choice(chain, log); } - return Err(format!( - "Unexpected block processing error: {:?}", - processing_result - )); + + (imported_blocks, Ok(())) } - } else { - return Ok(()); // terminate early due to dropped beacon chain - } + ChainSegmentResult::Failed { + imported_blocks, + error, + } => { + let r = handle_failed_chain_segment(error, log); + if imported_blocks > 0 { + run_fork_choice(chain, log); + } + (imported_blocks, r) + } + }; + + return (imported_blocks, r); } - // Batch completed successfully, run fork choice. - if let Some(chain) = chain.upgrade() { - run_fork_choice(chain, log); - } - - Ok(()) + (0, Ok(())) } /// Runs fork-choice on a given chain. This is used during block processing after one successful @@ -238,3 +164,74 @@ fn run_fork_choice(chain: Arc>, log: &slog:: ), } } + +/// Helper function to handle a `BlockError` from `process_chain_segment` +fn handle_failed_chain_segment(error: BlockError, log: &slog::Logger) -> Result<(), String> { + match error { + BlockError::ParentUnknown(parent) => { + // blocks should be sequential and all parents should exist + + Err(format!("Block has an unknown parent: {}", parent)) + } + BlockError::BlockIsAlreadyKnown => { + // This can happen for many reasons. Head sync's can download multiples and parent + // lookups can download blocks before range sync + Ok(()) + } + BlockError::FutureSlot { + present_slot, + block_slot, + } => { + if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { + // The block is too far in the future, drop it. + warn!( + log, "Block is ahead of our slot clock"; + "msg" => "block for future slot rejected, check your time", + "present_slot" => present_slot, + "block_slot" => block_slot, + "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, + ); + } else { + // The block is in the future, but not too far. 
+ debug!( + log, "Block is slightly ahead of our slot clock, ignoring."; + "present_slot" => present_slot, + "block_slot" => block_slot, + "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, + ); + } + + Err(format!( + "Block with slot {} is higher than the current slot {}", + block_slot, present_slot + )) + } + BlockError::WouldRevertFinalizedSlot { .. } => { + debug!( log, "Finalized or earlier block processed";); + + Ok(()) + } + BlockError::GenesisBlock => { + debug!(log, "Genesis block was processed"); + Ok(()) + } + BlockError::BeaconChainError(e) => { + warn!( + log, "BlockProcessingFailure"; + "msg" => "unexpected condition in processing block.", + "outcome" => format!("{:?}", e) + ); + + Err(format!("Internal error whilst processing block: {:?}", e)) + } + other => { + warn!( + log, "Invalid block received"; + "msg" => "peer sent invalid block", + "outcome" => format!("{:?}", other), + ); + + Err(format!("Peer sent invalid block. Reason: {:?}", other)) + } + } +} diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 9fac59497..c67ef22c4 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -35,31 +35,29 @@ use super::block_processor::{spawn_block_processor, BatchProcessResult, ProcessId}; use super::network_context::SyncNetworkContext; -use super::range_sync::{BatchId, RangeSync}; -use crate::message_processor::PeerSyncInfo; +use super::peer_sync_info::{PeerSyncInfo, PeerSyncType}; +use super::range_sync::{BatchId, ChainId, RangeSync}; use crate::service::NetworkMessage; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; -use eth2_libp2p::rpc::methods::*; -use eth2_libp2p::rpc::RequestId; +use eth2_libp2p::rpc::{methods::*, RequestId}; +use eth2_libp2p::types::NetworkGlobals; use eth2_libp2p::PeerId; use fnv::FnvHashMap; use futures::prelude::*; -use rand::seq::SliceRandom; use slog::{crit, debug, error, info, trace, warn, Logger}; use 
smallvec::SmallVec; use std::boxed::Box; -use std::collections::HashSet; use std::ops::Sub; -use std::sync::Weak; +use std::sync::Arc; use tokio::sync::{mpsc, oneshot}; -use types::{EthSpec, Hash256, SignedBeaconBlock}; +use types::{EthSpec, Hash256, SignedBeaconBlock, Slot}; /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync /// from a peer. If a peer is within this tolerance (forwards or backwards), it is treated as a /// fully sync'd peer. -const SLOT_IMPORT_TOLERANCE: usize = 20; +pub const SLOT_IMPORT_TOLERANCE: usize = 20; /// How many attempts we try to find a parent of a block before we give up trying . -const PARENT_FAIL_TOLERANCE: usize = 3; +const PARENT_FAIL_TOLERANCE: usize = 5; /// The maximum depth we will search for a parent block. In principle we should have sync'd any /// canonical chain to its head once the peer connects. A chain should not appear where it's depth /// is further back than the most recent head slot. @@ -100,6 +98,7 @@ pub enum SyncMessage { /// A batch has been processed by the block processor thread. BatchProcessed { + chain_id: ChainId, batch_id: BatchId, downloaded_blocks: Vec>, result: BatchProcessResult, @@ -126,38 +125,22 @@ struct ParentRequests { pending: Option, } -#[derive(PartialEq, Debug, Clone)] -/// The current state of the `ImportManager`. -enum ManagerState { - /// The manager is performing a long-range (batch) sync. In this mode, parent lookups are - /// disabled. - Syncing, - - /// The manager is up to date with all known peers and is connected to at least one - /// fully-syncing peer. In this state, parent lookups are enabled. - Regular, - - /// No useful peers are connected. Long-range sync's cannot proceed and we have no useful - /// peers to download parents for. More peers need to be connected before we can proceed. - Stalled, -} - /// The primary object for handling and driving all the current syncing logic. 
It maintains the /// current state of the syncing process, the number of useful peers, downloaded blocks and /// controls the logic behind both the long-range (batch) sync and the on-going potential parent /// look-up of blocks. pub struct SyncManager { - /// A weak reference to the underlying beacon chain. - chain: Weak>, + /// A reference to the underlying beacon chain. + chain: Arc>, - /// The current state of the import manager. - state: ManagerState, + /// A reference to the network globals and peer-db. + network_globals: Arc>, /// A receiving channel sent by the message processor thread. input_channel: mpsc::UnboundedReceiver>, /// A network context to contact the network service. - network: SyncNetworkContext, + network: SyncNetworkContext, /// The object handling long-range batch load-balanced syncing. range_sync: RangeSync, @@ -169,10 +152,7 @@ pub struct SyncManager { /// received or not. /// /// The flag allows us to determine if the peer returned data or sent us nothing. - single_block_lookups: FnvHashMap, - - /// The collection of known, connected, fully-sync'd peers. - full_peers: HashSet, + single_block_lookups: FnvHashMap, /// The logger for the import manager. log: Logger, @@ -181,13 +161,31 @@ pub struct SyncManager { sync_send: mpsc::UnboundedSender>, } +/// Object representing a single block lookup request. +struct SingleBlockRequest { + /// The hash of the requested block. + pub hash: Hash256, + /// Whether a block was received from this request, or the peer returned an empty response. + pub block_returned: bool, +} + +impl SingleBlockRequest { + pub fn new(hash: Hash256) -> Self { + Self { + hash, + block_returned: false, + } + } +} + /// Spawns a new `SyncManager` thread which has a weak reference to underlying beacon /// chain. This allows the chain to be /// dropped during the syncing process which will gracefully end the `SyncManager`. 
pub fn spawn( executor: &tokio::runtime::TaskExecutor, - beacon_chain: Weak>, - network_send: mpsc::UnboundedSender, + beacon_chain: Arc>, + network_globals: Arc>, + network_send: mpsc::UnboundedSender>, log: slog::Logger, ) -> ( mpsc::UnboundedSender>, @@ -200,14 +198,18 @@ pub fn spawn( // create an instance of the SyncManager let sync_manager = SyncManager { - chain: beacon_chain.clone(), - state: ManagerState::Stalled, - input_channel: sync_recv, + range_sync: RangeSync::new( + beacon_chain.clone(), + network_globals.clone(), + sync_send.clone(), + log.clone(), + ), network: SyncNetworkContext::new(network_send, log.clone()), - range_sync: RangeSync::new(beacon_chain, sync_send.clone(), log.clone()), + chain: beacon_chain, + network_globals, + input_channel: sync_recv, parent_queue: SmallVec::new(), single_block_lookups: FnvHashMap::default(), - full_peers: HashSet::new(), log: log.clone(), sync_send: sync_send.clone(), }; @@ -239,17 +241,7 @@ impl SyncManager { /// ours that we consider it fully sync'd with respect to our current chain. fn add_peer(&mut self, peer_id: PeerId, remote: PeerSyncInfo) { // ensure the beacon chain still exists - let chain = match self.chain.upgrade() { - Some(chain) => chain, - None => { - warn!(self.log, - "Beacon chain dropped. Peer not considered for sync"; - "peer_id" => format!("{:?}", peer_id)); - return; - } - }; - - let local = match PeerSyncInfo::from_chain(&chain) { + let local_peer_info = match PeerSyncInfo::from_chain(&self.chain) { Some(local) => local, None => { return error!( @@ -260,36 +252,34 @@ impl SyncManager { } }; - // If a peer is within SLOT_IMPORT_TOLERANCE from our head slot, ignore a batch/range sync, - // consider it a fully-sync'd peer. 
- if remote.head_slot.sub(local.head_slot).as_usize() < SLOT_IMPORT_TOLERANCE { - trace!(self.log, "Ignoring full sync with peer"; - "peer" => format!("{:?}", peer_id), - "peer_head_slot" => remote.head_slot, - "local_head_slot" => local.head_slot, - ); - self.add_full_peer(peer_id); - // notify the range sync that a peer has been added - self.range_sync.fully_synced_peer_found(); - return; - } - - // Check if the peer is significantly behind us. If within `SLOT_IMPORT_TOLERANCE` - // treat them as a fully synced peer. If not, ignore them in the sync process - if local.head_slot.sub(remote.head_slot).as_usize() < SLOT_IMPORT_TOLERANCE { - self.add_full_peer(peer_id.clone()); - } else { - debug!( - self.log, - "Out of sync peer connected"; + match local_peer_info.peer_sync_type(&remote) { + PeerSyncType::FullySynced => { + trace!(self.log, "Peer synced to our head found"; "peer" => format!("{:?}", peer_id), - ); - return; + "peer_head_slot" => remote.head_slot, + "local_head_slot" => local_peer_info.head_slot, + ); + self.synced_peer(&peer_id, remote); + // notify the range sync that a peer has been added + self.range_sync.fully_synced_peer_found(); + } + PeerSyncType::Advanced => { + trace!(self.log, "Useful peer for sync found"; + "peer" => format!("{:?}", peer_id), + "peer_head_slot" => remote.head_slot, + "local_head_slot" => local_peer_info.head_slot, + "remote_finalized_epoch" => local_peer_info.finalized_epoch, + "local_finalized_epoch" => remote.finalized_epoch, + ); + // Add the peer to our RangeSync + self.range_sync + .add_peer(&mut self.network, peer_id.clone(), remote); + self.advanced_peer(&peer_id, remote); + } + PeerSyncType::Behind => { + self.behind_peer(&peer_id, remote); + } } - - // Add the peer to our RangeSync - self.range_sync.add_peer(&mut self.network, peer_id, remote); - self.update_state(); } /// The response to a `BlocksByRoot` request. 
@@ -310,12 +300,10 @@ impl SyncManager { // check if this is a single block lookup - i.e we were searching for a specific hash let mut single_block_hash = None; - if let Some((block_hash, data_received)) = - self.single_block_lookups.get_mut(&request_id) - { + if let Some(block_request) = self.single_block_lookups.get_mut(&request_id) { // update the state of the lookup indicating a block was received from the peer - *data_received = true; - single_block_hash = Some(block_hash.clone()); + block_request.block_returned = true; + single_block_hash = Some(block_request.hash.clone()); } if let Some(block_hash) = single_block_hash { self.single_block_lookup_response(peer_id, block, block_hash); @@ -346,12 +334,10 @@ impl SyncManager { // this is a stream termination // stream termination for a single block lookup, remove the key - if let Some((block_hash, data_received)) = - self.single_block_lookups.remove(&request_id) - { + if let Some(single_block_request) = self.single_block_lookups.remove(&request_id) { // the peer didn't respond with a block that it referenced - if !data_received { - warn!(self.log, "Peer didn't respond with a block it referenced"; "referenced_block_hash" => format!("{}", block_hash), "peer_id" => format!("{}", peer_id)); + if !single_block_request.block_returned { + warn!(self.log, "Peer didn't respond with a block it referenced"; "referenced_block_hash" => format!("{}", single_block_request.hash), "peer_id" => format!("{}", peer_id)); self.network.downvote_peer(peer_id); } return; @@ -398,43 +384,41 @@ impl SyncManager { } // we have the correct block, try and process it - if let Some(chain) = self.chain.upgrade() { - match chain.process_block(block.clone()) { - Ok(outcome) => { - match outcome { - BlockProcessingOutcome::Processed { block_root } => { - info!(self.log, "Processed block"; "block" => format!("{}", block_root)); + match BlockProcessingOutcome::shim(self.chain.process_block(block.clone())) { + Ok(outcome) => { + match outcome { + 
BlockProcessingOutcome::Processed { block_root } => { + info!(self.log, "Processed block"; "block" => format!("{}", block_root)); - match chain.fork_choice() { - Ok(()) => trace!( - self.log, - "Fork choice success"; - "location" => "single block" - ), - Err(e) => error!( - self.log, - "Fork choice failed"; - "error" => format!("{:?}", e), - "location" => "single block" - ), - } - } - BlockProcessingOutcome::ParentUnknown { .. } => { - // We don't know of the blocks parent, begin a parent lookup search - self.add_unknown_block(peer_id, block); - } - BlockProcessingOutcome::BlockIsAlreadyKnown => { - trace!(self.log, "Single block lookup already known"); - } - _ => { - warn!(self.log, "Single block lookup failed"; "outcome" => format!("{:?}", outcome)); - self.network.downvote_peer(peer_id); + match self.chain.fork_choice() { + Ok(()) => trace!( + self.log, + "Fork choice success"; + "location" => "single block" + ), + Err(e) => error!( + self.log, + "Fork choice failed"; + "error" => format!("{:?}", e), + "location" => "single block" + ), } } + BlockProcessingOutcome::ParentUnknown { .. } => { + // We don't know of the blocks parent, begin a parent lookup search + self.add_unknown_block(peer_id, block); + } + BlockProcessingOutcome::BlockIsAlreadyKnown => { + trace!(self.log, "Single block lookup already known"); + } + _ => { + warn!(self.log, "Single block lookup failed"; "outcome" => format!("{:?}", outcome)); + self.network.downvote_peer(peer_id); + } } - Err(e) => { - warn!(self.log, "Unexpected block processing error"; "error" => format!("{:?}", e)); - } + } + Err(e) => { + warn!(self.log, "Unexpected block processing error"; "error" => format!("{:?}", e)); } } } @@ -442,9 +426,24 @@ impl SyncManager { /// A block has been sent to us that has an unknown parent. This begins a parent lookup search /// to find the parent or chain of parents that match our current chain. 
fn add_unknown_block(&mut self, peer_id: PeerId, block: SignedBeaconBlock) { - // If we are not in regular sync mode, ignore this block - if self.state != ManagerState::Regular { - return; + // If we are not synced or within SLOT_IMPORT_TOLERANCE of the block, ignore + if !self.network_globals.sync_state.read().is_synced() { + let head_slot = self + .chain + .head_info() + .map(|info| info.slot) + .unwrap_or_else(|_| Slot::from(0u64)); + let unknown_block_slot = block.message.slot; + + // if the block is far in the future, ignore it. If its within the slot tolerance of + // our current head, regardless of the syncing state, fetch it. + if (head_slot >= unknown_block_slot + && head_slot.sub(unknown_block_slot).as_usize() > SLOT_IMPORT_TOLERANCE) + || (head_slot < unknown_block_slot + && unknown_block_slot.sub(head_slot).as_usize() > SLOT_IMPORT_TOLERANCE) + { + return; + } } // Make sure this block is not already being searched for @@ -473,8 +472,18 @@ impl SyncManager { /// A request to search for a block hash has been received. This function begins a BlocksByRoot /// request to find the requested block. 
fn search_for_block(&mut self, peer_id: PeerId, block_hash: Hash256) { - // If we are not in regular sync mode, ignore this block - if self.state != ManagerState::Regular { + // If we are not synced, ignore this block + if !self.network_globals.sync_state.read().is_synced() { + return; + } + + // Do not re-request a block that is already being requested + if self + .single_block_lookups + .values() + .find(|single_block_request| single_block_request.hash == block_hash) + .is_some() + { return; } @@ -484,7 +493,7 @@ impl SyncManager { if let Ok(request_id) = self.network.blocks_by_root_request(peer_id, request) { self.single_block_lookups - .insert(request_id, (block_hash, false)); + .insert(request_id, SingleBlockRequest::new(block_hash)); } } @@ -516,42 +525,57 @@ impl SyncManager { fn peer_disconnect(&mut self, peer_id: &PeerId) { self.range_sync.peer_disconnect(&mut self.network, peer_id); - self.full_peers.remove(peer_id); - self.update_state(); + self.update_sync_state(); } - fn add_full_peer(&mut self, peer_id: PeerId) { - debug!( - self.log, "Fully synced peer added"; - "peer" => format!("{:?}", peer_id), - ); - self.full_peers.insert(peer_id); + /// Updates the syncing state of a peer to be synced. + fn synced_peer(&mut self, peer_id: &PeerId, sync_info: PeerSyncInfo) { + if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) { + if peer_info.sync_status.update_synced(sync_info.into()) { + debug!(self.log, "Peer transitioned to synced status"; "peer_id" => format!("{}", peer_id)); + } + } else { + crit!(self.log, "Status'd peer is unknown"; "peer_id" => format!("{}", peer_id)); + } + self.update_sync_state(); } + /// Updates the syncing state of a peer to be behind. 
+ fn advanced_peer(&mut self, peer_id: &PeerId, sync_info: PeerSyncInfo) { + if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) { + let advanced_slot = sync_info.head_slot; + if peer_info.sync_status.update_ahead(sync_info.into()) { + debug!(self.log, "Peer transitioned to from synced state to ahead"; "peer_id" => format!("{}", peer_id), "head_slot" => advanced_slot); + } + } else { + crit!(self.log, "Status'd peer is unknown"; "peer_id" => format!("{}", peer_id)); + } + self.update_sync_state(); + } + + /// Updates the syncing state of a peer to be behind. + fn behind_peer(&mut self, peer_id: &PeerId, sync_info: PeerSyncInfo) { + if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) { + let behind_slot = sync_info.head_slot; + if peer_info.sync_status.update_behind(sync_info.into()) { + debug!(self.log, "Peer transitioned to from synced state to behind"; "peer_id" => format!("{}", peer_id), "head_slot" => behind_slot); + } + } else { + crit!(self.log, "Status'd peer is unknown"; "peer_id" => format!("{}", peer_id)); + } + self.update_sync_state(); + } + + /// Updates the global sync state and logs any changes. + fn update_sync_state(&mut self) { + if let Some((old_state, new_state)) = self.network_globals.update_sync_state() { + info!(self.log, "Sync state updated"; "old_state" => format!("{}", old_state), "new_state" => format!("{}",new_state)); + } + } /* Processing State Functions */ // These functions are called in the main poll function to transition the state of the sync // manager - /// Updates the syncing state of the `SyncManager`. 
- fn update_state(&mut self) { - let previous_state = self.state.clone(); - self.state = { - if self.range_sync.is_syncing() { - ManagerState::Syncing - } else if !self.full_peers.is_empty() { - ManagerState::Regular - } else { - ManagerState::Stalled - } - }; - if self.state != previous_state { - info!(self.log, "Syncing state updated"; - "old_state" => format!("{:?}", previous_state), - "new_state" => format!("{:?}", self.state), - ); - } - } - /// A new block has been received for a parent lookup query, process it. fn process_parent_request(&mut self, mut parent_request: ParentRequests) { // verify the last added block is the parent of the last requested block @@ -598,55 +622,50 @@ impl SyncManager { // If the last block in the queue has an unknown parent, we continue the parent // lookup-search. - if let Some(chain) = self.chain.upgrade() { - let newest_block = parent_request - .downloaded_blocks - .pop() - .expect("There is always at least one block in the queue"); - match chain.process_block(newest_block.clone()) { - Ok(BlockProcessingOutcome::ParentUnknown { .. }) => { - // need to keep looking for parents - // add the block back to the queue and continue the search - parent_request.downloaded_blocks.push(newest_block); - self.request_parent(parent_request); - return; - } - Ok(BlockProcessingOutcome::Processed { .. }) - | Ok(BlockProcessingOutcome::BlockIsAlreadyKnown { .. }) => { - spawn_block_processor( - self.chain.clone(), - ProcessId::ParentLookup(parent_request.last_submitted_peer.clone()), - parent_request.downloaded_blocks, - self.sync_send.clone(), - self.log.clone(), - ); - } - Ok(outcome) => { - // all else we consider the chain a failure and downvote the peer that sent - // us the last block - warn!( - self.log, "Invalid parent chain. 
Downvoting peer"; - "outcome" => format!("{:?}", outcome), - "last_peer" => format!("{:?}", parent_request.last_submitted_peer), - ); - self.network - .downvote_peer(parent_request.last_submitted_peer.clone()); - return; - } - Err(e) => { - warn!( - self.log, "Parent chain processing error. Downvoting peer"; - "error" => format!("{:?}", e), - "last_peer" => format!("{:?}", parent_request.last_submitted_peer), - ); - self.network - .downvote_peer(parent_request.last_submitted_peer.clone()); - return; - } + let newest_block = parent_request + .downloaded_blocks + .pop() + .expect("There is always at least one block in the queue"); + match BlockProcessingOutcome::shim(self.chain.process_block(newest_block.clone())) { + Ok(BlockProcessingOutcome::ParentUnknown { .. }) => { + // need to keep looking for parents + // add the block back to the queue and continue the search + parent_request.downloaded_blocks.push(newest_block); + self.request_parent(parent_request); + return; + } + Ok(BlockProcessingOutcome::Processed { .. }) + | Ok(BlockProcessingOutcome::BlockIsAlreadyKnown { .. }) => { + spawn_block_processor( + Arc::downgrade(&self.chain), + ProcessId::ParentLookup(parent_request.last_submitted_peer.clone()), + parent_request.downloaded_blocks, + self.sync_send.clone(), + self.log.clone(), + ); + } + Ok(outcome) => { + // all else we consider the chain a failure and downvote the peer that sent + // us the last block + warn!( + self.log, "Invalid parent chain. Downvoting peer"; + "outcome" => format!("{:?}", outcome), + "last_peer" => format!("{:?}", parent_request.last_submitted_peer), + ); + self.network + .downvote_peer(parent_request.last_submitted_peer.clone()); + return; + } + Err(e) => { + warn!( + self.log, "Parent chain processing error. 
Downvoting peer"; + "error" => format!("{:?}", e), + "last_peer" => format!("{:?}", parent_request.last_submitted_peer), + ); + self.network + .downvote_peer(parent_request.last_submitted_peer.clone()); + return; } - } else { - // chain doesn't exist, drop the parent queue and return - return; } } } @@ -660,9 +679,16 @@ impl SyncManager { if parent_request.failed_attempts >= PARENT_FAIL_TOLERANCE || parent_request.downloaded_blocks.len() >= PARENT_DEPTH_TOLERANCE { + let error = if parent_request.failed_attempts >= PARENT_FAIL_TOLERANCE { + "too many failed attempts" + } else { + "reached maximum lookup-depth" + }; + debug!(self.log, "Parent import failed"; "block" => format!("{:?}",parent_request.downloaded_blocks[0].canonical_root()), - "ancestors_found" => parent_request.downloaded_blocks.len() + "ancestors_found" => parent_request.downloaded_blocks.len(), + "reason" => error ); return; // drop the request } @@ -677,14 +703,10 @@ impl SyncManager { let request = BlocksByRootRequest { block_roots: vec![parent_hash], }; - // select a random fully synced peer to attempt to download the parent block - let available_peers = self.full_peers.iter().collect::>(); - let peer_id = if let Some(peer_id) = available_peers.choose(&mut rand::thread_rng()) { - (**peer_id).clone() - } else { - // there were no peers to choose from. We drop the lookup request - return; - }; + + // We continue to search for the chain of blocks from the same peer. Other peers are not + // guaranteed to have this chain of blocks. 
+ let peer_id = parent_request.last_submitted_peer.clone(); if let Ok(request_id) = self.network.blocks_by_root_request(peer_id, request) { // if the request was successful add the queue back into self @@ -738,12 +760,14 @@ impl Future for SyncManager { self.inject_error(peer_id, request_id); } SyncMessage::BatchProcessed { + chain_id, batch_id, downloaded_blocks, result, } => { self.range_sync.handle_block_process_result( &mut self.network, + chain_id, batch_id, downloaded_blocks, result, @@ -763,9 +787,6 @@ impl Future for SyncManager { } } - // update the state of the manager - self.update_state(); - Ok(Async::NotReady) } } diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index 57d9ee393..2e68dc6e8 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -4,11 +4,8 @@ mod block_processor; pub mod manager; mod network_context; +mod peer_sync_info; mod range_sync; -/// Currently implemented sync methods. -pub enum SyncMethod { - SimpleSync, -} - pub use manager::SyncMessage; +pub use peer_sync_info::PeerSyncInfo; diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index f2cd7a970..b28164b6d 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -1,56 +1,55 @@ //! Provides network functionality for the Syncing thread. This fundamentally wraps a network //! channel and stores a global RPC ID to perform requests. 
-use crate::message_processor::status_message; +use crate::router::processor::status_message; use crate::service::NetworkMessage; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::{RPCEvent, RPCRequest, RequestId}; use eth2_libp2p::PeerId; use slog::{debug, trace, warn}; -use std::sync::Weak; +use std::sync::Arc; use tokio::sync::mpsc; +use types::EthSpec; /// Wraps a Network channel to employ various RPC related network functionality for the Sync manager. This includes management of a global RPC request Id. -pub struct SyncNetworkContext { +pub struct SyncNetworkContext { /// The network channel to relay messages to the Network service. - network_send: mpsc::UnboundedSender, + network_send: mpsc::UnboundedSender>, request_id: RequestId, /// Logger for the `SyncNetworkContext`. log: slog::Logger, } -impl SyncNetworkContext { - pub fn new(network_send: mpsc::UnboundedSender, log: slog::Logger) -> Self { +impl SyncNetworkContext { + pub fn new(network_send: mpsc::UnboundedSender>, log: slog::Logger) -> Self { Self { network_send, - request_id: 0, + request_id: 1, log, } } - pub fn status_peer( + pub fn status_peer( &mut self, - chain: Weak>, + chain: Arc>, peer_id: PeerId, ) { - if let Some(chain) = chain.upgrade() { - if let Some(status_message) = status_message(&chain) { - debug!( - self.log, - "Sending Status Request"; - "peer" => format!("{:?}", peer_id), - "fork_version" => format!("{:?}", status_message.fork_version), - "finalized_root" => format!("{:?}", status_message.finalized_root), - "finalized_epoch" => format!("{:?}", status_message.finalized_epoch), - "head_root" => format!("{}", status_message.head_root), - "head_slot" => format!("{}", status_message.head_slot), - ); + if let Some(status_message) = status_message(&chain) { + debug!( + self.log, + "Sending Status Request"; + "peer" => format!("{:?}", peer_id), + "fork_digest" => format!("{:?}", status_message.fork_digest), + "finalized_root" => 
format!("{:?}", status_message.finalized_root), + "finalized_epoch" => format!("{:?}", status_message.finalized_epoch), + "head_root" => format!("{}", status_message.head_root), + "head_slot" => format!("{}", status_message.head_slot), + ); - let _ = self.send_rpc_request(peer_id, RPCRequest::Status(status_message)); - } + let _ = self.send_rpc_request(peer_id, RPCRequest::Status(status_message)); } } @@ -117,7 +116,7 @@ impl SyncNetworkContext { pub fn send_rpc_request( &mut self, peer_id: PeerId, - rpc_request: RPCRequest, + rpc_request: RPCRequest, ) -> Result { let request_id = self.request_id; self.request_id += 1; @@ -125,7 +124,11 @@ impl SyncNetworkContext { Ok(request_id) } - fn send_rpc_event(&mut self, peer_id: PeerId, rpc_event: RPCEvent) -> Result<(), &'static str> { + fn send_rpc_event( + &mut self, + peer_id: PeerId, + rpc_event: RPCEvent, + ) -> Result<(), &'static str> { self.network_send .try_send(NetworkMessage::RPC(peer_id, rpc_event)) .map_err(|_| { diff --git a/beacon_node/network/src/sync/peer_sync_info.rs b/beacon_node/network/src/sync/peer_sync_info.rs new file mode 100644 index 000000000..724aa567d --- /dev/null +++ b/beacon_node/network/src/sync/peer_sync_info.rs @@ -0,0 +1,114 @@ +use super::manager::SLOT_IMPORT_TOLERANCE; +use crate::router::processor::status_message; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2_libp2p::rpc::methods::*; +use eth2_libp2p::SyncInfo; +use std::ops::Sub; +use std::sync::Arc; +use types::{Epoch, Hash256, Slot}; + +/// +/// Keeps track of syncing information for known connected peers. +#[derive(Clone, Copy, Debug)] +pub struct PeerSyncInfo { + pub fork_digest: [u8; 4], + pub finalized_root: Hash256, + pub finalized_epoch: Epoch, + pub head_root: Hash256, + pub head_slot: Slot, +} + +/// The type of peer relative to our current state. +pub enum PeerSyncType { + /// The peer is on our chain and is fully synced with respect to our chain. 
+ FullySynced, + /// The peer has a greater knowledge of the chain that us that warrants a full sync. + Advanced, + /// A peer is behind in the sync and not useful to us for downloading blocks. + Behind, +} + +impl From for PeerSyncInfo { + fn from(status: StatusMessage) -> PeerSyncInfo { + PeerSyncInfo { + fork_digest: status.fork_digest, + finalized_root: status.finalized_root, + finalized_epoch: status.finalized_epoch, + head_root: status.head_root, + head_slot: status.head_slot, + } + } +} + +impl Into for PeerSyncInfo { + fn into(self) -> SyncInfo { + SyncInfo { + status_head_slot: self.head_slot, + status_head_root: self.head_root, + status_finalized_epoch: self.finalized_epoch, + status_finalized_root: self.finalized_root, + } + } +} + +impl PeerSyncInfo { + /// Derives the peer sync information from a beacon chain. + pub fn from_chain(chain: &Arc>) -> Option { + Some(Self::from(status_message(chain)?)) + } + + /// Given another peer's `PeerSyncInfo` this will determine how useful that peer is for us in + /// regards to syncing. This returns the peer sync type that can then be handled by the + /// `SyncManager`. + pub fn peer_sync_type(&self, remote_peer_sync_info: &PeerSyncInfo) -> PeerSyncType { + // check if the peer is fully synced with our current chain + if self.is_fully_synced_peer(remote_peer_sync_info) { + PeerSyncType::FullySynced + } + // if not, check if the peer is ahead of our chain + else if self.is_ahead_peer(remote_peer_sync_info) { + PeerSyncType::Advanced + } else { + // the peer must be behind and not useful + PeerSyncType::Behind + } + } + + /// Determines if another peer is fully synced with the current peer. + /// + /// A fully synced peer is a peer whose finalized epoch and hash match our own and their + /// head is within SLOT_IMPORT_TOLERANCE of our own. + /// In this case we ignore any batch/range syncing. 
+ fn is_fully_synced_peer(&self, remote: &PeerSyncInfo) -> bool { + // ensure we are on the same chain, with minor differing heads + if remote.finalized_epoch == self.finalized_epoch + && remote.finalized_root == self.finalized_root + { + // that we are within SLOT_IMPORT_TOLERANCE of our two heads + if (self.head_slot >= remote.head_slot + && self.head_slot.sub(remote.head_slot).as_usize() <= SLOT_IMPORT_TOLERANCE) + || (self.head_slot < remote.head_slot) + && remote.head_slot.sub(self.head_slot).as_usize() <= SLOT_IMPORT_TOLERANCE + { + return true; + } + } + false + } + + /// Determines if a peer has more knowledge about the current chain than we do. + /// + /// There are two conditions here. + /// 1) The peer could have a head slot that is greater + /// than SLOT_IMPORT_TOLERANCE of our current head. + /// 2) The peer has a greater finalized slot/epoch than our own. + fn is_ahead_peer(&self, remote: &PeerSyncInfo) -> bool { + if remote.head_slot.sub(self.head_slot).as_usize() > SLOT_IMPORT_TOLERANCE + || self.finalized_epoch < remote.finalized_epoch + { + true + } else { + false + } + } +} diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 3e516a93b..58df69339 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -9,7 +9,7 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::hash::{Hash, Hasher}; use std::ops::Sub; -use types::{EthSpec, Hash256, SignedBeaconBlock, Slot}; +use types::{EthSpec, SignedBeaconBlock, Slot}; #[derive(Copy, Clone, Debug, PartialEq)] pub struct BatchId(pub u64); @@ -41,8 +41,6 @@ pub struct Batch { pub start_slot: Slot, /// The requested end slot of batch, exclusive. pub end_slot: Slot, - /// The hash of the chain root to requested from the peer. - pub head_root: Hash256, /// The peer that was originally assigned to the batch. 
pub original_peer: PeerId, /// The peer that is currently assigned to the batch. @@ -61,18 +59,11 @@ pub struct Batch { impl Eq for Batch {} impl Batch { - pub fn new( - id: BatchId, - start_slot: Slot, - end_slot: Slot, - head_root: Hash256, - peer_id: PeerId, - ) -> Self { + pub fn new(id: BatchId, start_slot: Slot, end_slot: Slot, peer_id: PeerId) -> Self { Batch { id, start_slot, end_slot, - head_root, original_peer: peer_id.clone(), current_peer: peer_id, retries: 0, @@ -84,7 +75,6 @@ impl Batch { pub fn to_blocks_by_range_request(&self) -> BlocksByRangeRequest { BlocksByRangeRequest { - head_block_root: self.head_root, start_slot: self.start_slot.into(), count: std::cmp::min(BLOCKS_PER_BATCH, self.end_slot.sub(self.start_slot).into()), step: 1, diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 88061378a..2f19e3673 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -8,7 +8,7 @@ use eth2_libp2p::PeerId; use rand::prelude::*; use slog::{crit, debug, warn}; use std::collections::HashSet; -use std::sync::Weak; +use std::sync::Arc; use tokio::sync::mpsc; use types::{Hash256, SignedBeaconBlock, Slot}; @@ -17,7 +17,7 @@ use types::{Hash256, SignedBeaconBlock, Slot}; /// downvote peers with poor bandwidth. This can be set arbitrarily high, in which case the /// responder will fill the response up to the max request size, assuming they have the bandwidth /// to do so. -pub const BLOCKS_PER_BATCH: u64 = 50; +pub const BLOCKS_PER_BATCH: u64 = 64; /// The number of times to retry a batch before the chain is considered failed and removed. const MAX_BATCH_RETRIES: u8 = 5; @@ -38,10 +38,16 @@ pub enum ProcessingResult { RemoveChain, } +/// A chain identifier +pub type ChainId = u64; + /// A chain of blocks that need to be downloaded. 
Peers who claim to contain the target head /// root are grouped into the peer pool and queried for batches when downloading the /// chain. pub struct SyncingChain { + /// A random id used to identify this chain. + id: ChainId, + /// The original start slot when this chain was initialised. pub start_slot: Slot, @@ -52,7 +58,7 @@ pub struct SyncingChain { pub target_head_root: Hash256, /// The batches that are currently awaiting a response from a peer. An RPC request for these - /// have been sent. + /// has been sent. pub pending_batches: PendingBatches, /// The batches that have been downloaded and are awaiting processing and/or validation. @@ -82,7 +88,8 @@ pub struct SyncingChain { /// back once batch processing has completed. sync_send: mpsc::UnboundedSender>, - chain: Weak>, + /// A reference to the underlying beacon chain. + chain: Arc>, /// A reference to the sync logger. log: slog::Logger, @@ -98,18 +105,20 @@ pub enum ChainSyncingState { impl SyncingChain { pub fn new( + id: u64, start_slot: Slot, target_head_slot: Slot, target_head_root: Hash256, peer_id: PeerId, sync_send: mpsc::UnboundedSender>, - chain: Weak>, + chain: Arc>, log: slog::Logger, ) -> Self { let mut peer_pool = HashSet::new(); peer_pool.insert(peer_id); SyncingChain { + id, start_slot, target_head_slot, target_head_root, @@ -141,7 +150,7 @@ impl SyncingChain { /// batch. pub fn on_block_response( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, request_id: RequestId, beacon_block: &Option>, ) -> Option<()> { @@ -161,7 +170,7 @@ impl SyncingChain { /// failed indicating that further batches are required. fn handle_completed_batch( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, batch: Batch, ) { // An entire batch of blocks has been received. This functions checks to see if it can be processed, @@ -241,11 +250,11 @@ impl SyncingChain { /// Sends a batch to the batch processor. 
fn process_batch(&mut self, mut batch: Batch) { let downloaded_blocks = std::mem::replace(&mut batch.downloaded_blocks, Vec::new()); - let batch_id = ProcessId::RangeBatchId(batch.id.clone()); + let process_id = ProcessId::RangeBatchId(self.id.clone(), batch.id.clone()); self.current_processing_batch = Some(batch); spawn_block_processor( - self.chain.clone(), - batch_id, + Arc::downgrade(&self.chain.clone()), + process_id, downloaded_blocks, self.sync_send.clone(), self.log.clone(), @@ -256,27 +265,37 @@ impl SyncingChain { /// of the batch processor. pub fn on_batch_process_result( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, + chain_id: ChainId, batch_id: BatchId, downloaded_blocks: &mut Option>>, result: &BatchProcessResult, ) -> Option { - if let Some(current_batch) = &self.current_processing_batch { - if current_batch.id != batch_id { - // batch process does not belong to this chain + if chain_id != self.id { + // the result does not belong to this chain + return None; + } + match &self.current_processing_batch { + Some(current_batch) if current_batch.id != batch_id => { + debug!(self.log, "Unexpected batch result"; + "chain_id" => self.id, "batch_id" => *batch_id, "expected_batch_id" => *current_batch.id); return None; } - // Continue. This is our processing request - } else { - // not waiting on a processing result - return None; + None => { + debug!(self.log, "Chain was not expecting a batch result"; + "chain_id" => self.id, "batch_id" => *batch_id); + return None; + } + _ => { + // chain_id and batch_id match, continue + } } // claim the result by consuming the option let downloaded_blocks = downloaded_blocks.take().or_else(|| { // if taken by another chain, we are no longer waiting on a result. 
self.current_processing_batch = None; - crit!(self.log, "Processed batch taken by another chain"); + crit!(self.log, "Processed batch taken by another chain"; "chain_id" => self.id); None })?; @@ -288,6 +307,7 @@ impl SyncingChain { // double check batches are processed in order TODO: Remove for prod if batch.id != self.to_be_processed_id { crit!(self.log, "Batch processed out of order"; + "chain_id" => self.id, "processed_batch_id" => *batch.id, "expected_id" => *self.to_be_processed_id); } @@ -299,40 +319,7 @@ impl SyncingChain { // If the processed batch was not empty, we can validate previous invalidated // blocks if !batch.downloaded_blocks.is_empty() { - // Remove any batches awaiting validation. - // - // All blocks in processed_batches should be prior batches. As the current - // batch has been processed with blocks in it, all previous batches are valid. - // - // If a previous batch has been validated and it had been re-processed, downvote - // the original peer. - while !self.processed_batches.is_empty() { - let processed_batch = self.processed_batches.remove(0); - if *processed_batch.id >= *batch.id { - crit!(self.log, "A processed batch had a greater id than the current process id"; - "processed_id" => *processed_batch.id, - "current_id" => *batch.id); - } - - if let Some(prev_hash) = processed_batch.original_hash { - // The validated batch has been re-processed - if prev_hash != processed_batch.hash() { - // The re-downloaded version was different - if processed_batch.current_peer != processed_batch.original_peer { - // A new peer sent the correct batch, the previous peer did not - // downvote the original peer - // - // If the same peer corrected it's mistake, we allow it.... for - // now. - debug!(self.log, "Re-processed batch validated. 
Downvoting original peer"; - "batch_id" => *processed_batch.id, - "original_peer" => format!("{}",processed_batch.original_peer), - "new_peer" => format!("{}", processed_batch.current_peer)); - network.downvote_peer(processed_batch.original_peer); - } - } - } - } + self.mark_processed_batches_as_valid(network, &batch); } // Add the current batch to processed batches to be verified in the future. We are @@ -360,20 +347,49 @@ impl SyncingChain { ProcessingResult::KeepChain } } - BatchProcessResult::Failed => { - warn!(self.log, "Batch processing failed"; "id" => *batch.id, "peer" => format!("{}", batch.current_peer)); - // The batch processing failed - // This could be because this batch is invalid, or a previous invalidated batch - // is invalid. We need to find out which and downvote the peer that has sent us - // an invalid batch. + BatchProcessResult::Partial => { + warn!(self.log, "Batch processing failed but at least one block was imported"; + "chain_id" => self.id, "id" => *batch.id, "peer" => format!("{}", batch.current_peer) + ); + // At least one block was successfully verified and imported, so we can be sure all + // previous batches are valid and we only need to download the current failed + // batch. + self.mark_processed_batches_as_valid(network, &batch); - // check that we have no exceeded the re-process retry counter + // check that we have not exceeded the re-process retry counter if batch.reprocess_retries > INVALID_BATCH_LOOKUP_ATTEMPTS { // if a batch has exceeded the invalid batch lookup attempts limit, it means // that it is likely all peers in this chain are are sending invalid batches // repeatedly and are either malicious or faulty. We drop the chain and // downvote all peers. - warn!(self.log, "Batch failed to download. Dropping chain and downvoting peers"; "id"=> *batch.id); + warn!(self.log, "Batch failed to download. 
Dropping chain and downvoting peers"; + "chain_id" => self.id, "id"=> *batch.id); + for peer_id in self.peer_pool.drain() { + network.downvote_peer(peer_id); + } + ProcessingResult::RemoveChain + } else { + // Handle this invalid batch, that is within the re-process retries limit. + self.handle_invalid_batch(network, batch); + ProcessingResult::KeepChain + } + } + BatchProcessResult::Failed => { + warn!(self.log, "Batch processing failed"; + "chain_id" => self.id,"id" => *batch.id, "peer" => format!("{}", batch.current_peer)); + // The batch processing failed + // This could be because this batch is invalid, or a previous invalidated batch + // is invalid. We need to find out which and downvote the peer that has sent us + // an invalid batch. + + // check that we have not exceeded the re-process retry counter + if batch.reprocess_retries > INVALID_BATCH_LOOKUP_ATTEMPTS { + // if a batch has exceeded the invalid batch lookup attempts limit, it means + // that it is likely all peers in this chain are are sending invalid batches + // repeatedly and are either malicious or faulty. We drop the chain and + // downvote all peers. + warn!(self.log, "Batch failed to download. Dropping chain and downvoting peers"; + "chain_id" => self.id, "id"=> *batch.id); for peer_id in self.peer_pool.drain() { network.downvote_peer(peer_id); } @@ -389,6 +405,51 @@ impl SyncingChain { Some(res) } + /// Removes any batches awaiting validation. + /// + /// All blocks in `processed_batches` should be prior batches. As the `last_batch` has been + /// processed with blocks in it, all previous batches are valid. + /// + /// If a previous batch has been validated and it had been re-processed, downvote + /// the original peer. 
+ fn mark_processed_batches_as_valid( + &mut self, + network: &mut SyncNetworkContext, + last_batch: &Batch, + ) { + while !self.processed_batches.is_empty() { + let processed_batch = self.processed_batches.remove(0); + if *processed_batch.id >= *last_batch.id { + crit!(self.log, "A processed batch had a greater id than the current process id"; + "chain_id" => self.id, + "processed_id" => *processed_batch.id, + "current_id" => *last_batch.id); + } + + if let Some(prev_hash) = processed_batch.original_hash { + // The validated batch has been re-processed + if prev_hash != processed_batch.hash() { + // The re-downloaded version was different + if processed_batch.current_peer != processed_batch.original_peer { + // A new peer sent the correct batch, the previous peer did not + // downvote the original peer + // + // If the same peer corrected it's mistake, we allow it.... for + // now. + debug!( + self.log, "Re-processed batch validated. Downvoting original peer"; + "chain_id" => self.id, + "batch_id" => *processed_batch.id, + "original_peer" => format!("{}",processed_batch.original_peer), + "new_peer" => format!("{}", processed_batch.current_peer) + ); + network.downvote_peer(processed_batch.original_peer); + } + } + } + } + } + /// An invalid batch has been received that could not be processed. /// /// These events occur when a peer as successfully responded with blocks, but the blocks we @@ -397,7 +458,11 @@ impl SyncingChain { // TODO: Batches could have been partially downloaded due to RPC size-limit restrictions. We // need to add logic for partial batch downloads. Potentially, if another peer returns the same // batch, we try a partial download. 
- fn handle_invalid_batch(&mut self, network: &mut SyncNetworkContext, batch: Batch) { + fn handle_invalid_batch( + &mut self, + network: &mut SyncNetworkContext, + batch: Batch, + ) { // The current batch could not be processed, indicating either the current or previous // batches are invalid @@ -427,7 +492,11 @@ impl SyncingChain { /// /// If the re-downloaded batch is different to the original and can be processed, the original /// peer will be downvoted. - fn reprocess_batch(&mut self, network: &mut SyncNetworkContext, mut batch: Batch) { + fn reprocess_batch( + &mut self, + network: &mut SyncNetworkContext, + mut batch: Batch, + ) { // marks the batch as attempting to be reprocessed by hashing the downloaded blocks batch.original_hash = Some(batch.hash()); @@ -449,11 +518,11 @@ impl SyncingChain { batch.current_peer = new_peer.clone(); debug!(self.log, "Re-requesting batch"; + "chain_id" => self.id, "start_slot" => batch.start_slot, "end_slot" => batch.end_slot, "id" => *batch.id, "peer" => format!("{}", batch.current_peer), - "head_root"=> format!("{}", batch.head_root), "retries" => batch.retries, "re-processes" => batch.reprocess_retries); self.send_batch(network, batch); @@ -467,7 +536,11 @@ impl SyncingChain { /// This chain has been requested to start syncing. /// /// This could be new chain, or an old chain that is being resumed. - pub fn start_syncing(&mut self, network: &mut SyncNetworkContext, local_finalized_slot: Slot) { + pub fn start_syncing( + &mut self, + network: &mut SyncNetworkContext, + local_finalized_slot: Slot, + ) { // A local finalized slot is provided as other chains may have made // progress whilst this chain was Stopped or paused. If so, update the `processed_batch_id` to // accommodate potentially downloaded batches from other chains. 
Also prune any old batches @@ -479,6 +552,7 @@ impl SyncingChain { if local_finalized_slot > self.current_processed_slot() { debug!(self.log, "Updating chain's progress"; + "chain_id" => self.id, "prev_completed_slot" => self.current_processed_slot(), "new_completed_slot" => local_finalized_slot.as_u64()); // Re-index batches @@ -502,11 +576,12 @@ impl SyncingChain { /// Add a peer to the chain. /// /// If the chain is active, this starts requesting batches from this peer. - pub fn add_peer(&mut self, network: &mut SyncNetworkContext, peer_id: PeerId) { + pub fn add_peer(&mut self, network: &mut SyncNetworkContext, peer_id: PeerId) { self.peer_pool.insert(peer_id.clone()); // do not request blocks if the chain is not syncing if let ChainSyncingState::Stopped = self.state { - debug!(self.log, "Peer added to a non-syncing chain"; "peer_id" => format!("{}", peer_id)); + debug!(self.log, "Peer added to a non-syncing chain"; + "chain_id" => self.id, "peer_id" => format!("{}", peer_id)); return; } @@ -515,7 +590,7 @@ impl SyncingChain { } /// Sends a STATUS message to all peers in the peer pool. - pub fn status_peers(&self, network: &mut SyncNetworkContext) { + pub fn status_peers(&self, network: &mut SyncNetworkContext) { for peer_id in self.peer_pool.iter() { network.status_peer(self.chain.clone(), peer_id.clone()); } @@ -529,12 +604,13 @@ impl SyncingChain { /// this chain. pub fn inject_error( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, peer_id: &PeerId, request_id: RequestId, ) -> Option { if let Some(batch) = self.pending_batches.remove(request_id) { warn!(self.log, "Batch failed. RPC Error"; + "chain_id" => self.id, "id" => *batch.id, "retries" => batch.retries, "peer" => format!("{:?}", peer_id)); @@ -553,7 +629,7 @@ impl SyncingChain { /// `MAX_BATCH_RETRIES`. 
pub fn failed_batch( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, mut batch: Batch, ) -> ProcessingResult { batch.retries += 1; @@ -575,11 +651,11 @@ impl SyncingChain { batch.current_peer = new_peer.clone(); debug!(self.log, "Re-Requesting batch"; + "chain_id" => self.id, "start_slot" => batch.start_slot, "end_slot" => batch.end_slot, "id" => *batch.id, - "peer" => format!("{:?}", batch.current_peer), - "head_root"=> format!("{}", batch.head_root)); + "peer" => format!("{:?}", batch.current_peer)); self.send_batch(network, batch); ProcessingResult::KeepChain } @@ -587,7 +663,7 @@ impl SyncingChain { /// Attempts to request the next required batches from the peer pool if the chain is syncing. It will exhaust the peer /// pool and left over batches until the batch buffer is reached or all peers are exhausted. - fn request_batches(&mut self, network: &mut SyncNetworkContext) { + fn request_batches(&mut self, network: &mut SyncNetworkContext) { if let ChainSyncingState::Syncing = self.state { while self.send_range_request(network) {} } @@ -595,16 +671,16 @@ impl SyncingChain { /// Requests the next required batch from a peer. Returns true, if there was a peer available /// to send a request and there are batches to request, false otherwise. 
- fn send_range_request(&mut self, network: &mut SyncNetworkContext) -> bool { + fn send_range_request(&mut self, network: &mut SyncNetworkContext) -> bool { // find the next pending batch and request it from the peer if let Some(peer_id) = self.get_next_peer() { if let Some(batch) = self.get_next_batch(peer_id) { debug!(self.log, "Requesting batch"; + "chain_id" => self.id, "start_slot" => batch.start_slot, "end_slot" => batch.end_slot, "id" => *batch.id, - "peer" => format!("{}", batch.current_peer), - "head_root"=> format!("{}", batch.head_root)); + "peer" => format!("{}", batch.current_peer)); // send the batch self.send_batch(network, batch); return true; @@ -675,13 +751,16 @@ impl SyncingChain { batch_id, batch_start_slot, batch_end_slot, - self.target_head_root, peer_id, )) } /// Requests the provided batch from the provided peer. - fn send_batch(&mut self, network: &mut SyncNetworkContext, batch: Batch) { + fn send_batch( + &mut self, + network: &mut SyncNetworkContext, + batch: Batch, + ) { let request = batch.to_blocks_by_range_request(); if let Ok(request_id) = network.blocks_by_range_request(batch.current_peer.clone(), request) { diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index beb39265a..5c93aa45a 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -4,52 +4,141 @@ //! with this struct to to simplify the logic of the other layers of sync. 
use super::chain::{ChainSyncingState, SyncingChain}; -use crate::message_processor::PeerSyncInfo; use crate::sync::manager::SyncMessage; use crate::sync::network_context::SyncNetworkContext; +use crate::sync::PeerSyncInfo; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use eth2_libp2p::PeerId; -use slog::{debug, error, warn}; -use std::sync::Weak; +use eth2_libp2p::{types::SyncState, NetworkGlobals, PeerId}; +use slog::{debug, error, info}; +use std::sync::Arc; use tokio::sync::mpsc; use types::EthSpec; use types::{Hash256, Slot}; /// The state of the long range/batch sync. -pub enum SyncState { +#[derive(Clone)] +pub enum RangeSyncState { /// A finalized chain is being synced. - Finalized, + Finalized { + /// The start of the finalized chain. + start_slot: Slot, + /// The target head slot of the finalized chain. + head_slot: Slot, + /// The target head root of the finalized chain. + head_root: Hash256, + }, /// There are no finalized chains and we are syncing one more head chains. - Head, + Head { + /// The last finalized checkpoint for all head chains. + start_slot: Slot, + /// The largest known slot to sync to. + head_slot: Slot, + }, /// There are no head or finalized chains and no long range sync is in progress. Idle, } +impl PartialEq for RangeSyncState { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (RangeSyncState::Finalized { .. }, RangeSyncState::Finalized { .. }) => true, + (RangeSyncState::Head { .. }, RangeSyncState::Head { .. 
}) => true, + (RangeSyncState::Idle, RangeSyncState::Idle) => true, + _ => false, + } + } +} + +impl Into for RangeSyncState { + fn into(self) -> SyncState { + match self { + RangeSyncState::Finalized { + start_slot, + head_slot, + head_root, + } => SyncState::SyncingFinalized { + start_slot, + head_slot, + head_root, + }, + RangeSyncState::Head { + start_slot, + head_slot, + } => SyncState::SyncingHead { + start_slot, + head_slot, + }, + RangeSyncState::Idle => SyncState::Stalled, // this should never really be used + } + } +} + /// A collection of finalized and head chains currently being processed. pub struct ChainCollection { /// The beacon chain for processing. - beacon_chain: Weak>, + beacon_chain: Arc>, + /// A reference to the global network parameters. + network_globals: Arc>, /// The set of finalized chains being synced. finalized_chains: Vec>, /// The set of head chains being synced. head_chains: Vec>, /// The current sync state of the process. - sync_state: SyncState, + state: RangeSyncState, + /// Logger for the collection. + log: slog::Logger, } impl ChainCollection { - pub fn new(beacon_chain: Weak>) -> Self { + pub fn new( + beacon_chain: Arc>, + network_globals: Arc>, + log: slog::Logger, + ) -> Self { ChainCollection { - sync_state: SyncState::Idle, + beacon_chain, + network_globals, finalized_chains: Vec::new(), head_chains: Vec::new(), - beacon_chain, + state: RangeSyncState::Idle, + log, } } - /// The current syncing state. - pub fn sync_state(&self) -> &SyncState { - &self.sync_state + pub fn state(&self) -> &RangeSyncState { + &self.state + } + + /// Updates the global sync state and logs any changes. + pub fn update_sync_state(&mut self) { + // if there is no range sync occurring, the state is either synced or not based on + // connected peers. 
+ + if self.state == RangeSyncState::Idle { + // there is no range sync, let the state of peers determine the global node sync state + let new_state = self + .network_globals + .peers + .read() + .synced_peers() + .next() + .map(|_| SyncState::Synced) + .unwrap_or_else(|| SyncState::Stalled); + let mut peer_state = self.network_globals.sync_state.write(); + if new_state != *peer_state { + info!(self.log, "Sync state updated"; "old_state" => format!("{}",peer_state), "new_state" => format!("{}",new_state)); + } + *peer_state = new_state; + } else { + // The state is based on a range sync state, update it + let mut node_sync_state = self.network_globals.sync_state.write(); + let new_state: SyncState = self.state.clone().into(); + if *node_sync_state != new_state { + // we are updating the state, inform the user + info!(self.log, "Sync state updated"; "old_state" => format!("{}",node_sync_state), "new_state" => format!("{}",new_state)); + } + *node_sync_state = new_state; + } } /// A fully synced peer has joined. @@ -57,9 +146,11 @@ impl ChainCollection { /// We could be awaiting a head sync. If we are in the head syncing state, without any head /// chains, then update the state to idle. pub fn fully_synced_peer_found(&mut self) { - if let SyncState::Head = self.sync_state { + if let RangeSyncState::Head { .. } = self.state { if self.head_chains.is_empty() { - self.sync_state = SyncState::Idle; + // Update the global network state to either synced or stalled. + self.state = RangeSyncState::Idle; + self.update_sync_state(); } } } @@ -68,8 +159,20 @@ impl ChainCollection { /// `SyncState::Head` indicating we are awaiting new peers to connect before we can consider /// the state as idle. 
pub fn set_head_sync(&mut self) { - if let SyncState::Idle = self.sync_state { - self.sync_state = SyncState::Head; + if let RangeSyncState::Idle = self.state { + let current_slot = self + .beacon_chain + .head_info() + .map(|info| info.slot) + .unwrap_or_else(|_| Slot::from(0u64)); + + // NOTE: This will modify the /node/syncing API to show current slot for all fields + // while we update peers to look for new potentially HEAD chains. + let temp_head_state = RangeSyncState::Head { + start_slot: current_slot, + head_slot: current_slot, + }; + self.state = temp_head_state; } } @@ -103,32 +206,26 @@ impl ChainCollection { /// /// This removes any out-dated chains, swaps to any higher priority finalized chains and /// updates the state of the collection. - pub fn update_finalized(&mut self, network: &mut SyncNetworkContext, log: &slog::Logger) { - let local_slot = match self.beacon_chain.upgrade() { - Some(chain) => { - let local = match PeerSyncInfo::from_chain(&chain) { - Some(local) => local, - None => { - return error!( - log, - "Failed to get peer sync info"; - "msg" => "likely due to head lock contention" - ) - } - }; + pub fn update_finalized(&mut self, network: &mut SyncNetworkContext) { + let local_slot = { + let local = match PeerSyncInfo::from_chain(&self.beacon_chain) { + Some(local) => local, + None => { + return error!( + self.log, + "Failed to get peer sync info"; + "msg" => "likely due to head lock contention" + ) + } + }; - local - .finalized_epoch - .start_slot(T::EthSpec::slots_per_epoch()) - } - None => { - warn!(log, "Beacon chain dropped. 
Chains not updated"); - return; - } + local + .finalized_epoch + .start_slot(T::EthSpec::slots_per_epoch()) }; // Remove any outdated finalized chains - self.purge_outdated_chains(network, log); + self.purge_outdated_chains(network); // Check if any chains become the new syncing chain if let Some(index) = self.finalized_syncing_index() { @@ -145,13 +242,20 @@ impl ChainCollection { }) { // A chain has more peers. Swap the syncing chain - debug!(log, "Switching finalized chains to sync"; "new_target_root" => format!("{}", chain.target_head_root), "new_end_slot" => chain.target_head_slot, "new_start_slot"=> chain.start_slot); + debug!(self.log, "Switching finalized chains to sync"; "new_target_root" => format!("{}", chain.target_head_root), "new_end_slot" => chain.target_head_slot, "new_start_slot"=> chain.start_slot); + + // update the state to a new finalized state + let state = RangeSyncState::Finalized { + start_slot: chain.start_slot, + head_slot: chain.target_head_slot, + head_root: chain.target_head_root, + }; + self.state = state; // Stop the current chain from syncing self.finalized_chains[index].stop_syncing(); // Start the new chain self.finalized_chains[new_index].start_syncing(network, local_slot); - self.sync_state = SyncState::Finalized; } } else if let Some(chain) = self .finalized_chains @@ -159,15 +263,36 @@ impl ChainCollection { .max_by_key(|chain| chain.peer_pool.len()) { // There is no currently syncing finalization chain, starting the one with the most peers - debug!(log, "New finalized chain started syncing"; "new_target_root" => format!("{}", chain.target_head_root), "new_end_slot" => chain.target_head_slot, "new_start_slot"=> chain.start_slot); + debug!(self.log, "New finalized chain started syncing"; "new_target_root" => format!("{}", chain.target_head_root), "new_end_slot" => chain.target_head_slot, "new_start_slot"=> chain.start_slot); chain.start_syncing(network, local_slot); - self.sync_state = SyncState::Finalized; + let state = 
RangeSyncState::Finalized { + start_slot: chain.start_slot, + head_slot: chain.target_head_slot, + head_root: chain.target_head_root, + }; + self.state = state; } else { // There are no finalized chains, update the state. if self.head_chains.is_empty() { - self.sync_state = SyncState::Idle; + self.state = RangeSyncState::Idle; } else { - self.sync_state = SyncState::Head; + // for the syncing API, we find the minimal start_slot and the maximum + // target_slot of all head chains to report back. + + let (min_slot, max_slot) = self.head_chains.iter().fold( + (Slot::from(0u64), Slot::from(0u64)), + |(min, max), chain| { + ( + std::cmp::min(min, chain.start_slot), + std::cmp::max(max, chain.target_head_slot), + ) + }, + ); + let head_state = RangeSyncState::Head { + start_slot: min_slot, + head_slot: max_slot, + }; + self.state = head_state; } } } @@ -180,16 +305,17 @@ impl ChainCollection { target_slot: Slot, peer_id: PeerId, sync_send: mpsc::UnboundedSender>, - log: &slog::Logger, ) { + let chain_id = rand::random(); self.finalized_chains.push(SyncingChain::new( + chain_id, local_finalized_slot, target_slot, target_head, peer_id, sync_send, self.beacon_chain.clone(), - log.clone(), + self.log.clone(), )); } @@ -197,13 +323,12 @@ impl ChainCollection { #[allow(clippy::too_many_arguments)] pub fn new_head_chain( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, remote_finalized_slot: Slot, target_head: Hash256, target_slot: Slot, peer_id: PeerId, sync_send: mpsc::UnboundedSender>, - log: &slog::Logger, ) { // remove the peer from any other head chains @@ -212,14 +337,16 @@ impl ChainCollection { }); self.head_chains.retain(|chain| !chain.peer_pool.is_empty()); + let chain_id = rand::random(); let mut new_head_chain = SyncingChain::new( + chain_id, remote_finalized_slot, target_slot, target_head, peer_id, sync_send, self.beacon_chain.clone(), - log.clone(), + self.log.clone(), ); // All head chains can sync simultaneously 
new_head_chain.start_syncing(network, remote_finalized_slot); @@ -277,25 +404,20 @@ impl ChainCollection { /// /// This removes chains with no peers, or chains whose start block slot is less than our current /// finalized block slot. - pub fn purge_outdated_chains(&mut self, network: &mut SyncNetworkContext, log: &slog::Logger) { + pub fn purge_outdated_chains(&mut self, network: &mut SyncNetworkContext) { // Remove any chains that have no peers self.finalized_chains .retain(|chain| !chain.peer_pool.is_empty()); self.head_chains.retain(|chain| !chain.peer_pool.is_empty()); - let (beacon_chain, local_info) = match self.beacon_chain.upgrade() { - Some(chain) => match PeerSyncInfo::from_chain(&chain) { - Some(local) => (chain, local), - None => { - return error!( - log, - "Failed to get peer sync info"; - "msg" => "likely due to head lock contention" - ) - } - }, + let local_info = match PeerSyncInfo::from_chain(&self.beacon_chain) { + Some(local) => local, None => { - return; + return error!( + self.log, + "Failed to get peer sync info"; + "msg" => "likely due to head lock contention" + ) } }; @@ -303,6 +425,8 @@ impl ChainCollection { .finalized_epoch .start_slot(T::EthSpec::slots_per_epoch()); + let beacon_chain = &self.beacon_chain; + let log_ref = &self.log; // Remove chains that are out-dated and re-status their peers self.finalized_chains.retain(|chain| { if chain.target_head_slot <= local_finalized_slot @@ -310,7 +434,7 @@ impl ChainCollection { .fork_choice .contains_block(&chain.target_head_root) { - debug!(log, "Purging out of finalized chain"; "start_slot" => chain.start_slot, "end_slot" => chain.target_head_slot); + debug!(log_ref, "Purging out of finalized chain"; "start_slot" => chain.start_slot, "end_slot" => chain.target_head_slot); chain.status_peers(network); false } else { @@ -323,7 +447,7 @@ impl ChainCollection { .fork_choice .contains_block(&chain.target_head_root) { - debug!(log, "Purging out of date head chain"; "start_slot" => 
chain.start_slot, "end_slot" => chain.target_head_slot); + debug!(log_ref, "Purging out of date head chain"; "start_slot" => chain.start_slot, "end_slot" => chain.target_head_slot); chain.status_peers(network); false } else { @@ -347,12 +471,7 @@ impl ChainCollection { /// finalized chain length, indicates a head chain. /// /// This will re-status the chains peers on removal. The index must exist. - pub fn remove_chain( - &mut self, - network: &mut SyncNetworkContext, - index: usize, - log: &slog::Logger, - ) { + pub fn remove_chain(&mut self, network: &mut SyncNetworkContext, index: usize) { let chain = if index >= self.finalized_chains.len() { let index = index - self.finalized_chains.len(); let chain = self.head_chains.swap_remove(index); @@ -364,10 +483,10 @@ impl ChainCollection { chain }; - debug!(log, "Chain was removed"; "start_slot" => chain.start_slot.as_u64(), "end_slot" => chain.target_head_slot.as_u64()); + debug!(self.log, "Chain was removed"; "start_slot" => chain.start_slot.as_u64(), "end_slot" => chain.target_head_slot.as_u64()); // update the state - self.update_finalized(network, log); + self.update_finalized(network); } /// Returns the index of finalized chain that is currently syncing. Returns `None` if no diff --git a/beacon_node/network/src/sync/range_sync/mod.rs b/beacon_node/network/src/sync/range_sync/mod.rs index 5d7b17c07..069fe712b 100644 --- a/beacon_node/network/src/sync/range_sync/mod.rs +++ b/beacon_node/network/src/sync/range_sync/mod.rs @@ -8,4 +8,5 @@ mod range; pub use batch::Batch; pub use batch::BatchId; +pub use chain::ChainId; pub use range::RangeSync; diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index d09ef4e25..70efa4dce 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -39,19 +39,19 @@ //! Each chain is downloaded in batches of blocks. The batched blocks are processed sequentially //! 
and further batches are requested as current blocks are being processed. -use super::chain::ProcessingResult; -use super::chain_collection::{ChainCollection, SyncState}; +use super::chain::{ChainId, ProcessingResult}; +use super::chain_collection::{ChainCollection, RangeSyncState}; use super::BatchId; -use crate::message_processor::PeerSyncInfo; use crate::sync::block_processor::BatchProcessResult; use crate::sync::manager::SyncMessage; use crate::sync::network_context::SyncNetworkContext; +use crate::sync::PeerSyncInfo; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::rpc::RequestId; -use eth2_libp2p::PeerId; -use slog::{debug, error, trace, warn}; +use eth2_libp2p::{NetworkGlobals, PeerId}; +use slog::{debug, error, trace}; use std::collections::HashSet; -use std::sync::Weak; +use std::sync::Arc; use tokio::sync::mpsc; use types::{EthSpec, SignedBeaconBlock}; @@ -60,7 +60,7 @@ use types::{EthSpec, SignedBeaconBlock}; /// holds the current state of the long range sync. pub struct RangeSync { /// The beacon chain for processing. - beacon_chain: Weak>, + beacon_chain: Arc>, /// A collection of chains that need to be downloaded. This stores any head or finalized chains /// that need to be downloaded. chains: ChainCollection, @@ -77,13 +77,14 @@ pub struct RangeSync { impl RangeSync { pub fn new( - beacon_chain: Weak>, + beacon_chain: Arc>, + network_globals: Arc>, sync_send: mpsc::UnboundedSender>, log: slog::Logger, ) -> Self { RangeSync { beacon_chain: beacon_chain.clone(), - chains: ChainCollection::new(beacon_chain), + chains: ChainCollection::new(beacon_chain, network_globals, log.clone()), awaiting_head_peers: HashSet::new(), sync_send, log, @@ -109,7 +110,7 @@ impl RangeSync { /// prioritised by peer-pool size. 
pub fn add_peer( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, peer_id: PeerId, remote: PeerSyncInfo, ) { @@ -118,21 +119,14 @@ impl RangeSync { // determine if we need to run a sync to the nearest finalized state or simply sync to // its current head - let (chain, local_info) = match self.beacon_chain.upgrade() { - Some(chain) => match PeerSyncInfo::from_chain(&chain) { - Some(local) => (chain, local), - None => { - return error!( - self.log, - "Failed to get peer sync info"; - "msg" => "likely due to head lock contention" - ) - } - }, + let local_info = match PeerSyncInfo::from_chain(&self.beacon_chain) { + Some(local) => local, None => { - return warn!(self.log, - "Beacon chain dropped. Peer not considered for sync"; - "peer_id" => format!("{:?}", peer_id)); + return error!( + self.log, + "Failed to get peer sync info"; + "msg" => "likely due to head lock contention" + ) } }; @@ -148,10 +142,13 @@ impl RangeSync { self.remove_peer(network, &peer_id); // remove any out-of-date chains - self.chains.purge_outdated_chains(network, &self.log); + self.chains.purge_outdated_chains(network); if remote_finalized_slot > local_info.head_slot - && !chain.fork_choice.contains_block(&remote.finalized_root) + && !self + .beacon_chain + .fork_choice + .contains_block(&remote.finalized_root) { debug!(self.log, "Finalization sync peer joined"; "peer_id" => format!("{:?}", peer_id)); // Finalized chain search @@ -171,7 +168,8 @@ impl RangeSync { chain.add_peer(network, peer_id); // check if the new peer's addition will favour a new syncing chain. 
- self.chains.update_finalized(network, &self.log); + self.chains.update_finalized(network); + self.chains.update_sync_state(); } else { // there is no finalized chain that matches this peer's last finalized target // create a new finalized chain @@ -183,9 +181,9 @@ impl RangeSync { remote_finalized_slot, peer_id, self.sync_send.clone(), - &self.log, ); - self.chains.update_finalized(network, &self.log); + self.chains.update_finalized(network); + self.chains.update_sync_state(); } } else { if self.chains.is_finalizing_sync() { @@ -216,10 +214,10 @@ impl RangeSync { remote.head_slot, peer_id, self.sync_send.clone(), - &self.log, ); } - self.chains.update_finalized(network, &self.log); + self.chains.update_finalized(network); + self.chains.update_sync_state(); } } @@ -229,7 +227,7 @@ impl RangeSync { /// This request could complete a chain or simply add to its progress. pub fn blocks_by_range_response( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, peer_id: PeerId, request_id: RequestId, beacon_block: Option>, @@ -256,7 +254,8 @@ impl RangeSync { pub fn handle_block_process_result( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, + chain_id: ChainId, batch_id: BatchId, downloaded_blocks: Vec>, result: BatchProcessResult, @@ -265,30 +264,38 @@ impl RangeSync { let mut downloaded_blocks = Some(downloaded_blocks); match self.chains.finalized_request(|chain| { - chain.on_batch_process_result(network, batch_id, &mut downloaded_blocks, &result) + chain.on_batch_process_result( + network, + chain_id, + batch_id, + &mut downloaded_blocks, + &result, + ) }) { Some((index, ProcessingResult::RemoveChain)) => { let chain = self.chains.remove_finalized_chain(index); debug!(self.log, "Finalized chain removed"; "start_slot" => chain.start_slot.as_u64(), "end_slot" => chain.target_head_slot.as_u64()); + // update the state of the collection + self.chains.update_finalized(network); + // the chain is complete, 
re-status it's peers chain.status_peers(network); - // update the state of the collection - self.chains.update_finalized(network, &self.log); - - // set the state to a head sync, to inform the manager that we are awaiting a + // set the state to a head sync if there are no finalized chains, to inform the manager that we are awaiting a // head chain. self.chains.set_head_sync(); + // Update the global variables + self.chains.update_sync_state(); // if there are no more finalized chains, re-status all known peers awaiting a head // sync - match self.chains.sync_state() { - SyncState::Idle | SyncState::Head => { + match self.chains.state() { + RangeSyncState::Idle | RangeSyncState::Head { .. } => { for peer_id in self.awaiting_head_peers.drain() { network.status_peer(self.beacon_chain.clone(), peer_id); } } - SyncState::Finalized => {} // Have more finalized chains to complete + RangeSyncState::Finalized { .. } => {} // Have more finalized chains to complete } } Some((_, ProcessingResult::KeepChain)) => {} @@ -296,6 +303,7 @@ impl RangeSync { match self.chains.head_request(|chain| { chain.on_batch_process_result( network, + chain_id, batch_id, &mut downloaded_blocks, &result, @@ -308,7 +316,9 @@ impl RangeSync { chain.status_peers(network); // update the state of the collection - self.chains.update_finalized(network, &self.log); + self.chains.update_finalized(network); + // update the global state and log any change + self.chains.update_sync_state(); } Some((_, ProcessingResult::KeepChain)) => {} None => { @@ -321,18 +331,13 @@ impl RangeSync { } } - /// Public method to indicate the current state of the long range sync. - pub fn is_syncing(&self) -> bool { - match self.chains.sync_state() { - SyncState::Finalized => true, - SyncState::Head => true, - SyncState::Idle => false, - } - } - /// A peer has disconnected. This removes the peer from any ongoing chains and mappings. 
A /// disconnected peer could remove a chain - pub fn peer_disconnect(&mut self, network: &mut SyncNetworkContext, peer_id: &PeerId) { + pub fn peer_disconnect( + &mut self, + network: &mut SyncNetworkContext, + peer_id: &PeerId, + ) { // if the peer is in the awaiting head mapping, remove it self.awaiting_head_peers.remove(&peer_id); @@ -340,13 +345,15 @@ impl RangeSync { self.remove_peer(network, peer_id); // update the state of the collection - self.chains.update_finalized(network, &self.log); + self.chains.update_finalized(network); + // update the global state and inform the user + self.chains.update_sync_state(); } /// When a peer gets removed, both the head and finalized chains need to be searched to check which pool the peer is in. The chain may also have a batch or batches awaiting /// for this peer. If so we mark the batch as failed. The batch may then hit it's maximum /// retries. In this case, we need to remove the chain and re-status all the peers. - fn remove_peer(&mut self, network: &mut SyncNetworkContext, peer_id: &PeerId) { + fn remove_peer(&mut self, network: &mut SyncNetworkContext, peer_id: &PeerId) { if let Some((index, ProcessingResult::RemoveChain)) = self.chains.head_finalized_request(|chain| { if chain.peer_pool.remove(peer_id) { @@ -366,7 +373,7 @@ impl RangeSync { { // the chain needed to be removed debug!(self.log, "Chain being removed due to failed batch"); - self.chains.remove_chain(network, index, &self.log); + self.chains.remove_chain(network, index); } } @@ -376,7 +383,7 @@ impl RangeSync { /// been too many failed attempts for the batch, remove the chain. 
pub fn inject_error( &mut self, - network: &mut SyncNetworkContext, + network: &mut SyncNetworkContext, peer_id: PeerId, request_id: RequestId, ) { @@ -388,7 +395,7 @@ impl RangeSync { Some((_, ProcessingResult::KeepChain)) => {} // error handled chain persists Some((index, ProcessingResult::RemoveChain)) => { debug!(self.log, "Chain being removed due to RPC error"); - self.chains.remove_chain(network, index, &self.log) + self.chains.remove_chain(network, index) } None => {} // request wasn't in the finalized chains, check the head chains } diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml index 2b2d2b829..c754ba481 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/rest_api/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "rest_api" -version = "0.1.0" -authors = ["Paul Hauner ", "Luke Anderson "] +version = "0.2.0" +authors = ["Paul Hauner ", "Age Manning ", "Luke Anderson "] edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] bls = { path = "../../eth2/utils/bls" } +rest_types = { path = "../../eth2/utils/rest_types" } beacon_chain = { path = "../beacon_chain" } network = { path = "../network" } eth2-libp2p = { path = "../eth2-libp2p" } @@ -24,7 +25,6 @@ state_processing = { path = "../../eth2/state_processing" } types = { path = "../../eth2/types" } http = "0.1" hyper = "0.12" -exit-future = "0.1.4" tokio = "0.1.22" url = "2.1" lazy_static = "1.3.0" @@ -35,6 +35,7 @@ hex = "0.3" parking_lot = "0.9" futures = "0.1.29" operation_pool = { path = "../../eth2/operation_pool" } +rayon = "1.3.0" [dev-dependencies] remote_beacon_node = { path = "../../eth2/utils/remote_beacon_node" } diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 628c04142..0df3673ee 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -5,29 +5,17 @@ use crate::{ApiError, ApiResult, BoxFut, UrlQuery}; use 
beacon_chain::{BeaconChain, BeaconChainTypes, StateSkipConfig}; use futures::{Future, Stream}; use hyper::{Body, Request}; -use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; +use rest_types::{ + BlockResponse, CanonicalHeadResponse, Committee, HeadBeaconBlock, StateResponse, + ValidatorRequest, ValidatorResponse, +}; use std::sync::Arc; use store::Store; use types::{ - AttesterSlashing, BeaconState, CommitteeIndex, EthSpec, Hash256, ProposerSlashing, - PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, Slot, Validator, + AttesterSlashing, BeaconState, EthSpec, Hash256, ProposerSlashing, PublicKeyBytes, + RelativeEpoch, Slot, }; -/// Information about the block and state that are at head of the beacon chain. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -pub struct CanonicalHeadResponse { - pub slot: Slot, - pub block_root: Hash256, - pub state_root: Hash256, - pub finalized_slot: Slot, - pub finalized_block_root: Hash256, - pub justified_slot: Slot, - pub justified_block_root: Hash256, - pub previous_justified_slot: Slot, - pub previous_justified_block_root: Hash256, -} - /// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. pub fn get_head( req: Request, @@ -62,15 +50,7 @@ pub fn get_head( ResponseBuilder::new(&req)?.body(&head) } -/// Information about a block that is at the head of a chain. May or may not represent the -/// canonical head. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -pub struct HeadBeaconBlock { - pub beacon_block_root: Hash256, - pub beacon_block_slot: Slot, -} - -/// HTTP handler to return a list of head block roots. +/// HTTP handler to return a list of head BeaconBlocks. 
pub fn get_heads( req: Request, beacon_chain: Arc>, @@ -87,14 +67,7 @@ pub fn get_heads( ResponseBuilder::new(&req)?.body(&heads) } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -#[serde(bound = "T: EthSpec")] -pub struct BlockResponse { - pub root: Hash256, - pub beacon_block: SignedBeaconBlock, -} - -/// HTTP handler to return a `SignedBeaconBlock` at a given `root` or `slot`. +/// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. pub fn get_block( req: Request, beacon_chain: Arc>, @@ -158,14 +131,6 @@ pub fn get_fork( ResponseBuilder::new(&req)?.body(&beacon_chain.head()?.beacon_state.fork) } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -pub struct ValidatorResponse { - pub pubkey: PublicKeyBytes, - pub validator_index: Option, - pub balance: Option, - pub validator: Option, -} - /// HTTP handler to which accepts a query string of a list of validator pubkeys and maps it to a /// `ValidatorResponse`. /// @@ -246,13 +211,6 @@ pub fn get_active_validators( ResponseBuilder::new(&req)?.body(&validators) } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -pub struct ValidatorRequest { - /// If set to `None`, uses the canonical head state. - pub state_root: Option, - pub pubkeys: Vec, -} - /// HTTP handler to which accepts a `ValidatorRequest` and returns a `ValidatorResponse` for /// each of the given `pubkeys`. When `state_root` is `None`, the canonical head is used. 
/// @@ -365,13 +323,6 @@ fn validator_response_by_pubkey( } } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -pub struct Committee { - pub slot: Slot, - pub index: CommitteeIndex, - pub committee: Vec, -} - /// HTTP handler pub fn get_committees( req: Request, @@ -405,13 +356,6 @@ pub fn get_committees( ResponseBuilder::new(&req)?.body(&committees) } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] -#[serde(bound = "T: EthSpec")] -pub struct StateResponse { - pub root: Hash256, - pub beacon_state: BeaconState, -} - /// HTTP handler to return a `BeaconState` at a given `root` or `slot`. /// /// Will not return a state if the request slot is in the future. Will return states higher than diff --git a/beacon_node/rest_api/src/consensus.rs b/beacon_node/rest_api/src/consensus.rs index 207ae073b..64b5a5df3 100644 --- a/beacon_node/rest_api/src/consensus.rs +++ b/beacon_node/rest_api/src/consensus.rs @@ -4,11 +4,12 @@ use crate::{ApiError, ApiResult, BoxFut, UrlQuery}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use futures::{Future, Stream}; use hyper::{Body, Request}; +use rest_types::{IndividualVotesRequest, IndividualVotesResponse}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use state_processing::per_epoch_processing::{TotalBalances, ValidatorStatus, ValidatorStatuses}; +use state_processing::per_epoch_processing::{TotalBalances, ValidatorStatuses}; use std::sync::Arc; -use types::{Epoch, EthSpec, PublicKeyBytes}; +use types::EthSpec; /// The results of validators voting during an epoch. 
/// @@ -70,68 +71,6 @@ pub fn get_vote_count( ResponseBuilder::new(&req)?.body(&report) } -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] -pub struct IndividualVotesRequest { - pub epoch: Epoch, - pub pubkeys: Vec, -} - -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] -pub struct IndividualVote { - /// True if the validator has been slashed, ever. - pub is_slashed: bool, - /// True if the validator can withdraw in the current epoch. - pub is_withdrawable_in_current_epoch: bool, - /// True if the validator was active in the state's _current_ epoch. - pub is_active_in_current_epoch: bool, - /// True if the validator was active in the state's _previous_ epoch. - pub is_active_in_previous_epoch: bool, - /// The validator's effective balance in the _current_ epoch. - pub current_epoch_effective_balance_gwei: u64, - /// True if the validator had an attestation included in the _current_ epoch. - pub is_current_epoch_attester: bool, - /// True if the validator's beacon block root attestation for the first slot of the _current_ - /// epoch matches the block root known to the state. - pub is_current_epoch_target_attester: bool, - /// True if the validator had an attestation included in the _previous_ epoch. - pub is_previous_epoch_attester: bool, - /// True if the validator's beacon block root attestation for the first slot of the _previous_ - /// epoch matches the block root known to the state. - pub is_previous_epoch_target_attester: bool, - /// True if the validator's beacon block root attestation in the _previous_ epoch at the - /// attestation's slot (`attestation_data.slot`) matches the block root known to the state. 
- pub is_previous_epoch_head_attester: bool, -} - -impl Into for ValidatorStatus { - fn into(self) -> IndividualVote { - IndividualVote { - is_slashed: self.is_slashed, - is_withdrawable_in_current_epoch: self.is_withdrawable_in_current_epoch, - is_active_in_current_epoch: self.is_active_in_current_epoch, - is_active_in_previous_epoch: self.is_active_in_previous_epoch, - current_epoch_effective_balance_gwei: self.current_epoch_effective_balance, - is_current_epoch_attester: self.is_current_epoch_attester, - is_current_epoch_target_attester: self.is_current_epoch_target_attester, - is_previous_epoch_attester: self.is_previous_epoch_attester, - is_previous_epoch_target_attester: self.is_previous_epoch_target_attester, - is_previous_epoch_head_attester: self.is_previous_epoch_head_attester, - } - } -} - -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] -pub struct IndividualVotesResponse { - /// The epoch which is considered the "current" epoch. - pub epoch: Epoch, - /// The validators public key. - pub pubkey: PublicKeyBytes, - /// The index of the validator in state.validators. - pub validator_index: Option, - /// Voting statistics for the validator, if they voted in the given epoch. 
- pub vote: Option, -} - pub fn post_individual_votes( req: Request, beacon_chain: Arc>, diff --git a/beacon_node/rest_api/src/error.rs b/beacon_node/rest_api/src/error.rs index dc8f4c91e..913fa8bd6 100644 --- a/beacon_node/rest_api/src/error.rs +++ b/beacon_node/rest_api/src/error.rs @@ -33,11 +33,11 @@ impl ApiError { impl Into> for ApiError { fn into(self) -> Response { - let status_code = self.status_code(); + let (status_code, desc) = self.status_code(); Response::builder() - .status(status_code.0) + .status(status_code) .header("content-type", "text/plain; charset=utf-8") - .body(Body::from(status_code.1)) + .body(Body::from(desc)) .expect("Response should always be created.") } } diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 8d014efbb..661b561c8 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -1,20 +1,16 @@ -use crate::{ApiError, ApiResult}; +use crate::{ApiError, ApiResult, NetworkChannel}; use beacon_chain::{BeaconChain, BeaconChainTypes, StateSkipConfig}; use bls::PublicKeyBytes; -use eth2_libp2p::GossipTopic; use eth2_libp2p::PubsubMessage; use hex; use http::header; use hyper::{Body, Request}; use network::NetworkMessage; -use parking_lot::RwLock; -use ssz::{Decode, Encode}; -use std::sync::Arc; +use ssz::Decode; use store::{iter::AncestorIter, Store}; -use tokio::sync::mpsc; use types::{ - Attestation, BeaconState, CommitteeIndex, Epoch, EthSpec, Hash256, RelativeEpoch, Signature, - SignedBeaconBlock, Slot, + Attestation, BeaconState, ChainSpec, CommitteeIndex, Epoch, EthSpec, Hash256, RelativeEpoch, + SignedAggregateAndProof, SignedBeaconBlock, Slot, }; /// Parse a slot. @@ -49,7 +45,7 @@ pub fn parse_committee_index(string: &str) -> Result { /// Checks the provided request to ensure that the `content-type` header. /// /// The content-type header should either be omitted, in which case JSON is assumed, or it should -/// explicity specify `application/json`. 
If anything else is provided, an error is returned. +/// explicitly specify `application/json`. If anything else is provided, an error is returned. pub fn check_content_type_for_json(req: &Request) -> Result<(), ApiError> { match req.headers().get(header::CONTENT_TYPE) { Some(h) if h == "application/json" => Ok(()), @@ -61,24 +57,26 @@ pub fn check_content_type_for_json(req: &Request) -> Result<(), ApiError> } } -/// Parse a signature from a `0x` preixed string. -pub fn parse_signature(string: &str) -> Result { +/// Parse an SSZ object from some hex-encoded bytes. +/// +/// E.g., A signature is `"0x0000000000000000000000000000000000000000000000000000000000000000"` +pub fn parse_hex_ssz_bytes(string: &str) -> Result { const PREFIX: &str = "0x"; if string.starts_with(PREFIX) { let trimmed = string.trim_start_matches(PREFIX); let bytes = hex::decode(trimmed) - .map_err(|e| ApiError::BadRequest(format!("Unable to parse signature hex: {:?}", e)))?; - Signature::from_ssz_bytes(&bytes) - .map_err(|e| ApiError::BadRequest(format!("Unable to parse signature bytes: {:?}", e))) + .map_err(|e| ApiError::BadRequest(format!("Unable to parse SSZ hex: {:?}", e)))?; + T::from_ssz_bytes(&bytes) + .map_err(|e| ApiError::BadRequest(format!("Unable to parse SSZ bytes: {:?}", e))) } else { Err(ApiError::BadRequest( - "Signature must have a 0x prefix".to_string(), + "Hex bytes must have a 0x prefix".to_string(), )) } } -/// Parse a root from a `0x` preixed string. +/// Parse a root from a `0x` prefixed string. 
/// /// E.g., `"0x0000000000000000000000000000000000000000000000000000000000000000"` pub fn parse_root(string: &str) -> Result { @@ -232,18 +230,14 @@ pub fn implementation_pending_response(_req: Request) -> ApiResult { } pub fn publish_beacon_block_to_network( - chan: Arc>>, + mut chan: NetworkChannel, block: SignedBeaconBlock, ) -> Result<(), ApiError> { - // create the network topic to send on - let topic = GossipTopic::BeaconBlock; - let message = PubsubMessage::Block(block.as_ssz_bytes()); + // send the block via SSZ encoding + let messages = vec![PubsubMessage::BeaconBlock(Box::new(block))]; // Publish the block to the p2p network via gossipsub. - if let Err(e) = chan.write().try_send(NetworkMessage::Publish { - topics: vec![topic.into()], - message, - }) { + if let Err(e) = chan.try_send(NetworkMessage::Publish { messages }) { return Err(ApiError::ServerError(format!( "Unable to send new block to network: {:?}", e @@ -253,19 +247,50 @@ pub fn publish_beacon_block_to_network( Ok(()) } -pub fn publish_attestation_to_network( - chan: Arc>>, - attestation: Attestation, +/// Publishes a raw un-aggregated attestation to the network. +pub fn publish_raw_attestations_to_network( + mut chan: NetworkChannel, + attestations: Vec>, + spec: &ChainSpec, ) -> Result<(), ApiError> { - // create the network topic to send on - let topic = GossipTopic::BeaconAttestation; - let message = PubsubMessage::Attestation(attestation.as_ssz_bytes()); + let messages = attestations + .into_iter() + .map(|attestation| { + // create the gossip message to send to the network + let subnet_id = attestation + .subnet_id(spec) + .map_err(|e| ApiError::ServerError(format!("Unable to get subnet id: {:?}", e)))?; - // Publish the attestation to the p2p network via gossipsub. 
- if let Err(e) = chan.write().try_send(NetworkMessage::Publish { - topics: vec![topic.into()], - message, - }) { + Ok(PubsubMessage::Attestation(Box::new(( + subnet_id, + attestation, + )))) + }) + .collect::, ApiError>>()?; + + // Publish the attestations to the p2p network via gossipsub. + if let Err(e) = chan.try_send(NetworkMessage::Publish { messages }) { + return Err(ApiError::ServerError(format!( + "Unable to send new attestation to network: {:?}", + e + ))); + } + + Ok(()) +} + +/// Publishes an aggregated attestation to the network. +pub fn publish_aggregate_attestations_to_network( + mut chan: NetworkChannel, + signed_proofs: Vec>, +) -> Result<(), ApiError> { + let messages = signed_proofs + .into_iter() + .map(|signed_proof| PubsubMessage::AggregateAndProofAttestation(Box::new(signed_proof))) + .collect::>(); + + // Publish the attestations to the p2p network via gossipsub. + if let Err(e) = chan.try_send(NetworkMessage::Publish { messages }) { return Err(ApiError::ServerError(format!( "Unable to send new attestation to network: {:?}", e diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 6f219529d..2702f38c9 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -10,6 +10,7 @@ pub mod config; mod consensus; mod error; mod helpers; +mod lighthouse; mod metrics; mod network; mod node; @@ -21,39 +22,32 @@ mod validator; use beacon_chain::{BeaconChain, BeaconChainTypes}; use client_network::NetworkMessage; -use client_network::Service as NetworkService; pub use config::ApiEncodingFormat; use error::{ApiError, ApiResult}; use eth2_config::Eth2Config; +use eth2_libp2p::NetworkGlobals; use hyper::rt::Future; use hyper::server::conn::AddrStream; use hyper::service::{make_service_fn, service_fn}; use hyper::{Body, Request, Response, Server}; -use parking_lot::RwLock; use slog::{info, warn}; use std::net::SocketAddr; use std::ops::Deref; use std::path::PathBuf; use std::sync::Arc; use 
tokio::runtime::TaskExecutor; -use tokio::sync::mpsc; +use tokio::sync::{mpsc, oneshot}; use url_query::UrlQuery; pub use crate::helpers::parse_pubkey_bytes; -pub use beacon::{ - BlockResponse, CanonicalHeadResponse, Committee, HeadBeaconBlock, StateResponse, - ValidatorRequest, ValidatorResponse, -}; pub use config::Config; -pub use consensus::{IndividualVote, IndividualVotesRequest, IndividualVotesResponse}; -pub use validator::{ValidatorDutiesRequest, ValidatorDuty}; pub type BoxFut = Box, Error = ApiError> + Send>; -pub type NetworkChannel = Arc>>; +pub type NetworkChannel = mpsc::UnboundedSender>; pub struct NetworkInfo { - pub network_service: Arc>, - pub network_chan: mpsc::UnboundedSender, + pub network_globals: Arc>, + pub network_chan: NetworkChannel, } // Allowing more than 7 arguments. @@ -67,7 +61,7 @@ pub fn start_server( freezer_db_path: PathBuf, eth2_config: Eth2Config, log: slog::Logger, -) -> Result<(exit_future::Signal, SocketAddr), hyper::Error> { +) -> Result<(oneshot::Sender<()>, SocketAddr), hyper::Error> { let inner_log = log.clone(); let eth2_config = Arc::new(eth2_config); @@ -76,8 +70,8 @@ pub fn start_server( let beacon_chain = beacon_chain.clone(); let log = inner_log.clone(); let eth2_config = eth2_config.clone(); - let network_service = network_info.network_service.clone(); - let network_channel = Arc::new(RwLock::new(network_info.network_chan.clone())); + let network_globals = network_info.network_globals.clone(); + let network_channel = network_info.network_chan.clone(); let db_path = db_path.clone(); let freezer_db_path = freezer_db_path.clone(); @@ -85,7 +79,7 @@ pub fn start_server( router::route( req, beacon_chain.clone(), - network_service.clone(), + network_globals.clone(), network_channel.clone(), eth2_config.clone(), log.clone(), @@ -105,7 +99,7 @@ pub fn start_server( let actual_listen_addr = server.local_addr(); // Build a channel to kill the HTTP server. 
- let (exit_signal, exit) = exit_future::signal(); + let (exit_signal, exit) = oneshot::channel(); let inner_log = log.clone(); let server_exit = exit.and_then(move |_| { info!(inner_log, "HTTP service shutdown"); diff --git a/beacon_node/rest_api/src/lighthouse.rs b/beacon_node/rest_api/src/lighthouse.rs new file mode 100644 index 000000000..556046ab3 --- /dev/null +++ b/beacon_node/rest_api/src/lighthouse.rs @@ -0,0 +1,58 @@ +//! This contains a collection of lighthouse specific HTTP endpoints. + +use crate::response_builder::ResponseBuilder; +use crate::ApiResult; +use eth2_libp2p::{NetworkGlobals, PeerInfo}; +use hyper::{Body, Request}; +use serde::Serialize; +use std::sync::Arc; +use types::EthSpec; + +/// The syncing state of the beacon node. +pub fn syncing( + req: Request, + network_globals: Arc>, +) -> ApiResult { + ResponseBuilder::new(&req)?.body_no_ssz(&network_globals.sync_state()) +} + +/// Returns all known peers and corresponding information +pub fn peers(req: Request, network_globals: Arc>) -> ApiResult { + let peers: Vec> = network_globals + .peers + .read() + .peers() + .map(|(peer_id, peer_info)| Peer { + peer_id: peer_id.to_string(), + peer_info: peer_info.clone(), + }) + .collect(); + ResponseBuilder::new(&req)?.body_no_ssz(&peers) +} + +/// Returns all known connected peers and their corresponding information +pub fn connected_peers( + req: Request, + network_globals: Arc>, +) -> ApiResult { + let peers: Vec> = network_globals + .peers + .read() + .connected_peers() + .map(|(peer_id, peer_info)| Peer { + peer_id: peer_id.to_string(), + peer_info: peer_info.clone(), + }) + .collect(); + ResponseBuilder::new(&req)?.body_no_ssz(&peers) +} + +/// Information returned by `peers` and `connected_peers`. +#[derive(Clone, Debug, Serialize)] +#[serde(bound = "T: EthSpec")] +struct Peer { + /// The Peer's ID + peer_id: String, + /// The PeerInfo associated with the peer. 
+ peer_info: PeerInfo, +} diff --git a/beacon_node/rest_api/src/network.rs b/beacon_node/rest_api/src/network.rs index 1d8df29a3..ae2486d34 100644 --- a/beacon_node/rest_api/src/network.rs +++ b/beacon_node/rest_api/src/network.rs @@ -1,6 +1,6 @@ use crate::error::ApiResult; use crate::response_builder::ResponseBuilder; -use crate::NetworkService; +use crate::NetworkGlobals; use beacon_chain::BeaconChainTypes; use eth2_libp2p::{Multiaddr, PeerId}; use hyper::{Body, Request}; @@ -11,7 +11,7 @@ use std::sync::Arc; /// Returns a list of `Multiaddr`, serialized according to their `serde` impl. pub fn get_listen_addresses( req: Request, - network: Arc>, + network: Arc>, ) -> ApiResult { let multiaddresses: Vec = network.listen_multiaddrs(); ResponseBuilder::new(&req)?.body_no_ssz(&multiaddresses) @@ -22,9 +22,9 @@ pub fn get_listen_addresses( /// Returns the TCP port number in its plain form (which is also valid JSON serialization) pub fn get_listen_port( req: Request, - network: Arc>, + network: Arc>, ) -> ApiResult { - ResponseBuilder::new(&req)?.body(&network.listen_port()) + ResponseBuilder::new(&req)?.body(&network.listen_port_tcp()) } /// HTTP handler to return the Discv5 ENR from the client's libp2p service. @@ -32,14 +32,9 @@ pub fn get_listen_port( /// ENR is encoded as base64 string. pub fn get_enr( req: Request, - network: Arc>, + network: Arc>, ) -> ApiResult { - ResponseBuilder::new(&req)?.body_no_ssz( - &network - .local_enr() - .map(|enr| enr.to_base64()) - .unwrap_or_else(|| "".into()), - ) + ResponseBuilder::new(&req)?.body_no_ssz(&network.local_enr().to_base64()) } /// HTTP handler to return the `PeerId` from the client's libp2p service. @@ -47,7 +42,7 @@ pub fn get_enr( /// PeerId is encoded as base58 string. 
pub fn get_peer_id( req: Request, - network: Arc>, + network: Arc>, ) -> ApiResult { ResponseBuilder::new(&req)?.body_no_ssz(&network.local_peer_id().to_base58()) } @@ -55,7 +50,7 @@ pub fn get_peer_id( /// HTTP handler to return the number of peers connected in the client's libp2p service. pub fn get_peer_count( req: Request, - network: Arc>, + network: Arc>, ) -> ApiResult { ResponseBuilder::new(&req)?.body(&network.connected_peers()) } @@ -65,11 +60,12 @@ pub fn get_peer_count( /// Peers are presented as a list of `PeerId::to_string()`. pub fn get_peer_list( req: Request, - network: Arc>, + network: Arc>, ) -> ApiResult { let connected_peers: Vec = network - .connected_peer_set() - .iter() + .peers + .read() + .connected_peer_ids() .map(PeerId::to_string) .collect(); ResponseBuilder::new(&req)?.body_no_ssz(&connected_peers) diff --git a/beacon_node/rest_api/src/node.rs b/beacon_node/rest_api/src/node.rs index 228a7ef0d..ffd07f8f1 100644 --- a/beacon_node/rest_api/src/node.rs +++ b/beacon_node/rest_api/src/node.rs @@ -1,9 +1,43 @@ use crate::response_builder::ResponseBuilder; use crate::ApiResult; +use eth2_libp2p::{types::SyncState, NetworkGlobals}; use hyper::{Body, Request}; +use rest_types::{SyncingResponse, SyncingStatus}; +use std::sync::Arc; +use types::{EthSpec, Slot}; use version; /// Read the version string from the current Lighthouse build. pub fn get_version(req: Request) -> ApiResult { ResponseBuilder::new(&req)?.body_no_ssz(&version::version()) } + +pub fn syncing( + req: Request, + network: Arc>, + current_slot: Slot, +) -> ApiResult { + let (starting_slot, highest_slot) = match network.sync_state() { + SyncState::SyncingFinalized { + start_slot, + head_slot, + .. 
+ } + | SyncState::SyncingHead { + start_slot, + head_slot, + } => (start_slot, head_slot), + SyncState::Synced | SyncState::Stalled => (Slot::from(0u64), current_slot), + }; + + let sync_status = SyncingStatus { + starting_slot, + current_slot, + highest_slot, + }; + + ResponseBuilder::new(&req)?.body(&SyncingResponse { + is_syncing: network.is_syncing(), + sync_status, + }) +} diff --git a/beacon_node/rest_api/src/router.rs b/beacon_node/rest_api/src/router.rs index 725957ee6..1c86e8ebc 100644 --- a/beacon_node/rest_api/src/router.rs +++ b/beacon_node/rest_api/src/router.rs @@ -1,16 +1,17 @@ use crate::{ - advanced, beacon, consensus, error::ApiError, helpers, metrics, network, node, spec, validator, - BoxFut, NetworkChannel, + advanced, beacon, consensus, error::ApiError, helpers, lighthouse, metrics, network, node, + spec, validator, BoxFut, NetworkChannel, }; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use client_network::Service as NetworkService; use eth2_config::Eth2Config; +use eth2_libp2p::NetworkGlobals; use futures::{Future, IntoFuture}; use hyper::{Body, Error, Method, Request, Response}; use slog::debug; use std::path::PathBuf; use std::sync::Arc; use std::time::Instant; +use types::Slot; fn into_boxfut(item: F) -> BoxFut where @@ -25,8 +26,8 @@ where pub fn route( req: Request, beacon_chain: Arc>, - network_service: Arc>, - network_channel: NetworkChannel, + network_globals: Arc>, + network_channel: NetworkChannel, eth2_config: Arc, local_log: slog::Logger, db_path: PathBuf, @@ -44,27 +45,37 @@ pub fn route( // Methods for Client (&Method::GET, "/node/version") => into_boxfut(node::get_version(req)), (&Method::GET, "/node/syncing") => { - into_boxfut(helpers::implementation_pending_response(req)) + // inform the current slot, or set to 0 + let current_slot = beacon_chain + .head_info() + .map(|info| info.slot) + .unwrap_or_else(|_| Slot::from(0u64)); + + into_boxfut(node::syncing::( + req, + network_globals, + current_slot, + )) } // Methods 
for Network (&Method::GET, "/network/enr") => { - into_boxfut(network::get_enr::(req, network_service)) + into_boxfut(network::get_enr::(req, network_globals)) } (&Method::GET, "/network/peer_count") => { - into_boxfut(network::get_peer_count::(req, network_service)) + into_boxfut(network::get_peer_count::(req, network_globals)) } (&Method::GET, "/network/peer_id") => { - into_boxfut(network::get_peer_id::(req, network_service)) + into_boxfut(network::get_peer_id::(req, network_globals)) } (&Method::GET, "/network/peers") => { - into_boxfut(network::get_peer_list::(req, network_service)) + into_boxfut(network::get_peer_list::(req, network_globals)) } (&Method::GET, "/network/listen_port") => { - into_boxfut(network::get_listen_port::(req, network_service)) + into_boxfut(network::get_listen_port::(req, network_globals)) } (&Method::GET, "/network/listen_addresses") => { - into_boxfut(network::get_listen_addresses::(req, network_service)) + into_boxfut(network::get_listen_addresses::(req, network_globals)) } // Methods for Beacon Node @@ -124,6 +135,9 @@ pub fn route( drop(timer); into_boxfut(response) } + (&Method::POST, "/validator/subscribe") => { + validator::post_validator_subscriptions::(req, network_channel) + } (&Method::GET, "/validator/duties/all") => { into_boxfut(validator::get_all_validator_duties::(req, beacon_chain)) } @@ -147,10 +161,22 @@ pub fn route( drop(timer); into_boxfut(response) } - (&Method::POST, "/validator/attestation") => { - validator::publish_attestation::(req, beacon_chain, network_channel, log) + (&Method::GET, "/validator/aggregate_attestation") => { + into_boxfut(validator::get_aggregate_attestation::(req, beacon_chain)) + } + (&Method::POST, "/validator/attestations") => { + validator::publish_attestations::(req, beacon_chain, network_channel, log) + } + (&Method::POST, "/validator/aggregate_and_proofs") => { + validator::publish_aggregate_and_proofs::( + req, + beacon_chain, + network_channel, + log, + ) } + // Methods for 
consensus (&Method::GET, "/consensus/global_votes") => { into_boxfut(consensus::get_vote_count::(req, beacon_chain)) } @@ -177,7 +203,6 @@ pub fn route( (&Method::GET, "/advanced/operation_pool") => { into_boxfut(advanced::get_operation_pool::(req, beacon_chain)) } - (&Method::GET, "/metrics") => into_boxfut(metrics::get_prometheus::( req, beacon_chain, @@ -185,6 +210,16 @@ pub fn route( freezer_db_path, )), + // Lighthouse specific + (&Method::GET, "/lighthouse/syncing") => { + into_boxfut(lighthouse::syncing::(req, network_globals)) + } + (&Method::GET, "/lighthouse/peers") => { + into_boxfut(lighthouse::peers::(req, network_globals)) + } + (&Method::GET, "/lighthouse/connected_peers") => into_boxfut( + lighthouse::connected_peers::(req, network_globals), + ), _ => Box::new(futures::future::err(ApiError::NotFound( "Request path and/or method not found.".to_owned(), ))), diff --git a/beacon_node/rest_api/src/url_query.rs b/beacon_node/rest_api/src/url_query.rs index 10e9878d8..fee0cf437 100644 --- a/beacon_node/rest_api/src/url_query.rs +++ b/beacon_node/rest_api/src/url_query.rs @@ -1,7 +1,7 @@ -use crate::helpers::{parse_committee_index, parse_epoch, parse_signature, parse_slot}; +use crate::helpers::{parse_committee_index, parse_epoch, parse_hex_ssz_bytes, parse_slot}; use crate::ApiError; use hyper::Request; -use types::{CommitteeIndex, Epoch, Signature, Slot}; +use types::{AttestationData, CommitteeIndex, Epoch, Signature, Slot}; /// Provides handy functions for parsing the query parameters of a URL. @@ -106,7 +106,13 @@ impl<'a> UrlQuery<'a> { /// Returns the value of the first occurrence of the `randao_reveal` key. pub fn randao_reveal(self) -> Result { self.first_of(&["randao_reveal"]) - .and_then(|(_key, value)| parse_signature(&value)) + .and_then(|(_key, value)| parse_hex_ssz_bytes(&value)) + } + + /// Returns the value of the first occurrence of the `attestation_data` key. 
+ pub fn attestation_data(self) -> Result { + self.first_of(&["attestation_data"]) + .and_then(|(_key, value)| parse_hex_ssz_bytes(&value)) } } diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 2ca191320..609a52e64 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -1,47 +1,28 @@ use crate::helpers::{ - check_content_type_for_json, publish_attestation_to_network, publish_beacon_block_to_network, + check_content_type_for_json, publish_aggregate_attestations_to_network, + publish_beacon_block_to_network, publish_raw_attestations_to_network, }; use crate::response_builder::ResponseBuilder; use crate::{ApiError, ApiResult, BoxFut, NetworkChannel, UrlQuery}; use beacon_chain::{ - AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BlockProcessingOutcome, + AttestationProcessingOutcome, AttestationType, BeaconChain, BeaconChainTypes, BlockError, StateSkipConfig, }; use bls::PublicKeyBytes; use futures::{Future, Stream}; use hyper::{Body, Request}; -use serde::{Deserialize, Serialize}; +use network::NetworkMessage; +use rayon::prelude::*; +use rest_types::{ValidatorDutiesRequest, ValidatorDutyBytes, ValidatorSubscription}; use slog::{error, info, warn, Logger}; -use ssz_derive::{Decode, Encode}; use std::sync::Arc; use types::beacon_state::EthSpec; use types::{ - Attestation, BeaconState, CommitteeIndex, Epoch, RelativeEpoch, SignedBeaconBlock, Slot, + Attestation, BeaconState, Epoch, RelativeEpoch, SignedAggregateAndProof, SignedBeaconBlock, + Slot, }; -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] -pub struct ValidatorDuty { - /// The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._ - pub validator_pubkey: PublicKeyBytes, - /// The validator's index in `state.validators` - pub validator_index: Option, - /// The slot at which the validator must attest. 
- pub attestation_slot: Option, - /// The index of the committee within `slot` of which the validator is a member. - pub attestation_committee_index: Option, - /// The position of the validator in the committee. - pub attestation_committee_position: Option, - /// The slots in which a validator must propose a block (can be empty). - pub block_proposal_slots: Vec, -} - -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] -pub struct ValidatorDutiesRequest { - pub epoch: Epoch, - pub pubkeys: Vec, -} - -/// HTTP Handler to retrieve a the duties for a set of validators during a particular epoch. This +/// HTTP Handler to retrieve the duties for a set of validators during a particular epoch. This /// method allows for collecting bulk sets of validator duties without risking exceeding the max /// URL length with query pairs. pub fn post_validator_duties( @@ -74,6 +55,42 @@ pub fn post_validator_duties( Box::new(future) } +/// HTTP Handler to retrieve subscriptions for a set of validators. This allows the node to +/// organise peer discovery and topic subscription for known validators. 
+pub fn post_validator_subscriptions( + req: Request, + mut network_chan: NetworkChannel, +) -> BoxFut { + try_future!(check_content_type_for_json(&req)); + let response_builder = ResponseBuilder::new(&req); + + let body = req.into_body(); + Box::new( + body.concat2() + .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e))) + .and_then(|chunks| { + serde_json::from_slice(&chunks).map_err(|e| { + ApiError::BadRequest(format!( + "Unable to parse JSON into ValidatorSubscriptions: {:?}", + e + )) + }) + }) + .and_then(move |subscriptions: Vec| { + network_chan + .try_send(NetworkMessage::Subscribe { subscriptions }) + .map_err(|e| { + ApiError::ServerError(format!( + "Unable to subscriptions to the network: {:?}", + e + )) + })?; + Ok(()) + }) + .and_then(|_| response_builder?.body_no_ssz(&())), + ) +} + /// HTTP Handler to retrieve all validator duties for the given epoch. pub fn get_all_validator_duties( req: Request, @@ -154,7 +171,7 @@ fn return_validator_duties( beacon_chain: Arc>, epoch: Epoch, validator_pubkeys: Vec, -) -> Result, ApiError> { +) -> Result, ApiError> { let mut state = get_state_for_epoch(&beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?; let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), epoch) @@ -209,28 +226,39 @@ fn return_validator_duties( )) })?; + // Obtain the aggregator modulo + let aggregator_modulo = duties.map(|d| { + std::cmp::max( + 1, + d.committee_len as u64 + / &beacon_chain.spec.target_aggregators_per_committee, + ) + }); + let block_proposal_slots = validator_proposers .iter() .filter(|(i, _slot)| validator_index == *i) .map(|(_i, slot)| *slot) .collect(); - Ok(ValidatorDuty { + Ok(ValidatorDutyBytes { validator_pubkey, - validator_index: Some(validator_index), + validator_index: Some(validator_index as u64), attestation_slot: duties.map(|d| d.slot), attestation_committee_index: duties.map(|d| d.index), attestation_committee_position: duties.map(|d| d.committee_position), 
block_proposal_slots, + aggregator_modulo, }) } else { - Ok(ValidatorDuty { + Ok(ValidatorDutyBytes { validator_pubkey, validator_index: None, attestation_slot: None, attestation_committee_index: None, attestation_committee_position: None, block_proposal_slots: vec![], + aggregator_modulo: None, }) } }) @@ -270,7 +298,7 @@ pub fn get_new_beacon_block( pub fn publish_beacon_block( req: Request, beacon_chain: Arc>, - network_chan: NetworkChannel, + network_chan: NetworkChannel, log: Logger, ) -> BoxFut { try_future!(check_content_type_for_json(&req)); @@ -288,7 +316,7 @@ pub fn publish_beacon_block( .and_then(move |block: SignedBeaconBlock| { let slot = block.slot(); match beacon_chain.process_block(block.clone()) { - Ok(BlockProcessingOutcome::Processed { block_root }) => { + Ok(block_root) => { // Block was processed, publish via gossipsub info!( log, @@ -331,19 +359,7 @@ pub fn publish_beacon_block( Ok(()) } - Ok(outcome) => { - warn!( - log, - "Invalid block from local validator"; - "outcome" => format!("{:?}", outcome) - ); - - Err(ApiError::ProcessingError(format!( - "The SignedBeaconBlock could not be processed and has not been published: {:?}", - outcome - ))) - } - Err(e) => { + Err(BlockError::BeaconChainError(e)) => { error!( log, "Error whilst processing block"; @@ -355,6 +371,18 @@ pub fn publish_beacon_block( e ))) } + Err(other) => { + warn!( + log, + "Invalid block from local validator"; + "outcome" => format!("{:?}", other) + ); + + Err(ApiError::ProcessingError(format!( + "The SignedBeaconBlock could not be processed and has not been published: {:?}", + other + ))) + } } }) .and_then(|_| response_builder?.body_no_ssz(&())) @@ -378,11 +406,32 @@ pub fn get_new_attestation( ResponseBuilder::new(&req)?.body(&attestation) } -/// HTTP Handler to publish an Attestation, which has been signed by a validator. 
-pub fn publish_attestation( +/// HTTP Handler to retrieve the aggregate attestation for a slot +pub fn get_aggregate_attestation( req: Request, beacon_chain: Arc>, - network_chan: NetworkChannel, +) -> ApiResult { + let query = UrlQuery::from_request(&req)?; + + let attestation_data = query.attestation_data()?; + + match beacon_chain.get_aggregated_attestation(&attestation_data) { + Ok(Some(attestation)) => ResponseBuilder::new(&req)?.body(&attestation), + Ok(None) => Err(ApiError::NotFound( + "No matching aggregate attestation is known".into(), + )), + Err(e) => Err(ApiError::ServerError(format!( + "Unable to obtain attestation: {:?}", + e + ))), + } +} + +/// HTTP Handler to publish a list of Attestations, which have been signed by a number of validators. +pub fn publish_attestations( + req: Request, + beacon_chain: Arc>, + network_chan: NetworkChannel, log: Logger, ) -> BoxFut { try_future!(check_content_type_for_json(&req)); @@ -396,13 +445,30 @@ pub fn publish_attestation( .and_then(|chunks| { serde_json::from_slice(&chunks.as_slice()).map_err(|e| { ApiError::BadRequest(format!( - "Unable to deserialize JSON into a SignedBeaconBlock: {:?}", + "Unable to deserialize JSON into a list of attestations: {:?}", e )) }) }) - .and_then(move |attestation: Attestation| { - match beacon_chain.process_attestation(attestation.clone()) { + .and_then(move |attestations: Vec>| { + // Note: This is a new attestation from a validator. We want to process this and + // inform the validator whether the attestation was valid. In doing so, we store + // this un-aggregated raw attestation in the op_pool by default. This is + // sub-optimal as if we have no validators needing to aggregate, these don't need + // to be stored in the op-pool. 
This is minimal however as the op_pool gets pruned + // every slot + attestations.par_iter().try_for_each(|attestation| { + // In accordance with the naive aggregation strategy, the validator client should + // only publish attestations to this endpoint with a single signature. + if attestation.aggregation_bits.num_set_bits() != 1 { + return Err(ApiError::BadRequest(format!("Attestation should have exactly one aggregation bit set"))) + } + + // TODO: we only need to store these attestations if we're aggregating for the + // given subnet. + let attestation_type = AttestationType::Unaggregated { should_store: true }; + + match beacon_chain.process_attestation(attestation.clone(), attestation_type) { Ok(AttestationProcessingOutcome::Processed) => { // Block was processed, publish via gossipsub info!( @@ -413,7 +479,7 @@ pub fn publish_attestation( "index" => attestation.data.index, "slot" => attestation.data.slot, ); - publish_attestation_to_network::(network_chan, attestation) + Ok(()) } Ok(outcome) => { warn!( @@ -423,7 +489,7 @@ pub fn publish_attestation( ); Err(ApiError::ProcessingError(format!( - "The Attestation could not be processed and has not been published: {:?}", + "An Attestation could not be processed and has not been published: {:?}", outcome ))) } @@ -440,6 +506,123 @@ pub fn publish_attestation( ))) } } + })?; + + Ok((attestations, beacon_chain)) + }) + .and_then(|(attestations, beacon_chain)| { + publish_raw_attestations_to_network::(network_chan, attestations, &beacon_chain.spec) + }) + .and_then(|_| response_builder?.body_no_ssz(&())), + ) +} + +/// HTTP Handler to publish an Attestation, which has been signed by a validator. 
+pub fn publish_aggregate_and_proofs( + req: Request, + beacon_chain: Arc>, + network_chan: NetworkChannel, + log: Logger, +) -> BoxFut { + try_future!(check_content_type_for_json(&req)); + let response_builder = ResponseBuilder::new(&req); + + Box::new( + req.into_body() + .concat2() + .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}", e))) + .map(|chunk| chunk.iter().cloned().collect::>()) + .and_then(|chunks| { + serde_json::from_slice(&chunks.as_slice()).map_err(|e| { + ApiError::BadRequest(format!( + "Unable to deserialize JSON into a list of SignedAggregateAndProof: {:?}", + e + )) + }) + }) + .and_then(move |signed_proofs: Vec>| { + // Verify the signatures for the aggregate and proof and if valid process the + // aggregate + // TODO: Double check speed and logic consistency of handling current fork vs + // validator fork for signatures. + // TODO: More efficient way of getting a fork? + let fork = &beacon_chain.head()?.beacon_state.fork; + + // TODO: Update to shift this task to dedicated task using await + signed_proofs.par_iter().try_for_each(|signed_proof| { + let agg_proof = &signed_proof.message; + let validator_pubkey = &beacon_chain.validator_pubkey(agg_proof.aggregator_index as usize)?.ok_or_else(|| { + warn!( + log, + "Unknown validator from local validator client"; + ); + + ApiError::ProcessingError(format!("The validator is not known")) + })?; + + /* + * TODO: checking that `signed_proof.is_valid()` is not sufficient. It + * is also necessary to check that the validator is actually designated as an + * aggregator for this attestation. + * + * I (Paul H) will pick this up in a future PR. 
+ */ + + if signed_proof.is_valid(validator_pubkey, fork, beacon_chain.genesis_validators_root, &beacon_chain.spec) { + let attestation = &agg_proof.aggregate; + + match beacon_chain.process_attestation(attestation.clone(), AttestationType::Aggregated) { + Ok(AttestationProcessingOutcome::Processed) => { + // Block was processed, publish via gossipsub + info!( + log, + "Attestation from local validator"; + "target" => attestation.data.source.epoch, + "source" => attestation.data.source.epoch, + "index" => attestation.data.index, + "slot" => attestation.data.slot, + ); + Ok(()) + } + Ok(outcome) => { + warn!( + log, + "Invalid attestation from local validator"; + "outcome" => format!("{:?}", outcome) + ); + + Err(ApiError::ProcessingError(format!( + "The Attestation could not be processed and has not been published: {:?}", + outcome + ))) + } + Err(e) => { + error!( + log, + "Error whilst processing attestation"; + "error" => format!("{:?}", e) + ); + + Err(ApiError::ServerError(format!( + "Error while processing attestation: {:?}", + e + ))) + } + } + } else { + error!( + log, + "Invalid AggregateAndProof Signature" + ); + Err(ApiError::ServerError(format!( + "Invalid AggregateAndProof Signature" + ))) + } + })?; + Ok(signed_proofs) + }) + .and_then(move |signed_proofs| { + publish_aggregate_attestations_to_network::(network_chan, signed_proofs) }) .and_then(|_| response_builder?.body_no_ssz(&())), ) diff --git a/beacon_node/rest_api/tests/test.rs b/beacon_node/rest_api/tests/test.rs index f4c47b42d..9f34dc61d 100644 --- a/beacon_node/rest_api/tests/test.rs +++ b/beacon_node/rest_api/tests/test.rs @@ -6,9 +6,9 @@ use node_test_rig::{ testing_client_config, ClientConfig, ClientGenesis, LocalBeaconNode, }; use remote_beacon_node::{ - Committee, HeadBeaconBlock, PersistedOperationPool, PublishStatus, ValidatorDuty, - ValidatorResponse, + Committee, HeadBeaconBlock, PersistedOperationPool, PublishStatus, ValidatorResponse, }; +use rest_types::ValidatorDutyBytes; use 
std::convert::TryInto; use std::sync::Arc; use types::{ @@ -17,7 +17,8 @@ use types::{ generate_deterministic_keypair, AttesterSlashingTestTask, ProposerSlashingTestTask, }, BeaconBlock, BeaconState, ChainSpec, Domain, Epoch, EthSpec, MinimalEthSpec, PublicKey, - RelativeEpoch, Signature, SignedBeaconBlock, SignedRoot, Slot, Validator, + RelativeEpoch, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedRoot, Slot, + Validator, }; use version; @@ -89,6 +90,7 @@ fn validator_produce_attestation() { .client .beacon_chain() .expect("client should have beacon chain"); + let genesis_validators_root = beacon_chain.genesis_validators_root; let state = beacon_chain.head().expect("should get head").beacon_state; let validator_index = 0; @@ -130,21 +132,56 @@ fn validator_produce_attestation() { .expect("should fetch duties from http api"); let duties = &duties[0]; - // Try publishing the attestation without a signature, ensure it is flagged as invalid. + // Try publishing the attestation without a signature or a committee bit set, ensure it + // raises an error. + let publish_result = env.runtime().block_on( + remote_node + .http + .validator() + .publish_attestations(vec![attestation.clone()]), + ); + assert!( + publish_result.is_err(), + "the unsigned published attestation should return error" + ); + + // Set the aggregation bit. + attestation + .aggregation_bits + .set( + duties + .attestation_committee_position + .expect("should have committee position"), + true, + ) + .expect("should set attestation bit"); + + // Try publishing with an aggregation bit set, but an invalid signature. 
let publish_status = env .runtime() .block_on( remote_node .http .validator() - .publish_attestation(attestation.clone()), + .publish_attestations(vec![attestation.clone()]), ) - .expect("should publish attestation"); + .expect("should publish attestation with invalid signature"); assert!( !publish_status.is_valid(), "the unsigned published attestation should not be valid" ); + // Un-set the aggregation bit, so signing doesn't error. + attestation + .aggregation_bits + .set( + duties + .attestation_committee_position + .expect("should have committee position"), + false, + ) + .expect("should un-set attestation bit"); + attestation .sign( &keypair.sk, @@ -164,13 +201,49 @@ fn validator_produce_attestation() { remote_node .http .validator() - .publish_attestation(attestation), + .publish_attestations(vec![attestation.clone()]), ) .expect("should publish attestation"); assert!( publish_status.is_valid(), "the signed published attestation should be valid" ); + + // Try obtaining an aggregated attestation with a matching attestation data to the previous + // one. + let aggregated_attestation = env + .runtime() + .block_on( + remote_node + .http + .validator() + .produce_aggregate_attestation(&attestation.data), + ) + .expect("should fetch aggregated attestation from http api"); + + let signed_aggregate_and_proof = SignedAggregateAndProof::from_aggregate( + validator_index as u64, + aggregated_attestation, + &keypair.sk, + &state.fork, + genesis_validators_root, + spec, + ); + + // Publish the signed aggregate. 
+ let publish_status = env + .runtime() + .block_on( + remote_node + .http + .validator() + .publish_aggregate_and_proof(vec![signed_aggregate_and_proof]), + ) + .expect("should publish aggregate and proof"); + assert!( + publish_status.is_valid(), + "the signed aggregate and proof should be valid" + ); } #[test] @@ -226,7 +299,7 @@ fn validator_duties() { } fn check_duties( - duties: Vec, + duties: Vec, epoch: Epoch, validators: Vec, beacon_chain: Arc>, diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 8cfbca043..9a2dfc1d4 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -53,11 +53,18 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("9000") .takes_value(true), ) + .arg( + Arg::with_name("discovery-port") + .long("discovery-port") + .value_name("PORT") + .help("The UDP port that discovery will listen on. Defaults to `port`") + .takes_value(true), + ) .arg( Arg::with_name("maxpeers") .long("maxpeers") .help("The maximum number of peers.") - .default_value("10") + .default_value("50") .takes_value(true), ) .arg( @@ -69,28 +76,43 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true), ) .arg( - Arg::with_name("discovery-port") - .long("disc-port") + Arg::with_name("enr-udp-port") + .long("enr-udp-port") .value_name("PORT") - .help("The discovery UDP port.") - .default_value("9000") + .help("The UDP port of the local ENR. Set this only if you are sure other nodes can connect to your local node on this port.") .takes_value(true), ) .arg( - Arg::with_name("discovery-address") - .long("discovery-address") + Arg::with_name("enr-tcp-port") + .long("enr-tcp-port") + .value_name("PORT") + .help("The TCP port of the local ENR. 
Set this only if you are sure other nodes can connect to your local node on this port.\ + The --port flag is used if this is not set.") + .takes_value(true), + ) + .arg( + Arg::with_name("enr-address") + .long("enr-address") .value_name("ADDRESS") .help("The IP address to broadcast to other peers on how to reach this node. \ - Default will load previous values from disk failing this it is set to 127.0.0.1 \ - and will be updated when connecting to other nodes on the network.") + Set this only if you are sure other nodes can connect to your local node on this address. \ + Discovery will automatically find your external address,if possible. + ") .takes_value(true), ) .arg( - Arg::with_name("topics") - .long("topics") - .value_name("STRING") - .help("One or more comma-delimited gossipsub topic strings to subscribe to. Default \ - is determined automatically.") + Arg::with_name("enr-match") + .short("e") + .long("enr-match") + .help("Sets the local ENR IP address and port to match those set for lighthouse. \ + Specifically, the IP address will be the value of --listen-address and the UDP port will be --discovery-port.") + ) + .arg( + Arg::with_name("disable-enr-auto-update") + .short("x") + .long("disable-enr-auto-update") + .help("Discovery automatically updates the nodes local ENR with an external IP address and port as seen by other peers on the network. 
\ + This disables this feature, fixing the ENR's IP/PORT to those specified on boot.") .takes_value(true), ) .arg( diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 86f34fe70..59b1457de 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -3,7 +3,7 @@ use clap::ArgMatches; use client::{config::DEFAULT_DATADIR, ClientConfig, ClientGenesis}; use eth2_libp2p::{Enr, Multiaddr}; use eth2_testnet_config::Eth2TestnetConfig; -use slog::{crit, warn, Logger}; +use slog::{crit, info, Logger}; use ssz::Encode; use std::fs; use std::fs::File; @@ -64,6 +64,12 @@ pub fn get_config( fs::create_dir_all(&client_config.data_dir) .map_err(|e| format!("Failed to create data dir: {}", e))?; + // logs the chosen data directory + let mut log_dir = client_config.data_dir.clone(); + // remove /beacon from the end + log_dir.pop(); + info!(log, "Data directory initialised"; "datadir" => format!("{}",log_dir.into_os_string().into_string().expect("Datadir should be a valid os string"))); + // Load the client config, if it exists . 
let config_file_path = client_config.data_dir.join(CLIENT_CONFIG_FILENAME); let config_file_existed = config_file_path.exists(); @@ -108,6 +114,13 @@ pub fn get_config( client_config.network.discovery_port = port; } + if let Some(port_str) = cli_args.value_of("discovery-port") { + let port = port_str + .parse::() + .map_err(|_| format!("Invalid port: {}", port_str))?; + client_config.network.discovery_port = port; + } + if let Some(boot_enr_str) = cli_args.value_of("boot-nodes") { client_config.network.boot_nodes = boot_enr_str .split(',') @@ -126,22 +139,45 @@ pub fn get_config( .collect::, _>>()?; } - if let Some(topics_str) = cli_args.value_of("topics") { - client_config.network.topics = topics_str.split(',').map(|s| s.into()).collect(); - } - - if let Some(discovery_address_str) = cli_args.value_of("discovery-address") { - client_config.network.discovery_address = Some( - discovery_address_str + if let Some(enr_address_str) = cli_args.value_of("enr-address") { + client_config.network.enr_address = Some( + enr_address_str .parse() - .map_err(|_| format!("Invalid discovery address: {:?}", discovery_address_str))?, + .map_err(|_| format!("Invalid discovery address: {:?}", enr_address_str))?, ) } - if let Some(disc_port_str) = cli_args.value_of("disc-port") { - client_config.network.discovery_port = disc_port_str - .parse::() - .map_err(|_| format!("Invalid discovery port: {}", disc_port_str))?; + if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp-port") { + client_config.network.enr_udp_port = Some( + enr_udp_port_str + .parse::() + .map_err(|_| format!("Invalid discovery port: {}", enr_udp_port_str))?, + ); + } + + if let Some(enr_tcp_port_str) = cli_args.value_of("enr-tcp-port") { + client_config.network.enr_tcp_port = Some( + enr_tcp_port_str + .parse::() + .map_err(|_| format!("Invalid ENR TCP port: {}", enr_tcp_port_str))?, + ); + } + + if cli_args.is_present("enr-match") { + // set the enr address to localhost if the address is 0.0.0.0 + if 
client_config.network.listen_address + == "0.0.0.0".parse::().expect("valid ip addr") + { + client_config.network.enr_address = + Some("127.0.0.1".parse::().expect("valid ip addr")); + } else { + client_config.network.enr_address = Some(client_config.network.listen_address); + } + client_config.network.enr_udp_port = Some(client_config.network.discovery_port); + } + + if cli_args.is_present("disable_enr_auto_update") { + client_config.network.discv5_config.enr_update = false; } if let Some(p2p_priv_key) = cli_args.value_of("p2p-priv-key") { @@ -271,8 +307,8 @@ pub fn get_config( * Discovery address is set to localhost by default. */ if cli_args.is_present("zero-ports") { - if client_config.network.discovery_address == Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))) { - client_config.network.discovery_address = None + if client_config.network.enr_address == Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))) { + client_config.network.enr_address = None } client_config.network.libp2p_port = unused_port("tcp").map_err(|e| format!("Failed to get port for libp2p: {}", e))?; @@ -282,16 +318,6 @@ pub fn get_config( client_config.websocket_server.port = 0; } - // ENR IP needs to be explicit for node to be discoverable - if client_config.network.discovery_address == Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))) { - warn!( - log, - "Discovery address cannot be 0.0.0.0, Setting to to 127.0.0.1" - ); - client_config.network.discovery_address = - Some("127.0.0.1".parse().expect("Valid IP address")) - } - /* * Load the eth2 testnet dir to obtain some additional config values. */ diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 2b4200142..6c37b6794 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -69,7 +69,7 @@ impl ProductionBeaconNode { /// Client behaviour is defined by the given `client_config`. 
pub fn new( context: RuntimeContext, - client_config: ClientConfig, + mut client_config: ClientConfig, ) -> impl Future { let http_eth2_config = context.eth2_config().clone(); let spec = context.eth2_config().spec.clone(); @@ -121,7 +121,7 @@ impl ProductionBeaconNode { .system_time_slot_clock()? .websocket_event_handler(client_config.websocket_server.clone())? .build_beacon_chain()? - .libp2p_network(&client_config.network)? + .network(&mut client_config.network)? .notifier()?; let builder = if client_config.rest_api.enabled { diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index ebb5cf499..36a34738a 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "store" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner "] edition = "2018" diff --git a/beacon_node/timer/Cargo.toml b/beacon_node/timer/Cargo.toml new file mode 100644 index 000000000..a0c132cbe --- /dev/null +++ b/beacon_node/timer/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "timer" +version = "0.2.0" +authors = ["Age Manning "] +edition = "2018" + +[dependencies] +beacon_chain = { path = "../beacon_chain" } +types = { path = "../../eth2/types" } +slot_clock = { path = "../../eth2/utils/slot_clock" } +tokio = "0.1.22" +slog = "2.5.2" +parking_lot = "0.10.0" +futures = "0.1.29" diff --git a/beacon_node/timer/src/lib.rs b/beacon_node/timer/src/lib.rs new file mode 100644 index 000000000..20054d854 --- /dev/null +++ b/beacon_node/timer/src/lib.rs @@ -0,0 +1,50 @@ +//! A timer service for the beacon node. +//! +//! This service allows task execution on the beacon node for various functionality. 
+ +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use futures::{future, prelude::*}; +use slog::error; +use slot_clock::SlotClock; +use std::sync::Arc; +use std::time::{Duration, Instant}; +use tokio::runtime::TaskExecutor; +use tokio::timer::Interval; + +/// Spawns a timer service which periodically executes tasks for the beacon chain +pub fn spawn( + executor: &TaskExecutor, + beacon_chain: Arc>, + milliseconds_per_slot: u64, + log: slog::Logger, +) -> Result, &'static str> { + let (exit_signal, exit) = tokio::sync::oneshot::channel(); + + let start_instant = Instant::now() + + beacon_chain + .slot_clock + .duration_to_next_slot() + .ok_or_else(|| "slot_notifier unable to determine time to next slot")?; + + let timer_future = Interval::new(start_instant, Duration::from_millis(milliseconds_per_slot)) + .map_err(move |e| { + error!( + log, + "Beacon chain timer failed"; + "error" => format!("{:?}", e) + ) + }) + .for_each(move |_| { + beacon_chain.per_slot_task(); + future::ok(()) + }); + + executor.spawn( + exit.map_err(|_| ()) + .select(timer_future) + .map(|_| ()) + .map_err(|_| ()), + ); + + Ok(exit_signal) +} diff --git a/beacon_node/version/Cargo.toml b/beacon_node/version/Cargo.toml index 0497408f1..030aebb49 100644 --- a/beacon_node/version/Cargo.toml +++ b/beacon_node/version/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "version" -version = "0.1.0" +version = "0.2.0" authors = ["Age Manning "] edition = "2018" diff --git a/beacon_node/websocket_server/Cargo.toml b/beacon_node/websocket_server/Cargo.toml index f45c718e1..0ed8d628c 100644 --- a/beacon_node/websocket_server/Cargo.toml +++ b/beacon_node/websocket_server/Cargo.toml @@ -1,13 +1,12 @@ [package] name = "websocket_server" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner "] edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -exit-future = "0.1.4" futures = "0.1.29" serde = "1.0.102" serde_derive = 
"1.0.102" diff --git a/beacon_node/websocket_server/src/lib.rs b/beacon_node/websocket_server/src/lib.rs index 26736c573..01b48ab18 100644 --- a/beacon_node/websocket_server/src/lib.rs +++ b/beacon_node/websocket_server/src/lib.rs @@ -40,7 +40,14 @@ pub fn start_server( config: &Config, executor: &TaskExecutor, log: &Logger, -) -> Result<(WebSocketSender, exit_future::Signal, SocketAddr), String> { +) -> Result< + ( + WebSocketSender, + tokio::sync::oneshot::Sender<()>, + SocketAddr, + ), + String, +> { let server_string = format!("{}:{}", config.listen_address, config.port); // Create a server that simply ignores any incoming messages. @@ -64,29 +71,31 @@ pub fn start_server( let broadcaster = server.broadcaster(); // Produce a signal/channel that can gracefully shutdown the websocket server. - let exit_signal = { - let (exit_signal, exit) = exit_future::signal(); + let exit_channel = { + let (exit_channel, exit) = tokio::sync::oneshot::channel(); let log_inner = log.clone(); let broadcaster_inner = server.broadcaster(); - let exit_future = exit.and_then(move |_| { - if let Err(e) = broadcaster_inner.shutdown() { - warn!( - log_inner, - "Websocket server errored on shutdown"; - "error" => format!("{:?}", e) - ); - } else { - info!(log_inner, "Websocket server shutdown"); - } - Ok(()) - }); + let exit_future = exit + .and_then(move |_| { + if let Err(e) = broadcaster_inner.shutdown() { + warn!( + log_inner, + "Websocket server errored on shutdown"; + "error" => format!("{:?}", e) + ); + } else { + info!(log_inner, "Websocket server shutdown"); + } + Ok(()) + }) + .map_err(|_| ()); // Place a future on the executor that will shutdown the websocket server when the // application exits. 
executor.spawn(exit_future); - exit_signal + exit_channel }; let log_inner = log.clone(); @@ -118,7 +127,7 @@ pub fn start_server( sender: Some(broadcaster), _phantom: PhantomData, }, - exit_signal, + exit_channel, actual_listen_addr, )) } diff --git a/book/README.md b/book/README.md index 45448c4ef..973e4f838 100644 --- a/book/README.md +++ b/book/README.md @@ -3,7 +3,7 @@ Contains an [mdBook](https://github.com/rust-lang-nursery/mdBook) that serves as the primary source of Lighthouse user documentation. -The book is hosted at [lighthouse-book.sigmaprime.io](http://lighthouse-book.sigmaprime.io).i +The book is hosted at [lighthouse-book.sigmaprime.io](http://lighthouse-book.sigmaprime.io) ## Usage diff --git a/book/book.toml b/book/book.toml index df47f4d7b..7b143710a 100644 --- a/book/book.toml +++ b/book/book.toml @@ -1,5 +1,5 @@ [book] -authors = ["Paul Hauner"] +authors = ["Paul Hauner", "Age Manning"] language = "en" multilingual = false src = "src" @@ -7,3 +7,5 @@ title = "Lighthouse Book" [output.html] additional-css =["src/css/custom.css"] +default-theme = "coal" + diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 6b072e6be..ee589d9ed 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -9,12 +9,13 @@ * [Local Testnets](./local-testnets.md) * [API](./api.md) * [HTTP (RESTful JSON)](./http.md) - * [/beacon](./http_beacon.md) - * [/validator](./http_validator.md) - * [/consensus](./http_consensus.md) - * [/network](./http_network.md) - * [/spec](./http_spec.md) - * [/advanced](./http_advanced.md) + * [/beacon](./http/beacon.md) + * [/validator](./http/validator.md) + * [/consensus](./http/consensus.md) + * [/network](./http/network.md) + * [/spec](./http/spec.md) + * [/advanced](./http/advanced.md) + * [/lighthouse](./http/lighthouse.md) * [WebSocket](./websockets.md) * [Advanced Usage](./advanced.md) * [Database Configuration](./advanced_database.md) diff --git a/book/src/cli.md b/book/src/cli.md index 0f82335c3..60c87d5dc 100644 --- 
a/book/src/cli.md +++ b/book/src/cli.md @@ -23,7 +23,7 @@ Typical users may install `lighthouse` to `CARGO_HOME` with `cargo install `PATH` environment variable"](https://www.rust-lang.org/tools/install) for more information. -For develeopers, we recommend building Lighthouse using the `$ cargo build --release +For developers, we recommend building Lighthouse using the `$ cargo build --release --bin lighthouse` command and executing binaries from the `/target/release` directory. This is more ergonomic when modifying and rebuilding regularly. @@ -33,7 +33,6 @@ modifying and rebuilding regularly. Each binary supports the `--help` flag, this is the best source of documentation. - ```bash $ lighthouse beacon_node --help ``` @@ -42,22 +41,15 @@ $ lighthouse beacon_node --help $ lighthouse validator_client --help ``` -## Beacon Node +## Creating a new database/testnet -The `$ lighthouse beacon_node` (or `$ lighthouse bn`) command has two primary -tasks: +Lighthouse should run out-of-the box and connect to the current testnet +maintained by Sigma Prime. -- **Resuming** an existing database with `$ lighthouse bn`. -- **Creating** a new testnet database using `$ lighthouse bn testnet`. - -## Creating a new database - -Use the `$ lighthouse bn testnet` command (see [testnets](./testnets.md) for -more information). +However, for developers, testnets can be created by following the instructions +outlined in [testnets](./testnets.md). The steps listed here will create a +local database specified to a new testnet. ## Resuming from an existing database -Once a database has been created, it can be resumed by running `$ lighthouse bn`. - -Presently, you are not allowed to call `$ lighthouse bn` unless you have first -created a database using `$ lighthouse bn testnet`. +Once a database/testnet has been created, it can be resumed by running `$ lighthouse bn`. 
diff --git a/book/src/http.md b/book/src/http.md index 042881af0..e07440e8d 100644 --- a/book/src/http.md +++ b/book/src/http.md @@ -14,12 +14,14 @@ detail: Endpoint | Description | | --- | -- | -[`/beacon`](./http_beacon.md) | General information about the beacon chain. -[`/validator`](./http_validator.md) | Provides functionality to validator clients. -[`/consensus`](./http_consensus.md) | Proof-of-stake voting statistics. -[`/network`](./http_network.md) | Information about the p2p network. -[`/spec`](./http_spec.md) | Information about the specs that the client is running. -[`/advanced`](./http_advanced.md) | Provides endpoints for advanced inspection of Lighthouse specific objects. +[`/node`](./http/node.md) | General information about the beacon node. +[`/beacon`](./http/beacon.md) | General information about the beacon chain. +[`/validator`](./http/validator.md) | Provides functionality to validator clients. +[`/consensus`](./http/consensus.md) | Proof-of-stake voting statistics. +[`/network`](./http/network.md) | Information about the p2p network. +[`/spec`](./http/spec.md) | Information about the specs that the client is running. +[`/advanced`](./http/advanced.md) | Provides endpoints for advanced inspection of Lighthouse specific objects. +[`/lighthouse`](./http/lighthouse.md) | Provides lighthouse specific endpoints. _Please note: The OpenAPI format at [SwaggerHub: Lighthouse REST diff --git a/book/src/http/advanced.md b/book/src/http/advanced.md new file mode 100644 index 000000000..822b6ffff --- /dev/null +++ b/book/src/http/advanced.md @@ -0,0 +1,115 @@ +# Lighthouse REST API: `/advanced` + +The `/advanced` endpoints provide information Lighthouse specific data structures for advanced debugging. + +## Endpoints + +HTTP Path | Description | +| --- | -- | +[`/advanced/fork_choice`](#advancedfork_choice) | Get the `proto_array` fork choice object. +[`/advanced/operation_pool`](#advancedoperation_pool) | Get the Lighthouse `PersistedOperationPool` object. 
+ + +## `/advanced/fork_choice` + +Requests the `proto_array` fork choice object as represented in Lighthouse. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/advanced/fork_choice` +Method | GET +JSON Encoding | Object +Query Parameters | None +Typical Responses | 200 + +### Example Response + +```json +{ + "prune_threshold": 256, + "justified_epoch": 25, + "finalized_epoch": 24, + "nodes": [ + { + "slot": 544, + "root": "0x27103c56d4427cb4309dd202920ead6381d54d43277c29cf0572ddf0d528e6ea", + "parent": null, + "justified_epoch": 16, + "finalized_epoch": 15, + "weight": 256000000000, + "best_child": 1, + "best_descendant": 296 + }, + { + "slot": 545, + "root": "0x09af0e8d4e781ea4280c9c969d168839c564fab3a03942e7db0bfbede7d4c745", + "parent": 0, + "justified_epoch": 16, + "finalized_epoch": 15, + "weight": 256000000000, + "best_child": 2, + "best_descendant": 296 + }, + ], + "indices": { + "0xb935bb3651eeddcb2d2961bf307156850de982021087062033f02576d5df00a3": 59, + "0x8f4ec47a34c6c1d69ede64d27165d195f7e2a97c711808ce51f1071a6e12d5b9": 189, + "0xf675eba701ef77ee2803a130dda89c3c5673a604d2782c9e25ea2be300d7d2da": 173, + "0x488a483c8d5083faaf5f9535c051b9f373ba60d5a16e77ddb1775f248245b281": 37 + } +} +``` +_Truncated for brevity._ + +## `/advanced/operation_pool` + +Requests the `PersistedOperationPool` object as represented in Lighthouse. 
+ +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/advanced/operation_pool` +Method | GET +JSON Encoding | Object +Query Parameters | None +Typical Responses | 200 + +### Example Response + +```json +{ + "attestations": [ + [ + { + "v": [39, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 112, 118, 215, 252, 51, 186, 76, 156, 157, 99, 91, 4, 137, 195, 209, 224, 26, 233, 233, 184, 38, 89, 215, 177, 247, 97, 243, 119, 229, 69, 50, 90, 24, 0, 0, 0, 0, 0, 0, 0, 79, 37, 38, 210, 96, 235, 121, 142, 129, 136, 206, 214, 179, 132, 22, 19, 222, 213, 203, 46, 112, 192, 26, 5, 254, 26, 103, 170, 158, 205, 72, 3, 25, 0, 0, 0, 0, 0, 0, 0, 164, 50, 214, 67, 98, 13, 50, 180, 108, 232, 248, 109, 128, 45, 177, 23, 221, 24, 218, 211, 8, 152, 172, 120, 24, 86, 198, 103, 68, 164, 67, 202, 1, 0, 0, 0, 0, 0, 0, 0] + }, + [ + { + "aggregation_bits": "0x03", + "data": { + "slot": 807, + "index": 0, + "beacon_block_root": "0x7076d7fc33ba4c9c9d635b0489c3d1e01ae9e9b82659d7b1f761f377e545325a", + "source": { + "epoch": 24, + "root": "0x4f2526d260eb798e8188ced6b3841613ded5cb2e70c01a05fe1a67aa9ecd4803" + }, + "target": { + "epoch": 25, + "root": "0xa432d643620d32b46ce8f86d802db117dd18dad30898ac781856c66744a443ca" + } + }, + "signature": "0x8b1d624b0cd5a7a0e13944e90826878a230e3901db34ea87dbef5b145ade2fedbc830b6752a38a0937a1594211ab85b615d65f9eef0baccd270acca945786036695f4db969d9ff1693c505c0fe568b2fe9831ea78a74cbf7c945122231f04026" + } + ] + ] + ], + "attester_slashings": [], + "proposer_slashings": [], + "voluntary_exits": [] +} +``` +_Truncated for brevity._ diff --git a/book/src/http/beacon.md b/book/src/http/beacon.md new file mode 100644 index 000000000..eba59e3e8 --- /dev/null +++ b/book/src/http/beacon.md @@ -0,0 +1,784 @@ +# Lighthouse REST API: `/beacon` + +The `/beacon` endpoints provide information about the canonical head of the +beacon chain and also historical information about beacon blocks and states. 
+ +## Endpoints + +HTTP Path | Description | +| --- | -- | +[`/beacon/head`](#beaconhead) | Info about the block at the head of the chain. +[`/beacon/heads`](#beaconheads) | Returns a list of all known chain heads. +[`/beacon/block`](#beaconblock) | Get a `BeaconBlock` by slot or root. +[`/beacon/block_root`](#beaconblock_root) | Resolve a slot to a block root. +[`/beacon/fork`](#beaconfork) | Get the fork of the head of the chain. +[`/beacon/genesis_time`](#beacongenesis_time) | Get the genesis time from the beacon state. +[`/beacon/genesis_validators_root`](#beacongenesis_validators_root) | Get the genesis validators root. +[`/beacon/validators`](#beaconvalidators) | Query for one or more validators. +[`/beacon/validators/all`](#beaconvalidatorsall) | Get all validators. +[`/beacon/validators/active`](#beaconvalidatorsactive) | Get all active validators. +[`/beacon/state`](#beaconstate) | Get a `BeaconState` by slot or root. +[`/beacon/state_root`](#beaconstate_root) | Resolve a slot to a state root. +[`/beacon/state/genesis`](#beaconstategenesis) | Get a `BeaconState` at genesis. +[`/beacon/committees`](#beaconcommittees) | Get the shuffling for an epoch. +[`/beacon/proposer_slashing`](#beaconproposer_slashing) | Insert a proposer slashing +[`/beacon/attester_slashing`](#beaconattester_slashing) | Insert an attester slashing + +## `/beacon/head` + +Requests information about the head of the beacon chain, from the node's +perspective. 
+ +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/beacon/head` +Method | GET +JSON Encoding | Object +Query Parameters | None +Typical Responses | 200 + +### Example Response + +```json +{ + "slot": 37923, + "block_root": "0xe865d4805395a0776b8abe46d714a9e64914ab8dc5ff66624e5a1776bcc1684b", + "state_root": "0xe500e3567ab273c9a6f8a057440deff476ab236f0983da27f201ee9494a879f0", + "finalized_slot": 37856, + "finalized_block_root": "0xbdae152b62acef1e5c332697567d2b89e358628790b8273729096da670b23e86", + "justified_slot": 37888, + "justified_block_root": "0x01c2f516a407d8fdda23cad4ed4381e4ab8913d638f935a2fe9bd00d6ced5ec4", + "previous_justified_slot": 37856, + "previous_justified_block_root": "0xbdae152b62acef1e5c332697567d2b89e358628790b8273729096da670b23e86" +} +``` + +## `/beacon/heads` + +Returns the roots of all known head blocks. Only one of these roots is the +canonical head and that is decided by the fork choice algorithm. See [`/beacon/head`](#beaconhead) for the canonical head. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/beacon/heads` +Method | GET +JSON Encoding | Object +Query Parameters | None +Typical Responses | 200 + +### Example Response + +```json +[ + { + "beacon_block_root": "0x226b2fd7c5f3d31dbb21444b96dfafe715f0017cd16545ecc4ffa87229496a69", + "beacon_block_slot": 38373 + }, + { + "beacon_block_root": "0x41ed5b253c4fc841cba8a6d44acbe101866bc674c3cfa3c4e9f7388f465aa15b", + "beacon_block_slot": 38375 + } +] +``` + +## `/beacon/block` + +Request that the node return a beacon chain block that matches the provided +criteria (a block `root` or beacon chain `slot`). Only one of the parameters +should be provided as a criteria. 
+ +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/beacon/block` +Method | GET +JSON Encoding | Object +Query Parameters | `slot`, `root` +Typical Responses | 200, 404 + +### Parameters + +Accepts **only one** of the following parameters: + +- `slot` (`Slot`): Query by slot number. Any block returned must be in the canonical chain (i.e., +either the head or an ancestor of the head). +- `root` (`Bytes32`): Query by tree hash root. A returned block is not required to be in the +canonical chain. + +### Returns + +Returns an object containing a single [`SignedBeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#signedbeaconblock) and the block root of the inner [`BeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#beaconblock). + +### Example Response + +```json +{ + "root": "0xc35ddf4e71c31774e0594bd7eb32dfe50b54dbc40abd594944254b4ec8895196", + "beacon_block": { + "message": { + "slot": 0, + "proposer_index": 14, + "parent_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "state_root": "0xf15690b6be4ed42ea1ee0741eb4bfd4619d37be8229b84b4ddd480fb028dcc8f", + "body": { + "randao_reveal": "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "eth1_data": { + "deposit_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "deposit_count": 0, + "block_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "graffiti": "0x0000000000000000000000000000000000000000000000000000000000000000", + "proposer_slashings": [], + "attester_slashings": [], + "attestations": [], + "deposits": [], + "voluntary_exits": [] + } + }, + "signature": 
"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + } +} +``` + +## `/beacon/block_root` + +Returns the block root for the given slot in the canonical chain. If there +is a re-org, the same slot may return a different root. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/beacon/block_root` +Method | GET +JSON Encoding | Object +Query Parameters | `slot` +Typical Responses | 200, 404 + +## Parameters + +- `slot` (`Slot`): the slot to be resolved to a root. + +### Example Response + +```json +"0xc35ddf4e71c31774e0594bd7eb32dfe50b54dbc40abd594944254b4ec8895196" +``` + +## `/beacon/committees` + +Request the committees (a.k.a. "shuffling") for all slots and committee indices +in a given `epoch`. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/beacon/committees` +Method | GET +JSON Encoding | Object +Query Parameters | `epoch` +Typical Responses | 200/500 + +### Parameters + +The `epoch` (`Epoch`) query parameter is required and defines the epoch for +which the committees will be returned. All slots contained within the response will +be inside this epoch. + +### Returns + +A list of beacon committees. + +### Example Response + +```json +[ + { + "slot": 4768, + "index": 0, + "committee": [ + 1154, + 492, + 9667, + 3089, + 8987, + 1421, + 224, + 11243, + 2127, + 2329, + 188, + 482, + 486 + ] + }, + { + "slot": 4768, + "index": 1, + "committee": [ + 5929, + 8482, + 5528, + 6130, + 14343, + 9777, + 10808, + 12739, + 15234, + 12819, + 5423, + 6320, + 9991 + ] + } +] +``` + +_Truncated for brevity._ + +## `/beacon/fork` + +Request that the node return the `fork` of the current head. 
+ +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/beacon/fork` +Method | GET +JSON Encoding | Object +Query Parameters | None +Typical Responses | 200 + + +### Returns + +Returns an object containing the [`Fork`](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#fork) of the current head. + +### Example Response + +```json +{ + "previous_version": "0x00000000", + "current_version": "0x00000000", + "epoch": 0 +} +``` + +## `/beacon/genesis_time` + +Request that the node return the genesis time from the beacon state. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/beacon/genesis_time` +Method | GET +JSON Encoding | Object +Query Parameters | None +Typical Responses | 200 + + +### Returns + +Returns an object containing the genesis time. + +### Example Response + +```json +1581576353 +``` + +## `/beacon/genesis_validators_root` + +Request that the node return the genesis validators root from the beacon state. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/beacon/genesis_validators_root` +Method | GET +JSON Encoding | Object +Query Parameters | None +Typical Responses | 200 + + +### Returns + +Returns an object containing the genesis validators root. + +### Example Response + +```json +0x4fbf23439a7a9b9dd91650e64e8124012dde5e2ea2940c552b86f04eb47f95de +``` + +## `/beacon/validators` + +Request that the node returns information about one or more validator public +keys. This request takes the form of a `POST` request to allow sending a large +number of pubkeys in the request. 
+ +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/beacon/validators` +Method | POST +JSON Encoding | Object +Query Parameters | None +Typical Responses | 200 + +### Request Body + +Expects the following object in the POST request body: + +``` +{ + state_root: Bytes32, + pubkeys: [PublicKey] +} +``` + +The `state_root` field indicates which `BeaconState` should be used to collect +the information. The `state_root` is optional and omitting it will result in +the canonical head state being used. + + +### Returns + +Returns an object describing several aspects of the given validator. + +### Example + +### Request Body + +```json +{ + "pubkeys": [ + "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", + "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42" + ] +} +``` + +_Note: for demonstration purposes the second pubkey is some unknown pubkey._ + +### Response Body + +```json +[ + { + "pubkey": "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", + "validator_index": 14935, + "balance": 3228885987, + "validator": { + "pubkey": "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", + "withdrawal_credentials": "0x00b7bec22d5bda6b2cca1343d4f640d0e9ccc204a06a73703605c590d4c0d28e", + "effective_balance": 3200000000, + "slashed": false, + "activation_eligibility_epoch": 0, + "activation_epoch": 0, + "exit_epoch": 18446744073709551615, + "withdrawable_epoch": 18446744073709551615 + } + }, + { + "pubkey": "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42", + "validator_index": null, + "balance": null, + "validator": null + } +] +``` + +## `/beacon/validators/all` + +Returns all validators. 
+ +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/beacon/validators/all` +Method | GET +JSON Encoding | Object +Query Parameters | `state_root` (optional) +Typical Responses | 200 + +### Parameters + +The optional `state_root` (`Bytes32`) query parameter indicates which +`BeaconState` should be used to collect the information. When omitted, the +canonical head state will be used. + +### Returns + +The return format is identical to the [`/beacon/validators`](#beaconvalidators) response body. + + +## `/beacon/validators/active` + +Returns all validators that are active in the state defined by `state_root`. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/beacon/validators/active` +Method | GET +JSON Encoding | Object +Query Parameters | `state_root` (optional) +Typical Responses | 200 + +### Parameters + +The optional `state_root` (`Bytes32`) query parameter indicates which +`BeaconState` should be used to collect the information. When omitted, the +canonical head state will be used. + +### Returns + +The return format is identical to the [`/beacon/validators`](#beaconvalidators) response body. + + +## `/beacon/state` + +Request that the node return a beacon chain state that matches the provided +criteria (a state `root` or beacon chain `slot`). Only one of the parameters +should be provided as a criteria. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/beacon/state` +Method | GET +JSON Encoding | Object +Query Parameters | `slot`, `root` +Typical Responses | 200, 404 + +### Parameters + +Accepts **only one** of the following parameters: + +- `slot` (`Slot`): Query by slot number. Any state returned must be in the canonical chain (i.e., +either the head or an ancestor of the head). +- `root` (`Bytes32`): Query by tree hash root. A returned state is not required to be in the +canonical chain. 
+
+### Returns
+
+Returns an object containing a single
+[`BeaconState`](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#beaconstate)
+and its tree hash root.
+
+### Example Response
+
+```json
+{
+  "root": "0x528e54ca5d4c957729a73f40fc513ae312e054c7295775c4a2b21f423416a72b",
+  "beacon_state": {
+    "genesis_time": 1575652800,
+    "genesis_validators_root": "0xa8a9226edee1b2627fb4117d7dea4996e64dec2998f37f6e824f74f2ce39a538",
+    "slot": 18478
+  }
+}
+```
+
+_Truncated for brevity._
+
+## `/beacon/state_root`
+
+Returns the state root for the given slot in the canonical chain. If there
+is a re-org, the same slot may return a different root.
+
+
+### HTTP Specification
+
+| Property | Specification |
+| --- |--- |
+Path | `/beacon/state_root`
+Method | GET
+JSON Encoding | Object
+Query Parameters | `slot`
+Typical Responses | 200, 404
+
+### Parameters
+
+- `slot` (`Slot`): the slot to be resolved to a root.
+
+### Example Response
+
+```json
+"0xf15690b6be4ed42ea1ee0741eb4bfd4619d37be8229b84b4ddd480fb028dcc8f"
+```
+
+## `/beacon/state/genesis`
+
+Request that the node return a beacon chain state at genesis (slot 0).
+
+### HTTP Specification
+
+| Property | Specification |
+| --- |--- |
+Path | `/beacon/state/genesis`
+Method | GET
+JSON Encoding | Object
+Query Parameters | None
+Typical Responses | 200
+
+
+### Returns
+
+Returns an object containing the genesis
+[`BeaconState`](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#beaconstate).
+
+### Example Response
+
+```json
+{
+  "genesis_time": 1581576353,
+  "slot": 0,
+  "fork": {
+    "previous_version": "0x00000000",
+    "current_version": "0x00000000",
+    "epoch": 0
+  }
+}
+```
+
+_Truncated for brevity._
+
+
+## `/beacon/state/committees`
+
+Request that the node return the committees ("shuffling") for the given epoch.
+
+### HTTP Specification
+
+| Property | Specification |
+| --- |--- |
+Path | `/beacon/state/committees`
+Method | GET
+JSON Encoding | Object
+Query Parameters | `epoch`
+Typical Responses | 200
+
+
+### Returns
+
+Returns an object containing the committees for a given epoch.
+
+### Example Response
+
+```json
+[
+  {"slot":64,"index":0,"committee":[]},
+  {"slot":65,"index":0,"committee":[3]},
+  {"slot":66,"index":0,"committee":[]},
+  {"slot":67,"index":0,"committee":[14]},
+  {"slot":68,"index":0,"committee":[]},
+  {"slot":69,"index":0,"committee":[9]},
+  {"slot":70,"index":0,"committee":[]},
+  {"slot":71,"index":0,"committee":[11]},
+  {"slot":72,"index":0,"committee":[]},
+  {"slot":73,"index":0,"committee":[5]},
+  {"slot":74,"index":0,"committee":[]},
+  {"slot":75,"index":0,"committee":[15]},
+  {"slot":76,"index":0,"committee":[]},
+  {"slot":77,"index":0,"committee":[0]}
+]
+```
+
+_Truncated for brevity._
+
+
+## `/beacon/attester_slashing`
+
+Accepts an `attester_slashing` and verifies it. If it is valid, it is added to the operations pool for potential inclusion in a future block. Returns a 400 error if the `attester_slashing` is invalid.
+ +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/beacon/attester_slashing` +Method | POST +JSON Encoding | Object +Query Parameters | None +Typical Responses | 200/400 + +### Parameters + +Expects the following object in the POST request body: + +``` +{ + attestation_1: { + attesting_indices: [u64], + data: { + slot: Slot, + index: u64, + beacon_block_root: Bytes32, + source: { + epoch: Epoch, + root: Bytes32 + }, + target: { + epoch: Epoch, + root: Bytes32 + } + } + signature: Bytes32 + }, + attestation_2: { + attesting_indices: [u64], + data: { + slot: Slot, + index: u64, + beacon_block_root: Bytes32, + source: { + epoch: Epoch, + root: Bytes32 + }, + target: { + epoch: Epoch, + root: Bytes32 + } + } + signature: Bytes32 + } +} +``` + +### Returns + +Returns `true` if the attester slashing was inserted successfully, or the corresponding error if it failed. + +### Example + +### Request Body + +```json +{ + "attestation_1": { + "attesting_indices": [0], + "data": { + "slot": 1, + "index": 0, + "beacon_block_root": "0x0000000000000000000000000000000000000000000000000100000000000000", + "source": { + "epoch": 1, + "root": "0x0000000000000000000000000000000000000000000000000100000000000000" + }, + "target": { + "epoch": 1, + "root": "0x0000000000000000000000000000000000000000000000000100000000000000" + } + }, + "signature": "0xb47f7397cd944b8d5856a13352166bbe74c85625a45b14b7347fc2c9f6f6f82acee674c65bc9ceb576fcf78387a6731c0b0eb3f8371c70db2da4e7f5dfbc451730c159d67263d3db56b6d0e009e4287a8ba3efcacac30b3ae3447e89dc71b5b9" + }, + "attestation_2": { + "attesting_indices": [0], + "data": { + "slot": 1, + "index": 0, + "beacon_block_root": "0x0000000000000000000000000000000000000000000000000100000000000000", + "source": { + "epoch": 1, + "root": "0x0000000000000000000000000000000000000000000000000100000000000000" + }, + "target": { + "epoch": 1, + "root": "0x0000000000000000000000000000000000000000000000000200000000000000" + } + }, + 
"signature": "0x93fef587a63acf72aaf8df627718fd43cb268035764071f802ffb4370a2969d226595cc650f4c0bf2291ae0c0a41fcac1700f318603d75d34bcb4b9f4a8368f61eeea0e1f5d969d92d5073ba5fbadec102b45ec87d418d25168d2e3c74b9fcbb" + } +} +``` + +_Note: data sent here is for demonstration purposes only_ + + + +## `/beacon/proposer_slashing` + +Accepts a `proposer_slashing` and verifies it. If it is valid, it is added to the operations pool for potential inclusion in a future block. Returns an 400 error if the `proposer_slashing` is invalid. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/beacon/proposer_slashing` +Method | POST +JSON Encoding | Object +Query Parameters | None +Typical Responses | 200/400 + +### Request Body + +Expects the following object in the POST request body: + +``` +{ + proposer_index: u64, + header_1: { + slot: Slot, + parent_root: Bytes32, + state_root: Bytes32, + body_root: Bytes32, + signature: Bytes32 + }, + header_2: { + slot: Slot, + parent_root: Bytes32, + state_root: Bytes32, + body_root: Bytes32, + signature: Bytes32 + } +} +``` + +### Returns + +Returns `true` if the proposer slashing was inserted successfully, or the corresponding error if it failed. 
+ +### Example + +### Request Body + +```json +{ + "proposer_index": 0, + "header_1": { + "slot": 0, + "parent_root": "0x0101010101010101010101010101010101010101010101010101010101010101", + "state_root": "0x0101010101010101010101010101010101010101010101010101010101010101", + "body_root": "0x0101010101010101010101010101010101010101010101010101010101010101", + "signature": "0xb8970d1342c6d5779c700ec366efd0ca819937ca330960db3ca5a55eb370a3edd83f4cbb2f74d06e82f934fcbd4bb80609a19c2254cc8b3532a4efff9e80edf312ac735757c059d77126851e377f875593e64ba50d1dffe69a809a409202dd12" + }, + "header_2": { + "slot": 0, + "parent_root": "0x0202020202020202020202020202020202020202020202020202020202020202", + "state_root": "0x0101010101010101010101010101010101010101010101010101010101010101", + "body_root": "0x0101010101010101010101010101010101010101010101010101010101010101", + "signature": "0xb60e6b348698a34e59b22e0af96f8809f977f00f95d52375383ade8d22e9102270a66c6d52b0434214897e11ca4896871510c01b3fd74d62108a855658d5705fcfc4ced5136264a1c6496f05918576926aa191b1ad311b7e27f5aa2167aba294" + } +} +``` + +_Note: data sent here is for demonstration purposes only_ + + + + + diff --git a/book/src/http/consensus.md b/book/src/http/consensus.md new file mode 100644 index 000000000..c71b78ce3 --- /dev/null +++ b/book/src/http/consensus.md @@ -0,0 +1,189 @@ +# Lighthouse REST API: `/consensus` + +The `/consensus` endpoints provide information on results of the proof-of-stake +voting process used for finality/justification under Casper FFG. + +## Endpoints + +HTTP Path | Description | +| --- | -- | +[`/consensus/global_votes`](#consensusglobal_votes) | A global vote count for a given epoch. +[`/consensus/individual_votes`](#consensusindividual_votes) | A per-validator breakdown of votes in a given epoch. + +## `/consensus/global_votes` + +Returns a global count of votes for some given `epoch`. 
The results are included
+both for the current and previous (`epoch - 1`) epochs since both are required
+by the beacon node whilst performing per-epoch-processing.
+
+Generally, you should consider the "current" values to be incomplete and the
+"previous" values to be final. This is because validators can continue to
+include attestations from the _current_ epoch in the _next_ epoch, however this
+is not the case for attestations from the _previous_ epoch.
+
+```
+        `epoch` query parameter
+              |
+              |     --------- values are calculated here
+              |     |
+              v     v
+Epoch: |---previous---|---current---|---next---|
+
+                      |-------------|
+                             ^
+                             |
+              window for including "current" attestations
+                          in a block
+```
+
+The votes are expressed in terms of staked _effective_ `Gwei` (i.e., not the number of
+individual validators). For example, if a validator has 32 ETH staked they will
+increase the `current_epoch_attesting_gwei` figure by `32,000,000,000` if they
+have an attestation included in a block during the current epoch. If this
+validator has more than 32 ETH, that extra ETH will not count towards their
+vote (that is why it is _effective_ `Gwei`).
+
+The following fields are returned:
+
+- `current_epoch_active_gwei`: the total staked gwei that was active (i.e.,
+  able to vote) during the current epoch.
+- `current_epoch_attesting_gwei`: the total staked gwei that had one or more
+  attestations included in a block during the current epoch (multiple
+  attestations by the same validator do not increase this figure).
+- `current_epoch_target_attesting_gwei`: the total staked gwei that attested to
+  the majority-elected Casper FFG target epoch during the current epoch. This
+  figure must be equal to or less than `current_epoch_attesting_gwei`.
+- `previous_epoch_active_gwei`: as above, but during the previous epoch.
+- `previous_epoch_attesting_gwei`: see `current_epoch_attesting_gwei`.
+- `previous_epoch_target_attesting_gwei`: see `current_epoch_target_attesting_gwei`.
+- `previous_epoch_head_attesting_gwei`: the total staked gwei that attested to a + head beacon block that is in the canonical chain. + +From this data you can calculate some interesting figures: + +#### Participation Rate + +`previous_epoch_attesting_gwei / previous_epoch_active_gwei` + +Expresses the ratio of validators that managed to have an attestation +voting upon the previous epoch included in a block. + +#### Justification/Finalization Rate + +`previous_epoch_target_attesting_gwei / previous_epoch_active_gwei` + +When this value is greater than or equal to `2/3` it is possible that the +beacon chain may justify and/or finalize the epoch. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/consensus/global_votes` +Method | GET +JSON Encoding | Object +Query Parameters | `epoch` +Typical Responses | 200 + +### Parameters + +Requires the `epoch` (`Epoch`) query parameter to determine which epoch will be +considered the current epoch. + +### Returns + +A report on global validator voting participation. + +### Example + +```json +{ + "current_epoch_active_gwei": 52377600000000, + "previous_epoch_active_gwei": 52377600000000, + "current_epoch_attesting_gwei": 50740900000000, + "current_epoch_target_attesting_gwei": 49526000000000, + "previous_epoch_attesting_gwei": 52377600000000, + "previous_epoch_target_attesting_gwei": 51063400000000, + "previous_epoch_head_attesting_gwei": 9248600000000 +} +``` + +## `/consensus/individual_votes` + +Returns a per-validator summary of how that validator performed during the +current epoch. + +The [Global Votes](#consensusglobal_votes) endpoint is the summation of all of these +individual values, please see it for definitions of terms like "current_epoch", +"previous_epoch" and "target_attester". 
+ +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/consensus/individual_votes` +Method | POST +JSON Encoding | Object +Query Parameters | None +Typical Responses | 200 + +### Request Body + +Expects the following object in the POST request body: + +``` +{ + epoch: Epoch, + pubkeys: [PublicKey] +} +``` + +### Returns + +A report on the validators voting participation. + +### Example + +#### Request Body + +```json +{ + "epoch": 1203, + "pubkeys": [ + "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", + "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42" + ] +} +``` + +_Note: for demonstration purposes the second pubkey is some unknown pubkey._ + +#### Response Body + +```json +[ + { + "epoch": 1203, + "pubkey": "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", + "validator_index": 14935, + "vote": { + "is_slashed": false, + "is_withdrawable_in_current_epoch": false, + "is_active_in_current_epoch": true, + "is_active_in_previous_epoch": true, + "current_epoch_effective_balance_gwei": 3200000000, + "is_current_epoch_attester": true, + "is_current_epoch_target_attester": true, + "is_previous_epoch_attester": true, + "is_previous_epoch_target_attester": true, + "is_previous_epoch_head_attester": false + } + }, + { + "epoch": 1203, + "pubkey": "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42", + "validator_index": null, + "vote": null + } +] +``` diff --git a/book/src/http/lighthouse.md b/book/src/http/lighthouse.md new file mode 100644 index 000000000..d80c0f694 --- /dev/null +++ b/book/src/http/lighthouse.md @@ -0,0 +1,182 @@ +# Lighthouse REST API: `/lighthouse` + +The `/lighthouse` endpoints provide lighthouse-specific information about the beacon node. 
+
+## Endpoints
+
+HTTP Path | Description |
+| --- | -- |
+[`/lighthouse/syncing`](#lighthousesyncing) | Get the node's syncing status
+[`/lighthouse/peers`](#lighthousepeers) | Get the peers info known by the beacon node
+[`/lighthouse/connected_peers`](#lighthouseconnected_peers) | Get the connected peers known by the beacon node
+
+## `/lighthouse/syncing`
+
+Requests the syncing state of a Lighthouse beacon node. Lighthouse has a
+custom sync protocol; this request gets Lighthouse-specific sync information.
+
+### HTTP Specification
+
+| Property | Specification |
+| --- |--- |
+Path | `/lighthouse/syncing`
+Method | GET
+JSON Encoding | Object
+Query Parameters | None
+Typical Responses | 200
+
+### Example Response
+
+If the node is undergoing a finalization sync:
+```json
+{
+  "SyncingFinalized": {
+    "start_slot": 10,
+    "head_slot": 20,
+    "head_root":"0x74020d0e3c3c02d2ea6279d5760f7d0dd376c4924beaaec4d5c0cefd1c0c4465"
+  }
+}
+```
+
+If the node is undergoing a head chain sync:
+```json
+{
+  "SyncingHead": {
+    "start_slot":0,
+    "head_slot":1195
+  }
+}
+```
+
+If the node is synced:
+```json
+"Synced"
+```
+
+## `/lighthouse/peers`
+
+Get all known peers info from the beacon node.
+ +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/lighthouse/peers` +Method | GET +JSON Encoding | Object +Query Parameters | None +Typical Responses | 200 + +### Example Response + +```json +[ +{ + "peer_id" : "16Uiu2HAmTEinipUS3haxqucrn7d7SmCKx5XzAVbAZCiNW54ncynG", + "peer_info" : { + "_status" : "Healthy", + "client" : { + "agent_string" : "github.com/libp2p/go-libp2p", + "kind" : "Prysm", + "os_version" : "unknown", + "protocol_version" : "ipfs/0.1.0", + "version" : "unknown" + }, + "connection_status" : { + "Disconnected" : { + "since" : 3 + } + }, + "listening_addresses" : [ + "/ip4/10.3.58.241/tcp/9001", + "/ip4/35.172.14.146/tcp/9001", + "/ip4/35.172.14.146/tcp/9001" + ], + "meta_data" : { + "attnets" : "0x0000000000000000", + "seq_number" : 0 + }, + "reputation" : 20, + "sync_status" : { + "Synced" : { + "status_head_slot" : 18146 + } + } + } + }, + { + "peer_id" : "16Uiu2HAm8XZfPv3YjktCjitSRtfS7UfHfEvpiUyHrdiX6uAD55xZ", + "peer_info" : { + "_status" : "Healthy", + "client" : { + "agent_string" : null, + "kind" : "Unknown", + "os_version" : "unknown", + "protocol_version" : "unknown", + "version" : "unknown" + }, + "connection_status" : { + "Disconnected" : { + "since" : 5 + } + }, + "listening_addresses" : [], + "meta_data" : { + "attnets" : "0x0900000000000000", + "seq_number" : 0 + }, + "reputation" : 20, + "sync_status" : "Unknown" + } + }, +] +``` + +## `/lighthouse/connected_peers` + +Get all known peers info from the beacon node. 
+
+### HTTP Specification
+
+| Property | Specification |
+| --- |--- |
+Path | `/lighthouse/connected_peers`
+Method | GET
+JSON Encoding | Object
+Query Parameters | None
+Typical Responses | 200
+
+### Example Response
+
+```json
+[
+  {
+    "peer_id" : "16Uiu2HAm8XZfPv3YjktCjitSRtfS7UfHfEvpiUyHrdiX6uAD55xZ",
+    "peer_info" : {
+      "_status" : "Healthy",
+      "client" : {
+        "agent_string" : null,
+        "kind" : "Unknown",
+        "os_version" : "unknown",
+        "protocol_version" : "unknown",
+        "version" : "unknown"
+      },
+      "connection_status" : {
+        "Connected" : {
+          "in" : 5,
+          "out" : 2
+        }
+      },
+      "listening_addresses" : [],
+      "meta_data" : {
+        "attnets" : "0x0900000000000000",
+        "seq_number" : 0
+      },
+      "reputation" : 20,
+      "sync_status" : "Unknown"
+    }
+  }
+]
+```
diff --git a/book/src/http/network.md b/book/src/http/network.md
new file mode 100644
index 000000000..2ac0c83ba
--- /dev/null
+++ b/book/src/http/network.md
@@ -0,0 +1,148 @@
+# Lighthouse REST API: `/network`
+
+The `/network` endpoints provide information about the p2p network that
+Lighthouse uses to communicate with other beacon nodes.
+
+## Endpoints
+
+HTTP Path | Description |
+| --- | -- |
+[`/network/enr`](#networkenr) | Get the local node's `ENR` as base64.
+[`/network/peer_count`](#networkpeer_count) | Get the count of connected peers.
+[`/network/peer_id`](#networkpeer_id) | Get a node's libp2p `PeerId`.
+[`/network/peers`](#networkpeers) | List a node's connected peers (as `PeerIds`).
+[`/network/listen_port`](#networklisten_port) | Get a node's libp2p listening port.
+[`/network/listen_addresses`](#networklisten_addresses) | Get a list of libp2p multiaddr the node is listening on.
+
+## `/network/enr`
+
+Requests the beacon node for its listening `ENR` address.
+ +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/network/enr` +Method | GET +JSON Encoding | String (base64) +Query Parameters | None +Typical Responses | 200 + +### Example Response + +```json +"-IW4QPYyGkXJSuJ2Eji8b-m4PTNrW4YMdBsNOBrYAdCk8NLMJcddAiQlpcv6G_hdNjiLACOPTkqTBhUjnC0wtIIhyQkEgmlwhKwqAPqDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhA1sBKo0yCfw4Z_jbggwflNfftjwKACu-a-CoFAQHJnrm" +``` + +## `/network/peer_count` + +Requests the count of peers connected to the client. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/network/peer_count` +Method | GET +JSON Encoding | Number +Query Parameters | None +Typical Responses | 200 + +### Example Response + +```json +5 +``` +## `/network/peer_id` + +Requests the beacon node's local `PeerId`. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/network/peer_id` +Method | GET +JSON Encoding | String (base58) +Query Parameters | None +Typical Responses | 200 + +### Example Response + +```json +"QmVFcULBYZecPdCKgGmpEYDqJLqvMecfhJadVBtB371Avd" +``` + +## `/network/peers` + +Requests one `MultiAddr` for each peer connected to the beacon node. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/network/peers` +Method | GET +JSON Encoding | [String] (base58) +Query Parameters | None +Typical Responses | 200 + +### Example Response + +```json +[ + "QmaPGeXcfKFMU13d8VgbnnpeTxcvoFoD9bUpnRGMUJ1L9w", + "QmZt47cP8V96MgiS35WzHKpPbKVBMqr1eoBNTLhQPqpP3m" +] +``` + + +## `/network/listen_port` + +Requests the TCP port that the client's libp2p service is listening on. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/network/listen_port` +Method | GET +JSON Encoding | Number +Query Parameters | None +Typical Responses | 200 + +### Example Response + +```json +9000 +``` + +## `/network/listen_addresses` + +Requests the list of multiaddr that the client's libp2p service is listening on. 
+ +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/network/listen_addresses` +Method | GET +JSON Encoding | Array +Query Parameters | None +Typical Responses | 200 + +### Example Response + +```json +[ + "/ip4/127.0.0.1/tcp/9000", + "/ip4/192.168.31.115/tcp/9000", + "/ip4/172.24.0.1/tcp/9000", + "/ip4/172.21.0.1/tcp/9000", + "/ip4/172.17.0.1/tcp/9000", + "/ip4/172.18.0.1/tcp/9000", + "/ip4/172.19.0.1/tcp/9000", + "/ip4/172.42.0.1/tcp/9000", + "/ip6/::1/tcp/9000" +] +``` diff --git a/book/src/http/node.md b/book/src/http/node.md new file mode 100644 index 000000000..3c32c6c1c --- /dev/null +++ b/book/src/http/node.md @@ -0,0 +1,57 @@ +# Lighthouse REST API: `/node` + +The `/node` endpoints provide information about the lighthouse beacon node. + +## Endpoints + +HTTP Path | Description | +| --- | -- | +[`/node/version`](#nodeversion) | Get the node's version. +[`/node/syncing`](#nodesyncing) | Get the node's syncing status. + +## `/node/version` + +Requests the beacon node's version. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/node/version` +Method | GET +JSON Encoding | String +Query Parameters | None +Typical Responses | 200 + +### Example Response + +```json +"Lighthouse-0.2.0-unstable" +``` + +## `/node/syncing` + +Requests the syncing status of the beacon node. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/node/syncing` +Method | GET +JSON Encoding | Object +Query Parameters | None +Typical Responses | 200 + +### Example Response + +```json +{ + is_syncing: true, + sync_status: { + starting_slot: 0, + current_slot: 100, + highest_slot: 200, + } +} +``` diff --git a/book/src/http/spec.md b/book/src/http/spec.md new file mode 100644 index 000000000..9fb8c0c98 --- /dev/null +++ b/book/src/http/spec.md @@ -0,0 +1,154 @@ +# Lighthouse REST API: `/spec` + +The `/spec` endpoints provide information about Eth2.0 specifications that the node is running. 
+ +## Endpoints + +HTTP Path | Description | +| --- | -- | +[`/spec`](#spec) | Get the full spec object that a node's running. +[`/spec/slots_per_epoch`](#specslots_per_epoch) | Get the number of slots per epoch. +[`/spec/eth2_config`](#specseth2_config) | Get the full Eth2 config object. + +## `/spec` + +Requests the full spec object that a node's running. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/spec` +Method | GET +JSON Encoding | Object +Query Parameters | None +Typical Responses | 200 + +### Example Response + +```json +{ + "genesis_slot": 0, + "base_rewards_per_epoch": 4, + "deposit_contract_tree_depth": 32, + "max_committees_per_slot": 64, + "target_committee_size": 128, + "min_per_epoch_churn_limit": 4, + "churn_limit_quotient": 65536, + "shuffle_round_count": 90, + "min_genesis_active_validator_count": 16384, + "min_genesis_time": 1578009600, + "min_deposit_amount": 1000000000, + "max_effective_balance": 32000000000, + "ejection_balance": 16000000000, + "effective_balance_increment": 1000000000, + "genesis_fork_version": "0x00000000", + "bls_withdrawal_prefix_byte": "0x00", + "min_genesis_delay": 86400, + "milliseconds_per_slot": 12000, + "min_attestation_inclusion_delay": 1, + "min_seed_lookahead": 1, + "max_seed_lookahead": 4, + "min_epochs_to_inactivity_penalty": 4, + "min_validator_withdrawability_delay": 256, + "persistent_committee_period": 2048, + "base_reward_factor": 64, + "whistleblower_reward_quotient": 512, + "proposer_reward_quotient": 8, + "inactivity_penalty_quotient": 33554432, + "min_slashing_penalty_quotient": 32, + "domain_beacon_proposer": 0, + "domain_beacon_attester": 1, + "domain_randao": 2, + "domain_deposit": 3, + "domain_voluntary_exit": 4, + "safe_slots_to_update_justified": 8, + "eth1_follow_distance": 1024, + "seconds_per_eth1_block": 14, + "boot_nodes": [], + "network_id": 1 +} +``` + +## `/spec/eth2_config` + +Requests the full `Eth2Config` object. 
+ +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/spec/eth2_config` +Method | GET +JSON Encoding | Object +Query Parameters | None +Typical Responses | 200 + +### Example Response + +```json +{ + "spec_constants": "mainnet", + "spec": { + "genesis_slot": 0, + "base_rewards_per_epoch": 4, + "deposit_contract_tree_depth": 32, + "max_committees_per_slot": 64, + "target_committee_size": 128, + "min_per_epoch_churn_limit": 4, + "churn_limit_quotient": 65536, + "shuffle_round_count": 90, + "min_genesis_active_validator_count": 16384, + "min_genesis_time": 1578009600, + "min_deposit_amount": 1000000000, + "max_effective_balance": 32000000000, + "ejection_balance": 16000000000, + "effective_balance_increment": 1000000000, + "genesis_fork_version": "0x00000000", + "bls_withdrawal_prefix_byte": "0x00", + "min_genesis_delay": 86400, + "milliseconds_per_slot": 12000, + "min_attestation_inclusion_delay": 1, + "min_seed_lookahead": 1, + "max_seed_lookahead": 4, + "min_epochs_to_inactivity_penalty": 4, + "min_validator_withdrawability_delay": 256, + "persistent_committee_period": 2048, + "base_reward_factor": 64, + "whistleblower_reward_quotient": 512, + "proposer_reward_quotient": 8, + "inactivity_penalty_quotient": 33554432, + "min_slashing_penalty_quotient": 32, + "domain_beacon_proposer": 0, + "domain_beacon_attester": 1, + "domain_randao": 2, + "domain_deposit": 3, + "domain_voluntary_exit": 4, + "safe_slots_to_update_justified": 8, + "eth1_follow_distance": 1024, + "seconds_per_eth1_block": 14, + "boot_nodes": [], + "network_id": 1 + } +} +``` + +## `/spec/slots_per_epoch` + +Requests the `SLOTS_PER_EPOCH` parameter from the specs that the node is running. 
+ +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/spec/slots_per_epoch` +Method | GET +JSON Encoding | Number +Query Parameters | None +Typical Responses | 200 + +### Example Response + +```json +32 +``` \ No newline at end of file diff --git a/book/src/http/validator.md b/book/src/http/validator.md new file mode 100644 index 000000000..c3bf3b9cd --- /dev/null +++ b/book/src/http/validator.md @@ -0,0 +1,545 @@ +# Lighthouse REST API: `/validator` + +The `/validator` endpoints provide the minimum functionality required for a validator +client to connect to the beacon node and produce blocks and attestations. + +## Endpoints + +HTTP Path | HTTP Method | Description | +| - | - | ---- | +[`/validator/duties`](#validatorduties) | GET | Provides block and attestation production information for validators. +[`/validator/subscribe`](#validatorsubscribe) | POST | Subscribes a list of validators to the beacon node for a particular duty/slot. +[`/validator/duties/all`](#validatordutiesall) | GET |Provides block and attestation production information for all validators. +[`/validator/duties/active`](#validatordutiesactive) | GET | Provides block and attestation production information for all active validators. +[`/validator/block`](#validatorblock-get) | GET | Retrieves the current beacon block for the validator to publish. +[`/validator/block`](#validatorblock-post) | POST | Publishes a signed block to the network. +[`/validator/attestation`](#validatorattestation) | GET | Retrieves the current best attestation for a validator to publish. +[`/validator/aggregate_attestation`](#validatoraggregate_attestation) | GET | Gets an aggregate attestation for validators to sign and publish. +[`/validator/attestations`](#validatorattestations) | POST | Publishes a list of raw unaggregated attestations to their appropriate subnets. 
[`/validator/aggregate_and_proofs`](#validatoraggregate_and_proofs) | POST | Publishes a list of signed aggregate and proofs
+ +### Example + +#### Request Body + +```json +{ + "epoch": 1203, + "pubkeys": [ + "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", + "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42" + ] +} +``` + +_Note: for demonstration purposes the second pubkey is some unknown pubkey._ + +#### Response Body + +```json +[ + { + "validator_pubkey": "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", + "validator_index": 14935, + "attestation_slot": 38511, + "attestation_committee_index": 3, + "attestation_committee_position": 39, + "block_proposal_slots": [], + "aggregator_modulo": 5, + }, + { + "validator_pubkey": "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42", + "validator_index": null, + "attestation_slot": null, + "attestation_committee_index": null, + "attestation_committee_position": null, + "block_proposal_slots": [] + "aggregator_modulo": null, + } +] +``` + +## `/validator/duties/all` + +Returns the duties for all validators, equivalent to calling [Validator +Duties](#validator-duties) while providing all known validator public keys. + +Considering that duties for non-active validators will just be `null`, it is +generally more efficient to query using [Active Validator +Duties](#active-validator-duties). + +This endpoint will only return validators that were in the beacon state +in the given epoch. For example, if the query epoch is 10 and some validator +deposit was included in epoch 11, that validator will not be included in the +result. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/validator/duties/all` +Method | GET +JSON Encoding | Object +Query Parameters | `epoch` +Typical Responses | 200 + +### Parameters + +The duties returned will all be inside the given `epoch` (`Epoch`) query +parameter. This parameter is required. 
+ +### Returns + +The return format is identical to the [Validator Duties](#validator-duties) response body. + +## `/validator/duties/active` + +Returns the duties for all active validators, equivalent to calling [Validator +Duties](#validator-duties) while providing all known validator public keys that +are active in the given epoch. + +This endpoint will only return validators that were in the beacon state +in the given epoch. For example, if the query epoch is 10 and some validator +deposit was included in epoch 11, that validator will not be included in the +result. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/validator/duties/active` +Method | GET +JSON Encoding | Object +Query Parameters | `epoch` +Typical Responses | 200 + +### Parameters + +The duties returned will all be inside the given `epoch` (`Epoch`) query +parameter. This parameter is required. + +### Returns + +The return format is identical to the [Validator Duties](#validator-duties) response body. + +## `/validator/subscribe` + +Posts a list of `ValidatorSubscription` to subscribe validators to +particular slots to perform attestation duties. + +This informs the beacon node to search for peers and subscribe to +required attestation subnets to perform the attestation duties required. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/validator/subscribe` +Method | POST +JSON Encoding | Object +Query Parameters | None +Typical Responses | 200 + +### Request Body + +Expects the following object in the POST request body: + +``` +[ + { + validator_index: 10, + attestation_committee_index: 12, + slot: 3, + is_aggregator: true + } +] +``` + +The `is_aggregator` informs the beacon node if the validator is an aggregator +for this slot/committee. + +### Returns + +A null object on success and an error indicating any failures. + +## `/validator/block` GET + + +Produces and returns an unsigned `BeaconBlock` object. 
+ +The block will be produced with the given `slot` and the parent block will be the +highest block in the canonical chain that has a slot less than `slot`. The +block will still be produced if some other block is also known to be at `slot` +(i.e., it may produce a block that would be slashable if signed). + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/validator/block` +Method | GET +JSON Encoding | Object +Query Parameters | `slot`, `randao_reveal` +Typical Responses | 200 + +### Parameters + + +- `slot` (`Slot`): The slot number for which the block is to be produced. +- `randao_reveal` (`Signature`): 96 bytes `Signature` for the randomness. + + +### Returns + +Returns a `BeaconBlock` object. + +#### Response Body + +```json +{ + "slot": 33, + "parent_root": "0xf54de54bd33e33aee4706cffff4bd991bcbf522f2551ab007180479c63f4fe912", + "state_root": "0x615c887bad27bc05754d627d941e1730e1b4c77b2eb4378c195ac8a8203bbf26", + "body": { + "randao_reveal": "0x8d7b2a32b026e9c79aae6ec6b83eabae89d60cacd65ac41ed7d2f4be9dd8c89c1bf7cd3d700374e18d03d12f6a054c23006f64f0e4e8b7cf37d6ac9a4c7d815c858120c54673b7d3cb2bb1550a4d659eaf46e34515677c678b70d6f62dbf89f", + "eth1_data": { + "deposit_root": "0x66687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925", + "deposit_count": 8, + "block_hash": "0x2b32db6c2c0a6235fb1397e8225ea85e0f0e6e8c7b126d0016ccbde0e667151e" + }, + "graffiti": "0x736967702f6c69676874686f7573652d302e312e312d7076572656c65617365", + "proposer_slashings": [], + "attester_slashings": [], + "attestations": [], + "deposits": [], + "voluntary_exits": [] + } +} +``` + +## `/validator/block` POST + +Accepts a `SignedBeaconBlock` for verification. If it is valid, it will be +imported into the local database and published on the network. Invalid blocks +will not be published to the network. + +A block may be considered invalid because it is fundamentally incorrect, or its +parent has not yet been imported. 
+ +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/validator/block` +Method | POST +JSON Encoding | Object +Query Parameters | None +Typical Responses | 200/202 + + +### Request Body + +Expects a JSON encoded `SignedBeaconBlock` in the POST request body: + +### Returns + +Returns a null object if the block passed all block validation and is published to the network. +Else, returns a processing error description. + +### Example + +### Request Body + +```json +{ + "message": { + "slot": 33, + "parent_root": "0xf54de54bd33e33aee4706cffff4bd991bcbf522f2551ab007180479c63f4fe912", + "state_root": "0x615c887bad27bc05754d627d941e1730e1b4c77b2eb4378c195ac8a8203bbf26", + "body": { + "randao_reveal": "0x8d7b2a32b026e9c79aae6ec6b83eabae89d60cacd65ac41ed7d2f4be9dd8c89c1bf7cd3d700374e18d03d12f6a054c23006f64f0e4e8b7cf37d6ac9a4c7d815c858120c54673b7d3cb2bb1550a4d659eaf46e34515677c678b70d6f62dbf89f", + "eth1_data": { + "deposit_root": "0x66687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925", + "deposit_count": 8, + "block_hash": "0x2b32db6c2c0a6235fb1397e8225ea85e0f0e6e8c7b126d0016ccbde0e667151e" + }, + "graffiti": "0x736967702f6c69676874686f7573652d302e312e312d7076572656c65617365", + "proposer_slashings": [ + + ], + "attester_slashings": [ + + ], + "attestations": [ + + ], + "deposits": [ + + ], + "voluntary_exits": [ + + ] + } + }, + "signature": "0x965ced900dbabd0a78b81a0abb5d03407be0d38762104316416347f2ea6f82652b5759396f402e85df8ee18ba2c60145037c73b1c335f4272f1751a1cd89862b7b4937c035e350d0108554bd4a8930437ec3311c801a65fe8e5ba022689b5c24" +} +``` + +## `/validator/attestation` + +Produces and returns an unsigned `Attestation` from the current state. + +The attestation will reference the `beacon_block_root` of the highest block in +the canonical chain with a slot equal to or less than the given `slot`. + +An error will be returned if the given slot is more than +`SLOTS_PER_HISTORICAL_VECTOR` slots behind the current head block. 
+ +This endpoint is not protected against slashing. Signing the returned +attestation may result in a slashable offence. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/validator/attestation` +Method | GET +JSON Encoding | Object +Query Parameters | `slot`, `committee_index` +Typical Responses | 200 + +### Parameters + + +- `slot` (`Slot`): The slot number for which the attestation is to be produced. +- `committee_index` (`CommitteeIndex`): The index of the committee that makes the attestation. + + +### Returns + +Returns a `Attestation` object with a default signature. The `signature` field should be replaced by the valid signature. + +#### Response Body + +```json +{ + "aggregation_bits": "0x01", + "data": { + "slot": 100, + "index": 0, + "beacon_block_root": "0xf22e4ec281136d119eabcd4d9d248aeacd042eb63d8d7642f73ad3e71f1c9283", + "source": { + "epoch": 2, + "root": "0x34c1244535c923f08e7f83170d41a076e4f1ec61013846b3a615a1d109d3c329" + }, + "target": { + "epoch": 3, + "root": "0xaefd23b384994dc0c1a6b77836bdb2f24f209ebfe6c4819324d9685f4a43b4e1" + } + }, + "signature": "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +} +``` + + + +## `/validator/aggregate_attestation` + +Requests an `AggregateAttestation` from the beacon node that has a +specific `attestation.data`. If no aggregate attestation is known this will +return a null object. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/validator/aggregate_attestation` +Method | GET +JSON Encoding | Object +Query Parameters | `attestation_data` +Typical Responses | 200 + +### Returns + +Returns a null object if the attestation data passed is not known to the beacon +node. 
+ +### Example + +### Request Body + +```json +{ + "aggregation_bits": "0x03", + "data": { + "slot": 3, + "index": 0, + "beacon_block_root": "0x0b6a1f7a9baa38d00ef079ba861b7587662565ca2502fb9901741c1feb8bb3c9", + "source": { + "epoch": 0, + "root": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "target": { + "epoch": 0, + "root": "0xad2c360ab8c8523db278a7d7ced22f3810800f2fdc282defb6db216689d376bd" + } + }, + "signature": "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03$649bd3c6c7e8a39cf2ffb99e07b4964d52854559f" +} +``` + + +## `/validator/attestations` + +Accepts a list of `Attestation` for verification. If they are valid, they will be imported +into the local database and published to the network. Invalid attestations will +not be published to the network. + +An attestation may be considered invalid because it is fundamentally incorrect +or because the beacon node has not imported the relevant blocks required to +verify it. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/validator/attestations` +Method | POST +JSON Encoding | Object +Query Parameters | None +Typical Responses | 200/202 + + +### Request Body + +Expects a JSON encoded list of signed `Attestation` objects in the POST request body. In +accordance with the naive aggregation scheme, the attestation _must_ have +exactly one of the `attestation.aggregation_bits` fields set. + +### Returns + +Returns a null object if the attestation passed all validation and is published to the network. +Else, returns a processing error description. 
+ +### Example + +### Request Body + +```json +{ + "aggregation_bits": "0x03", + "data": { + "slot": 3, + "index": 0, + "beacon_block_root": "0x0b6a1f7a9baa38d00ef079ba861b7587662565ca2502fb9901741c1feb8bb3c9", + "source": { + "epoch": 0, + "root": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "target": { + "epoch": 0, + "root": "0xad2c360ab8c8523db278a7d7ced22f3810800f2fdc282defb6db216689d376bd" + } + }, + "signature": "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03$649bd3c6c7e8a39cf2ffb99e07b4964d52854559f" +} +``` + +## `/validator/aggregate_and_proofs` + +Accepts a list of `SignedAggregateAndProof` for publication. If they are valid +(the validator is an aggregator and the signatures can be verified) these +are published to the network on the global aggregate gossip topic. + +### HTTP Specification + +| Property | Specification | +| --- |--- | +Path | `/validator/aggregate_and_proofs` +Method | POST +JSON Encoding | Object +Query Parameters | None +Typical Responses | 200/202 + +### Request Body + +Expects a JSON encoded list of `SignedAggregateAndProof` objects in the POST request body. + +### Returns + +Returns a null object if the attestation passed all validation and is published to the network. +Else, returns a processing error description. 
+ +### Example + +### Request Body + +```json +[ + { + "message": { + "aggregator_index": 12, + "aggregate": { + "aggregation_bits": "0x03", + "data": { + "slot": 3, + "index": 0, + "beacon_block_root": "0x0b6a1f7a9baa38d00ef079ba861b7587662565ca2502fb9901741c1feb8bb3c9", + "source": { + "epoch": 0, + "root": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "target": { + "epoch": 0, + "root": "0xad2c360ab8c8523db278a7d7ced22f3810800f2fdc282defb6db216689d376bd" + } + }, + "signature": "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03649bd3c6c7e8a39cf2ffb99e07b4964d52854559f" + }, + "selection_proof": "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03649bd3c6c7e8a39cf2ffb99e07b4964d52854559f" + } + signature: "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03649bd3c6c7e8a39cf2ffb99e07b4964d52854559f" + } +] +``` +_Note: The data in this request is for demonstrating types and does not +contain real data_ diff --git a/book/src/http_advanced.md b/book/src/http_advanced.md index 822b6ffff..5dad7cab6 100644 --- a/book/src/http_advanced.md +++ b/book/src/http_advanced.md @@ -1,115 +1 @@ -# Lighthouse REST API: `/advanced` - -The `/advanced` endpoints provide information Lighthouse specific data structures for advanced debugging. - -## Endpoints - -HTTP Path | Description | -| --- | -- | -[`/advanced/fork_choice`](#advancedfork_choice) | Get the `proto_array` fork choice object. -[`/advanced/operation_pool`](#advancedoperation_pool) | Get the Lighthouse `PersistedOperationPool` object. - - -## `/advanced/fork_choice` - -Requests the `proto_array` fork choice object as represented in Lighthouse. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/advanced/fork_choice` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -{ - "prune_threshold": 256, - "justified_epoch": 25, - "finalized_epoch": 24, - "nodes": [ - { - "slot": 544, - "root": "0x27103c56d4427cb4309dd202920ead6381d54d43277c29cf0572ddf0d528e6ea", - "parent": null, - "justified_epoch": 16, - "finalized_epoch": 15, - "weight": 256000000000, - "best_child": 1, - "best_descendant": 296 - }, - { - "slot": 545, - "root": "0x09af0e8d4e781ea4280c9c969d168839c564fab3a03942e7db0bfbede7d4c745", - "parent": 0, - "justified_epoch": 16, - "finalized_epoch": 15, - "weight": 256000000000, - "best_child": 2, - "best_descendant": 296 - }, - ], - "indices": { - "0xb935bb3651eeddcb2d2961bf307156850de982021087062033f02576d5df00a3": 59, - "0x8f4ec47a34c6c1d69ede64d27165d195f7e2a97c711808ce51f1071a6e12d5b9": 189, - "0xf675eba701ef77ee2803a130dda89c3c5673a604d2782c9e25ea2be300d7d2da": 173, - "0x488a483c8d5083faaf5f9535c051b9f373ba60d5a16e77ddb1775f248245b281": 37 - } -} -``` -_Truncated for brevity._ - -## `/advanced/operation_pool` - -Requests the `PersistedOperationPool` object as represented in Lighthouse. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/advanced/operation_pool` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -{ - "attestations": [ - [ - { - "v": [39, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 112, 118, 215, 252, 51, 186, 76, 156, 157, 99, 91, 4, 137, 195, 209, 224, 26, 233, 233, 184, 38, 89, 215, 177, 247, 97, 243, 119, 229, 69, 50, 90, 24, 0, 0, 0, 0, 0, 0, 0, 79, 37, 38, 210, 96, 235, 121, 142, 129, 136, 206, 214, 179, 132, 22, 19, 222, 213, 203, 46, 112, 192, 26, 5, 254, 26, 103, 170, 158, 205, 72, 3, 25, 0, 0, 0, 0, 0, 0, 0, 164, 50, 214, 67, 98, 13, 50, 180, 108, 232, 248, 109, 128, 45, 177, 23, 221, 24, 218, 211, 8, 152, 172, 120, 24, 86, 198, 103, 68, 164, 67, 202, 1, 0, 0, 0, 0, 0, 0, 0] - }, - [ - { - "aggregation_bits": "0x03", - "data": { - "slot": 807, - "index": 0, - "beacon_block_root": "0x7076d7fc33ba4c9c9d635b0489c3d1e01ae9e9b82659d7b1f761f377e545325a", - "source": { - "epoch": 24, - "root": "0x4f2526d260eb798e8188ced6b3841613ded5cb2e70c01a05fe1a67aa9ecd4803" - }, - "target": { - "epoch": 25, - "root": "0xa432d643620d32b46ce8f86d802db117dd18dad30898ac781856c66744a443ca" - } - }, - "signature": "0x8b1d624b0cd5a7a0e13944e90826878a230e3901db34ea87dbef5b145ade2fedbc830b6752a38a0937a1594211ab85b615d65f9eef0baccd270acca945786036695f4db969d9ff1693c505c0fe568b2fe9831ea78a74cbf7c945122231f04026" - } - ] - ] - ], - "attester_slashings": [], - "proposer_slashings": [], - "voluntary_exits": [] -} -``` -_Truncated for brevity._ +# /advanced diff --git a/book/src/http_beacon.md b/book/src/http_beacon.md index f5fd6e765..dd0313133 100644 --- a/book/src/http_beacon.md +++ b/book/src/http_beacon.md @@ -1,707 +1 @@ -# Lighthouse REST API: `/beacon` - -The `/beacon` endpoints provide information about the canonical head of the -beacon chain and also historical information about beacon blocks and states. 
- -## Endpoints - -HTTP Path | Description | -| --- | -- | -[`/beacon/attester_slashing`](#beaconattester_slashing) | Insert an attester slashing -[`/beacon/block`](#beaconblock) | Get a `BeaconBlock` by slot or root. -[`/beacon/block_root`](#beaconblock_root) | Resolve a slot to a block root. -[`/beacon/committees`](#beaconcommittees) | Get the shuffling for an epoch. -[`/beacon/head`](#beaconhead) | Info about the block at the head of the chain. -[`/beacon/heads`](#beaconheads) | Returns a list of all known chain heads. -[`/beacon/proposer_slashing`](#beaconproposer_slashing) | Insert a proposer slashing -[`/beacon/state`](#beaconstate) | Get a `BeaconState` by slot or root. -[`/beacon/state_root`](#beaconstate_root) | Resolve a slot to a state root. -[`/beacon/state/genesis`](#beaconstategenesis) | Get a `BeaconState` at genesis. -[`/beacon/genesis_time`](#beacongenesis_time) | Get the genesis time from the beacon state. -[`/beacon/fork`](#beaconfork) | Get the fork of the head of the chain. -[`/beacon/validators`](#beaconvalidators) | Query for one or more validators. -[`/beacon/validators/active`](#beaconvalidatorsactive) | Get all active validators. -[`/beacon/validators/all`](#beaconvalidatorsall) | Get all validators. - -## `/beacon/attester_slashing` - -Accepts an `attester_slashing` and verifies it. If it is valid, it is added to the operations pool for potential inclusion in a future block. Returns a 400 error if the `attester_slashing` is invalid. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/attester_slashing` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200/400 - -### Parameters - -Expects the following object in the POST request body: - -``` -{ - attestation_1: { - attesting_indices: [u64], - data: { - slot: Slot, - index: u64, - beacon_block_root: Bytes32, - source: { - epoch: Epoch, - root: Bytes32 - }, - target: { - epoch: Epoch, - root: Bytes32 - } - } - signature: Bytes32 - }, - attestation_2: { - attesting_indices: [u64], - data: { - slot: Slot, - index: u64, - beacon_block_root: Bytes32, - source: { - epoch: Epoch, - root: Bytes32 - }, - target: { - epoch: Epoch, - root: Bytes32 - } - } - signature: Bytes32 - } -} -``` - -### Returns - -Returns `true` if the attester slashing was inserted successfully, or the corresponding error if it failed. - -### Example - -### Request Body - -```json -{ - "attestation_1": { - "attesting_indices": [0], - "data": { - "slot": 1, - "index": 0, - "beacon_block_root": "0x0000000000000000000000000000000000000000000000000100000000000000", - "source": { - "epoch": 1, - "root": "0x0000000000000000000000000000000000000000000000000100000000000000" - }, - "target": { - "epoch": 1, - "root": "0x0000000000000000000000000000000000000000000000000100000000000000" - } - }, - "signature": "0xb47f7397cd944b8d5856a13352166bbe74c85625a45b14b7347fc2c9f6f6f82acee674c65bc9ceb576fcf78387a6731c0b0eb3f8371c70db2da4e7f5dfbc451730c159d67263d3db56b6d0e009e4287a8ba3efcacac30b3ae3447e89dc71b5b9" - }, - "attestation_2": { - "attesting_indices": [0], - "data": { - "slot": 1, - "index": 0, - "beacon_block_root": "0x0000000000000000000000000000000000000000000000000100000000000000", - "source": { - "epoch": 1, - "root": "0x0000000000000000000000000000000000000000000000000100000000000000" - }, - "target": { - "epoch": 1, - "root": "0x0000000000000000000000000000000000000000000000000200000000000000" - } - }, - 
"signature": "0x93fef587a63acf72aaf8df627718fd43cb268035764071f802ffb4370a2969d226595cc650f4c0bf2291ae0c0a41fcac1700f318603d75d34bcb4b9f4a8368f61eeea0e1f5d969d92d5073ba5fbadec102b45ec87d418d25168d2e3c74b9fcbb" - } -} -``` - -_Note: data sent here is for demonstration purposes only_ - -## `/beacon/block` - -Request that the node return a beacon chain block that matches the provided -criteria (a block `root` or beacon chain `slot`). Only one of the parameters -should be provided as a criteria. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/block` -Method | GET -JSON Encoding | Object -Query Parameters | `slot`, `root` -Typical Responses | 200, 404 - -### Parameters - -Accepts **only one** of the following parameters: - -- `slot` (`Slot`): Query by slot number. Any block returned must be in the canonical chain (i.e., -either the head or an ancestor of the head). -- `root` (`Bytes32`): Query by tree hash root. A returned block is not required to be in the -canonical chain. - -### Returns - -Returns an object containing a single [`SignedBeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#signedbeaconblock) and the block root of the inner [`BeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/v0.10.0/specs/phase0/beacon-chain.md#beaconblock). 
- -### Example Response - -```json -{ - "root": "0xc35ddf4e71c31774e0594bd7eb32dfe50b54dbc40abd594944254b4ec8895196", - "beacon_block": { - "message": { - "slot": 0, - "proposer_index": 14, - "parent_root": "0x0000000000000000000000000000000000000000000000000000000000000000", - "state_root": "0xf15690b6be4ed42ea1ee0741eb4bfd4619d37be8229b84b4ddd480fb028dcc8f", - "body": { - "randao_reveal": "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "eth1_data": { - "deposit_root": "0x0000000000000000000000000000000000000000000000000000000000000000", - "deposit_count": 0, - "block_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" - }, - "graffiti": "0x0000000000000000000000000000000000000000000000000000000000000000", - "proposer_slashings": [], - "attester_slashings": [], - "attestations": [], - "deposits": [], - "voluntary_exits": [] - } - }, - "signature": "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" - } -} -``` - -## `/beacon/block_root` - -Returns the block root for the given slot in the canonical chain. If there -is a re-org, the same slot may return a different root. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/block_root` -Method | GET -JSON Encoding | Object -Query Parameters | `slot` -Typical Responses | 200, 404 - -## Parameters - -- `slot` (`Slot`): the slot to be resolved to a root. - -### Example Response - -```json -"0xc35ddf4e71c31774e0594bd7eb32dfe50b54dbc40abd594944254b4ec8895196" -``` - -## `/beacon/committees` - -Request the committees (a.k.a. "shuffling") for all slots and committee indices -in a given `epoch`. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/committees` -Method | GET -JSON Encoding | Object -Query Parameters | `epoch` -Typical Responses | 200/500 - -### Parameters - -The `epoch` (`Epoch`) query parameter is required and defines the epoch for -which the committees will be returned. All slots contained within the response will -be inside this epoch. - -### Returns - -A list of beacon committees. - -### Example Response - -```json -[ - { - "slot": 4768, - "index": 0, - "committee": [ - 1154, - 492, - 9667, - 3089, - 8987, - 1421, - 224, - 11243, - 2127, - 2329, - 188, - 482, - 486 - ] - }, - { - "slot": 4768, - "index": 1, - "committee": [ - 5929, - 8482, - 5528, - 6130, - 14343, - 9777, - 10808, - 12739, - 15234, - 12819, - 5423, - 6320, - 9991 - ] - } -] -``` - -_Truncated for brevity._ - -## `/beacon/head` - -Requests information about the head of the beacon chain, from the node's -perspective. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/head` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -{ - "slot": 37923, - "block_root": "0xe865d4805395a0776b8abe46d714a9e64914ab8dc5ff66624e5a1776bcc1684b", - "state_root": "0xe500e3567ab273c9a6f8a057440deff476ab236f0983da27f201ee9494a879f0", - "finalized_slot": 37856, - "finalized_block_root": "0xbdae152b62acef1e5c332697567d2b89e358628790b8273729096da670b23e86", - "justified_slot": 37888, - "justified_block_root": "0x01c2f516a407d8fdda23cad4ed4381e4ab8913d638f935a2fe9bd00d6ced5ec4", - "previous_justified_slot": 37856, - "previous_justified_block_root": "0xbdae152b62acef1e5c332697567d2b89e358628790b8273729096da670b23e86" -} -``` - -## `/beacon/heads` - -Returns the roots of all known head blocks. Only one of these roots is the -canonical head and that is decided by the fork choice algorithm. See [`/beacon/head`](#beaconhead) for the canonical head. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/heads` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -[ - { - "beacon_block_root": "0x226b2fd7c5f3d31dbb21444b96dfafe715f0017cd16545ecc4ffa87229496a69", - "beacon_block_slot": 38373 - }, - { - "beacon_block_root": "0x41ed5b253c4fc841cba8a6d44acbe101866bc674c3cfa3c4e9f7388f465aa15b", - "beacon_block_slot": 38375 - } -] -``` - -## `/beacon/proposer_slashing` - -Accepts a `proposer_slashing` and verifies it. If it is valid, it is added to the operations pool for potential inclusion in a future block. Returns an 400 error if the `proposer_slashing` is invalid. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/proposer_slashing` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200/400 - -### Request Body - -Expects the following object in the POST request body: - -``` -{ - proposer_index: u64, - header_1: { - slot: Slot, - parent_root: Bytes32, - state_root: Bytes32, - body_root: Bytes32, - signature: Bytes32 - }, - header_2: { - slot: Slot, - parent_root: Bytes32, - state_root: Bytes32, - body_root: Bytes32, - signature: Bytes32 - } -} -``` - -### Returns - -Returns `true` if the proposer slashing was inserted successfully, or the corresponding error if it failed. 
- -### Example - -### Request Body - -```json -{ - "proposer_index": 0, - "header_1": { - "slot": 0, - "parent_root": "0x0101010101010101010101010101010101010101010101010101010101010101", - "state_root": "0x0101010101010101010101010101010101010101010101010101010101010101", - "body_root": "0x0101010101010101010101010101010101010101010101010101010101010101", - "signature": "0xb8970d1342c6d5779c700ec366efd0ca819937ca330960db3ca5a55eb370a3edd83f4cbb2f74d06e82f934fcbd4bb80609a19c2254cc8b3532a4efff9e80edf312ac735757c059d77126851e377f875593e64ba50d1dffe69a809a409202dd12" - }, - "header_2": { - "slot": 0, - "parent_root": "0x0202020202020202020202020202020202020202020202020202020202020202", - "state_root": "0x0101010101010101010101010101010101010101010101010101010101010101", - "body_root": "0x0101010101010101010101010101010101010101010101010101010101010101", - "signature": "0xb60e6b348698a34e59b22e0af96f8809f977f00f95d52375383ade8d22e9102270a66c6d52b0434214897e11ca4896871510c01b3fd74d62108a855658d5705fcfc4ced5136264a1c6496f05918576926aa191b1ad311b7e27f5aa2167aba294" - } -} -``` - -_Note: data sent here is for demonstration purposes only_ - - - -## `/beacon/state` - -Request that the node return a beacon chain state that matches the provided -criteria (a state `root` or beacon chain `slot`). Only one of the parameters -should be provided as a criteria. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/state` -Method | GET -JSON Encoding | Object -Query Parameters | `slot`, `root` -Typical Responses | 200, 404 - -### Parameters - -Accepts **only one** of the following parameters: - -- `slot` (`Slot`): Query by slot number. Any state returned must be in the canonical chain (i.e., -either the head or an ancestor of the head). -- `root` (`Bytes32`): Query by tree hash root. A returned state is not required to be in the -canonical chain. 
- -### Returns - -Returns an object containing a single -[`BeaconState`](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#beaconstate) -and its tree hash root. - -### Example Response - -```json -{ - "root": "0x528e54ca5d4c957729a73f40fc513ae312e054c7295775c4a2b21f423416a72b", - "beacon_state": { - "genesis_time": 1575652800, - "genesis_validators_root": "0xa8a9226edee1b2627fb4117d7dea4996e64dec2998f37f6e824f74f2ce39a538", - "slot": 18478 - } -} -``` - -_Truncated for brevity._ - -## `/beacon/state_root` - -Returns the state root for the given slot in the canonical chain. If there -is a re-org, the same slot may return a different root. - - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/state_root` -Method | GET -JSON Encoding | Object -Query Parameters | `slot` -Typical Responses | 200, 404 - -## Parameters - -- `slot` (`Slot`): the slot to be resolved to a root. - -### Example Response - -```json -"0xf15690b6be4ed42ea1ee0741eb4bfd4619d37be8229b84b4ddd480fb028dcc8f" -``` - -## `/beacon/state/genesis` - -Request that the node return a beacon chain state at genesis (slot 0). - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/state/genesis` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - - -### Returns - -Returns an object containing the genesis -[`BeaconState`](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#beaconstate). - -### Example Response - -```json -{ - "genesis_time": 1581576353, - "slot": 0, - "fork": { - "previous_version": "0x00000000", - "current_version": "0x00000000", - "epoch": 0 - }, -} -``` - -_Truncated for brevity._ - -## `/beacon/genesis_time` - -Request that the node return the genesis time from the beacon state. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/genesis_time` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - - -### Returns - -Returns an object containing the genesis time. - -### Example Response - -```json -1581576353 -``` - -## `/beacon/fork` - -Request that the node return the `fork` of the current head. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/fork` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - - -### Returns - -Returns an object containing the [`Fork`](https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/beacon-chain.md#fork) of the current head. - -### Example Response - -```json -{ - "previous_version": "0x00000000", - "current_version": "0x00000000", - "epoch": 0 -} -``` - -## `/beacon/validators` - -Request that the node returns information about one or more validator public -keys. This request takes the form of a `POST` request to allow sending a large -number of pubkeys in the request. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/validators` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Request Body - -Expects the following object in the POST request body: - -``` -{ - state_root: Bytes32, - pubkeys: [PublicKey] -} -``` - -The `state_root` field indicates which `BeaconState` should be used to collect -the information. The `state_root` is optional and omitting it will result in -the canonical head state being used. - - -### Returns - -Returns an object describing several aspects of the given validator. 
- -### Example - -### Request Body - -```json -{ - "pubkeys": [ - "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42" - ] -} -``` - -_Note: for demonstration purposes the second pubkey is some unknown pubkey._ - -### Response Body - -```json -[ - { - "pubkey": "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "validator_index": 14935, - "balance": 3228885987, - "validator": { - "pubkey": "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "withdrawal_credentials": "0x00b7bec22d5bda6b2cca1343d4f640d0e9ccc204a06a73703605c590d4c0d28e", - "effective_balance": 3200000000, - "slashed": false, - "activation_eligibility_epoch": 0, - "activation_epoch": 0, - "exit_epoch": 18446744073709551615, - "withdrawable_epoch": 18446744073709551615 - } - }, - { - "pubkey": "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42", - "validator_index": null, - "balance": null, - "validator": null - } -] -``` - -## `/beacon/validators/active` - -Returns all validators that are active in the state defined by `state_root`. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/validators/active` -Method | GET -JSON Encoding | Object -Query Parameters | `state_root` (optional) -Typical Responses | 200 - -### Parameters - -The optional `state_root` (`Bytes32`) query parameter indicates which -`BeaconState` should be used to collect the information. When omitted, the -canonical head state will be used. - -### Returns - -The return format is identical to the [`/beacon/validators`](#beaconvalidators) response body. - -## `/beacon/validators/all` - -Returns all validators. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/beacon/validators/all` -Method | GET -JSON Encoding | Object -Query Parameters | `state_root` (optional) -Typical Responses | 200 - -### Parameters - -The optional `state_root` (`Bytes32`) query parameter indicates which -`BeaconState` should be used to collect the information. When omitted, the -canonical head state will be used. - -### Returns - -The return format is identical to the [`/beacon/validators`](#beaconvalidators) response body. +# /beacon diff --git a/book/src/http_consensus.md b/book/src/http_consensus.md index c71b78ce3..2d8b80aa1 100644 --- a/book/src/http_consensus.md +++ b/book/src/http_consensus.md @@ -1,189 +1 @@ -# Lighthouse REST API: `/consensus` - -The `/consensus` endpoints provide information on results of the proof-of-stake -voting process used for finality/justification under Casper FFG. - -## Endpoints - -HTTP Path | Description | -| --- | -- | -[`/consensus/global_votes`](#consensusglobal_votes) | A global vote count for a given epoch. -[`/consensus/individual_votes`](#consensusindividual_votes) | A per-validator breakdown of votes in a given epoch. - -## `/consensus/global_votes` - -Returns a global count of votes for some given `epoch`. The results are included -both for the current and previous (`epoch - 1`) epochs since both are required -by the beacon node whilst performing per-epoch-processing. - -Generally, you should consider the "current" values to be incomplete and the -"previous" values to be final. This is because validators can continue to -include attestations from the _current_ epoch in the _next_ epoch, however this -is not the case for attestations from the _previous_ epoch. 
- -``` - `epoch` query parameter - | - | --------- values are calcuated here - | | - v v -Epoch: |---previous---|---current---|---next---| - - |-------------| - ^ - | - window for including "current" attestations - in a block -``` - -The votes are expressed in terms of staked _effective_ `Gwei` (i.e., not the number of -individual validators). For example, if a validator has 32 ETH staked they will -increase the `current_epoch_attesting_gwei` figure by `32,000,000,000` if they -have an attestation included in a block during the current epoch. If this -validator has more than 32 ETH, that extra ETH will not count towards their -vote (that is why it is _effective_ `Gwei`). - -The following fields are returned: - -- `current_epoch_active_gwei`: the total staked gwei that was active (i.e., - able to vote) during the current epoch. -- `current_epoch_attesting_gwei`: the total staked gwei that had one or more - attestations included in a block during the current epoch (multiple - attestations by the same validator do not increase this figure). -- `current_epoch_target_attesting_gwei`: the total staked gwei that attested to - the majority-elected Casper FFG target epoch during the current epoch. This - figure must be equal to or less than `current_epoch_attesting_gwei`. -- `previous_epoch_active_gwei`: as above, but during the previous epoch. -- `previous_epoch_attesting_gwei`: see `current_epoch_attesting_gwei`. -- `previous_epoch_target_attesting_gwei`: see `current_epoch_target_attesting_gwei`. -- `previous_epoch_head_attesting_gwei`: the total staked gwei that attested to a - head beacon block that is in the canonical chain. - -From this data you can calculate some interesting figures: - -#### Participation Rate - -`previous_epoch_attesting_gwei / previous_epoch_active_gwei` - -Expresses the ratio of validators that managed to have an attestation -voting upon the previous epoch included in a block. 
- -#### Justification/Finalization Rate - -`previous_epoch_target_attesting_gwei / previous_epoch_active_gwei` - -When this value is greater than or equal to `2/3` it is possible that the -beacon chain may justify and/or finalize the epoch. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/consensus/global_votes` -Method | GET -JSON Encoding | Object -Query Parameters | `epoch` -Typical Responses | 200 - -### Parameters - -Requires the `epoch` (`Epoch`) query parameter to determine which epoch will be -considered the current epoch. - -### Returns - -A report on global validator voting participation. - -### Example - -```json -{ - "current_epoch_active_gwei": 52377600000000, - "previous_epoch_active_gwei": 52377600000000, - "current_epoch_attesting_gwei": 50740900000000, - "current_epoch_target_attesting_gwei": 49526000000000, - "previous_epoch_attesting_gwei": 52377600000000, - "previous_epoch_target_attesting_gwei": 51063400000000, - "previous_epoch_head_attesting_gwei": 9248600000000 -} -``` - -## `/consensus/individual_votes` - -Returns a per-validator summary of how that validator performed during the -current epoch. - -The [Global Votes](#consensusglobal_votes) endpoint is the summation of all of these -individual values, please see it for definitions of terms like "current_epoch", -"previous_epoch" and "target_attester". - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/consensus/individual_votes` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Request Body - -Expects the following object in the POST request body: - -``` -{ - epoch: Epoch, - pubkeys: [PublicKey] -} -``` - -### Returns - -A report on the validators voting participation. 
- -### Example - -#### Request Body - -```json -{ - "epoch": 1203, - "pubkeys": [ - "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42" - ] -} -``` - -_Note: for demonstration purposes the second pubkey is some unknown pubkey._ - -#### Response Body - -```json -[ - { - "epoch": 1203, - "pubkey": "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "validator_index": 14935, - "vote": { - "is_slashed": false, - "is_withdrawable_in_current_epoch": false, - "is_active_in_current_epoch": true, - "is_active_in_previous_epoch": true, - "current_epoch_effective_balance_gwei": 3200000000, - "is_current_epoch_attester": true, - "is_current_epoch_target_attester": true, - "is_previous_epoch_attester": true, - "is_previous_epoch_target_attester": true, - "is_previous_epoch_head_attester": false - } - }, - { - "epoch": 1203, - "pubkey": "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42", - "validator_index": null, - "vote": null - } -] -``` +# /consensus diff --git a/book/src/http_network.md b/book/src/http_network.md index 26e007d52..e224ebc3a 100644 --- a/book/src/http_network.md +++ b/book/src/http_network.md @@ -1,148 +1 @@ -# Lighthouse REST API: `/network` - -The `/network` endpoints provide information about the p2p network that -Lighthouse uses to communicate with other beacon nodes. - -## Endpoints - -HTTP Path | Description | -| --- | -- | -[`/network/peer_id`](#networkpeer_id) | Get a node's libp2p `PeerId`. -[`/network/peer_count`](#networkpeer_count) | Get the count of connected peers. -[`/network/peers`](#networkpeers) | List a node's libp2p peers (as `PeerIds`). -[`/network/enr`](#networkenr) | Get a node's discovery `ENR` address. 
-[`/network/listen_port`](#networklisten_port) | Get a node's libp2p listening port. -[`/network/listen_addresses`](#networklisten_addresses) | Get a list of libp2p multiaddr the node is listening on. - -## `/network/peer_id` - -Requests the beacon node's local `PeerId`. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/network/peer_id` -Method | GET -JSON Encoding | String (base58) -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -"QmVFcULBYZecPdCKgGmpEYDqJLqvMecfhJadVBtB371Avd" -``` - -## `/network/peer_count` - -Requests the count of peers connected to the client. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/network/peer_count` -Method | GET -JSON Encoding | Number -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -5 -``` - -## `/network/peers` - -Requests one `MultiAddr` for each peer connected to the beacon node. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/network/peers` -Method | GET -JSON Encoding | [String] (base58) -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -[ - "QmaPGeXcfKFMU13d8VgbnnpeTxcvoFoD9bUpnRGMUJ1L9w", - "QmZt47cP8V96MgiS35WzHKpPbKVBMqr1eoBNTLhQPqpP3m" -] -``` - -## `network/enr` - -Requests the beacon node for its listening `ENR` address. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/network/enr` -Method | GET -JSON Encoding | String (base64) -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -"-IW4QPYyGkXJSuJ2Eji8b-m4PTNrW4YMdBsNOBrYAdCk8NLMJcddAiQlpcv6G_hdNjiLACOPTkqTBhUjnC0wtIIhyQkEgmlwhKwqAPqDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhA1sBKo0yCfw4Z_jbggwflNfftjwKACu-a-CoFAQHJnrm" -``` - -## `/network/listen_port` - -Requests the TCP port that the client's libp2p service is listening on. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/network/listen_port` -Method | GET -JSON Encoding | Number -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -9000 -``` - -## `/network/listen_addresses` - -Requests the list of multiaddr that the client's libp2p service is listening on. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/network/listen_addresses` -Method | GET -JSON Encoding | Array -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -[ - "/ip4/127.0.0.1/tcp/9000", - "/ip4/192.168.31.115/tcp/9000", - "/ip4/172.24.0.1/tcp/9000", - "/ip4/172.21.0.1/tcp/9000", - "/ip4/172.17.0.1/tcp/9000", - "/ip4/172.18.0.1/tcp/9000", - "/ip4/172.19.0.1/tcp/9000", - "/ip4/172.42.0.1/tcp/9000", - "/ip6/::1/tcp/9000" -] -``` \ No newline at end of file +# /network diff --git a/book/src/http_spec.md b/book/src/http_spec.md index 9fb8c0c98..dc94c2005 100644 --- a/book/src/http_spec.md +++ b/book/src/http_spec.md @@ -1,154 +1 @@ -# Lighthouse REST API: `/spec` - -The `/spec` endpoints provide information about Eth2.0 specifications that the node is running. - -## Endpoints - -HTTP Path | Description | -| --- | -- | -[`/spec`](#spec) | Get the full spec object that a node's running. -[`/spec/slots_per_epoch`](#specslots_per_epoch) | Get the number of slots per epoch. -[`/spec/eth2_config`](#specseth2_config) | Get the full Eth2 config object. - -## `/spec` - -Requests the full spec object that a node's running. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/spec` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -{ - "genesis_slot": 0, - "base_rewards_per_epoch": 4, - "deposit_contract_tree_depth": 32, - "max_committees_per_slot": 64, - "target_committee_size": 128, - "min_per_epoch_churn_limit": 4, - "churn_limit_quotient": 65536, - "shuffle_round_count": 90, - "min_genesis_active_validator_count": 16384, - "min_genesis_time": 1578009600, - "min_deposit_amount": 1000000000, - "max_effective_balance": 32000000000, - "ejection_balance": 16000000000, - "effective_balance_increment": 1000000000, - "genesis_fork_version": "0x00000000", - "bls_withdrawal_prefix_byte": "0x00", - "min_genesis_delay": 86400, - "milliseconds_per_slot": 12000, - "min_attestation_inclusion_delay": 1, - "min_seed_lookahead": 1, - "max_seed_lookahead": 4, - "min_epochs_to_inactivity_penalty": 4, - "min_validator_withdrawability_delay": 256, - "persistent_committee_period": 2048, - "base_reward_factor": 64, - "whistleblower_reward_quotient": 512, - "proposer_reward_quotient": 8, - "inactivity_penalty_quotient": 33554432, - "min_slashing_penalty_quotient": 32, - "domain_beacon_proposer": 0, - "domain_beacon_attester": 1, - "domain_randao": 2, - "domain_deposit": 3, - "domain_voluntary_exit": 4, - "safe_slots_to_update_justified": 8, - "eth1_follow_distance": 1024, - "seconds_per_eth1_block": 14, - "boot_nodes": [], - "network_id": 1 -} -``` - -## `/spec/eth2_config` - -Requests the full `Eth2Config` object. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/spec/eth2_config` -Method | GET -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -{ - "spec_constants": "mainnet", - "spec": { - "genesis_slot": 0, - "base_rewards_per_epoch": 4, - "deposit_contract_tree_depth": 32, - "max_committees_per_slot": 64, - "target_committee_size": 128, - "min_per_epoch_churn_limit": 4, - "churn_limit_quotient": 65536, - "shuffle_round_count": 90, - "min_genesis_active_validator_count": 16384, - "min_genesis_time": 1578009600, - "min_deposit_amount": 1000000000, - "max_effective_balance": 32000000000, - "ejection_balance": 16000000000, - "effective_balance_increment": 1000000000, - "genesis_fork_version": "0x00000000", - "bls_withdrawal_prefix_byte": "0x00", - "min_genesis_delay": 86400, - "milliseconds_per_slot": 12000, - "min_attestation_inclusion_delay": 1, - "min_seed_lookahead": 1, - "max_seed_lookahead": 4, - "min_epochs_to_inactivity_penalty": 4, - "min_validator_withdrawability_delay": 256, - "persistent_committee_period": 2048, - "base_reward_factor": 64, - "whistleblower_reward_quotient": 512, - "proposer_reward_quotient": 8, - "inactivity_penalty_quotient": 33554432, - "min_slashing_penalty_quotient": 32, - "domain_beacon_proposer": 0, - "domain_beacon_attester": 1, - "domain_randao": 2, - "domain_deposit": 3, - "domain_voluntary_exit": 4, - "safe_slots_to_update_justified": 8, - "eth1_follow_distance": 1024, - "seconds_per_eth1_block": 14, - "boot_nodes": [], - "network_id": 1 - } -} -``` - -## `/spec/slots_per_epoch` - -Requests the `SLOTS_PER_EPOCH` parameter from the specs that the node is running. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/spec/slots_per_epoch` -Method | GET -JSON Encoding | Number -Query Parameters | None -Typical Responses | 200 - -### Example Response - -```json -32 -``` \ No newline at end of file +# /spec diff --git a/book/src/http_validator.md b/book/src/http_validator.md index da0d4bbbd..29c0bac2e 100644 --- a/book/src/http_validator.md +++ b/book/src/http_validator.md @@ -1,390 +1 @@ -# Lighthouse REST API: `/validator` - -The `/validator` endpoints provide the minimum functionality required for a validator -client to connect to the beacon node and produce blocks and attestations. - -## Endpoints - -HTTP Path | Description | -| --- | -- | -[`/validator/duties`](#validatorduties) | Provides block and attestation production information for validators. -[`/validator/duties/all`](#validatordutiesall) | Provides block and attestation production information for all validators. -[`/validator/duties/active`](#validatordutiesactive) | Provides block and attestation production information for all active validators. -[`/validator/block`](#validatorblock) | Produces a `BeaconBlock` object from current state. -[`/validator/attestation`](#validatorattestation) | Produces an unsigned `Attestation` object from current state. -[`/validator/block`](#validatorblock) | Processes a `SignedBeaconBlock` object and publishes it to the network. -[`/validator/attestation`](#validatorattestation) | Processes a signed `Attestation` and publishes it to the network. - - -## `/validator/duties` - -Request information about when a validator must produce blocks and attestations -at some given `epoch`. The information returned always refers to the canonical -chain and the same input parameters may yield different results after a re-org. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/duties` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200 - -### Request Body - -Expects the following object in the POST request body: - -``` -{ - epoch: Epoch, - pubkeys: [PublicKey] -} -``` - -Duties are assigned on a per-epoch basis, all duties returned will contain -slots that are inside the given `epoch`. A set of duties will be returned for -each of the `pubkeys`. - -Validators who are not known to the beacon chain (e.g., have not yet deposited) -will have `null` values for most fields. - - -### Returns - -A set of duties for each given pubkey. - -### Example - -#### Request Body - -```json -{ - "epoch": 1203, - "pubkeys": [ - "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42" - ] -} -``` - -_Note: for demonstration purposes the second pubkey is some unknown pubkey._ - -#### Response Body - -```json -[ - { - "validator_pubkey": "0x98f87bc7c8fa10408425bbeeeb3dc387e3e0b4bd92f57775b60b39156a16f9ec80b273a64269332d97bdb7d93ae05a16", - "validator_index": 14935, - "attestation_slot": 38511, - "attestation_committee_index": 3, - "attestation_committee_position": 39, - "block_proposal_slots": [] - }, - { - "validator_pubkey": "0x42f87bc7c8fa10408425bbeeeb3dc3874242b4bd92f57775b60b39142426f9ec80b273a64269332d97bdb7d93ae05a42", - "validator_index": null, - "attestation_slot": null, - "attestation_committee_index": null, - "attestation_committee_position": null, - "block_proposal_slots": [] - } -] -``` - -## `/validator/duties/all` - -Returns the duties for all validators, equivalent to calling [Validator -Duties](#validator-duties) while providing all known validator public keys. 
- -Considering that duties for non-active validators will just be `null`, it is -generally more efficient to query using [Active Validator -Duties](#active-validator-duties). - -This endpoint will only return validators that were in the beacon state -in the given epoch. For example, if the query epoch is 10 and some validator -deposit was included in epoch 11, that validator will not be included in the -result. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/duties/all` -Method | GET -JSON Encoding | Object -Query Parameters | `epoch` -Typical Responses | 200 - -### Parameters - -The duties returned will all be inside the given `epoch` (`Epoch`) query -parameter. This parameter is required. - -### Returns - -The return format is identical to the [Validator Duties](#validator-duties) response body. - -## `/validator/duties/active` - -Returns the duties for all active validators, equivalent to calling [Validator -Duties](#validator-duties) while providing all known validator public keys that -are active in the given epoch. - -This endpoint will only return validators that were in the beacon state -in the given epoch. For example, if the query epoch is 10 and some validator -deposit was included in epoch 11, that validator will not be included in the -result. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/duties/active` -Method | GET -JSON Encoding | Object -Query Parameters | `epoch` -Typical Responses | 200 - -### Parameters - -The duties returned will all be inside the given `epoch` (`Epoch`) query -parameter. This parameter is required. - -### Returns - -The return format is identical to the [Validator Duties](#validator-duties) response body. - -## `/validator/block` - -Produces and returns an unsigned `BeaconBlock` object. 
- -The block will be produced with the given `slot` and the parent block will be the -highest block in the canonical chain that has a slot less than `slot`. The -block will still be produced if some other block is also known to be at `slot` -(i.e., it may produce a block that would be slashable if signed). - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/block` -Method | GET -JSON Encoding | Object -Query Parameters | `slot`, `randao_reveal` -Typical Responses | 200 - -### Parameters - - -- `slot` (`Slot`): The slot number for which the block is to be produced. -- `randao_reveal` (`Signature`): 96 bytes `Signature` for the randomness. - - -### Returns - -Returns a `BeaconBlock` object. - -#### Response Body - -```json -{ - "slot": 33, - "parent_root": "0xf54de54bd33e33aee4706cffff4bd991bcbf522f2551ab007180479c63f4fe912", - "state_root": "0x615c887bad27bc05754d627d941e1730e1b4c77b2eb4378c195ac8a8203bbf26", - "body": { - "randao_reveal": "0x8d7b2a32b026e9c79aae6ec6b83eabae89d60cacd65ac41ed7d2f4be9dd8c89c1bf7cd3d700374e18d03d12f6a054c23006f64f0e4e8b7cf37d6ac9a4c7d815c858120c54673b7d3cb2bb1550a4d659eaf46e34515677c678b70d6f62dbf89f", - "eth1_data": { - "deposit_root": "0x66687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925", - "deposit_count": 8, - "block_hash": "0x2b32db6c2c0a6235fb1397e8225ea85e0f0e6e8c7b126d0016ccbde0e667151e" - }, - "graffiti": "0x736967702f6c69676874686f7573652d302e312e312d7076572656c65617365", - "proposer_slashings": [], - "attester_slashings": [], - "attestations": [], - "deposits": [], - "voluntary_exits": [] - } -} -``` - -## `/validator/attestation` - -Produces and returns an unsigned `Attestation` from the current state. - -The attestation will reference the `beacon_block_root` of the highest block in -the canonical chain with a slot equal to or less than the given `slot`. 
- -An error will be returned if the given slot is more than -`SLOTS_PER_HISTORICAL_VECTOR` slots behind the current head block. - -This endpoint is not protected against slashing. Signing the returned -attestation may result in a slashable offence. - -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/attestation` -Method | GET -JSON Encoding | Object -Query Parameters | `slot`, `committee_index` -Typical Responses | 200 - -### Parameters - - -- `slot` (`Slot`): The slot number for which the attestation is to be produced. -- `committee_index` (`CommitteeIndex`): The index of the committee that makes the attestation. - - -### Returns - -Returns a `Attestation` object with a default signature. The `signature` field should be replaced by the valid signature. - -#### Response Body - -```json -{ - "aggregation_bits": "0x01", - "data": { - "slot": 100, - "index": 0, - "beacon_block_root": "0xf22e4ec281136d119eabcd4d9d248aeacd042eb63d8d7642f73ad3e71f1c9283", - "source": { - "epoch": 2, - "root": "0x34c1244535c923f08e7f83170d41a076e4f1ec61013846b3a615a1d109d3c329" - }, - "target": { - "epoch": 3, - "root": "0xaefd23b384994dc0c1a6b77836bdb2f24f209ebfe6c4819324d9685f4a43b4e1" - } - }, - "signature": "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" -} -``` - -## `/validator/block` - -Accepts a `SignedBeaconBlock` for verification. If it is valid, it will be -imported into the local database and published on the network. Invalid blocks -will not be published to the network. - -A block may be considered invalid because it is fundamentally incorrect, or its -parent has not yet been imported. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/block` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200/202 - - -### Request Body - -Expects a JSON encoded `SignedBeaconBlock` in the POST request body: - -### Returns - -Returns a null object if the block passed all block validation and is published to the network. -Else, returns a processing error description. - -### Example - -### Request Body - -```json -{ - "message": { - "slot": 33, - "parent_root": "0xf54de54bd33e33aee4706cffff4bd991bcbf522f2551ab007180479c63f4fe912", - "state_root": "0x615c887bad27bc05754d627d941e1730e1b4c77b2eb4378c195ac8a8203bbf26", - "body": { - "randao_reveal": "0x8d7b2a32b026e9c79aae6ec6b83eabae89d60cacd65ac41ed7d2f4be9dd8c89c1bf7cd3d700374e18d03d12f6a054c23006f64f0e4e8b7cf37d6ac9a4c7d815c858120c54673b7d3cb2bb1550a4d659eaf46e34515677c678b70d6f62dbf89f", - "eth1_data": { - "deposit_root": "0x66687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925", - "deposit_count": 8, - "block_hash": "0x2b32db6c2c0a6235fb1397e8225ea85e0f0e6e8c7b126d0016ccbde0e667151e" - }, - "graffiti": "0x736967702f6c69676874686f7573652d302e312e312d7076572656c65617365", - "proposer_slashings": [ - - ], - "attester_slashings": [ - - ], - "attestations": [ - - ], - "deposits": [ - - ], - "voluntary_exits": [ - - ] - } - }, - "signature": "0x965ced900dbabd0a78b81a0abb5d03407be0d38762104316416347f2ea6f82652b5759396f402e85df8ee18ba2c60145037c73b1c335f4272f1751a1cd89862b7b4937c035e350d0108554bd4a8930437ec3311c801a65fe8e5ba022689b5c24" -} -``` - -## `/validator/attestation` - -Accepts an `Attestation` for verification. If it is valid, it will be imported -into the local database and published to the network. Invalid attestations will -not be published to the network. - -An attestation may be considered invalid because it is fundamentally incorrect -or because the beacon node has not imported the relevant blocks required to -verify it. 
- -### HTTP Specification - -| Property | Specification | -| --- |--- | -Path | `/validator/attestation` -Method | POST -JSON Encoding | Object -Query Parameters | None -Typical Responses | 200/202 - - -### Request Body - -Expects a JSON encoded signed`Attestation` object in the POST request body: - -### Returns - -Returns a null object if the attestation passed all validation and is published to the network. -Else, returns a processing error description. - -### Example - -### Request Body - -```json -{ - "aggregation_bits": "0x03", - "data": { - "slot": 3, - "index": 0, - "beacon_block_root": "0x0b6a1f7a9baa38d00ef079ba861b7587662565ca2502fb9901741c1feb8bb3c9", - "source": { - "epoch": 0, - "root": "0x0000000000000000000000000000000000000000000000000000000000000000" - }, - "target": { - "epoch": 0, - "root": "0xad2c360ab8c8523db278a7d7ced22f3810800f2fdc282defb6db216689d376bd" - } - }, - "signature": "0xb76a1768c18615b5ade91a92e7d2ed0294f7e088e56e30fbe7e3aa6799c443b11bccadd578ca2cbd95d395ab689b9e4d03c88a56641791ab38dfa95dc1f4d24d1b19b9d36c96c20147ad03$649bd3c6c7e8a39cf2ffb99e07b4964d52854559f" -} -``` +# /validator diff --git a/book/src/local-testnets.md b/book/src/local-testnets.md index 1c269a66a..3b15cefdd 100644 --- a/book/src/local-testnets.md +++ b/book/src/local-testnets.md @@ -18,9 +18,10 @@ TL;DR isn't adequate. ## TL;DR ```bash +make install-lcli lcli new-testnet lcli interop-genesis 128 -lighthouse bn --testnet-dir ~/.lighthouse/testnet --dummy-eth1 --http +lighthouse bn --testnet-dir ~/.lighthouse/testnet --dummy-eth1 --http --enr-match lighthouse vc --testnet-dir ~/.lighthouse/testnet --allow-unsynced testnet insecure 0 128 ``` @@ -40,7 +41,7 @@ used for starting testnets and debugging. 
Install `lcli` from the root directory of this repository with: ```bash -cargo install --path lcli --force +make install-lcli ``` ### 1.2 Create a testnet directory @@ -55,7 +56,7 @@ Once you have `lcli` installed, create a new testnet directory with: lcli new-testnet ``` -> - This will create a "mainnet" spec testnet. To create a minimal spec use `lcli --spec minim new-testnet`. +> - This will create a "mainnet" spec testnet. To create a minimal spec use `lcli --spec minimal new-testnet`. > - The `lcli new-testnet` command has many options, use `lcli new-testnet --help` to see them. ### 1.3 Create a genesis state @@ -83,12 +84,13 @@ start a beacon node and validator client. Start a beacon node: ```bash -lighthouse bn --testnet-dir ~/.lighthouse/testnet --dummy-eth1 --http +lighthouse bn --testnet-dir ~/.lighthouse/testnet --dummy-eth1 --http --enr-match ``` > - `--testnet-dir` instructs the beacon node to use the spec we generated earlier. > - `--dummy-eth1` uses deterministic "junk data" for linking to the eth1 chain, avoiding the requirement for an eth1 node. The downside is that new validators cannot be on-boarded after genesis. > - `--http` starts the REST API so the validator client can produce blocks. +> - `--enr-match` sets the local ENR to use the local IP address and port which allows other nodes to connect. This node can then behave as a bootnode for other nodes. ### 2.2 Start a validator client @@ -104,3 +106,48 @@ lighthouse vc --testnet-dir ~/.lighthouse/testnet --allow-unsynced testnet insec > - `testnet insecure 0 128` instructs the validator client to use insecure > testnet private keys and that it should control validators from `0` to > `127` (inclusive). + +## 3. Connect other nodes + +Other nodes can now join this local testnet. + +The initial node will output the ENR on boot. 
The ENR can also be obtained via
+the http:
+```bash
+curl localhost:5052/network/enr
+```
+or from its default directory:
+```
+~/.lighthouse/beacon/network/enr.dat
+```
+
+Once the ENR of the first node is obtained, other nodes may connect and
+participate in the local network. Simply run:
+
+```bash
+lighthouse bn --testnet-dir ~/.lighthouse/testnet --dummy-eth1 --http --http-port 5053 --port 9002 --boot-nodes
+```
+
+> - `--testnet-dir` instructs the beacon node to use the spec we generated earlier.
+> - `--dummy-eth1` uses deterministic "junk data" for linking to the eth1 chain, avoiding the requirement for an eth1 node. The downside is that new validators cannot be on-boarded after genesis.
+> - `--http` starts the REST API so the validator client can produce blocks.
+> - `--http-port` sets the REST API port to a non-standard port to avoid conflicts with the first local node.
+> - `--port` sets the ports of the lighthouse client to a non-standard value to avoid conflicts with the original node.
+> - `--boot-nodes` provides the ENR of the original node to connect to. Note all nodes can use this ENR and should discover each other automatically via the discv5 discovery.
+
+Note: The `--enr-match` is only required for the boot node. The local ENR of
+all subsequent nodes will update automatically.
+
+
+This node should now connect to the original node, sync and follow its head.
+
+## 4. Updating genesis time
+
+To re-use a testnet directory one may simply update the genesis time and repeat
+the process.
+ +To update the genesis time of a `genesis.ssz` file, use the following command: + +```bash +$ lcli change-genesis-time ~/.lighthouse/testnet/genesis.ssz $(date +%s) +``` diff --git a/book/src/simple-testnet.md b/book/src/simple-testnet.md new file mode 100644 index 000000000..9d9933a99 --- /dev/null +++ b/book/src/simple-testnet.md @@ -0,0 +1 @@ +# Simple Local Testnet diff --git a/eth2/proto_array_fork_choice/Cargo.toml b/eth2/proto_array_fork_choice/Cargo.toml index 687b6aefc..f17515acc 100644 --- a/eth2/proto_array_fork_choice/Cargo.toml +++ b/eth2/proto_array_fork_choice/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "proto_array_fork_choice" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner "] edition = "2018" diff --git a/eth2/state_processing/Cargo.toml b/eth2/state_processing/Cargo.toml index ccd198436..6387a59ae 100644 --- a/eth2/state_processing/Cargo.toml +++ b/eth2/state_processing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "state_processing" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner "] edition = "2018" diff --git a/eth2/state_processing/src/common/initiate_validator_exit.rs b/eth2/state_processing/src/common/initiate_validator_exit.rs index 411734bfa..328aa1978 100644 --- a/eth2/state_processing/src/common/initiate_validator_exit.rs +++ b/eth2/state_processing/src/common/initiate_validator_exit.rs @@ -10,7 +10,7 @@ pub fn initiate_validator_exit( spec: &ChainSpec, ) -> Result<(), Error> { if index >= state.validators.len() { - return Err(Error::UnknownValidator); + return Err(Error::UnknownValidator(index as u64)); } // Return if the validator already initiated exit diff --git a/eth2/state_processing/src/common/slash_validator.rs b/eth2/state_processing/src/common/slash_validator.rs index 769289506..9c6731535 100644 --- a/eth2/state_processing/src/common/slash_validator.rs +++ b/eth2/state_processing/src/common/slash_validator.rs @@ -13,7 +13,7 @@ pub fn slash_validator( spec: &ChainSpec, ) -> Result<(), Error> { if 
slashed_index >= state.validators.len() || slashed_index >= state.balances.len() { - return Err(BeaconStateError::UnknownValidator); + return Err(BeaconStateError::UnknownValidator(slashed_index as u64)); } let epoch = state.current_epoch(); diff --git a/eth2/state_processing/src/per_block_processing/block_signature_verifier.rs b/eth2/state_processing/src/per_block_processing/block_signature_verifier.rs index 30b057f53..a3d824cbd 100644 --- a/eth2/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/eth2/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -1,10 +1,9 @@ #![allow(clippy::integer_arithmetic)] use super::signature_sets::{Error as SignatureSetError, Result as SignatureSetResult, *}; - use crate::common::get_indexed_attestation; use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; -use bls::{verify_signature_sets, G1Point, SignatureSet}; +use bls::{verify_signature_sets, SignatureSet}; use rayon::prelude::*; use std::borrow::Cow; use types::{ @@ -12,6 +11,8 @@ use types::{ SignedBeaconBlock, }; +pub use bls::G1Point; + pub type Result = std::result::Result; #[derive(Debug, PartialEq)] @@ -54,7 +55,6 @@ where T: EthSpec, F: Fn(usize) -> Option> + Clone, { - block: &'a SignedBeaconBlock, get_pubkey: F, state: &'a BeaconState, spec: &'a ChainSpec, @@ -68,15 +68,9 @@ where { /// Create a new verifier without any included signatures. 
See the `include...` functions to /// add signatures, and the `verify` - pub fn new( - state: &'a BeaconState, - get_pubkey: F, - block: &'a SignedBeaconBlock, - spec: &'a ChainSpec, - ) -> Self { + pub fn new(state: &'a BeaconState, get_pubkey: F, spec: &'a ChainSpec) -> Self { Self { - block, - get_pubkey, + get_pubkey: get_pubkey, state, spec, sets: vec![], @@ -97,18 +91,8 @@ where block_root: Option, spec: &'a ChainSpec, ) -> Result<()> { - let mut verifier = Self::new(state, get_pubkey, block, spec); - - verifier.include_block_proposal(block_root)?; - verifier.include_randao_reveal()?; - verifier.include_proposer_slashings()?; - verifier.include_attester_slashings()?; - verifier.include_attestations()?; - /* - * Deposits are not included because they can legally have invalid signatures. - */ - verifier.include_exits()?; - + let mut verifier = Self::new(state, get_pubkey, spec); + verifier.include_all_signatures(block, block_root)?; verifier.verify() } @@ -142,12 +126,49 @@ where } } + /// Includes all signatures on the block (except the deposit signatures) for verification. + pub fn include_all_signatures( + &mut self, + block: &'a SignedBeaconBlock, + block_root: Option, + ) -> Result<()> { + self.include_block_proposal(block, block_root)?; + self.include_randao_reveal(block)?; + self.include_proposer_slashings(block)?; + self.include_attester_slashings(block)?; + self.include_attestations(block)?; + // Deposits are not included because they can legally have invalid signatures. + self.include_exits(block)?; + + Ok(()) + } + + /// Includes all signatures on the block (except the deposit signatures and the proposal + /// signature) for verification. 
+ pub fn include_all_signatures_except_proposal( + &mut self, + block: &'a SignedBeaconBlock, + ) -> Result<()> { + self.include_randao_reveal(block)?; + self.include_proposer_slashings(block)?; + self.include_attester_slashings(block)?; + self.include_attestations(block)?; + // Deposits are not included because they can legally have invalid signatures. + self.include_exits(block)?; + + Ok(()) + } + /// Includes the block signature for `self.block` for verification. - fn include_block_proposal(&mut self, block_root: Option) -> Result<()> { + pub fn include_block_proposal( + &mut self, + block: &'a SignedBeaconBlock, + block_root: Option, + ) -> Result<()> { let set = block_proposal_signature_set( self.state, self.get_pubkey.clone(), - self.block, + block, block_root, self.spec, )?; @@ -156,11 +177,11 @@ where } /// Includes the randao signature for `self.block` for verification. - fn include_randao_reveal(&mut self) -> Result<()> { + pub fn include_randao_reveal(&mut self, block: &'a SignedBeaconBlock) -> Result<()> { let set = randao_signature_set( self.state, self.get_pubkey.clone(), - &self.block.message, + &block.message, self.spec, )?; self.sets.push(set); @@ -168,9 +189,8 @@ where } /// Includes all signatures in `self.block.body.proposer_slashings` for verification. - fn include_proposer_slashings(&mut self) -> Result<()> { - let mut sets: Vec = self - .block + pub fn include_proposer_slashings(&mut self, block: &'a SignedBeaconBlock) -> Result<()> { + let mut sets: Vec = block .message .body .proposer_slashings @@ -194,8 +214,8 @@ where } /// Includes all signatures in `self.block.body.attester_slashings` for verification. - fn include_attester_slashings(&mut self) -> Result<()> { - self.block + pub fn include_attester_slashings(&mut self, block: &'a SignedBeaconBlock) -> Result<()> { + block .message .body .attester_slashings @@ -216,8 +236,11 @@ where } /// Includes all signatures in `self.block.body.attestations` for verification. 
- fn include_attestations(&mut self) -> Result>> { - self.block + pub fn include_attestations( + &mut self, + block: &'a SignedBeaconBlock, + ) -> Result>> { + block .message .body .attestations @@ -244,9 +267,8 @@ where } /// Includes all signatures in `self.block.body.voluntary_exits` for verification. - fn include_exits(&mut self) -> Result<()> { - let mut sets = self - .block + pub fn include_exits(&mut self, block: &'a SignedBeaconBlock) -> Result<()> { + let mut sets = block .message .body .voluntary_exits diff --git a/eth2/state_processing/src/per_block_processing/errors.rs b/eth2/state_processing/src/per_block_processing/errors.rs index 83e2a6ac2..514699ee6 100644 --- a/eth2/state_processing/src/per_block_processing/errors.rs +++ b/eth2/state_processing/src/per_block_processing/errors.rs @@ -263,6 +263,8 @@ pub enum AttestationInvalid { committee_len: usize, bitfield_len: usize, }, + /// The attestation was not disjoint compared to already seen attestations. + NotDisjoint, /// The validator index was unknown. UnknownValidator(u64), /// The attestation signature verification failed. 
diff --git a/eth2/state_processing/src/per_block_processing/signature_sets.rs b/eth2/state_processing/src/per_block_processing/signature_sets.rs index cd5f4f01f..8a58cebd5 100644 --- a/eth2/state_processing/src/per_block_processing/signature_sets.rs +++ b/eth2/state_processing/src/per_block_processing/signature_sets.rs @@ -117,7 +117,7 @@ where state.genesis_validators_root, ); - let message = state.current_epoch().signing_root(domain); + let message = block.slot.epoch(T::slots_per_epoch()).signing_root(domain); Ok(SignatureSet::single( &block.body.randao_reveal, diff --git a/eth2/types/Cargo.toml b/eth2/types/Cargo.toml index c442a8600..48c98a67f 100644 --- a/eth2/types/Cargo.toml +++ b/eth2/types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "types" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner ", "Age Manning "] edition = "2018" diff --git a/eth2/types/src/aggregate_and_proof.rs b/eth2/types/src/aggregate_and_proof.rs new file mode 100644 index 000000000..f5376c06e --- /dev/null +++ b/eth2/types/src/aggregate_and_proof.rs @@ -0,0 +1,74 @@ +use super::{ + Attestation, ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, SecretKey, SelectionProof, + Signature, SignedRoot, +}; +use crate::test_utils::TestRandom; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +/// A Validators aggregate attestation and selection proof. +/// +/// Spec v0.10.1 +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] +#[serde(bound = "T: EthSpec")] +pub struct AggregateAndProof { + /// The index of the validator that created the attestation. + pub aggregator_index: u64, + /// The aggregate attestation. + pub aggregate: Attestation, + /// A proof provided by the validator that permits them to publish on the + /// `beacon_aggregate_and_proof` gossipsub topic. 
+ pub selection_proof: Signature, +} + +impl AggregateAndProof { + /// Produces a new `AggregateAndProof` with a `selection_proof` generated by signing + /// `aggregate.data.slot` with `secret_key`. + pub fn from_aggregate( + aggregator_index: u64, + aggregate: Attestation, + secret_key: &SecretKey, + fork: &Fork, + genesis_validators_root: Hash256, + spec: &ChainSpec, + ) -> Self { + let selection_proof = SelectionProof::new::( + aggregate.data.slot, + secret_key, + fork, + genesis_validators_root, + spec, + ) + .into(); + + Self { + aggregator_index, + aggregate, + selection_proof, + } + } + + /// Returns `true` if `validator_pubkey` signed over `self.aggregate.data.slot`. + pub fn is_valid_selection_proof( + &self, + validator_pubkey: &PublicKey, + fork: &Fork, + genesis_validators_root: Hash256, + spec: &ChainSpec, + ) -> bool { + let target_epoch = self.aggregate.data.slot.epoch(T::slots_per_epoch()); + let domain = spec.get_domain( + target_epoch, + Domain::SelectionProof, + fork, + genesis_validators_root, + ); + let message = self.aggregate.data.slot.signing_root(domain); + self.selection_proof + .verify(message.as_bytes(), validator_pubkey) + } +} + +impl SignedRoot for AggregateAndProof {} diff --git a/eth2/types/src/attestation.rs b/eth2/types/src/attestation.rs index 6372bb784..34c11f26f 100644 --- a/eth2/types/src/attestation.rs +++ b/eth2/types/src/attestation.rs @@ -1,8 +1,9 @@ use super::{ AggregateSignature, AttestationData, BitList, ChainSpec, Domain, EthSpec, Fork, SecretKey, - Signature, SignedRoot, + Signature, SignedRoot, SubnetId, }; use crate::{test_utils::TestRandom, Hash256}; +use safe_arith::{ArithError, SafeArith}; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -13,6 +14,7 @@ use tree_hash_derive::TreeHash; pub enum Error { SszTypesError(ssz_types::Error), AlreadySigned(usize), + SubnetCountIsZero(ArithError), } /// Details an attestation that can be slashable. 
@@ -81,6 +83,18 @@ impl Attestation { Ok(()) } } + + /// Returns the subnet id associated with the attestation. + /// + /// Note, this will return the subnet id for an aggregated attestation. This is done + /// to avoid checking aggregate bits every time we wish to get an id. + pub fn subnet_id(&self, spec: &ChainSpec) -> Result { + self.data + .index + .safe_rem(spec.attestation_subnet_count) + .map(SubnetId::new) + .map_err(Error::SubnetCountIsZero) + } } #[cfg(test)] diff --git a/eth2/types/src/attestation_data.rs b/eth2/types/src/attestation_data.rs index fc28732de..15d9920f7 100644 --- a/eth2/types/src/attestation_data.rs +++ b/eth2/types/src/attestation_data.rs @@ -10,7 +10,18 @@ use tree_hash_derive::TreeHash; /// /// Spec v0.11.1 #[derive( - Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash, Encode, Decode, TreeHash, TestRandom, + Debug, + Clone, + PartialEq, + Eq, + Serialize, + Deserialize, + Hash, + Encode, + Decode, + TreeHash, + TestRandom, + Default, )] pub struct AttestationData { pub slot: Slot, diff --git a/eth2/types/src/beacon_block.rs b/eth2/types/src/beacon_block.rs index 5c8efa85d..e2fed9143 100644 --- a/eth2/types/src/beacon_block.rs +++ b/eth2/types/src/beacon_block.rs @@ -50,6 +50,100 @@ impl BeaconBlock { } } + /// Return a block where the block has the max possible operations. 
+ pub fn full(spec: &ChainSpec) -> BeaconBlock { + let header = BeaconBlockHeader { + slot: Slot::new(1), + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body_root: Hash256::zero(), + }; + + let signed_header = SignedBeaconBlockHeader { + message: header, + signature: Signature::empty_signature(), + }; + let indexed_attestation: IndexedAttestation = IndexedAttestation { + attesting_indices: VariableList::new(vec![ + 0 as u64; + T::MaxValidatorsPerCommittee::to_usize() + ]) + .unwrap(), + data: AttestationData::default(), + signature: AggregateSignature::new(), + }; + + let deposit_data = DepositData { + pubkey: PublicKeyBytes::empty(), + withdrawal_credentials: Hash256::zero(), + amount: 0, + signature: SignatureBytes::empty(), + }; + let proposer_slashing = ProposerSlashing { + signed_header_1: signed_header.clone(), + signed_header_2: signed_header.clone(), + }; + + let attester_slashing = AttesterSlashing { + attestation_1: indexed_attestation.clone(), + attestation_2: indexed_attestation.clone(), + }; + + let attestation: Attestation = Attestation { + aggregation_bits: BitList::with_capacity(T::MaxValidatorsPerCommittee::to_usize()) + .unwrap(), + data: AttestationData::default(), + signature: AggregateSignature::new(), + }; + + let deposit = Deposit { + proof: FixedVector::from_elem(Hash256::zero()), + data: deposit_data, + }; + + let voluntary_exit = VoluntaryExit { + epoch: Epoch::new(1), + validator_index: 1, + }; + + let signed_voluntary_exit = SignedVoluntaryExit { + message: voluntary_exit, + signature: Signature::empty_signature(), + }; + + let mut block: BeaconBlock = BeaconBlock::empty(spec); + for _ in 0..T::MaxProposerSlashings::to_usize() { + block + .body + .proposer_slashings + .push(proposer_slashing.clone()) + .unwrap(); + } + for _ in 0..T::MaxDeposits::to_usize() { + block.body.deposits.push(deposit.clone()).unwrap(); + } + for _ in 0..T::MaxVoluntaryExits::to_usize() { + block + .body + .voluntary_exits 
+ .push(signed_voluntary_exit.clone()) + .unwrap(); + } + for _ in 0..T::MaxAttesterSlashings::to_usize() { + block + .body + .attester_slashings + .push(attester_slashing.clone()) + .unwrap(); + } + + for _ in 0..T::MaxAttestations::to_usize() { + block.body.attestations.push(attestation.clone()).unwrap(); + } + block + } + /// Returns the epoch corresponding to `self.slot`. pub fn epoch(&self) -> Epoch { self.slot.epoch(T::slots_per_epoch()) diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 1f8e9145d..4f980a019 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -12,6 +12,7 @@ use serde_derive::{Deserialize, Serialize}; use ssz::ssz_encode; use ssz_derive::{Decode, Encode}; use ssz_types::{typenum::Unsigned, BitVector, FixedVector}; +use std::convert::TryInto; use std::fmt; use swap_or_not_shuffle::compute_shuffled_index; use test_random_derive::TestRandom; @@ -38,7 +39,7 @@ const MAX_RANDOM_BYTE: u64 = (1 << 8) - 1; pub enum Error { EpochOutOfBounds, SlotOutOfBounds, - UnknownValidator, + UnknownValidator(u64), UnableToDetermineProducer, InvalidBitfield, ValidatorIsWithdrawable, @@ -373,6 +374,7 @@ impl BeaconState { /// /// Spec v0.11.1 pub fn get_active_validator_indices(&self, epoch: Epoch) -> Vec { + // FIXME(sproul): put a bounds check on here based on the maximum lookahead get_active_validator_indices(&self.validators, epoch) } @@ -470,6 +472,31 @@ impl BeaconState { } } + /// Return `true` if the validator who produced `slot_signature` is eligible to aggregate. 
+ /// + /// Spec v0.10.1 + pub fn is_aggregator( + &self, + slot: Slot, + index: CommitteeIndex, + slot_signature: &Signature, + spec: &ChainSpec, + ) -> Result { + let committee = self.get_beacon_committee(slot, index)?; + let modulo = std::cmp::max( + 1, + (committee.committee.len() as u64).safe_div(spec.target_aggregators_per_committee)?, + ); + let signature_hash = hash(&slot_signature.as_bytes()); + let signature_hash_int = u64::from_le_bytes( + signature_hash[0..8] + .try_into() + .expect("first 8 bytes of signature should always convert to fixed array"), + ); + + Ok(signature_hash_int.safe_rem(modulo)? == 0) + } + /// Returns the beacon proposer index for the `slot` in the given `relative_epoch`. /// /// Spec v0.11.1 @@ -765,7 +792,7 @@ impl BeaconState { self.validators .get(validator_index) .map(|v| v.effective_balance) - .ok_or_else(|| Error::UnknownValidator) + .ok_or_else(|| Error::UnknownValidator(validator_index as u64)) } /// Return the epoch at which an activation or exit triggered in ``epoch`` takes effect. @@ -790,7 +817,7 @@ impl BeaconState { )) } - /// Returns the `slot`, `index` and `committee_position` for which a validator must produce an + /// Returns the `slot`, `index`, `committee_position` and `committee_len` for which a validator must produce an /// attestation. /// /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. 
diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index cd7e603ef..5fafddeb2 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -114,11 +114,43 @@ pub struct ChainSpec { pub eth1_follow_distance: u64, pub seconds_per_eth1_block: u64, + /* + * Networking + */ pub boot_nodes: Vec, pub network_id: u8, + pub attestation_propagation_slot_range: u64, + pub maximum_gossip_clock_disparity_millis: u64, + pub target_aggregators_per_committee: u64, + pub attestation_subnet_count: u64, + pub random_subnets_per_validator: u64, + pub epochs_per_random_subnet_subscription: u64, } impl ChainSpec { + /// Returns an `EnrForkId` for the given `slot`. + /// + /// Presently, we don't have any forks so we just ignore the slot. In the future this function + /// may return something different based upon the slot. + pub fn enr_fork_id(&self, _slot: Slot, genesis_validators_root: Hash256) -> EnrForkId { + EnrForkId { + fork_digest: Self::compute_fork_digest( + self.genesis_fork_version, + genesis_validators_root, + ), + next_fork_version: self.genesis_fork_version, + next_fork_epoch: self.far_future_epoch, + } + } + + /// Returns the epoch of the next scheduled change in the `fork.current_version`. + /// + /// There are no future forks scheduled so this function always returns `None`. This may not + /// always be the case in the future, though. + pub fn next_fork_epoch(&self) -> Option { + None + } + /// Get the domain number, unmodified by the fork. 
/// /// Spec v0.11.1 @@ -297,6 +329,12 @@ impl ChainSpec { */ boot_nodes: vec![], network_id: 1, // mainnet network id + attestation_propagation_slot_range: 32, + attestation_subnet_count: 64, + random_subnets_per_validator: 1, + maximum_gossip_clock_disparity_millis: 500, + target_aggregators_per_committee: 16, + epochs_per_random_subnet_subscription: 256, } } diff --git a/eth2/types/src/enr_fork_id.rs b/eth2/types/src/enr_fork_id.rs new file mode 100644 index 000000000..f1c5ac6bf --- /dev/null +++ b/eth2/types/src/enr_fork_id.rs @@ -0,0 +1,36 @@ +use crate::test_utils::TestRandom; +use crate::utils::{fork_from_hex_str, fork_to_hex_str}; +use crate::Epoch; + +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +/// Specifies a fork which allows nodes to identify each other on the network. This fork is used in +/// a nodes local ENR. +/// +/// Spec v0.11 +#[derive( + Debug, Clone, PartialEq, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, +)] +pub struct EnrForkId { + #[serde( + serialize_with = "fork_to_hex_str", + deserialize_with = "fork_from_hex_str" + )] + pub fork_digest: [u8; 4], + #[serde( + serialize_with = "fork_to_hex_str", + deserialize_with = "fork_from_hex_str" + )] + pub next_fork_version: [u8; 4], + pub next_fork_epoch: Epoch, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(EnrForkId); +} diff --git a/eth2/types/src/eth_spec.rs b/eth2/types/src/eth_spec.rs index a4883551a..18ee3e5a9 100644 --- a/eth2/types/src/eth_spec.rs +++ b/eth2/types/src/eth_spec.rs @@ -13,6 +13,7 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq { */ type GenesisEpoch: Unsigned + Clone + Sync + Send + Debug + PartialEq; type JustificationBitsLength: Unsigned + Clone + Sync + Send + Debug + PartialEq + Default; + type SubnetBitfieldLength: Unsigned + Clone + Sync + Send + Debug + PartialEq + 
Default; /* * Misc */ @@ -135,6 +136,7 @@ pub struct MainnetEthSpec; impl EthSpec for MainnetEthSpec { type JustificationBitsLength = U4; + type SubnetBitfieldLength = U64; type MaxValidatorsPerCommittee = U2048; type GenesisEpoch = U0; type SlotsPerEpoch = U32; @@ -176,6 +178,7 @@ impl EthSpec for MinimalEthSpec { params_from_eth_spec!(MainnetEthSpec { JustificationBitsLength, + SubnetBitfieldLength, MaxValidatorsPerCommittee, GenesisEpoch, HistoricalRootsLimit, @@ -209,6 +212,7 @@ impl EthSpec for InteropEthSpec { params_from_eth_spec!(MainnetEthSpec { JustificationBitsLength, + SubnetBitfieldLength, MaxValidatorsPerCommittee, GenesisEpoch, HistoricalRootsLimit, diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index 91d5d6580..97e371006 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -8,6 +8,7 @@ #[macro_use] pub mod test_utils; +pub mod aggregate_and_proof; pub mod attestation; pub mod attestation_data; pub mod attestation_duty; @@ -22,6 +23,7 @@ pub mod checkpoint; pub mod deposit; pub mod deposit_data; pub mod deposit_message; +pub mod enr_fork_id; pub mod eth1_data; pub mod eth_spec; pub mod fork; @@ -32,6 +34,8 @@ pub mod indexed_attestation; pub mod pending_attestation; pub mod proposer_slashing; pub mod relative_epoch; +pub mod selection_proof; +pub mod signed_aggregate_and_proof; pub mod signed_beacon_block; pub mod signed_beacon_block_header; pub mod signed_voluntary_exit; @@ -42,10 +46,12 @@ pub mod voluntary_exit; #[macro_use] pub mod slot_epoch_macros; pub mod slot_epoch; +pub mod subnet_id; mod tree_hash_impls; use ethereum_types::{H160, H256}; +pub use crate::aggregate_and_proof::AggregateAndProof; pub use crate::attestation::{Attestation, Error as AttestationError}; pub use crate::attestation_data::AttestationData; pub use crate::attestation_duty::AttestationDuty; @@ -60,6 +66,7 @@ pub use crate::checkpoint::Checkpoint; pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH}; pub use crate::deposit_data::DepositData; pub 
use crate::deposit_message::DepositMessage; +pub use crate::enr_fork_id::EnrForkId; pub use crate::eth1_data::Eth1Data; pub use crate::fork::Fork; pub use crate::fork_data::ForkData; @@ -69,11 +76,14 @@ pub use crate::indexed_attestation::IndexedAttestation; pub use crate::pending_attestation::PendingAttestation; pub use crate::proposer_slashing::ProposerSlashing; pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; +pub use crate::selection_proof::SelectionProof; +pub use crate::signed_aggregate_and_proof::SignedAggregateAndProof; pub use crate::signed_beacon_block::{SignedBeaconBlock, SignedBeaconBlockHash}; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; pub use crate::signed_voluntary_exit::SignedVoluntaryExit; pub use crate::signing_root::{SignedRoot, SigningRoot}; pub use crate::slot_epoch::{Epoch, Slot}; +pub use crate::subnet_id::SubnetId; pub use crate::validator::Validator; pub use crate::voluntary_exit::VoluntaryExit; diff --git a/eth2/types/src/selection_proof.rs b/eth2/types/src/selection_proof.rs new file mode 100644 index 000000000..18c62ba40 --- /dev/null +++ b/eth2/types/src/selection_proof.rs @@ -0,0 +1,45 @@ +use crate::{ChainSpec, Domain, EthSpec, Fork, Hash256, SecretKey, Signature, SignedRoot, Slot}; +use safe_arith::{ArithError, SafeArith}; +use std::convert::TryInto; +use tree_hash::TreeHash; + +#[derive(PartialEq, Debug, Clone)] +pub struct SelectionProof(Signature); + +impl SelectionProof { + pub fn new( + slot: Slot, + secret_key: &SecretKey, + fork: &Fork, + genesis_validators_root: Hash256, + spec: &ChainSpec, + ) -> Self { + let domain = spec.get_domain( + slot.epoch(T::slots_per_epoch()), + Domain::SelectionProof, + fork, + genesis_validators_root, + ); + let message = slot.signing_root(domain); + + Self(Signature::new(message.as_bytes(), secret_key)) + } + + pub fn is_aggregator(&self, modulo: u64) -> Result { + let signature_hash = self.0.tree_hash_root(); + let signature_hash_int = 
u64::from_le_bytes( + signature_hash[0..8] + .as_ref() + .try_into() + .expect("first 8 bytes of signature should always convert to fixed array"), + ); + + signature_hash_int.safe_rem(modulo).map(|rem| rem == 0) + } +} + +impl Into for SelectionProof { + fn into(self) -> Signature { + self.0 + } +} diff --git a/eth2/types/src/signed_aggregate_and_proof.rs b/eth2/types/src/signed_aggregate_and_proof.rs new file mode 100644 index 000000000..750818167 --- /dev/null +++ b/eth2/types/src/signed_aggregate_and_proof.rs @@ -0,0 +1,95 @@ +use super::{ + AggregateAndProof, Attestation, ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, + SecretKey, Signature, SignedRoot, +}; +use crate::test_utils::TestRandom; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +/// A Validators signed aggregate proof to publish on the `beacon_aggregate_and_proof` +/// gossipsub topic. +/// +/// Spec v0.10.1 +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] +#[serde(bound = "T: EthSpec")] +pub struct SignedAggregateAndProof { + /// The `AggregateAndProof` that was signed. + pub message: AggregateAndProof, + /// The aggregate attestation. + pub signature: Signature, +} + +impl SignedAggregateAndProof { + /// Produces a new `SignedAggregateAndProof` with a `selection_proof` generated by signing + /// `aggregate.data.slot` with `secret_key`. 
+ pub fn from_aggregate( + aggregator_index: u64, + aggregate: Attestation, + secret_key: &SecretKey, + fork: &Fork, + genesis_validators_root: Hash256, + spec: &ChainSpec, + ) -> Self { + let message = AggregateAndProof::from_aggregate( + aggregator_index, + aggregate, + secret_key, + fork, + genesis_validators_root, + spec, + ); + + let target_epoch = message.aggregate.data.slot.epoch(T::slots_per_epoch()); + let domain = spec.get_domain( + target_epoch, + Domain::AggregateAndProof, + fork, + genesis_validators_root, + ); + let signing_message = message.signing_root(domain); + + SignedAggregateAndProof { + message, + signature: Signature::new(signing_message.as_bytes(), &secret_key), + } + } + + /// Verifies the signature of the `AggregateAndProof` + pub fn is_valid_signature( + &self, + validator_pubkey: &PublicKey, + fork: &Fork, + genesis_validators_root: Hash256, + spec: &ChainSpec, + ) -> bool { + let target_epoch = self.message.aggregate.data.slot.epoch(T::slots_per_epoch()); + let domain = spec.get_domain( + target_epoch, + Domain::AggregateAndProof, + fork, + genesis_validators_root, + ); + let message = self.message.signing_root(domain); + self.signature.verify(message.as_bytes(), validator_pubkey) + } + + /// Verifies the signature of the `AggregateAndProof` as well the underlying selection_proof in + /// the contained `AggregateAndProof`. 
+ pub fn is_valid( + &self, + validator_pubkey: &PublicKey, + fork: &Fork, + genesis_validators_root: Hash256, + spec: &ChainSpec, + ) -> bool { + self.is_valid_signature(validator_pubkey, fork, genesis_validators_root, spec) + && self.message.is_valid_selection_proof( + validator_pubkey, + fork, + genesis_validators_root, + spec, + ) + } +} diff --git a/eth2/types/src/slot_epoch.rs b/eth2/types/src/slot_epoch.rs index 290dfdc65..7f3b45ce4 100644 --- a/eth2/types/src/slot_epoch.rs +++ b/eth2/types/src/slot_epoch.rs @@ -93,6 +93,7 @@ impl Epoch { } impl SignedRoot for Epoch {} +impl SignedRoot for Slot {} pub struct SlotIter<'a> { current_iteration: u64, diff --git a/eth2/types/src/subnet_id.rs b/eth2/types/src/subnet_id.rs new file mode 100644 index 000000000..6699d6cc0 --- /dev/null +++ b/eth2/types/src/subnet_id.rs @@ -0,0 +1,26 @@ +//! Identifies each shard by an integer identifier. +use serde_derive::{Deserialize, Serialize}; +use std::ops::{Deref, DerefMut}; + +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct SubnetId(u64); + +impl SubnetId { + pub fn new(id: u64) -> Self { + SubnetId(id) + } +} + +impl Deref for SubnetId { + type Target = u64; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for SubnetId { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} diff --git a/eth2/types/src/test_utils/mod.rs b/eth2/types/src/test_utils/mod.rs index 4156c513d..719fd2e3f 100644 --- a/eth2/types/src/test_utils/mod.rs +++ b/eth2/types/src/test_utils/mod.rs @@ -14,4 +14,4 @@ pub use generate_deterministic_keypairs::load_keypairs_from_yaml; pub use keypairs_file::KeypairsFile; pub use rand::{RngCore, SeedableRng}; pub use rand_xorshift::XorShiftRng; -pub use test_random::TestRandom; +pub use test_random::{test_random_instance, TestRandom}; diff --git a/eth2/types/src/test_utils/test_random.rs b/eth2/types/src/test_utils/test_random.rs index fa1a41815..f92d26717 100644 --- 
a/eth2/types/src/test_utils/test_random.rs +++ b/eth2/types/src/test_utils/test_random.rs @@ -1,5 +1,7 @@ use crate::*; use rand::RngCore; +use rand::SeedableRng; +use rand_xorshift::XorShiftRng; use ssz_types::typenum::Unsigned; mod address; @@ -12,6 +14,11 @@ mod secret_key; mod signature; mod signature_bytes; +pub fn test_random_instance() -> T { + let mut rng = XorShiftRng::from_seed([0x42; 16]); + T::random_for_test(&mut rng) +} + pub trait TestRandom { fn random_for_test(rng: &mut impl RngCore) -> Self; } diff --git a/eth2/utils/bls/Cargo.toml b/eth2/utils/bls/Cargo.toml index e568e9715..f22263e31 100644 --- a/eth2/utils/bls/Cargo.toml +++ b/eth2/utils/bls/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bls" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner "] edition = "2018" diff --git a/eth2/utils/clap_utils/Cargo.toml b/eth2/utils/clap_utils/Cargo.toml new file mode 100644 index 000000000..f1916c4ba --- /dev/null +++ b/eth2/utils/clap_utils/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "clap_utils" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +clap = "2.33.0" +hex = "0.3" +dirs = "2.0" +types = { path = "../../types" } +eth2_testnet_config = { path = "../eth2_testnet_config" } +eth2_ssz = { path = "../ssz" } diff --git a/eth2/utils/clap_utils/src/lib.rs b/eth2/utils/clap_utils/src/lib.rs new file mode 100644 index 000000000..d8002d76f --- /dev/null +++ b/eth2/utils/clap_utils/src/lib.rs @@ -0,0 +1,113 @@ +//! A helper library for parsing values from `clap::ArgMatches`. + +use clap::ArgMatches; +use eth2_testnet_config::Eth2TestnetConfig; +use hex; +use ssz::Decode; +use std::path::PathBuf; +use std::str::FromStr; +use types::EthSpec; + +/// Attempts to load the testnet dir at the path if `name` is in `matches`, returning an error if +/// the path cannot be found or the testnet dir is invalid. 
+/// +/// If `name` is not in `matches`, attempts to return the "hard coded" testnet dir. +pub fn parse_testnet_dir_with_hardcoded_default( + matches: &ArgMatches, + name: &'static str, +) -> Result, String> { + if let Some(path) = parse_optional::(matches, name)? { + Eth2TestnetConfig::load(path.clone()) + .map_err(|e| format!("Unable to open testnet dir at {:?}: {}", path, e)) + } else { + Eth2TestnetConfig::hard_coded().map_err(|e| { + format!( + "The hard-coded testnet directory was invalid. \ + This happens when Lighthouse is migrating between spec versions. \ + Error : {}", + e + ) + }) + } +} + +/// If `name` is in `matches`, parses the value as a path. Otherwise, attempts to find the user's +/// home directory and appends `default` to it. +pub fn parse_path_with_default_in_home_dir( + matches: &ArgMatches, + name: &'static str, + default: PathBuf, +) -> Result { + matches + .value_of(name) + .map(|dir| { + dir.parse::() + .map_err(|e| format!("Unable to parse {}: {}", name, e)) + }) + .unwrap_or_else(|| { + dirs::home_dir() + .map(|home| home.join(default)) + .ok_or_else(|| format!("Unable to locate home directory. Try specifying {}", name)) + }) +} + +/// Returns the value of `name` or an error if it is not in `matches` or does not parse +/// successfully using `std::string::FromStr`. +pub fn parse_required(matches: &ArgMatches, name: &'static str) -> Result +where + T: FromStr, + ::Err: std::fmt::Display, +{ + parse_optional(matches, name)?.ok_or_else(|| format!("{} not specified", name)) +} + +/// Returns the value of `name` (if present) or an error if it does not parse successfully using +/// `std::string::FromStr`. 
+pub fn parse_optional(matches: &ArgMatches, name: &'static str) -> Result, String> +where + T: FromStr, + ::Err: std::fmt::Display, +{ + matches + .value_of(name) + .map(|val| { + val.parse() + .map_err(|e| format!("Unable to parse {}: {}", name, e)) + }) + .transpose() +} + +/// Returns the value of `name` or an error if it is not in `matches` or does not parse +/// successfully using `ssz::Decode`. +/// +/// Expects the value of `name` to be 0x-prefixed ASCII-hex. +pub fn parse_ssz_required( + matches: &ArgMatches, + name: &'static str, +) -> Result { + parse_ssz_optional(matches, name)?.ok_or_else(|| format!("{} not specified", name)) +} + +/// Returns the value of `name` (if present) or an error if it does not parse successfully using +/// `ssz::Decode`. +/// +/// Expects the value of `name` (if any) to be 0x-prefixed ASCII-hex. +pub fn parse_ssz_optional( + matches: &ArgMatches, + name: &'static str, +) -> Result, String> { + matches + .value_of(name) + .map(|val| { + if val.starts_with("0x") { + let vec = hex::decode(&val[2..]) + .map_err(|e| format!("Unable to parse {} as hex: {:?}", name, e))?; + + T::from_ssz_bytes(&vec) + .map_err(|e| format!("Unable to parse {} as SSZ: {:?}", name, e)) + } else { + Err(format!("Unable to parse {}, must have 0x prefix", name)) + } + }) + .transpose() +} diff --git a/eth2/utils/compare_fields/Cargo.toml b/eth2/utils/compare_fields/Cargo.toml index 33826c71d..ee5fe5c23 100644 --- a/eth2/utils/compare_fields/Cargo.toml +++ b/eth2/utils/compare_fields/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "compare_fields" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner "] edition = "2018" diff --git a/eth2/utils/compare_fields_derive/Cargo.toml b/eth2/utils/compare_fields_derive/Cargo.toml index 8832e26d3..485b2708d 100644 --- a/eth2/utils/compare_fields_derive/Cargo.toml +++ b/eth2/utils/compare_fields_derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "compare_fields_derive" -version = "0.1.0" +version = "0.2.0" authors 
= ["Paul Hauner "] edition = "2018" diff --git a/eth2/utils/deposit_contract/Cargo.toml b/eth2/utils/deposit_contract/Cargo.toml index c3698ba1b..877fa388b 100644 --- a/eth2/utils/deposit_contract/Cargo.toml +++ b/eth2/utils/deposit_contract/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "deposit_contract" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner "] edition = "2018" diff --git a/eth2/utils/deposit_contract/src/lib.rs b/eth2/utils/deposit_contract/src/lib.rs index 11a831c0f..2a5ea514e 100644 --- a/eth2/utils/deposit_contract/src/lib.rs +++ b/eth2/utils/deposit_contract/src/lib.rs @@ -22,7 +22,7 @@ impl From for DecodeError { } pub const CONTRACT_DEPLOY_GAS: usize = 4_000_000; -pub const DEPOSIT_GAS: usize = 4_000_000; +pub const DEPOSIT_GAS: usize = 400_000; pub const ABI: &[u8] = include_bytes!("../contracts/v0.11.1_validator_registration.json"); pub const BYTECODE: &[u8] = include_bytes!("../contracts/v0.11.1_validator_registration.bytecode"); pub const DEPOSIT_DATA_LEN: usize = 420; // lol diff --git a/eth2/utils/eth2_config/Cargo.toml b/eth2/utils/eth2_config/Cargo.toml index f186a90a0..c9af2fef3 100644 --- a/eth2/utils/eth2_config/Cargo.toml +++ b/eth2/utils/eth2_config/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "eth2_config" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner "] edition = "2018" diff --git a/eth2/utils/eth2_interop_keypairs/Cargo.toml b/eth2/utils/eth2_interop_keypairs/Cargo.toml index f6a6cd346..641f82c25 100644 --- a/eth2/utils/eth2_interop_keypairs/Cargo.toml +++ b/eth2/utils/eth2_interop_keypairs/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "eth2_interop_keypairs" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner "] edition = "2018" diff --git a/eth2/utils/eth2_testnet_config/Cargo.toml b/eth2/utils/eth2_testnet_config/Cargo.toml index 13c4d387f..3df6897a4 100644 --- a/eth2/utils/eth2_testnet_config/Cargo.toml +++ b/eth2/utils/eth2_testnet_config/Cargo.toml @@ -1,6 +1,6 @@ [package] name = 
"eth2_testnet_config" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner "] edition = "2018" diff --git a/eth2/utils/hashmap_delay/Cargo.toml b/eth2/utils/hashmap_delay/Cargo.toml new file mode 100644 index 000000000..918a8568f --- /dev/null +++ b/eth2/utils/hashmap_delay/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "hashmap_delay" +version = "0.2.0" +authors = ["Age Manning "] +edition = "2018" + +[dependencies] +tokio-timer = "0.2.12" +futures = "0.1.29" diff --git a/eth2/utils/hashmap_delay/src/hashmap_delay.rs b/eth2/utils/hashmap_delay/src/hashmap_delay.rs new file mode 100644 index 000000000..ea4fa1457 --- /dev/null +++ b/eth2/utils/hashmap_delay/src/hashmap_delay.rs @@ -0,0 +1,161 @@ +//! A simple hashmap object coupled with a `delay_queue` which has entries that expire after a +//! fixed time. +//! +//! A `HashMapDelay` implements `Stream` which removes expired items from the map. + +/// The default delay for entries, in seconds. This is only used when `insert()` is used to add +/// entries. +const DEFAULT_DELAY: u64 = 30; + +use futures::prelude::*; +use std::collections::HashMap; +use std::time::Duration; +use tokio_timer::delay_queue::{self, DelayQueue}; + +pub struct HashMapDelay +where + K: std::cmp::Eq + std::hash::Hash + std::clone::Clone, +{ + /// The given entries. + entries: HashMap>, + /// A queue holding the timeouts of each entry. + expirations: DelayQueue, + /// The default expiration timeout of an entry. + default_entry_timeout: Duration, +} + +/// A wrapping around entries that adds the link to the entry's expiration, via a `delay_queue` key. +struct MapEntry { + /// The expiration key for the entry. + key: delay_queue::Key, + /// The actual entry. 
+ value: V, +} + +impl Default for HashMapDelay +where + K: std::cmp::Eq + std::hash::Hash + std::clone::Clone, +{ + fn default() -> Self { + HashMapDelay::new(Duration::from_secs(DEFAULT_DELAY)) + } +} + +impl HashMapDelay +where + K: std::cmp::Eq + std::hash::Hash + std::clone::Clone, +{ + /// Creates a new instance of `HashMapDelay`. + pub fn new(default_entry_timeout: Duration) -> Self { + HashMapDelay { + entries: HashMap::new(), + expirations: DelayQueue::new(), + default_entry_timeout, + } + } + + /// Insert an entry into the mapping. Entries will expire after the `default_entry_timeout`. + pub fn insert(&mut self, key: K, value: V) { + self.insert_at(key, value, self.default_entry_timeout); + } + + /// Inserts an entry that will expire at a given instant. + pub fn insert_at(&mut self, key: K, value: V, entry_duration: Duration) { + let delay_key = self.expirations.insert(key.clone(), entry_duration); + let entry = MapEntry { + key: delay_key, + value, + }; + self.entries.insert(key, entry); + } + + /// Gets a reference to an entry if it exists. + /// + /// Returns None if the entry does not exist. + pub fn get(&self, key: &K) -> Option<&V> { + self.entries.get(key).map(|entry| &entry.value) + } + + /// Gets a mutable reference to an entry if it exists. + /// + /// Returns None if the entry does not exist. + pub fn get_mut(&mut self, key: &K) -> Option<&mut V> { + self.entries.get_mut(key).map(|entry| &mut entry.value) + } + + /// Returns true if the key exists, false otherwise. + pub fn contains_key(&self, key: &K) -> bool { + self.entries.contains_key(key) + } + + /// Returns the length of the mapping. + pub fn len(&self) -> usize { + self.entries.len() + } + + /// Updates the timeout for a given key. Returns true if the key existed, false otherwise. + /// + /// Panics if the duration is too far in the future. 
+ pub fn update_timeout(&mut self, key: &K, timeout: Duration) -> bool { + if let Some(entry) = self.entries.get(key) { + self.expirations.reset(&entry.key, timeout); + true + } else { + false + } + } + + /// Removes a key from the map returning the value associated with the key that was in the map. + /// + /// Return None if the key was not in the map. + pub fn remove(&mut self, key: &K) -> Option { + if let Some(entry) = self.entries.remove(key) { + self.expirations.remove(&entry.key); + return Some(entry.value); + } + return None; + } + + /// Retains only the elements specified by the predicate. + /// + /// In other words, remove all pairs `(k, v)` such that `f(&k,&mut v)` returns false. + pub fn retain bool>(&mut self, mut f: F) { + let expiration = &mut self.expirations; + self.entries.retain(|key, entry| { + let result = f(key, &mut entry.value); + if !result { + expiration.remove(&entry.key); + } + result + }) + } + + /// Removes all entries from the map. + pub fn clear(&mut self) { + self.entries.clear(); + self.expirations.clear(); + } +} + +impl Stream for HashMapDelay +where + K: std::cmp::Eq + std::hash::Hash + std::clone::Clone, +{ + type Item = (K, V); + type Error = &'static str; + + fn poll(&mut self) -> Poll, Self::Error> { + match self.expirations.poll() { + Ok(Async::Ready(Some(key))) => { + let key = key.into_inner(); + match self.entries.remove(&key) { + Some(entry) => Ok(Async::Ready(Some((key, entry.value)))), + None => Err("Value no longer exists in expirations"), + } + } + Ok(Async::Ready(None)) => Ok(Async::Ready(None)), + Ok(Async::NotReady) => Ok(Async::NotReady), + Err(_) => Err("Error polling HashMapDelay"), + } + } +} diff --git a/eth2/utils/hashmap_delay/src/hashset_delay.rs b/eth2/utils/hashmap_delay/src/hashset_delay.rs new file mode 100644 index 000000000..bd93d6c8e --- /dev/null +++ b/eth2/utils/hashmap_delay/src/hashset_delay.rs @@ -0,0 +1,163 @@ +//NOTE: This is just a specific case of a HashMapDelay. 
+// The code has been copied to make unique `insert` and `insert_at` functions. + +/// The default delay for entries, in seconds. This is only used when `insert()` is used to add +/// entries. +const DEFAULT_DELAY: u64 = 30; + +use futures::prelude::*; +use std::collections::HashMap; +use std::time::{Duration, Instant}; +use tokio_timer::delay_queue::{self, DelayQueue}; + +pub struct HashSetDelay +where + K: std::cmp::Eq + std::hash::Hash + std::clone::Clone, +{ + /// The given entries. + entries: HashMap, + /// A queue holding the timeouts of each entry. + expirations: DelayQueue, + /// The default expiration timeout of an entry. + default_entry_timeout: Duration, +} + +/// A wrapping around entries that adds the link to the entry's expiration, via a `delay_queue` key. +struct MapEntry { + /// The expiration key for the entry. + key: delay_queue::Key, + /// The actual entry. + value: Instant, +} + +impl Default for HashSetDelay +where + K: std::cmp::Eq + std::hash::Hash + std::clone::Clone, +{ + fn default() -> Self { + HashSetDelay::new(Duration::from_secs(DEFAULT_DELAY)) + } +} + +impl HashSetDelay +where + K: std::cmp::Eq + std::hash::Hash + std::clone::Clone, +{ + /// Creates a new instance of `HashSetDelay`. + pub fn new(default_entry_timeout: Duration) -> Self { + HashSetDelay { + entries: HashMap::new(), + expirations: DelayQueue::new(), + default_entry_timeout, + } + } + + /// Insert an entry into the mapping. Entries will expire after the `default_entry_timeout`. + pub fn insert(&mut self, key: K) { + self.insert_at(key, self.default_entry_timeout); + } + + /// Inserts an entry that will expire at a given instant. If the entry already exists, the + /// timeout is updated. 
+ pub fn insert_at(&mut self, key: K, entry_duration: Duration) { + if self.contains(&key) { + // update the timeout + self.update_timeout(&key, entry_duration); + } else { + let delay_key = self.expirations.insert(key.clone(), entry_duration.clone()); + let entry = MapEntry { + key: delay_key, + value: Instant::now() + entry_duration, + }; + self.entries.insert(key, entry); + } + } + + /// Gets a reference to an entry if it exists. + /// + /// Returns None if the entry does not exist. + pub fn get(&self, key: &K) -> Option<&Instant> { + self.entries.get(key).map(|entry| &entry.value) + } + + /// Returns true if the key exists, false otherwise. + pub fn contains(&self, key: &K) -> bool { + self.entries.contains_key(key) + } + + /// Returns the length of the mapping. + pub fn len(&self) -> usize { + self.entries.len() + } + + /// Updates the timeout for a given key. Returns true if the key existed, false otherwise. + /// + /// Panics if the duration is too far in the future. + pub fn update_timeout(&mut self, key: &K, timeout: Duration) -> bool { + if let Some(entry) = self.entries.get(key) { + self.expirations.reset(&entry.key, timeout); + true + } else { + false + } + } + + /// Removes a key from the map returning the value associated with the key that was in the map. + /// + /// Return false if the key was not in the map. + pub fn remove(&mut self, key: &K) -> bool { + if let Some(entry) = self.entries.remove(key) { + self.expirations.remove(&entry.key); + return true; + } + return false; + } + + /// Retains only the elements specified by the predicate. + /// + /// In other words, remove all pairs `(k, v)` such that `f(&k,&mut v)` returns false. + pub fn retain bool>(&mut self, mut f: F) { + let expiration = &mut self.expirations; + self.entries.retain(|key, entry| { + let result = f(key); + if !result { + expiration.remove(&entry.key); + } + result + }) + } + + /// Removes all entries from the map. 
+ pub fn clear(&mut self) { + self.entries.clear(); + self.expirations.clear(); + } + + /// Returns a vector of referencing all keys in the map. + pub fn keys_vec(&self) -> Vec<&K> { + self.entries.keys().collect() + } +} + +impl Stream for HashSetDelay +where + K: std::cmp::Eq + std::hash::Hash + std::clone::Clone, +{ + type Item = K; + type Error = &'static str; + + fn poll(&mut self) -> Poll, Self::Error> { + match self.expirations.poll() { + Ok(Async::Ready(Some(key))) => { + let key = key.into_inner(); + match self.entries.remove(&key) { + Some(_) => Ok(Async::Ready(Some(key))), + None => Err("Value no longer exists in expirations"), + } + } + Ok(Async::Ready(None)) => Ok(Async::Ready(None)), + Ok(Async::NotReady) => Ok(Async::NotReady), + Err(_) => Err("Error polling HashSetDelay"), + } + } +} diff --git a/eth2/utils/hashmap_delay/src/lib.rs b/eth2/utils/hashmap_delay/src/lib.rs new file mode 100644 index 000000000..140106b42 --- /dev/null +++ b/eth2/utils/hashmap_delay/src/lib.rs @@ -0,0 +1,21 @@ +//! This crate provides two objects: +//! - `HashMapDelay` +//! - `HashSetDelay` +//! +//! # HashMapDelay +//! +//! This provides a `HashMap` coupled with a `DelayQueue`. Objects that are inserted into +//! the map are inserted with an expiry. `Stream` is implemented on the `HashMapDelay` +//! which return objects that have expired. These objects are removed from the mapping. +//! +//! # HashSetDelay +//! +//! This is similar to a `HashMapDelay` except the mapping maps to the expiry time. This +//! allows users to add objects and check their expiry deadlines before the `Stream` +//! consumes them. 
+ +mod hashmap_delay; +mod hashset_delay; + +pub use crate::hashmap_delay::HashMapDelay; +pub use crate::hashset_delay::HashSetDelay; diff --git a/eth2/utils/int_to_bytes/Cargo.toml b/eth2/utils/int_to_bytes/Cargo.toml index 48c52548b..c24f657c6 100644 --- a/eth2/utils/int_to_bytes/Cargo.toml +++ b/eth2/utils/int_to_bytes/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "int_to_bytes" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner "] edition = "2018" diff --git a/eth2/utils/lighthouse_bootstrap/Cargo.toml b/eth2/utils/lighthouse_bootstrap/Cargo.toml new file mode 100644 index 000000000..460d12d85 --- /dev/null +++ b/eth2/utils/lighthouse_bootstrap/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "lighthouse_bootstrap" +version = "0.2.0" +authors = ["Paul Hauner "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +eth2_config = { path = "../eth2_config" } +eth2-libp2p = { path = "../../../beacon_node/eth2-libp2p" } +reqwest = "0.9.22" +url = "1.2" +types = { path = "../../types" } +serde = "1.0.102" +slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } diff --git a/eth2/utils/lighthouse_metrics/Cargo.toml b/eth2/utils/lighthouse_metrics/Cargo.toml index 3b01c63e4..ed4b49253 100644 --- a/eth2/utils/lighthouse_metrics/Cargo.toml +++ b/eth2/utils/lighthouse_metrics/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse_metrics" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner "] edition = "2018" diff --git a/eth2/utils/logging/Cargo.toml b/eth2/utils/logging/Cargo.toml index 0fe056663..c7ccd3617 100644 --- a/eth2/utils/logging/Cargo.toml +++ b/eth2/utils/logging/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "logging" -version = "0.1.0" +version = "0.2.0" authors = ["blacktemplar "] edition = "2018" diff --git a/eth2/utils/merkle_proof/Cargo.toml b/eth2/utils/merkle_proof/Cargo.toml index 355bf1bac..d4127a53f 100644 --- 
a/eth2/utils/merkle_proof/Cargo.toml +++ b/eth2/utils/merkle_proof/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "merkle_proof" -version = "0.1.0" +version = "0.2.0" authors = ["Michael Sproul "] edition = "2018" diff --git a/eth2/utils/remote_beacon_node/Cargo.toml b/eth2/utils/remote_beacon_node/Cargo.toml index ad1b8d469..cb4323578 100644 --- a/eth2/utils/remote_beacon_node/Cargo.toml +++ b/eth2/utils/remote_beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "remote_beacon_node" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner "] edition = "2018" @@ -12,7 +12,7 @@ url = "1.2" serde = "1.0" futures = "0.1.25" types = { path = "../../../eth2/types" } -rest_api = { path = "../../../beacon_node/rest_api" } +rest_types = { path = "../rest_types" } hex = "0.3" eth2_ssz = { path = "../../../eth2/utils/ssz" } serde_json = "^1.0" diff --git a/eth2/utils/remote_beacon_node/src/lib.rs b/eth2/utils/remote_beacon_node/src/lib.rs index 0727dc4ac..2b9af924f 100644 --- a/eth2/utils/remote_beacon_node/src/lib.rs +++ b/eth2/utils/remote_beacon_node/src/lib.rs @@ -14,17 +14,18 @@ use ssz::Encode; use std::marker::PhantomData; use std::time::Duration; use types::{ - Attestation, AttesterSlashing, BeaconBlock, BeaconState, CommitteeIndex, Epoch, EthSpec, Fork, - Hash256, ProposerSlashing, PublicKey, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, + Attestation, AttestationData, AttesterSlashing, BeaconBlock, BeaconState, CommitteeIndex, + Epoch, EthSpec, Fork, Hash256, ProposerSlashing, PublicKey, PublicKeyBytes, Signature, + SignedAggregateAndProof, SignedBeaconBlock, Slot, }; use url::Url; pub use operation_pool::PersistedOperationPool; pub use proto_array_fork_choice::core::ProtoArray; -pub use rest_api::{ +pub use rest_types::{ CanonicalHeadResponse, Committee, HeadBeaconBlock, IndividualVotesRequest, - IndividualVotesResponse, ValidatorDutiesRequest, ValidatorDuty, ValidatorRequest, - ValidatorResponse, + IndividualVotesResponse, SyncingResponse, 
ValidatorDutiesRequest, ValidatorDutyBytes, + ValidatorRequest, ValidatorResponse, ValidatorSubscription, }; // Setting a long timeout for debug ensures that crypto-heavy operations can still succeed. @@ -64,6 +65,8 @@ pub enum Error { SerdeJsonError(serde_json::Error), /// The server responded to the request, however it did not return a 200-type success code. DidNotSucceed { status: StatusCode, body: String }, + /// The request input was invalid. + InvalidInput, } #[derive(Clone)] @@ -171,7 +174,7 @@ pub enum PublishStatus { Valid, /// The object was not valid and may or may not have been published to the network. Invalid(String), - /// The server responsed with an unknown status code. The object may or may not have been + /// The server responded with an unknown status code. The object may or may not have been /// published to the network. Unknown, } @@ -183,7 +186,7 @@ impl PublishStatus { } } -/// Provides the functions on the `/beacon` endpoint of the node. +/// Provides the functions on the `/validator` endpoint of the node. #[derive(Clone)] pub struct Validator(HttpClient); @@ -212,13 +215,29 @@ impl Validator { .and_then(move |url| client.json_get(url, query_params)) } - /// Posts an attestation to the beacon node, expecting it to verify it and publish it to the network. - pub fn publish_attestation( + /// Produces an aggregate attestation. + pub fn produce_aggregate_attestation( &self, - attestation: Attestation, + attestation_data: &AttestationData, + ) -> impl Future, Error = Error> { + let query_params = vec![( + "attestation_data".into(), + as_ssz_hex_string(attestation_data), + )]; + + let client = self.0.clone(); + self.url("aggregate_attestation") + .into_future() + .and_then(move |url| client.json_get(url, query_params)) + } + + /// Posts a list of attestations to the beacon node, expecting it to verify it and publish it to the network. 
+ pub fn publish_attestations( + &self, + attestation: Vec>, ) -> impl Future { let client = self.0.clone(); - self.url("attestation") + self.url("attestations") .into_future() .and_then(move |url| client.json_post::<_>(url, attestation)) .and_then(|mut response| { @@ -237,12 +256,37 @@ impl Validator { }) } + /// Posts a list of signed aggregates and proofs to the beacon node, expecting it to verify it and publish it to the network. + pub fn publish_aggregate_and_proof( + &self, + signed_aggregate_and_proofs: Vec>, + ) -> impl Future { + let client = self.0.clone(); + self.url("aggregate_and_proofs") + .into_future() + .and_then(move |url| client.json_post::<_>(url, signed_aggregate_and_proofs)) + .and_then(|mut response| { + response + .text() + .map(|text| (response, text)) + .map_err(Error::from) + }) + .and_then(|(response, text)| match response.status() { + StatusCode::OK => Ok(PublishStatus::Valid), + StatusCode::ACCEPTED => Ok(PublishStatus::Invalid(text)), + _ => response + .error_for_status() + .map_err(Error::from) + .map(|_| PublishStatus::Unknown), + }) + } + /// Returns the duties required of the given validator pubkeys in the given epoch. pub fn get_duties( &self, epoch: Epoch, validator_pubkeys: &[PublicKey], - ) -> impl Future, Error = Error> { + ) -> impl Future, Error = Error> { let client = self.0.clone(); let bulk_request = ValidatorDutiesRequest { @@ -297,11 +341,36 @@ impl Validator { url, vec![ ("slot".into(), format!("{}", slot.as_u64())), - ("randao_reveal".into(), signature_as_string(&randao_reveal)), + ("randao_reveal".into(), as_ssz_hex_string(&randao_reveal)), ], ) }) } + + /// Subscribes a list of validators to particular slots for attestation production/publication. 
+ pub fn subscribe( + &self, + subscriptions: Vec, + ) -> impl Future { + let client = self.0.clone(); + self.url("subscribe") + .into_future() + .and_then(move |url| client.json_post::<_>(url, subscriptions)) + .and_then(|mut response| { + response + .text() + .map(|text| (response, text)) + .map_err(Error::from) + }) + .and_then(|(response, text)| match response.status() { + StatusCode::OK => Ok(PublishStatus::Valid), + StatusCode::ACCEPTED => Ok(PublishStatus::Invalid(text)), + _ => response + .error_for_status() + .map_err(Error::from) + .map(|_| PublishStatus::Unknown), + }) + } } /// Provides the functions on the `/beacon` endpoint of the node. @@ -585,6 +654,13 @@ impl Node { .into_future() .and_then(move |url| client.json_get(url, vec![])) } + + pub fn syncing_status(&self) -> impl Future { + let client = self.0.clone(); + self.url("syncing") + .into_future() + .and_then(move |url| client.json_get(url, vec![])) + } } /// Provides the functions on the `/advanced` endpoint of the node. 
@@ -677,8 +753,8 @@ fn root_as_string(root: Hash256) -> String { format!("0x{:?}", root) } -fn signature_as_string(signature: &Signature) -> String { - format!("0x{}", hex::encode(signature.as_ssz_bytes())) +fn as_ssz_hex_string(item: &T) -> String { + format!("0x{}", hex::encode(item.as_ssz_bytes())) } impl From for Error { diff --git a/eth2/utils/rest_types/Cargo.toml b/eth2/utils/rest_types/Cargo.toml new file mode 100644 index 000000000..c2745c060 --- /dev/null +++ b/eth2/utils/rest_types/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "rest_types" +version = "0.2.0" +authors = ["Age Manning "] +edition = "2018" + +[dependencies] +types = { path = "../../types" } +eth2_ssz_derive = { path = "../ssz_derive" } +eth2_ssz = { path = "../ssz" } +eth2_hashing = { path = "../eth2_hashing" } +tree_hash = { path = "../tree_hash" } +state_processing = { path = "../../state_processing" } +bls = { path = "../bls" } +serde = { version = "1.0.102", features = ["derive"] } +rayon = "1.3.0" diff --git a/eth2/utils/rest_types/src/beacon.rs b/eth2/utils/rest_types/src/beacon.rs new file mode 100644 index 000000000..0a141ea28 --- /dev/null +++ b/eth2/utils/rest_types/src/beacon.rs @@ -0,0 +1,65 @@ +//! A collection of REST API types for interaction with the beacon node. + +use bls::PublicKeyBytes; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use types::beacon_state::EthSpec; +use types::{BeaconState, CommitteeIndex, Hash256, SignedBeaconBlock, Slot, Validator}; + +/// Information about a block that is at the head of a chain. May or may not represent the +/// canonical head. 
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] +pub struct HeadBeaconBlock { + pub beacon_block_root: Hash256, + pub beacon_block_slot: Slot, +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] +#[serde(bound = "T: EthSpec")] +pub struct BlockResponse { + pub root: Hash256, + pub beacon_block: SignedBeaconBlock, +} + +/// Information about the block and state that are at head of the beacon chain. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] +pub struct CanonicalHeadResponse { + pub slot: Slot, + pub block_root: Hash256, + pub state_root: Hash256, + pub finalized_slot: Slot, + pub finalized_block_root: Hash256, + pub justified_slot: Slot, + pub justified_block_root: Hash256, + pub previous_justified_slot: Slot, + pub previous_justified_block_root: Hash256, +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] +pub struct ValidatorResponse { + pub pubkey: PublicKeyBytes, + pub validator_index: Option, + pub balance: Option, + pub validator: Option, +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] +pub struct ValidatorRequest { + /// If set to `None`, uses the canonical head state. 
+ pub state_root: Option, + pub pubkeys: Vec, +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] +pub struct Committee { + pub slot: Slot, + pub index: CommitteeIndex, + pub committee: Vec, +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] +#[serde(bound = "T: EthSpec")] +pub struct StateResponse { + pub root: Hash256, + pub beacon_state: BeaconState, +} diff --git a/eth2/utils/rest_types/src/consensus.rs b/eth2/utils/rest_types/src/consensus.rs new file mode 100644 index 000000000..519b1ae24 --- /dev/null +++ b/eth2/utils/rest_types/src/consensus.rs @@ -0,0 +1,66 @@ +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use state_processing::per_epoch_processing::ValidatorStatus; +use types::{Epoch, PublicKeyBytes}; + +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] +pub struct IndividualVotesRequest { + pub epoch: Epoch, + pub pubkeys: Vec, +} + +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] +pub struct IndividualVote { + /// True if the validator has been slashed, ever. + pub is_slashed: bool, + /// True if the validator can withdraw in the current epoch. + pub is_withdrawable_in_current_epoch: bool, + /// True if the validator was active in the state's _current_ epoch. + pub is_active_in_current_epoch: bool, + /// True if the validator was active in the state's _previous_ epoch. + pub is_active_in_previous_epoch: bool, + /// The validator's effective balance in the _current_ epoch. + pub current_epoch_effective_balance_gwei: u64, + /// True if the validator had an attestation included in the _current_ epoch. + pub is_current_epoch_attester: bool, + /// True if the validator's beacon block root attestation for the first slot of the _current_ + /// epoch matches the block root known to the state. + pub is_current_epoch_target_attester: bool, + /// True if the validator had an attestation included in the _previous_ epoch. 
+ pub is_previous_epoch_attester: bool, + /// True if the validator's beacon block root attestation for the first slot of the _previous_ + /// epoch matches the block root known to the state. + pub is_previous_epoch_target_attester: bool, + /// True if the validator's beacon block root attestation in the _previous_ epoch at the + /// attestation's slot (`attestation_data.slot`) matches the block root known to the state. + pub is_previous_epoch_head_attester: bool, +} + +impl Into for ValidatorStatus { + fn into(self) -> IndividualVote { + IndividualVote { + is_slashed: self.is_slashed, + is_withdrawable_in_current_epoch: self.is_withdrawable_in_current_epoch, + is_active_in_current_epoch: self.is_active_in_current_epoch, + is_active_in_previous_epoch: self.is_active_in_previous_epoch, + current_epoch_effective_balance_gwei: self.current_epoch_effective_balance, + is_current_epoch_attester: self.is_current_epoch_attester, + is_current_epoch_target_attester: self.is_current_epoch_target_attester, + is_previous_epoch_attester: self.is_previous_epoch_attester, + is_previous_epoch_target_attester: self.is_previous_epoch_target_attester, + is_previous_epoch_head_attester: self.is_previous_epoch_head_attester, + } + } +} + +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] +pub struct IndividualVotesResponse { + /// The epoch which is considered the "current" epoch. + pub epoch: Epoch, + /// The validators public key. + pub pubkey: PublicKeyBytes, + /// The index of the validator in state.validators. + pub validator_index: Option, + /// Voting statistics for the validator, if they voted in the given epoch. + pub vote: Option, +} diff --git a/eth2/utils/rest_types/src/lib.rs b/eth2/utils/rest_types/src/lib.rs new file mode 100644 index 000000000..2c834f6e7 --- /dev/null +++ b/eth2/utils/rest_types/src/lib.rs @@ -0,0 +1,21 @@ +//! A collection of types used to pass data across the rest HTTP API. +//! +//! 
This is primarily used by the validator client and the beacon node rest API. + +mod beacon; +mod consensus; +mod node; +mod validator; + +pub use beacon::{ + BlockResponse, CanonicalHeadResponse, Committee, HeadBeaconBlock, StateResponse, + ValidatorRequest, ValidatorResponse, +}; + +pub use validator::{ + ValidatorDutiesRequest, ValidatorDuty, ValidatorDutyBytes, ValidatorSubscription, +}; + +pub use consensus::{IndividualVote, IndividualVotesRequest, IndividualVotesResponse}; + +pub use node::{SyncingResponse, SyncingStatus}; diff --git a/eth2/utils/rest_types/src/node.rs b/eth2/utils/rest_types/src/node.rs new file mode 100644 index 000000000..ecacacc1c --- /dev/null +++ b/eth2/utils/rest_types/src/node.rs @@ -0,0 +1,32 @@ +//! Collection of types for the /node HTTP +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use types::Slot; + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] +/// The current syncing status of the node. +pub struct SyncingStatus { + /// The starting slot of sync. + /// + /// For a finalized sync, this is the start slot of the current finalized syncing + /// chain. + /// + /// For head sync this is the last finalized slot. + pub starting_slot: Slot, + /// The current slot. + pub current_slot: Slot, + /// The highest known slot. For the current syncing chain. + /// + /// For a finalized sync, the target finalized slot. + /// For head sync, this is the highest known slot of all head chains. + pub highest_slot: Slot, +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Encode, Decode)] +/// The response for the /node/syncing HTTP GET. +pub struct SyncingResponse { + /// Is the node syncing. + pub is_syncing: bool, + /// The current sync status. 
+ pub sync_status: SyncingStatus, +} diff --git a/eth2/utils/rest_types/src/validator.rs b/eth2/utils/rest_types/src/validator.rs new file mode 100644 index 000000000..a3bd34e7e --- /dev/null +++ b/eth2/utils/rest_types/src/validator.rs @@ -0,0 +1,72 @@ +use bls::{PublicKey, PublicKeyBytes, Signature}; +use eth2_hashing::hash; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use std::convert::TryInto; +use types::{CommitteeIndex, Epoch, Slot}; + +/// A Validator duty with the validator public key represented a `PublicKeyBytes`. +pub type ValidatorDutyBytes = ValidatorDutyBase; +/// A validator duty with the pubkey represented as a `PublicKey`. +pub type ValidatorDuty = ValidatorDutyBase; + +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] +pub struct ValidatorDutyBase { + /// The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._ + pub validator_pubkey: T, + /// The validator's index in `state.validators` + pub validator_index: Option, + /// The slot at which the validator must attest. + pub attestation_slot: Option, + /// The index of the committee within `slot` of which the validator is a member. + pub attestation_committee_index: Option, + /// The position of the validator in the committee. + pub attestation_committee_position: Option, + /// The slots in which a validator must propose a block (can be empty). + pub block_proposal_slots: Vec, + /// This provides the modulo: `max(1, len(committee) // TARGET_AGGREGATORS_PER_COMMITTEE)` + /// which allows the validator client to determine if this duty requires the validator to be + /// aggregate attestations. + pub aggregator_modulo: Option, +} + +impl ValidatorDutyBase { + /// Given a `slot_signature` determines if the validator of this duty is an aggregator. 
+ // Note that we assume the signature is for the associated pubkey to avoid the signature + // verification + pub fn is_aggregator(&self, slot_signature: &Signature) -> bool { + if let Some(modulo) = self.aggregator_modulo { + let signature_hash = hash(&slot_signature.as_bytes()); + let signature_hash_int = u64::from_le_bytes( + signature_hash[0..8] + .try_into() + .expect("first 8 bytes of signature should always convert to fixed array"), + ); + signature_hash_int % modulo == 0 + } else { + false + } + } +} + +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] +pub struct ValidatorDutiesRequest { + pub epoch: Epoch, + pub pubkeys: Vec, +} + +/// A validator subscription, created when a validator subscribes to a slot to perform optional aggregation +/// duties. +#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] +pub struct ValidatorSubscription { + /// The validators index. + pub validator_index: u64, + /// The index of the committee within `slot` of which the validator is a member. Used by the + /// beacon node to quickly evaluate the associated `SubnetId`. + pub attestation_committee_index: CommitteeIndex, + /// The slot in which to subscribe. + pub slot: Slot, + /// If true, the validator is an aggregator and the beacon node should aggregate attestations + /// for this slot. 
+ pub is_aggregator: bool, +} diff --git a/eth2/utils/serde_hex/Cargo.toml b/eth2/utils/serde_hex/Cargo.toml index 06102f24e..b4d7bf619 100644 --- a/eth2/utils/serde_hex/Cargo.toml +++ b/eth2/utils/serde_hex/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "serde_hex" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner "] edition = "2018" diff --git a/eth2/utils/slot_clock/src/lib.rs b/eth2/utils/slot_clock/src/lib.rs index e9566a9a0..69e1b8cac 100644 --- a/eth2/utils/slot_clock/src/lib.rs +++ b/eth2/utils/slot_clock/src/lib.rs @@ -35,6 +35,9 @@ pub trait SlotClock: Send + Sync + Sized { /// Returns the duration between slots fn slot_duration(&self) -> Duration; + /// Returns the duration from now until `slot`. + fn duration_to_slot(&self, slot: Slot) -> Option; + /// Returns the duration until the next slot. fn duration_to_next_slot(&self) -> Option; diff --git a/eth2/utils/slot_clock/src/manual_slot_clock.rs b/eth2/utils/slot_clock/src/manual_slot_clock.rs index fc0e27e3a..d198e24e1 100644 --- a/eth2/utils/slot_clock/src/manual_slot_clock.rs +++ b/eth2/utils/slot_clock/src/manual_slot_clock.rs @@ -135,6 +135,10 @@ impl SlotClock for ManualSlotClock { self.slot_duration } + fn duration_to_slot(&self, slot: Slot) -> Option { + self.duration_to_slot(slot, *self.current_time.read()) + } + fn genesis_slot(&self) -> Slot { self.genesis_slot } diff --git a/eth2/utils/slot_clock/src/system_time_slot_clock.rs b/eth2/utils/slot_clock/src/system_time_slot_clock.rs index 81a5e5912..adfb68b25 100644 --- a/eth2/utils/slot_clock/src/system_time_slot_clock.rs +++ b/eth2/utils/slot_clock/src/system_time_slot_clock.rs @@ -44,6 +44,11 @@ impl SlotClock for SystemTimeSlotClock { self.clock.slot_duration() } + fn duration_to_slot(&self, slot: Slot) -> Option { + let now = SystemTime::now().duration_since(UNIX_EPOCH).ok()?; + self.clock.duration_to_slot(slot, now) + } + fn genesis_slot(&self) -> Slot { self.clock.genesis_slot() } diff --git 
a/eth2/utils/slot_clock/src/testing_slot_clock.rs b/eth2/utils/slot_clock/src/testing_slot_clock.rs deleted file mode 100644 index 7eaee4a1b..000000000 --- a/eth2/utils/slot_clock/src/testing_slot_clock.rs +++ /dev/null @@ -1,64 +0,0 @@ -use super::SlotClock; -use std::sync::RwLock; -use std::time::Duration; -use types::Slot; - -/// A slot clock where the slot is manually set instead of being determined by the system time. -/// -/// Useful for testing scenarios. -pub struct TestingSlotClock { - slot: RwLock, -} - -impl TestingSlotClock { - pub fn set_slot(&self, slot: u64) { - *self.slot.write().expect("TestingSlotClock poisoned.") = Slot::from(slot); - } - - pub fn advance_slot(&self) { - self.set_slot(self.now().unwrap().as_u64() + 1) - } -} - -impl SlotClock for TestingSlotClock { - fn new(genesis_slot: Slot, _genesis_duration: Duration, _slot_duration: Duration) -> Self { - TestingSlotClock { - slot: RwLock::new(genesis_slot), - } - } - - fn now(&self) -> Option { - let slot = *self.slot.read().expect("TestingSlotClock poisoned."); - Some(slot) - } - - /// Always returns a duration of 1 second. - fn duration_to_next_slot(&self) -> Option { - Some(Duration::from_secs(1)) - } - - /// Always returns a duration of `1 * slots_per_epoch` second. - fn duration_to_next_epoch(&self, slots_per_epoch: u64) -> Option { - Some(Duration::from_secs(slots_per_epoch)) - } - - /// Always returns a slot duration of 0 seconds. 
- fn slot_duration(&self) -> Duration { - Duration::from_secs(0) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_slot_now() { - let null = Duration::from_secs(0); - - let clock = TestingSlotClock::new(Slot::new(10), null, null); - assert_eq!(clock.now(), Some(Slot::new(10))); - clock.set_slot(123); - assert_eq!(clock.now(), Some(Slot::new(123))); - } -} diff --git a/eth2/utils/ssz_types/src/bitfield.rs b/eth2/utils/ssz_types/src/bitfield.rs index 974cdb228..4192a8a4f 100644 --- a/eth2/utils/ssz_types/src/bitfield.rs +++ b/eth2/utils/ssz_types/src/bitfield.rs @@ -302,7 +302,7 @@ impl Bitfield { /// Returns the value of the `i`'th bit. /// - /// Returns `None` if `i` is out-of-bounds of `self`. + /// Returns `Error` if `i` is out-of-bounds of `self`. pub fn get(&self, i: usize) -> Result { if i < self.len { let byte = self diff --git a/eth2/utils/swap_or_not_shuffle/Cargo.toml b/eth2/utils/swap_or_not_shuffle/Cargo.toml index a106ff87e..ef86d2569 100644 --- a/eth2/utils/swap_or_not_shuffle/Cargo.toml +++ b/eth2/utils/swap_or_not_shuffle/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "swap_or_not_shuffle" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner "] edition = "2018" diff --git a/eth2/utils/test_random_derive/Cargo.toml b/eth2/utils/test_random_derive/Cargo.toml index 4559befaf..494e9d8eb 100644 --- a/eth2/utils/test_random_derive/Cargo.toml +++ b/eth2/utils/test_random_derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test_random_derive" -version = "0.1.0" +version = "0.2.0" authors = ["thojest "] edition = "2018" description = "Procedural derive macros for implementation of TestRandom trait" diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 7dcf4f83d..61eba90c6 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner "] edition = "2018" @@ -27,3 +27,5 @@ dirs = "2.0" genesis 
= { path = "../beacon_node/genesis" } deposit_contract = { path = "../eth2/utils/deposit_contract" } tree_hash = { path = "../eth2/utils/tree_hash" } +clap_utils = { path = "../eth2/utils/clap_utils" } +eth2-libp2p = { path = "../beacon_node/eth2-libp2p" } diff --git a/lcli/src/check_deposit_data.rs b/lcli/src/check_deposit_data.rs index af2b6fb47..56f18f998 100644 --- a/lcli/src/check_deposit_data.rs +++ b/lcli/src/check_deposit_data.rs @@ -1,12 +1,12 @@ -use crate::helpers::{parse_hex_bytes, parse_u64}; use clap::ArgMatches; +use clap_utils::{parse_required, parse_ssz_required}; use deposit_contract::{decode_eth1_tx_data, DEPOSIT_DATA_LEN}; use tree_hash::TreeHash; use types::EthSpec; pub fn run(matches: &ArgMatches) -> Result<(), String> { - let rlp_bytes = parse_hex_bytes(matches, "deposit-data")?; - let amount = parse_u64(matches, "deposit-amount")?; + let rlp_bytes = parse_ssz_required::>(matches, "deposit-data")?; + let amount = parse_required(matches, "deposit-amount")?; if rlp_bytes.len() != DEPOSIT_DATA_LEN { return Err(format!( diff --git a/lcli/src/deploy_deposit_contract.rs b/lcli/src/deploy_deposit_contract.rs index 095da6b66..22fd84fc6 100644 --- a/lcli/src/deploy_deposit_contract.rs +++ b/lcli/src/deploy_deposit_contract.rs @@ -1,31 +1,35 @@ use clap::ArgMatches; +use clap_utils; +use deposit_contract::{ + testnet::{ABI, BYTECODE}, + CONTRACT_DEPLOY_GAS, +}; use environment::Environment; -use eth1_test_rig::DepositContract; -use std::fs::File; -use std::io::Read; +use futures::{Future, IntoFuture}; +use std::path::PathBuf; use types::EthSpec; -use web3::{transports::Http, Web3}; +use web3::{ + contract::{Contract, Options}, + transports::Ipc, + types::{Address, U256}, + Web3, +}; pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { - let confirmations = matches - .value_of("confirmations") - .ok_or_else(|| "Confirmations not specified")? 
- .parse::() - .map_err(|e| format!("Failed to parse confirmations: {}", e))?; + let eth1_ipc_path: PathBuf = clap_utils::parse_required(matches, "eth1-ipc")?; + let from_address: Address = clap_utils::parse_required(matches, "from-address")?; + let confirmations: usize = clap_utils::parse_required(matches, "confirmations")?; - let password = parse_password(matches)?; + let (_event_loop_handle, transport) = + Ipc::new(eth1_ipc_path).map_err(|e| format!("Unable to connect to eth1 IPC: {:?}", e))?; + let web3 = Web3::new(transport); - let endpoint = matches - .value_of("eth1-endpoint") - .ok_or_else(|| "eth1-endpoint not specified")?; - - let (_event_loop, transport) = Http::new(&endpoint).map_err(|e| { + let bytecode = String::from_utf8(BYTECODE.to_vec()).map_err(|e| { format!( - "Failed to start HTTP transport connected to ganache: {:?}", + "Unable to parse deposit contract bytecode as utf-8: {:?}", e ) })?; - let web3 = Web3::new(transport); // It's unlikely that this will be the _actual_ deployment block, however it'll be close // enough to serve our purposes. @@ -37,54 +41,26 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< .block_on(web3.eth().block_number()) .map_err(|e| format!("Failed to get block number: {}", e))?; - info!("Present eth1 block number is {}", deploy_block); + let address = env.runtime().block_on( + Contract::deploy(web3.eth(), &ABI) + .map_err(|e| format!("Unable to build contract deployer: {:?}", e))? 
+ .confirmations(confirmations) + .options(Options { + gas: Some(U256::from(CONTRACT_DEPLOY_GAS)), + ..Options::default() + }) + .execute(bytecode, (), from_address) + .into_future() + .map_err(|e| format!("Unable to execute deployment: {:?}", e)) + .and_then(|pending| { + pending.map_err(|e| format!("Unable to await pending contract: {:?}", e)) + }) + .map(|tx_receipt| tx_receipt.address()) + .map_err(|e| format!("Failed to execute deployment: {:?}", e)), + )?; - info!("Deploying the bytecode at https://github.com/sigp/unsafe-eth2-deposit-contract",); - - info!( - "Submitting deployment transaction, waiting for {} confirmations", - confirmations - ); - - let deposit_contract = env - .runtime() - .block_on(DepositContract::deploy_testnet( - web3, - confirmations, - password, - )) - .map_err(|e| format!("Failed to deploy contract: {}", e))?; - - info!( - "Deposit contract deployed. address: {}, deploy_block: {}", - deposit_contract.address(), - deploy_block - ); + println!("deposit_contract_address: {:?}", address); + println!("deposit_contract_deploy_block: {}", deploy_block); Ok(()) } - -pub fn parse_password(matches: &ArgMatches) -> Result, String> { - if let Some(password_path) = matches.value_of("password") { - Ok(Some( - File::open(password_path) - .map_err(|e| format!("Unable to open password file: {:?}", e)) - .and_then(|mut file| { - let mut password = String::new(); - file.read_to_string(&mut password) - .map_err(|e| format!("Unable to read password file to string: {:?}", e)) - .map(|_| password) - }) - .map(|password| { - // Trim the linefeed from the end. 
- if password.ends_with('\n') { - password[0..password.len() - 1].to_string() - } else { - password - } - })?, - )) - } else { - Ok(None) - } -} diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs new file mode 100644 index 000000000..2d6e685a6 --- /dev/null +++ b/lcli/src/generate_bootnode_enr.rs @@ -0,0 +1,60 @@ +use clap::ArgMatches; +use eth2_libp2p::{ + discovery::{build_enr, CombinedKey, Keypair, ENR_FILENAME}, + NetworkConfig, NETWORK_KEY_FILENAME, +}; +use std::convert::TryInto; +use std::fs; +use std::fs::File; +use std::io::Write; +use std::net::IpAddr; +use std::path::PathBuf; +use types::{EnrForkId, EthSpec}; + +pub fn run(matches: &ArgMatches) -> Result<(), String> { + let ip: IpAddr = clap_utils::parse_required(matches, "ip")?; + let udp_port: u16 = clap_utils::parse_required(matches, "udp-port")?; + let tcp_port: u16 = clap_utils::parse_required(matches, "tcp-port")?; + let output_dir: PathBuf = clap_utils::parse_required(matches, "output-dir")?; + + if output_dir.exists() { + return Err(format!( + "{:?} already exists, will not override", + output_dir + )); + } + + let mut config = NetworkConfig::default(); + config.enr_address = Some(ip); + config.enr_udp_port = Some(udp_port); + config.enr_tcp_port = Some(tcp_port); + + let local_keypair = Keypair::generate_secp256k1(); + let enr_key: CombinedKey = local_keypair + .clone() + .try_into() + .map_err(|e| format!("Unable to convert keypair: {:?}", e))?; + let enr = build_enr::(&enr_key, &config, EnrForkId::default()) + .map_err(|e| format!("Unable to create ENR: {:?}", e))?; + + fs::create_dir_all(&output_dir).map_err(|e| format!("Unable to create output-dir: {:?}", e))?; + + let mut enr_file = File::create(output_dir.join(ENR_FILENAME)) + .map_err(|e| format!("Unable to create {}: {:?}", ENR_FILENAME, e))?; + enr_file + .write_all(&enr.to_base64().as_bytes()) + .map_err(|e| format!("Unable to write ENR to {}: {:?}", ENR_FILENAME, e))?; + + let secret_bytes = match 
local_keypair { + Keypair::Secp256k1(key) => key.secret().to_bytes(), + _ => return Err("Key is not a secp256k1 key".into()), + }; + + let mut key_file = File::create(output_dir.join(NETWORK_KEY_FILENAME)) + .map_err(|e| format!("Unable to create {}: {:?}", NETWORK_KEY_FILENAME, e))?; + key_file + .write_all(&secret_bytes) + .map_err(|e| format!("Unable to write key to {}: {:?}", NETWORK_KEY_FILENAME, e))?; + + Ok(()) +} diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 4da64b826..c7e54badf 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -5,11 +5,10 @@ mod change_genesis_time; mod check_deposit_data; mod deploy_deposit_contract; mod eth1_genesis; -mod helpers; +mod generate_bootnode_enr; mod interop_genesis; mod new_testnet; mod parse_hex; -mod parse_ssz; mod refund_deposit_contract; mod transition_blocks; @@ -19,6 +18,7 @@ use log::Level; use parse_hex::run_parse_hex; use std::fs::File; use std::path::PathBuf; +use std::process; use std::time::{SystemTime, UNIX_EPOCH}; use transition_blocks::run_transition_blocks; use types::{test_utils::TestingBeaconStateBuilder, EthSpec, MainnetEthSpec, MinimalEthSpec}; @@ -28,8 +28,7 @@ fn main() { let matches = App::new("Lighthouse CLI Tool") .about( - "Performs various testing-related tasks, modelled after zcli. \ - by @protolambda.", + "Performs various testing-related tasks, including defining testnets.", ) .arg( Arg::with_name("spec") @@ -37,9 +36,19 @@ fn main() { .long("spec") .value_name("STRING") .takes_value(true) + .required(true) .possible_values(&["minimal", "mainnet"]) .default_value("mainnet") ) + .arg( + Arg::with_name("testnet-dir") + .short("d") + .long("testnet-dir") + .value_name("PATH") + .takes_value(true) + .global(true) + .help("The testnet dir. 
Defaults to ~/.lighthouse/testnet"), + ) .subcommand( SubCommand::with_name("genesis_yaml") .about("Generates a genesis YAML file") @@ -94,27 +103,6 @@ fn main() { .help("Path to output a SSZ file."), ), ) - .subcommand( - SubCommand::with_name("pretty-ssz") - .about("Parses a file of raw (not hex-encoded) SSZ bytes") - .arg( - Arg::with_name("type") - .index(1) - .value_name("TYPE") - .takes_value(true) - .required(true) - .possible_values(&["SignedBeaconBlock"]) - .help("The schema of the supplied SSZ."), - ) - .arg( - Arg::with_name("path") - .index(2) - .value_name("SSZ_FILE") - .takes_value(true) - .required(true) - .help("A file contains SSZ bytes"), - ), - ) .subcommand( SubCommand::with_name("pretty-hex") .about("Parses SSZ encoded as ASCII 0x-prefixed hex") @@ -140,13 +128,22 @@ fn main() { "Deploy a testing eth1 deposit contract.", ) .arg( - Arg::with_name("eth1-endpoint") + Arg::with_name("eth1-ipc") + .long("eth1-ipc") .short("e") - .long("eth1-endpoint") - .value_name("HTTP_SERVER") + .value_name("ETH1_IPC_PATH") + .help("Path to an Eth1 JSON-RPC IPC endpoint") .takes_value(true) - .default_value("http://localhost:8545") - .help("The URL to the eth1 JSON-RPC http API."), + .required(true) + ) + .arg( + Arg::with_name("from-address") + .long("from-address") + .short("f") + .value_name("FROM_ETH1_ADDRESS") + .help("The address that will submit the contract creation. 
Must be unlocked.") + .takes_value(true) + .required(true) ) .arg( Arg::with_name("confirmations") @@ -156,13 +153,6 @@ fn main() { .default_value("3") .help("The number of block confirmations before declaring the contract deployed."), ) - .arg( - Arg::with_name("password") - .long("password") - .value_name("FILE") - .takes_value(true) - .help("The password file to unlock the eth1 account (see --index)"), - ) ) .subcommand( SubCommand::with_name("refund-deposit-contract") @@ -170,37 +160,32 @@ fn main() { "Calls the steal() function on a testnet eth1 contract.", ) .arg( - Arg::with_name("testnet-dir") - .short("d") - .long("testnet-dir") - .value_name("PATH") - .takes_value(true) - .help("The testnet dir. Defaults to ~/.lighthouse/testnet"), - ) - .arg( - Arg::with_name("eth1-endpoint") + Arg::with_name("eth1-ipc") + .long("eth1-ipc") .short("e") - .long("eth1-endpoint") - .value_name("HTTP_SERVER") + .value_name("ETH1_IPC_PATH") + .help("Path to an Eth1 JSON-RPC IPC endpoint") .takes_value(true) - .default_value("http://localhost:8545") - .help("The URL to the eth1 JSON-RPC http API."), + .required(true) ) .arg( - Arg::with_name("password") - .long("password") - .value_name("FILE") + Arg::with_name("from-address") + .long("from-address") + .short("f") + .value_name("FROM_ETH1_ADDRESS") + .help("The address that will submit the contract creation. Must be unlocked.") .takes_value(true) - .help("The password file to unlock the eth1 account (see --index)"), + .required(true) ) .arg( - Arg::with_name("account-index") - .short("i") - .long("account-index") - .value_name("INDEX") + Arg::with_name("contract-address") + .long("contract-address") + .short("c") + .value_name("CONTRACT_ETH1_ADDRESS") + .help("The address of the contract to be refunded. 
Its owner must match + --from-address.") .takes_value(true) - .default_value("0") - .help("The eth1 accounts[] index which will send the transaction"), + .required(true) ) ) .subcommand( @@ -208,14 +193,6 @@ fn main() { .about( "Listens to the eth1 chain and finds the genesis beacon state", ) - .arg( - Arg::with_name("testnet-dir") - .short("d") - .long("testnet-dir") - .value_name("PATH") - .takes_value(true) - .help("The testnet dir. Defaults to ~/.lighthouse/testnet"), - ) .arg( Arg::with_name("eth1-endpoint") .short("e") @@ -231,14 +208,6 @@ fn main() { .about( "Produces an interop-compatible genesis state using deterministic keypairs", ) - .arg( - Arg::with_name("testnet-dir") - .short("d") - .long("testnet-dir") - .value_name("PATH") - .takes_value(true) - .help("The testnet dir. Defaults to ~/.lighthouse/testnet"), - ) .arg( Arg::with_name("validator-count") .long("validator-count") @@ -282,14 +251,8 @@ fn main() { .subcommand( SubCommand::with_name("new-testnet") .about( - "Produce a new testnet directory.", - ) - .arg( - Arg::with_name("testnet-dir") - .long("testnet-dir") - .value_name("DIRECTORY") - .takes_value(true) - .help("The output path for the new testnet directory. Defaults to ~/.lighthouse/testnet"), + "Produce a new testnet directory. 
If any of the optional flags are not + supplied the values will remain the default for the --spec flag", ) .arg( Arg::with_name("min-genesis-time") @@ -304,7 +267,6 @@ fn main() { .long("min-genesis-active-validator-count") .value_name("INTEGER") .takes_value(true) - .default_value("16384") .help("The number of validators required to trigger eth2 genesis."), ) .arg( @@ -312,7 +274,6 @@ fn main() { .long("min-genesis-delay") .value_name("SECONDS") .takes_value(true) - .default_value("3600") // 10 minutes .help("The delay between sufficient eth1 deposits and eth2 genesis."), ) .arg( @@ -320,7 +281,6 @@ fn main() { .long("min-deposit-amount") .value_name("GWEI") .takes_value(true) - .default_value("100000000") // 0.1 Eth .help("The minimum permitted deposit amount."), ) .arg( @@ -328,7 +288,6 @@ fn main() { .long("max-effective-balance") .value_name("GWEI") .takes_value(true) - .default_value("3200000000") // 3.2 Eth .help("The amount required to become a validator."), ) .arg( @@ -336,7 +295,6 @@ fn main() { .long("effective-balance-increment") .value_name("GWEI") .takes_value(true) - .default_value("100000000") // 0.1 Eth .help("The steps in effective balance calculation."), ) .arg( @@ -344,7 +302,6 @@ fn main() { .long("ejection-balance") .value_name("GWEI") .takes_value(true) - .default_value("1600000000") // 1.6 Eth .help("The balance at which a validator gets ejected."), ) .arg( @@ -352,7 +309,6 @@ fn main() { .long("eth1-follow-distance") .value_name("ETH1_BLOCKS") .takes_value(true) - .default_value("16") .help("The distance to follow behind the eth1 chain head."), ) .arg( @@ -360,7 +316,6 @@ fn main() { .long("genesis-fork-version") .value_name("HEX") .takes_value(true) - .default_value("0x01030307") // [1, 3, 3, 7] .help("Used to avoid reply attacks between testnets. 
Recommended to set to non-default."), ) @@ -369,7 +324,7 @@ fn main() { .long("deposit-contract-address") .value_name("ETH1_ADDRESS") .takes_value(true) - .default_value("0x0000000000000000000000000000000000000000") + .required(true) .help("The address of the deposit contract."), ) .arg( @@ -405,11 +360,55 @@ fn main() { function signature."), ) ) + .subcommand( + SubCommand::with_name("generate-bootnode-enr") + .about( + "Generates an ENR address to be used as a pre-genesis boot node..", + ) + .arg( + Arg::with_name("ip") + .long("ip") + .value_name("IP_ADDRESS") + .takes_value(true) + .required(true) + .help("The IP address to be included in the ENR and used for discovery"), + ) + .arg( + Arg::with_name("udp-port") + .long("udp-port") + .value_name("UDP_PORT") + .takes_value(true) + .required(true) + .help("The UDP port to be included in the ENR and used for discovery"), + ) + .arg( + Arg::with_name("tcp-port") + .long("tcp-port") + .value_name("TCP_PORT") + .takes_value(true) + .required(true) + .help("The TCP port to be included in the ENR and used for application comms"), + ) + .arg( + Arg::with_name("output-dir") + .long("output-dir") + .value_name("OUTPUT_DIRECTORY") + .takes_value(true) + .required(true) + .help("The directory in which to create the network dir"), + ) + ) .get_matches(); macro_rules! run_with_spec { ($env_builder: expr) => { - run($env_builder, &matches) + match run($env_builder, &matches) { + Ok(()) => process::exit(0), + Err(e) => { + println!("Failed to run lcli: {}", e); + process::exit(1) + } + } }; } @@ -424,14 +423,14 @@ fn main() { } } -fn run(env_builder: EnvironmentBuilder, matches: &ArgMatches) { +fn run(env_builder: EnvironmentBuilder, matches: &ArgMatches) -> Result<(), String> { let env = env_builder .multi_threaded_tokio_runtime() - .expect("should start tokio runtime") + .map_err(|e| format!("should start tokio runtime: {:?}", e))? 
.async_logger("trace", None) - .expect("should start null logger") + .map_err(|e| format!("should start null logger: {:?}", e))? .build() - .expect("should build env"); + .map_err(|e| format!("should build env: {:?}", e))?; match matches.subcommand() { ("genesis_yaml", Some(matches)) => { @@ -470,38 +469,34 @@ fn run(env_builder: EnvironmentBuilder, matches: &ArgMatches) { _ => unreachable!("guarded by slog possible_values"), }; info!("Genesis state YAML file created. Exiting successfully."); + Ok(()) } ("transition-blocks", Some(matches)) => run_transition_blocks::(matches) - .unwrap_or_else(|e| error!("Failed to transition blocks: {}", e)), - ("pretty-ssz", Some(sub_matches)) => { - let result = match matches.value_of("spec").expect("spec is required by slog") { - "minimal" => parse_ssz::run::(sub_matches), - "mainnet" => parse_ssz::run::(sub_matches), - _ => unreachable!("guarded by slog possible_values"), - }; - result.unwrap_or_else(|e| error!("Failed to run eth1-genesis command: {}", e)) + .map_err(|e| format!("Failed to transition blocks: {}", e)), + ("pretty-hex", Some(matches)) => { + run_parse_hex::(matches).map_err(|e| format!("Failed to pretty print hex: {}", e)) } - ("pretty-hex", Some(matches)) => run_parse_hex::(matches) - .unwrap_or_else(|e| error!("Failed to pretty print hex: {}", e)), ("deploy-deposit-contract", Some(matches)) => { deploy_deposit_contract::run::(env, matches) - .unwrap_or_else(|e| error!("Failed to run deploy-deposit-contract command: {}", e)) + .map_err(|e| format!("Failed to run deploy-deposit-contract command: {}", e)) } ("refund-deposit-contract", Some(matches)) => { refund_deposit_contract::run::(env, matches) - .unwrap_or_else(|e| error!("Failed to run refund-deposit-contract command: {}", e)) + .map_err(|e| format!("Failed to run refund-deposit-contract command: {}", e)) } ("eth1-genesis", Some(matches)) => eth1_genesis::run::(env, matches) - .unwrap_or_else(|e| error!("Failed to run eth1-genesis command: {}", e)), + 
.map_err(|e| format!("Failed to run eth1-genesis command: {}", e)), ("interop-genesis", Some(matches)) => interop_genesis::run::(env, matches) - .unwrap_or_else(|e| error!("Failed to run interop-genesis command: {}", e)), + .map_err(|e| format!("Failed to run interop-genesis command: {}", e)), ("change-genesis-time", Some(matches)) => change_genesis_time::run::(matches) - .unwrap_or_else(|e| error!("Failed to run change-genesis-time command: {}", e)), + .map_err(|e| format!("Failed to run change-genesis-time command: {}", e)), ("new-testnet", Some(matches)) => new_testnet::run::(matches) - .unwrap_or_else(|e| error!("Failed to run new_testnet command: {}", e)), + .map_err(|e| format!("Failed to run new_testnet command: {}", e)), ("check-deposit-data", Some(matches)) => check_deposit_data::run::(matches) - .unwrap_or_else(|e| error!("Failed to run check-deposit-data command: {}", e)), - (other, _) => error!("Unknown subcommand {}. See --help.", other), + .map_err(|e| format!("Failed to run check-deposit-data command: {}", e)), + ("generate-bootnode-enr", Some(matches)) => generate_bootnode_enr::run::(matches) + .map_err(|e| format!("Failed to run generate-bootnode-enr command: {}", e)), + (other, _) => Err(format!("Unknown subcommand {}. 
See --help.", other)), } } diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index 447bffdfc..44eb1ab65 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -1,8 +1,10 @@ -use crate::helpers::*; use clap::ArgMatches; +use clap_utils::{ + parse_optional, parse_path_with_default_in_home_dir, parse_required, parse_ssz_optional, +}; use eth2_testnet_config::Eth2TestnetConfig; use std::path::PathBuf; -use types::{EthSpec, YamlConfig}; +use types::{Address, EthSpec, YamlConfig}; pub fn run(matches: &ArgMatches) -> Result<(), String> { let testnet_dir_path = parse_path_with_default_in_home_dir( @@ -10,18 +12,8 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { "testnet-dir", PathBuf::from(".lighthouse/testnet"), )?; - let min_genesis_time = parse_u64_opt(matches, "min-genesis-time")?; - let min_genesis_delay = parse_u64(matches, "min-genesis-delay")?; - let min_genesis_active_validator_count = - parse_u64(matches, "min-genesis-active-validator-count")?; - let min_deposit_amount = parse_u64(matches, "min-deposit-amount")?; - let max_effective_balance = parse_u64(matches, "max-effective-balance")?; - let effective_balance_increment = parse_u64(matches, "effective-balance-increment")?; - let ejection_balance = parse_u64(matches, "ejection-balance")?; - let eth1_follow_distance = parse_u64(matches, "eth1-follow-distance")?; - let deposit_contract_deploy_block = parse_u64(matches, "deposit-contract-deploy-block")?; - let genesis_fork_version = parse_fork_opt(matches, "genesis-fork-version")?; - let deposit_contract_address = parse_address(matches, "deposit-contract-address")?; + let deposit_contract_address: Address = parse_required(matches, "deposit-contract-address")?; + let deposit_contract_deploy_block = parse_required(matches, "deposit-contract-deploy-block")?; if testnet_dir_path.exists() { return Err(format!( @@ -31,19 +23,29 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { } let mut spec = T::default_spec(); - if let 
Some(time) = min_genesis_time { - spec.min_genesis_time = time; - } else { - spec.min_genesis_time = time_now()?; + + // Update the spec value if the flag was defined. Otherwise, leave it as the default. + macro_rules! maybe_update { + ($flag: tt, $var: ident) => { + if let Some(val) = parse_optional(matches, $flag)? { + spec.$var = val + } + }; } - spec.min_deposit_amount = min_deposit_amount; - spec.min_genesis_active_validator_count = min_genesis_active_validator_count; - spec.max_effective_balance = max_effective_balance; - spec.effective_balance_increment = effective_balance_increment; - spec.ejection_balance = ejection_balance; - spec.eth1_follow_distance = eth1_follow_distance; - spec.min_genesis_delay = min_genesis_delay; - if let Some(v) = genesis_fork_version { + + maybe_update!("min-genesis-time", min_genesis_time); + maybe_update!("min-deposit-amount", min_deposit_amount); + maybe_update!( + "min-genesis-active-validator-count", + min_genesis_active_validator_count + ); + maybe_update!("max-effective-balance", max_effective_balance); + maybe_update!("effective-balance-increment", effective_balance_increment); + maybe_update!("ejection-balance", ejection_balance); + maybe_update!("eth1-follow-distance", eth1_follow_distance); + maybe_update!("min-genesis-delay", min_genesis_delay); + + if let Some(v) = parse_ssz_optional(matches, "genesis-fork-version")?
{ spec.genesis_fork_version = v; } diff --git a/lcli/src/refund_deposit_contract.rs b/lcli/src/refund_deposit_contract.rs index d413b7f5c..719a8ef1b 100644 --- a/lcli/src/refund_deposit_contract.rs +++ b/lcli/src/refund_deposit_contract.rs @@ -1,12 +1,10 @@ -use crate::deploy_deposit_contract::parse_password; use clap::ArgMatches; use environment::Environment; -use eth2_testnet_config::Eth2TestnetConfig; -use futures::{future, Future}; +use futures::Future; use std::path::PathBuf; use types::EthSpec; use web3::{ - transports::Http, + transports::Ipc, types::{Address, TransactionRequest, U256}, Web3, }; @@ -15,102 +13,28 @@ use web3::{ pub const STEAL_FN_SIGNATURE: &[u8] = &[0xcf, 0x7a, 0x89, 0x65]; pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { - let endpoint = matches - .value_of("eth1-endpoint") - .ok_or_else(|| "eth1-endpoint not specified")?; + let eth1_ipc_path: PathBuf = clap_utils::parse_required(matches, "eth1-ipc")?; + let from: Address = clap_utils::parse_required(matches, "from-address")?; + let contract_address: Address = clap_utils::parse_required(matches, "contract-address")?; - let account_index = matches - .value_of("account-index") - .ok_or_else(|| "No account-index".to_string())? 
- .parse::() - .map_err(|e| format!("Unable to parse account-index: {}", e))?; + let (_event_loop_handle, transport) = + Ipc::new(eth1_ipc_path).map_err(|e| format!("Unable to connect to eth1 IPC: {:?}", e))?; + let web3 = Web3::new(transport); - let password_opt = parse_password(matches)?; - - let testnet_dir = matches - .value_of("testnet-dir") - .ok_or_else(|| ()) - .and_then(|dir| dir.parse::().map_err(|_| ())) - .unwrap_or_else(|_| { - dirs::home_dir() - .map(|home| home.join(".lighthouse").join("testnet")) - .expect("should locate home directory") - }); - - let eth2_testnet_config: Eth2TestnetConfig = Eth2TestnetConfig::load(testnet_dir)?; - - let (_event_loop, transport) = Http::new(&endpoint).map_err(|e| { - format!( - "Failed to start HTTP transport connected to ganache: {:?}", - e - ) - })?; - - let web3_1 = Web3::new(transport); - let web3_2 = web3_1.clone(); - - // Convert from `types::Address` to `web3::types::Address`. - let deposit_contract = Address::from_slice( - eth2_testnet_config - .deposit_contract_address()? - .as_fixed_bytes(), - ); - - let future = web3_1 - .eth() - .accounts() - .map_err(|e| format!("Failed to get accounts: {:?}", e)) - .and_then(move |accounts| { - accounts - .get(account_index) - .cloned() - .ok_or_else(|| "Insufficient accounts for deposit".to_string()) - }) - .and_then(move |from_address| { - let future: Box + Send> = - if let Some(password) = password_opt { - // Unlock for only a single transaction. 
- let duration = None; - - let future = web3_1 - .personal() - .unlock_account(from_address, &password, duration) - .then(move |result| match result { - Ok(true) => Ok(from_address), - Ok(false) => Err("Eth1 node refused to unlock account".to_string()), - Err(e) => Err(format!("Eth1 unlock request failed: {:?}", e)), - }); - - Box::new(future) - } else { - Box::new(future::ok(from_address)) - }; - - future - }) - .and_then(move |from| { - let tx_request = TransactionRequest { + env.runtime().block_on( + web3.eth() + .send_transaction(TransactionRequest { from, - to: Some(deposit_contract), + to: Some(contract_address), gas: Some(U256::from(400_000)), gas_price: None, value: Some(U256::zero()), data: Some(STEAL_FN_SIGNATURE.into()), nonce: None, condition: None, - }; - - web3_2 - .eth() - .send_transaction(tx_request) - .map_err(|e| format!("Failed to call deposit fn: {:?}", e)) - }) - .map(move |tx| info!("Refund transaction submitted: eth1_tx_hash: {:?}", tx)) - .map_err(move |e| error!("Unable to submit refund transaction: error: {}", e)); - - env.runtime() - .block_on(future) - .map_err(|()| "Failed to send transaction".to_string())?; + }) + .map_err(|e| format!("Failed to call deposit fn: {:?}", e)), + )?; Ok(()) } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 134f6719f..6f7527694 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "0.1.1" +version = "0.2.0" authors = ["Sigma Prime "] edition = "2018" @@ -19,3 +19,4 @@ environment = { path = "./environment" } futures = "0.1.25" validator_client = { "path" = "../validator_client" } account_manager = { "path" = "../account_manager" } +clap_utils = { path = "../eth2/utils/clap_utils" } diff --git a/lighthouse/environment/Cargo.toml b/lighthouse/environment/Cargo.toml index 4d9a39e40..7f4c25094 100644 --- a/lighthouse/environment/Cargo.toml +++ b/lighthouse/environment/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "environment" 
-version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner "] edition = "2018" diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 08c6d0a5d..20e54b097 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -28,6 +28,7 @@ pub struct EnvironmentBuilder { log: Option, eth_spec_instance: E, eth2_config: Eth2Config, + testnet: Option>, } impl EnvironmentBuilder { @@ -38,6 +39,7 @@ impl EnvironmentBuilder { log: None, eth_spec_instance: MinimalEthSpec, eth2_config: Eth2Config::minimal(), + testnet: None, } } } @@ -50,6 +52,7 @@ impl EnvironmentBuilder { log: None, eth_spec_instance: MainnetEthSpec, eth2_config: Eth2Config::mainnet(), + testnet: None, } } } @@ -62,6 +65,7 @@ impl EnvironmentBuilder { log: None, eth_spec_instance: InteropEthSpec, eth2_config: Eth2Config::interop(), + testnet: None, } } } @@ -140,7 +144,7 @@ impl EnvironmentBuilder { /// Setups eth2 config using the CLI arguments. pub fn eth2_testnet_config( mut self, - eth2_testnet_config: &Eth2TestnetConfig, + eth2_testnet_config: Eth2TestnetConfig, ) -> Result { // Create a new chain spec from the default configuration. 
self.eth2_config.spec = eth2_testnet_config @@ -155,6 +159,8 @@ impl EnvironmentBuilder { ) })?; + self.testnet = Some(eth2_testnet_config); + Ok(self) } @@ -169,6 +175,7 @@ impl EnvironmentBuilder { .ok_or_else(|| "Cannot build environment without log".to_string())?, eth_spec_instance: self.eth_spec_instance, eth2_config: self.eth2_config, + testnet: self.testnet, }) } } @@ -211,6 +218,7 @@ pub struct Environment { log: Logger, eth_spec_instance: E, pub eth2_config: Eth2Config, + pub testnet: Option>, } impl Environment { diff --git a/lighthouse/environment/tests/environment_builder.rs b/lighthouse/environment/tests/environment_builder.rs index 7ed4f4e61..17f7c4310 100644 --- a/lighthouse/environment/tests/environment_builder.rs +++ b/lighthouse/environment/tests/environment_builder.rs @@ -1,8 +1,9 @@ -#![cfg(test)] /* * * TODO: disabled until hardcoded testnet config is updated for v0.11 * + * +#![cfg(test)] use clap::ArgMatches; use environment::EnvironmentBuilder; diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index 5cf616c95..dbb3c9039 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -1,8 +1,9 @@ #[macro_use] extern crate clap; -use beacon_node::{get_eth2_testnet_config, get_testnet_dir, ProductionBeaconNode}; +use beacon_node::ProductionBeaconNode; use clap::{App, Arg, ArgMatches}; +use clap_utils; use env_logger::{Builder, Env}; use environment::EnvironmentBuilder; use slog::{crit, info, warn}; @@ -59,7 +60,7 @@ fn main() { Arg::with_name("debug-level") .long("debug-level") .value_name("LEVEL") - .help("The title of the spec constants for chain config.") + .help("The verbosity level for emitting logs.") .takes_value(true) .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) .default_value("info"), @@ -123,12 +124,13 @@ fn run( .ok_or_else(|| "Expected --debug-level flag".to_string())?; let log_format = matches.value_of("log-format"); - let eth2_testnet_config = 
get_eth2_testnet_config(&get_testnet_dir(matches))?; + let eth2_testnet_config = + clap_utils::parse_testnet_dir_with_hardcoded_default(matches, "testnet-dir")?; let mut environment = environment_builder .async_logger(debug_level, log_format)? .multi_threaded_tokio_runtime()? - .eth2_testnet_config(ð2_testnet_config)? + .eth2_testnet_config(eth2_testnet_config)? .build()?; let log = environment.core_context().log; @@ -164,7 +166,7 @@ fn run( if let Some(sub_matches) = matches.subcommand_matches("account_manager") { // Pass the entire `environment` to the account manager so it can run blocking operations. - account_manager::run(sub_matches, environment); + account_manager::run(sub_matches, environment)?; // Exit as soon as account manager returns control. return Ok(()); diff --git a/tests/ef_tests/Cargo.toml b/tests/ef_tests/Cargo.toml index e893ea8e2..77aabac04 100644 --- a/tests/ef_tests/Cargo.toml +++ b/tests/ef_tests/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ef_tests" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner "] edition = "2018" diff --git a/tests/eth1_test_rig/Cargo.toml b/tests/eth1_test_rig/Cargo.toml index 8038fd51d..11552f463 100644 --- a/tests/eth1_test_rig/Cargo.toml +++ b/tests/eth1_test_rig/Cargo.toml @@ -1,11 +1,11 @@ [package] name = "eth1_test_rig" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner "] edition = "2018" [dependencies] -web3 = "0.8.0" +web3 = "0.10.0" tokio = "0.1.22" futures = "0.1.25" types = { path = "../../eth2/types"} diff --git a/tests/node_test_rig/Cargo.toml b/tests/node_test_rig/Cargo.toml index c8e1ebcc6..013cbe027 100644 --- a/tests/node_test_rig/Cargo.toml +++ b/tests/node_test_rig/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node_test_rig" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner "] edition = "2018" diff --git a/tests/node_test_rig/src/lib.rs b/tests/node_test_rig/src/lib.rs index fafdb887f..9cfcc7eea 100644 --- a/tests/node_test_rig/src/lib.rs +++ 
b/tests/node_test_rig/src/lib.rs @@ -91,7 +91,7 @@ pub fn testing_client_config() -> ClientConfig { client_config } -/// Provids a validator client that is running in the current process on a given tokio executor (it +/// Provides a validator client that is running in the current process on a given tokio executor (it /// is _local_ to this process). /// /// Intended for use in testing and simulation. Not for production. diff --git a/tests/simulator/Cargo.toml b/tests/simulator/Cargo.toml index 189c24f63..65b661470 100644 --- a/tests/simulator/Cargo.toml +++ b/tests/simulator/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "simulator" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner "] edition = "2018" diff --git a/tests/simulator/src/cli.rs b/tests/simulator/src/cli.rs index dbc6bca07..444e67010 100644 --- a/tests/simulator/src/cli.rs +++ b/tests/simulator/src/cli.rs @@ -6,7 +6,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .author("Sigma Prime ") .about("Options for interacting with simulator") .subcommand( - SubCommand::with_name("beacon-chain-sim") + SubCommand::with_name("eth1-sim") .about( "Lighthouse Beacon Chain Simulator creates `n` beacon node and validator clients, \ each with `v` validators. 
A deposit contract is deployed at the start of the \ @@ -22,17 +22,48 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .short("n") .long("nodes") .takes_value(true) - .help("Number of beacon nodes (default 4)")) + .default_value("4") + .help("Number of beacon nodes")) .arg(Arg::with_name("validators_per_node") .short("v") .long("validators_per_node") .takes_value(true) - .help("Number of validators (default 20)")) + .default_value("20") + .help("Number of validators")) .arg(Arg::with_name("speed_up_factor") .short("s") .long("speed_up_factor") .takes_value(true) - .help("Speed up factor (default 4)")) + .default_value("4") + .help("Speed up factor")) + .arg(Arg::with_name("end_after_checks") + .short("e") + .long("end_after_checks") + .takes_value(false) + .help("End after checks (default true)")) + ) + .subcommand( + SubCommand::with_name("no-eth1-sim") + .about("Runs a simulator that bypasses the eth1 chain. Useful for faster testing of + components that don't rely upon eth1") + .arg(Arg::with_name("nodes") + .short("n") + .long("nodes") + .takes_value(true) + .default_value("4") + .help("Number of beacon nodes")) + .arg(Arg::with_name("validators_per_node") + .short("v") + .long("validators_per_node") + .takes_value(true) + .default_value("20") + .help("Number of validators")) + .arg(Arg::with_name("speed_up_factor") + .short("s") + .long("speed_up_factor") + .takes_value(true) + .default_value("4") + .help("Speed up factor")) .arg(Arg::with_name("end_after_checks") .short("e") .long("end_after_checks") @@ -47,27 +78,31 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .short("s") .long("speedup") .takes_value(true) - .help("Speed up factor for eth1 blocks and slot production (default 15)"), + .default_value("15") + .help("Speed up factor for eth1 blocks and slot production"), ) .arg( Arg::with_name("initial_delay") .short("i") .long("initial_delay") .takes_value(true) - .help("Epoch delay for new beacon node to start syncing (default 50)"), + .default_value("5") + 
.help("Epoch delay for new beacon node to start syncing"), ) .arg( Arg::with_name("sync_timeout") .long("sync_timeout") .takes_value(true) - .help("Number of epochs after which newly added beacon nodes must be synced (default 10)"), + .default_value("10") + .help("Number of epochs after which newly added beacon nodes must be synced"), ) .arg( Arg::with_name("strategy") .long("strategy") .takes_value(true) + .default_value("all") .possible_values(&["one-node", "two-nodes", "mixed", "all"]) - .help("Sync verification strategy to run. (default all)"), + .help("Sync verification strategy to run."), ), ) } diff --git a/tests/simulator/src/eth1_sim.rs b/tests/simulator/src/eth1_sim.rs new file mode 100644 index 000000000..3413701c5 --- /dev/null +++ b/tests/simulator/src/eth1_sim.rs @@ -0,0 +1,208 @@ +use crate::{checks, LocalNetwork, E}; +use clap::ArgMatches; +use eth1_test_rig::GanacheEth1Instance; +use futures::{future, stream, Future, Stream}; +use node_test_rig::{ + environment::EnvironmentBuilder, testing_client_config, ClientGenesis, ValidatorConfig, +}; +use std::net::{IpAddr, Ipv4Addr}; +use std::time::{Duration, Instant}; +use tokio::timer::Interval; + +pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { + let node_count = value_t!(matches, "nodes", usize).expect("missing nodes default"); + let validators_per_node = value_t!(matches, "validators_per_node", usize) + .expect("missing validators_per_node default"); + let speed_up_factor = + value_t!(matches, "speed_up_factor", u64).expect("missing speed_up_factor default"); + let mut end_after_checks = true; + if matches.is_present("end_after_checks") { + end_after_checks = false; + } + + println!("Beacon Chain Simulator:"); + println!(" nodes:{}", node_count); + println!(" validators_per_node:{}", validators_per_node); + println!(" end_after_checks:{}", end_after_checks); + + let log_level = "debug"; + let log_format = None; + + let mut env = EnvironmentBuilder::minimal() + .async_logger(log_level, 
log_format)? + .multi_threaded_tokio_runtime()? + .build()?; + + let eth1_block_time = Duration::from_millis(15_000 / speed_up_factor); + + let spec = &mut env.eth2_config.spec; + + spec.milliseconds_per_slot /= speed_up_factor; + spec.eth1_follow_distance = 16; + spec.min_genesis_delay = eth1_block_time.as_secs() * spec.eth1_follow_distance * 2; + spec.min_genesis_time = 0; + spec.min_genesis_active_validator_count = 64; + spec.seconds_per_eth1_block = 1; + + let slot_duration = Duration::from_millis(spec.milliseconds_per_slot); + let initial_validator_count = spec.min_genesis_active_validator_count as usize; + let total_validator_count = validators_per_node * node_count; + let deposit_amount = env.eth2_config.spec.max_effective_balance; + + let context = env.core_context(); + let executor = context.executor.clone(); + + let future = GanacheEth1Instance::new() + /* + * Deploy the deposit contract, spawn tasks to keep creating new blocks and deposit + * validators. + */ + .map(move |ganache_eth1_instance| { + let deposit_contract = ganache_eth1_instance.deposit_contract; + let ganache = ganache_eth1_instance.ganache; + let eth1_endpoint = ganache.endpoint(); + let deposit_contract_address = deposit_contract.address(); + + // Start a timer that produces eth1 blocks on an interval. + executor.spawn( + Interval::new(Instant::now(), eth1_block_time) + .map_err(|_| eprintln!("Eth1 block timer failed")) + .for_each(move |_| ganache.evm_mine().map_err(|_| ())) + .map_err(|_| eprintln!("Eth1 evm_mine failed")) + .map(|_| ()), + ); + + // Submit deposits to the deposit contract. 
+ executor.spawn( + stream::unfold(0..total_validator_count, move |mut iter| { + iter.next().map(|i| { + println!("Submitting deposit for validator {}...", i); + deposit_contract + .deposit_deterministic_async::(i, deposit_amount) + .map(|_| ((), iter)) + }) + }) + .collect() + .map(|_| ()) + .map_err(|e| eprintln!("Error submitting deposit: {}", e)), + ); + + let mut beacon_config = testing_client_config(); + + beacon_config.genesis = ClientGenesis::DepositContract; + beacon_config.eth1.endpoint = eth1_endpoint; + beacon_config.eth1.deposit_contract_address = deposit_contract_address; + beacon_config.eth1.deposit_contract_deploy_block = 0; + beacon_config.eth1.lowest_cached_block_number = 0; + beacon_config.eth1.follow_distance = 1; + beacon_config.dummy_eth1_backend = false; + beacon_config.sync_eth1_chain = true; + + beacon_config.network.enr_address = Some(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))); + + beacon_config + }) + /* + * Create a new `LocalNetwork` with one beacon node. + */ + .and_then(move |beacon_config| { + LocalNetwork::new(context, beacon_config.clone()) + .map(|network| (network, beacon_config)) + }) + /* + * One by one, add beacon nodes to the network. + */ + .and_then(move |(network, beacon_config)| { + let network_1 = network.clone(); + + stream::unfold(0..node_count - 1, move |mut iter| { + iter.next().map(|_| { + network_1 + .add_beacon_node(beacon_config.clone()) + .map(|()| ((), iter)) + }) + }) + .collect() + .map(|_| network) + }) + /* + * One by one, add validator clients to the network. Each validator client is attached to + * a single corresponding beacon node. + */ + .and_then(move |network| { + let network_1 = network.clone(); + + // Note: presently the validator client future will only resolve once genesis time + // occurs. This is great for this scenario, but likely to change in the future. + // + // If the validator client future behaviour changes, we would need to add a new future + // that delays until genesis. 
Otherwise, all of the checks that start in the next + // future will start too early. + + stream::unfold(0..node_count, move |mut iter| { + iter.next().map(|i| { + let indices = (i * validators_per_node..(i + 1) * validators_per_node) + .collect::>(); + + network_1 + .add_validator_client(ValidatorConfig::default(), i, indices) + .map(|()| ((), iter)) + }) + }) + .collect() + .map(|_| network) + }) + /* + * Start the processes that will run checks on the network as it runs. + */ + .and_then(move |network| { + // The `final_future` either completes immediately or never completes, depending on the value + // of `end_after_checks`. + let final_future: Box + Send> = + if end_after_checks { + Box::new(future::ok(()).map_err(|()| "".to_string())) + } else { + Box::new(future::empty().map_err(|()| "".to_string())) + }; + + future::ok(()) + // Check that the chain finalizes at the first given opportunity. + .join(checks::verify_first_finalization( + network.clone(), + slot_duration, + )) + // Check that the chain starts with the expected validator count. + .join(checks::verify_initial_validator_count( + network.clone(), + slot_duration, + initial_validator_count, + )) + // Check that validators greater than `spec.min_genesis_active_validator_count` are + // onboarded at the first possible opportunity. + .join(checks::verify_validator_onboarding( + network.clone(), + slot_duration, + total_validator_count, + )) + // End now or run forever, depending on the `end_after_checks` flag. + .join(final_future) + .map(|_| network) + }) + /* + * End the simulation by dropping the network. This will kill all running beacon nodes and + * validator clients. + */ + .map(|network| { + println!( + "Simulation complete. Finished with {} beacon nodes and {} validator clients", + network.beacon_node_count(), + network.validator_client_count() + ); + + // Be explicit about dropping the network, as this kills all the nodes. This ensures + // all the checks have adequate time to pass. 
+ drop(network) + }); + + env.runtime().block_on(future) +} diff --git a/tests/simulator/src/local_network.rs b/tests/simulator/src/local_network.rs index ad01eaf25..22ffc4d36 100644 --- a/tests/simulator/src/local_network.rs +++ b/tests/simulator/src/local_network.rs @@ -46,9 +46,10 @@ impl LocalNetwork { context: RuntimeContext, mut beacon_config: ClientConfig, ) -> impl Future { - // Fix bootnode ports beacon_config.network.discovery_port = BOOTNODE_PORT; beacon_config.network.libp2p_port = BOOTNODE_PORT; + beacon_config.network.enr_udp_port = Some(BOOTNODE_PORT); + beacon_config.network.enr_tcp_port = Some(BOOTNODE_PORT); LocalBeaconNode::production(context.service_context("boot_node".into()), beacon_config).map( |beacon_node| Self { inner: Arc::new(Inner { diff --git a/tests/simulator/src/main.rs b/tests/simulator/src/main.rs index 21946641e..742f11087 100644 --- a/tests/simulator/src/main.rs +++ b/tests/simulator/src/main.rs @@ -18,21 +18,14 @@ extern crate clap; mod checks; mod cli; +mod eth1_sim; mod local_network; +mod no_eth1_sim; mod sync_sim; -use clap::ArgMatches; use cli::cli_app; use env_logger::{Builder, Env}; -use eth1_test_rig::GanacheEth1Instance; -use futures::{future, stream, Future, Stream}; use local_network::LocalNetwork; -use node_test_rig::{ - environment::EnvironmentBuilder, testing_client_config, ClientGenesis, ValidatorConfig, -}; -use std::time::{Duration, Instant}; -use sync_sim::*; -use tokio::timer::Interval; use types::MinimalEthSpec; pub type E = MinimalEthSpec; @@ -43,14 +36,21 @@ fn main() { let matches = cli_app().get_matches(); match matches.subcommand() { - ("beacon-chain-sim", Some(matches)) => match run_beacon_chain_sim(matches) { + ("eth1-sim", Some(matches)) => match eth1_sim::run_eth1_sim(matches) { Ok(()) => println!("Simulation exited successfully"), Err(e) => { eprintln!("Simulation exited with error: {}", e); std::process::exit(1) } }, - ("syncing-sim", Some(matches)) => match run_syncing_sim(matches) { + 
("no-eth1-sim", Some(matches)) => match no_eth1_sim::run_no_eth1_sim(matches) { + Ok(()) => println!("Simulation exited successfully"), + Err(e) => { + eprintln!("Simulation exited with error: {}", e); + std::process::exit(1) + } + }, + ("syncing-sim", Some(matches)) => match sync_sim::run_syncing_sim(matches) { Ok(()) => println!("Simulation exited successfully"), Err(e) => { eprintln!("Simulation exited with error: {}", e); @@ -63,319 +63,3 @@ fn main() { } } } - -fn run_beacon_chain_sim(matches: &ArgMatches) -> Result<(), String> { - let nodes = value_t!(matches, "nodes", usize).unwrap_or(4); - let validators_per_node = value_t!(matches, "validators_per_node", usize).unwrap_or(20); - let speed_up_factor = value_t!(matches, "nodes", u64).unwrap_or(4); - let mut end_after_checks = true; - if matches.is_present("end_after_checks") { - end_after_checks = false; - } - - println!("Beacon Chain Simulator:"); - println!(" nodes:{}", nodes); - println!(" validators_per_node:{}", validators_per_node); - println!(" end_after_checks:{}", end_after_checks); - - let log_level = "debug"; - let log_format = None; - - beacon_chain_sim( - nodes, - validators_per_node, - speed_up_factor, - log_level, - log_format, - end_after_checks, - ) -} - -fn run_syncing_sim(matches: &ArgMatches) -> Result<(), String> { - let initial_delay = value_t!(matches, "initial_delay", u64).unwrap_or(50); - let sync_timeout = value_t!(matches, "sync_timeout", u64).unwrap_or(10); - let speed_up_factor = value_t!(matches, "speedup", u64).unwrap_or(15); - let strategy = value_t!(matches, "strategy", String).unwrap_or("all".into()); - - println!("Syncing Simulator:"); - println!(" initial delay:{}", initial_delay); - println!(" sync timeout:{}", sync_timeout); - println!(" speed up factor:{}", speed_up_factor); - println!(" strategy:{}", strategy); - - let log_level = "debug"; - let log_format = None; - - syncing_sim( - speed_up_factor, - initial_delay, - sync_timeout, - strategy, - log_level, - log_format, 
- ) -} - -fn syncing_sim( - speed_up_factor: u64, - initial_delay: u64, - sync_timeout: u64, - strategy: String, - log_level: &str, - log_format: Option<&str>, -) -> Result<(), String> { - let mut env = EnvironmentBuilder::minimal() - .async_logger(log_level, log_format)? - .multi_threaded_tokio_runtime()? - .build()?; - - let spec = &mut env.eth2_config.spec; - let end_after_checks = true; - - spec.milliseconds_per_slot = spec.milliseconds_per_slot / speed_up_factor; - spec.min_genesis_time = 0; - spec.min_genesis_active_validator_count = 16; - - let slot_duration = Duration::from_millis(spec.milliseconds_per_slot); - - let context = env.core_context(); - let beacon_config = testing_client_config(); - let num_validators = 8; - let future = LocalNetwork::new(context, beacon_config.clone()) - /* - * Add a validator client which handles all validators from the genesis state. - */ - .and_then(move |network| { - network - .add_validator_client(ValidatorConfig::default(), 0, (0..num_validators).collect()) - .map(|_| network) - }) - /* - * Start the processes that will run checks on the network as it runs. - */ - .and_then(move |network| { - // The `final_future` either completes immediately or never completes, depending on the value - // of `end_after_checks`. - let final_future: Box + Send> = - if end_after_checks { - Box::new(future::ok(()).map_err(|()| "".to_string())) - } else { - Box::new(future::empty().map_err(|()| "".to_string())) - }; - - future::ok(()) - // Check all syncing strategies one after other. - .join(pick_strategy( - &strategy, - network.clone(), - beacon_config.clone(), - slot_duration, - initial_delay, - sync_timeout, - )) - .join(final_future) - .map(|_| network) - }) - /* - * End the simulation by dropping the network. This will kill all running beacon nodes and - * validator clients. - */ - .map(|network| { - println!( - "Simulation complete. 
Finished with {} beacon nodes and {} validator clients", - network.beacon_node_count(), - network.validator_client_count() - ); - - // Be explicit about dropping the network, as this kills all the nodes. This ensures - // all the checks have adequate time to pass. - drop(network) - }); - - env.runtime().block_on(future) -} - -fn beacon_chain_sim( - node_count: usize, - validators_per_node: usize, - speed_up_factor: u64, - log_level: &str, - log_format: Option<&str>, - end_after_checks: bool, -) -> Result<(), String> { - let mut env = EnvironmentBuilder::minimal() - .async_logger(log_level, log_format)? - .multi_threaded_tokio_runtime()? - .build()?; - - let eth1_block_time = Duration::from_millis(15_000 / speed_up_factor); - - let spec = &mut env.eth2_config.spec; - - spec.milliseconds_per_slot /= speed_up_factor; - spec.eth1_follow_distance = 16; - spec.min_genesis_delay = eth1_block_time.as_secs() * spec.eth1_follow_distance * 2; - spec.min_genesis_time = 0; - spec.min_genesis_active_validator_count = 64; - spec.seconds_per_eth1_block = 1; - - let slot_duration = Duration::from_millis(spec.milliseconds_per_slot); - let initial_validator_count = spec.min_genesis_active_validator_count as usize; - let total_validator_count = validators_per_node * node_count; - let deposit_amount = env.eth2_config.spec.max_effective_balance; - - let context = env.core_context(); - let executor = context.executor.clone(); - - let future = GanacheEth1Instance::new() - /* - * Deploy the deposit contract, spawn tasks to keep creating new blocks and deposit - * validators. - */ - .map(move |ganache_eth1_instance| { - let deposit_contract = ganache_eth1_instance.deposit_contract; - let ganache = ganache_eth1_instance.ganache; - let eth1_endpoint = ganache.endpoint(); - let deposit_contract_address = deposit_contract.address(); - - // Start a timer that produces eth1 blocks on an interval. 
- executor.spawn( - Interval::new(Instant::now(), eth1_block_time) - .map_err(|_| eprintln!("Eth1 block timer failed")) - .for_each(move |_| ganache.evm_mine().map_err(|_| ())) - .map_err(|_| eprintln!("Eth1 evm_mine failed")) - .map(|_| ()), - ); - - // Submit deposits to the deposit contract. - executor.spawn( - stream::unfold(0..total_validator_count, move |mut iter| { - iter.next().map(|i| { - println!("Submitting deposit for validator {}...", i); - deposit_contract - .deposit_deterministic_async::(i, deposit_amount) - .map(|_| ((), iter)) - }) - }) - .collect() - .map(|_| ()) - .map_err(|e| eprintln!("Error submitting deposit: {}", e)), - ); - - let mut beacon_config = testing_client_config(); - - beacon_config.genesis = ClientGenesis::DepositContract; - beacon_config.eth1.endpoint = eth1_endpoint; - beacon_config.eth1.deposit_contract_address = deposit_contract_address; - beacon_config.eth1.deposit_contract_deploy_block = 0; - beacon_config.eth1.lowest_cached_block_number = 0; - beacon_config.eth1.follow_distance = 1; - beacon_config.dummy_eth1_backend = false; - beacon_config.sync_eth1_chain = true; - - beacon_config - }) - /* - * Create a new `LocalNetwork` with one beacon node. - */ - .and_then(move |beacon_config| { - LocalNetwork::new(context, beacon_config.clone()) - .map(|network| (network, beacon_config)) - }) - /* - * One by one, add beacon nodes to the network. - */ - .and_then(move |(network, beacon_config)| { - let network_1 = network.clone(); - - stream::unfold(0..node_count - 1, move |mut iter| { - iter.next().map(|_| { - network_1 - .add_beacon_node(beacon_config.clone()) - .map(|()| ((), iter)) - }) - }) - .collect() - .map(|_| network) - }) - /* - * One by one, add validator clients to the network. Each validator client is attached to - * a single corresponding beacon node. - */ - .and_then(move |network| { - let network_1 = network.clone(); - - // Note: presently the validator client future will only resolve once genesis time - // occurs. 
This is great for this scenario, but likely to change in the future. - // - // If the validator client future behaviour changes, we would need to add a new future - // that delays until genesis. Otherwise, all of the checks that start in the next - // future will start too early. - - stream::unfold(0..node_count, move |mut iter| { - iter.next().map(|i| { - let indices = (i * validators_per_node..(i + 1) * validators_per_node) - .collect::>(); - - network_1 - .add_validator_client(ValidatorConfig::default(), i, indices) - .map(|()| ((), iter)) - }) - }) - .collect() - .map(|_| network) - }) - /* - * Start the processes that will run checks on the network as it runs. - */ - .and_then(move |network| { - // The `final_future` either completes immediately or never completes, depending on the value - // of `end_after_checks`. - let final_future: Box + Send> = - if end_after_checks { - Box::new(future::ok(()).map_err(|()| "".to_string())) - } else { - Box::new(future::empty().map_err(|()| "".to_string())) - }; - - future::ok(()) - // Check that the chain finalizes at the first given opportunity. - .join(checks::verify_first_finalization( - network.clone(), - slot_duration, - )) - // Check that the chain starts with the expected validator count. - .join(checks::verify_initial_validator_count( - network.clone(), - slot_duration, - initial_validator_count, - )) - // Check that validators greater than `spec.min_genesis_active_validator_count` are - // onboarded at the first possible opportunity. - .join(checks::verify_validator_onboarding( - network.clone(), - slot_duration, - total_validator_count, - )) - // End now or run forever, depending on the `end_after_checks` flag. - .join(final_future) - .map(|_| network) - }) - /* - * End the simulation by dropping the network. This will kill all running beacon nodes and - * validator clients. - */ - .map(|network| { - println!( - "Simulation complete. 
Finished with {} beacon nodes and {} validator clients", - network.beacon_node_count(), - network.validator_client_count() - ); - - // Be explicit about dropping the network, as this kills all the nodes. This ensures - // all the checks have adequate time to pass. - drop(network) - }); - - env.runtime().block_on(future) -} diff --git a/tests/simulator/src/no_eth1_sim.rs b/tests/simulator/src/no_eth1_sim.rs new file mode 100644 index 000000000..b4d233909 --- /dev/null +++ b/tests/simulator/src/no_eth1_sim.rs @@ -0,0 +1,150 @@ +use crate::{checks, LocalNetwork}; +use clap::ArgMatches; +use futures::{future, stream, Future, Stream}; +use node_test_rig::{ + environment::EnvironmentBuilder, testing_client_config, ClientGenesis, ValidatorConfig, +}; +use std::net::{IpAddr, Ipv4Addr}; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; + +pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { + let node_count = value_t!(matches, "nodes", usize).expect("missing nodes default"); + let validators_per_node = value_t!(matches, "validators_per_node", usize) + .expect("missing validators_per_node default"); + let speed_up_factor = + value_t!(matches, "speed_up_factor", u64).expect("missing speed_up_factor default"); + let mut end_after_checks = true; + if matches.is_present("end_after_checks") { + end_after_checks = false; + } + + println!("Beacon Chain Simulator:"); + println!(" nodes:{}", node_count); + println!(" validators_per_node:{}", validators_per_node); + println!(" end_after_checks:{}", end_after_checks); + + let log_level = "debug"; + let log_format = None; + + let mut env = EnvironmentBuilder::mainnet() + .async_logger(log_level, log_format)? + .multi_threaded_tokio_runtime()? 
+ .build()?; + + let eth1_block_time = Duration::from_millis(15_000 / speed_up_factor); + + let spec = &mut env.eth2_config.spec; + + spec.milliseconds_per_slot /= speed_up_factor; + spec.eth1_follow_distance = 16; + spec.min_genesis_delay = eth1_block_time.as_secs() * spec.eth1_follow_distance * 2; + spec.min_genesis_time = 0; + spec.min_genesis_active_validator_count = 64; + spec.seconds_per_eth1_block = 1; + + let genesis_time = SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|_| "should get system time")? + + Duration::from_secs(5); + + let slot_duration = Duration::from_millis(spec.milliseconds_per_slot); + let total_validator_count = validators_per_node * node_count; + + let context = env.core_context(); + + let mut beacon_config = testing_client_config(); + + beacon_config.genesis = ClientGenesis::Interop { + validator_count: total_validator_count, + genesis_time: genesis_time.as_secs(), + }; + beacon_config.dummy_eth1_backend = true; + beacon_config.sync_eth1_chain = true; + + beacon_config.network.enr_address = Some(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))); + + let future = LocalNetwork::new(context, beacon_config.clone()) + /* + * One by one, add beacon nodes to the network. + */ + .and_then(move |network| { + let network_1 = network.clone(); + + stream::unfold(0..node_count - 1, move |mut iter| { + iter.next().map(|_| { + network_1 + .add_beacon_node(beacon_config.clone()) + .map(|()| ((), iter)) + }) + }) + .collect() + .map(|_| network) + }) + /* + * One by one, add validator clients to the network. Each validator client is attached to + * a single corresponding beacon node. + */ + .and_then(move |network| { + let network_1 = network.clone(); + + // Note: presently the validator client future will only resolve once genesis time + // occurs. This is great for this scenario, but likely to change in the future. + // + // If the validator client future behaviour changes, we would need to add a new future + // that delays until genesis. 
Otherwise, all of the checks that start in the next + // future will start too early. + + stream::unfold(0..node_count, move |mut iter| { + iter.next().map(|i| { + let indices = (i * validators_per_node..(i + 1) * validators_per_node) + .collect::>(); + + network_1 + .add_validator_client(ValidatorConfig::default(), i, indices) + .map(|()| ((), iter)) + }) + }) + .collect() + .map(|_| network) + }) + /* + * Start the processes that will run checks on the network as it runs. + */ + .and_then(move |network| { + // The `final_future` either completes immediately or never completes, depending on the value + // of `end_after_checks`. + let final_future: Box + Send> = + if end_after_checks { + Box::new(future::ok(()).map_err(|()| "".to_string())) + } else { + Box::new(future::empty().map_err(|()| "".to_string())) + }; + + future::ok(()) + // Check that the chain finalizes at the first given opportunity. + .join(checks::verify_first_finalization( + network.clone(), + slot_duration, + )) + // End now or run forever, depending on the `end_after_checks` flag. + .join(final_future) + .map(|_| network) + }) + /* + * End the simulation by dropping the network. This will kill all running beacon nodes and + * validator clients. + */ + .map(|network| { + println!( + "Simulation complete. Finished with {} beacon nodes and {} validator clients", + network.beacon_node_count(), + network.validator_client_count() + ); + + // Be explicit about dropping the network, as this kills all the nodes. This ensures + // all the checks have adequate time to pass. 
+ drop(network) + }); + + env.runtime().block_on(future) +} diff --git a/tests/simulator/src/sync_sim.rs b/tests/simulator/src/sync_sim.rs index bee0b4ba1..16a62fc32 100644 --- a/tests/simulator/src/sync_sim.rs +++ b/tests/simulator/src/sync_sim.rs @@ -1,12 +1,137 @@ use crate::checks::{epoch_delay, verify_all_finalized_at}; use crate::local_network::LocalNetwork; -use futures::stream; -use futures::{Future, IntoFuture, Stream}; +use clap::ArgMatches; +use futures::{future, stream, Future, IntoFuture, Stream}; use node_test_rig::ClientConfig; -use std::time::Duration; +use node_test_rig::{ + environment::EnvironmentBuilder, testing_client_config, ClientGenesis, ValidatorConfig, +}; +use std::net::{IpAddr, Ipv4Addr}; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; use tokio::timer::Interval; use types::{Epoch, EthSpec}; +pub fn run_syncing_sim(matches: &ArgMatches) -> Result<(), String> { + let initial_delay = value_t!(matches, "initial_delay", u64).unwrap(); + let sync_timeout = value_t!(matches, "sync_timeout", u64).unwrap(); + let speed_up_factor = value_t!(matches, "speedup", u64).unwrap(); + let strategy = value_t!(matches, "strategy", String).unwrap(); + + println!("Syncing Simulator:"); + println!(" initial_delay:{}", initial_delay); + println!(" sync timeout: {}", sync_timeout); + println!(" speed up factor:{}", speed_up_factor); + println!(" strategy:{}", strategy); + + let log_level = "debug"; + let log_format = None; + + syncing_sim( + speed_up_factor, + initial_delay, + sync_timeout, + strategy, + log_level, + log_format, + ) +} + +fn syncing_sim( + speed_up_factor: u64, + initial_delay: u64, + sync_timeout: u64, + strategy: String, + log_level: &str, + log_format: Option<&str>, +) -> Result<(), String> { + let mut env = EnvironmentBuilder::minimal() + .async_logger(log_level, log_format)? + .multi_threaded_tokio_runtime()? 
+ .build()?; + + let spec = &mut env.eth2_config.spec; + let end_after_checks = true; + let eth1_block_time = Duration::from_millis(15_000 / speed_up_factor); + + spec.milliseconds_per_slot /= speed_up_factor; + spec.eth1_follow_distance = 16; + spec.min_genesis_delay = eth1_block_time.as_secs() * spec.eth1_follow_distance * 2; + spec.min_genesis_time = 0; + spec.min_genesis_active_validator_count = 64; + spec.seconds_per_eth1_block = 1; + + let num_validators = 8; + let slot_duration = Duration::from_millis(spec.milliseconds_per_slot); + let context = env.core_context(); + let mut beacon_config = testing_client_config(); + + let genesis_time = SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|_| "should get system time")? + + Duration::from_secs(5); + beacon_config.genesis = ClientGenesis::Interop { + validator_count: num_validators, + genesis_time: genesis_time.as_secs(), + }; + beacon_config.dummy_eth1_backend = true; + beacon_config.sync_eth1_chain = true; + + beacon_config.network.enr_address = Some(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))); + + let future = LocalNetwork::new(context, beacon_config.clone()) + /* + * Add a validator client which handles all validators from the genesis state. + */ + .and_then(move |network| { + network + .add_validator_client(ValidatorConfig::default(), 0, (0..num_validators).collect()) + .map(|_| network) + }) + /* + * Start the processes that will run checks on the network as it runs. + */ + .and_then(move |network| { + // The `final_future` either completes immediately or never completes, depending on the value + // of `end_after_checks`. + let final_future: Box + Send> = + if end_after_checks { + Box::new(future::ok(()).map_err(|()| "".to_string())) + } else { + Box::new(future::empty().map_err(|()| "".to_string())) + }; + + future::ok(()) + // Check all syncing strategies one after other. 
+ .join(pick_strategy( + &strategy, + network.clone(), + beacon_config.clone(), + slot_duration, + initial_delay, + sync_timeout, + )) + .join(final_future) + .map(|_| network) + }) + /* + * End the simulation by dropping the network. This will kill all running beacon nodes and + * validator clients. + */ + .map(|network| { + println!( + "Simulation complete. Finished with {} beacon nodes and {} validator clients", + network.beacon_node_count(), + network.validator_client_count() + ); + + // Be explicit about dropping the network, as this kills all the nodes. This ensures + // all the checks have adequate time to pass. + drop(network) + }); + + env.runtime().block_on(future) +} + pub fn pick_strategy( strategy: &str, network: LocalNetwork, @@ -228,27 +353,24 @@ pub fn verify_syncing( pub fn check_still_syncing( network: &LocalNetwork, ) -> impl Future { - let net = network.clone(); network .remote_nodes() .into_future() - // get all head epochs + // get syncing status of nodes .and_then(|remote_nodes| { stream::unfold(remote_nodes.into_iter(), |mut iter| { iter.next().map(|remote_node| { remote_node .http - .beacon() - .get_head() - .map(|head| head.finalized_slot.epoch(E::slots_per_epoch())) - .map(|epoch| (epoch, iter)) - .map_err(|e| format!("Get head via http failed: {:?}", e)) + .node() + .syncing_status() + .map(|status| status.is_syncing) + .map(|status| (status, iter)) + .map_err(|e| format!("Get syncing status via http failed: {:?}", e)) }) }) .collect() }) - // find current epoch - .and_then(move |epochs| net.bootnode_epoch().map(|epoch| (epochs, epoch))) - .and_then(move |(epochs, epoch)| Ok(epochs.iter().any(|head_epoch| *head_epoch != epoch))) + .and_then(move |status| Ok(status.iter().any(|is_syncing| *is_syncing))) .map_err(|e| format!("Failed syncing check: {:?}", e)) } diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 0b2cb8f32..68dfc906d 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -1,6 
+1,6 @@ [package] name = "validator_client" -version = "0.1.0" +version = "0.2.0" authors = ["Paul Hauner ", "Age Manning ", "Luke Anderson "] edition = "2018" @@ -15,6 +15,7 @@ tree_hash = "0.1.0" clap = "2.33.0" eth2_interop_keypairs = { path = "../eth2/utils/eth2_interop_keypairs" } slot_clock = { path = "../eth2/utils/slot_clock" } +rest_types = { path = "../eth2/utils/rest_types" } types = { path = "../eth2/types" } serde = "1.0.102" serde_derive = "1.0.102" @@ -40,3 +41,4 @@ bls = { path = "../eth2/utils/bls" } remote_beacon_node = { path = "../eth2/utils/remote_beacon_node" } tempdir = "0.3" rayon = "1.2.0" +web3 = "0.10.0" diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index cf3507cd7..c1842519b 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -1,19 +1,20 @@ use crate::{ - duties_service::{DutiesService, ValidatorDuty}, + duties_service::{DutiesService, DutyAndState}, validator_store::ValidatorStore, }; use environment::RuntimeContext; use exit_future::Signal; -use futures::{Future, Stream}; +use futures::{future, Future, Stream}; use remote_beacon_node::{PublishStatus, RemoteBeaconNode}; -use slog::{crit, info, trace}; +use rest_types::ValidatorSubscription; +use slog::{crit, debug, info, trace}; use slot_clock::SlotClock; use std::collections::HashMap; use std::ops::Deref; use std::sync::Arc; use std::time::{Duration, Instant}; -use tokio::timer::Interval; -use types::{ChainSpec, CommitteeIndex, EthSpec, Slot}; +use tokio::timer::{Delay, Interval}; +use types::{Attestation, ChainSpec, CommitteeIndex, EthSpec, Slot}; /// Builds an `AttestationService`. 
pub struct AttestationServiceBuilder { @@ -123,13 +124,13 @@ impl AttestationService { let context = &self.context; let log = context.log.clone(); + let slot_duration = Duration::from_millis(spec.milliseconds_per_slot); let duration_to_next_slot = self .slot_clock .duration_to_next_slot() .ok_or_else(|| "Unable to determine duration to next slot".to_string())?; let interval = { - let slot_duration = Duration::from_millis(spec.milliseconds_per_slot); Interval::new( Instant::now() + duration_to_next_slot + slot_duration / 3, slot_duration, @@ -154,7 +155,7 @@ impl AttestationService { } }) .for_each(move |_| { - if let Err(e) = service.spawn_attestation_tasks() { + if let Err(e) = service.spawn_attestation_tasks(slot_duration) { crit!( log_2, "Failed to spawn attestation tasks"; @@ -178,125 +179,143 @@ impl AttestationService { /// For each each required attestation, spawn a new task that downloads, signs and uploads the /// attestation to the beacon node. - fn spawn_attestation_tasks(&self) -> Result<(), String> { + fn spawn_attestation_tasks(&self, slot_duration: Duration) -> Result<(), String> { let service = self.clone(); let slot = service .slot_clock .now() .ok_or_else(|| "Failed to read slot clock".to_string())?; + let duration_to_next_slot = service + .slot_clock + .duration_to_next_slot() + .ok_or_else(|| "Unable to determine duration to next slot".to_string())?; - let mut committee_indices: HashMap> = HashMap::new(); + // If a validator needs to publish an aggregate attestation, they must do so at 2/3 + // through the slot. This delay triggers at this time + let aggregate_production_instant = Instant::now() + + duration_to_next_slot + .checked_sub(slot_duration / 3) + .unwrap_or_else(|| Duration::from_secs(0)); + let epoch = slot.epoch(E::slots_per_epoch()); + // Check if any attestation subscriptions are required. 
If there a new attestation duties for + // this epoch or the next, send them to the beacon node + let mut duties_to_subscribe = service.duties_service.unsubscribed_epoch_duties(&epoch); + duties_to_subscribe.append( + &mut service + .duties_service + .unsubscribed_epoch_duties(&(epoch + 1)), + ); + + // spawn a task to subscribe all the duties service + .context + .executor + .spawn(self.clone().send_subscriptions(duties_to_subscribe)); + + let duties_by_committee_index: HashMap> = service .duties_service .attesters(slot) .into_iter() - .for_each(|duty| { - if let Some(committee_index) = duty.attestation_committee_index { - let validator_duties = committee_indices - .entry(committee_index) - .or_insert_with(|| vec![]); + .fold(HashMap::new(), |mut map, duty_and_state| { + if let Some(committee_index) = duty_and_state.duty.attestation_committee_index { + let validator_duties = map.entry(committee_index).or_insert_with(|| vec![]); - validator_duties.push(duty); + validator_duties.push(duty_and_state); } + + map }); - committee_indices + // For each committee index for this slot: + // + // - Create and publish an `Attestation` for all required validators. + // - Create and publish `SignedAggregateAndProof` for all aggregating validators. + duties_by_committee_index .into_iter() .for_each(|(committee_index, validator_duties)| { // Spawn a separate task for each attestation. - service.context.executor.spawn(self.clone().do_attestation( - slot, - committee_index, - validator_duties, - )); + service + .context + .executor + .spawn(self.clone().publish_attestations_and_aggregates( + slot, + committee_index, + validator_duties, + aggregate_production_instant, + )); }); Ok(()) } - /// For a given `committee_index`, download the attestation, have it signed by all validators - /// in `validator_duties` then upload it. 
- fn do_attestation( - &self, - slot: Slot, - committee_index: CommitteeIndex, - validator_duties: Vec, - ) -> impl Future { + /// Subscribes any required validators to the beacon node for a particular slot. + /// + /// This informs the beacon node that the validator has a duty on a particular + /// slot allowing the beacon node to connect to the required subnet and determine + /// if attestations need to be aggregated. + fn send_subscriptions(&self, duties: Vec) -> impl Future { let service_1 = self.clone(); - let service_2 = self.clone(); + let num_duties = duties.len(); + let log_1 = self.context.log.clone(); let log_2 = self.context.log.clone(); + let (validator_subscriptions, successful_duties): (Vec<_>, Vec<_>) = duties + .into_iter() + .filter_map(|duty| { + let (slot, attestation_committee_index, _, validator_index) = + duty.attestation_duties()?; + let selection_proof = self + .validator_store + .produce_selection_proof(duty.validator_pubkey(), slot)?; + let modulo = duty.duty.aggregator_modulo?; + let subscription = ValidatorSubscription { + validator_index, + attestation_committee_index, + slot, + is_aggregator: selection_proof + .is_aggregator(modulo) + .map_err(|e| crit!(log_1, "Unable to determine aggregator: {:?}", e)) + .ok()?, + }; + + Some((subscription, (duty, selection_proof))) + }) + .unzip(); + + let num_failed_duties = num_duties - successful_duties.len(); + self.beacon_node .http .validator() - .produce_attestation(slot, committee_index) - .map_err(|e| format!("Failed to produce attestation: {:?}", e)) - .map(move |attestation| { - validator_duties - .iter() - .fold(attestation, |mut attestation, duty| { - let log = service_1.context.log.clone(); - - if let Some(( - duty_slot, - duty_committee_index, - validator_committee_position, - )) = attestation_duties(duty) - { - if duty_slot == slot && duty_committee_index == committee_index { - if service_1 - .validator_store - .sign_attestation( - &duty.validator_pubkey, - 
validator_committee_position, - &mut attestation, - ) - .is_none() - { - crit!(log, "Failed to sign attestation"); - } - } else { - crit!(log, "Inconsistent validator duties during signing"); - } - } else { - crit!(log, "Missing validator duties when signing"); - } - - attestation - }) - }) - .and_then(move |attestation| { - service_2 - .beacon_node - .http - .validator() - .publish_attestation(attestation.clone()) - .map(|publish_status| (attestation, publish_status)) - .map_err(|e| format!("Failed to publish attestation: {:?}", e)) - }) - .map(move |(attestation, publish_status)| match publish_status { + .subscribe(validator_subscriptions) + .map_err(|e| format!("Failed to subscribe validators: {:?}", e)) + .map(move |publish_status| match publish_status { PublishStatus::Valid => info!( log_1, - "Successfully published attestation"; - "signatures" => attestation.aggregation_bits.num_set_bits(), - "head_block" => format!("{}", attestation.data.beacon_block_root), - "committee_index" => attestation.data.index, - "slot" => attestation.data.slot.as_u64(), + "Successfully subscribed validators"; + "validators" => num_duties, + "failed_validators" => num_failed_duties, ), PublishStatus::Invalid(msg) => crit!( log_1, - "Published attestation was invalid"; + "Validator Subscription was invalid"; "message" => msg, - "committee_index" => attestation.data.index, - "slot" => attestation.data.slot.as_u64(), ), PublishStatus::Unknown => { crit!(log_1, "Unknown condition when publishing attestation") } }) + .and_then(move |_| { + for (duty, selection_proof) in successful_duties { + service_1 + .duties_service + .subscribe_duty(&duty.duty, selection_proof); + } + Ok(()) + }) .map_err(move |e| { crit!( log_2, @@ -305,12 +324,378 @@ impl AttestationService { ) }) } + + /// Performs the first step of the attesting process: downloading `Attestation` objects, + /// signing them and returning them to the validator. 
+ /// + /// https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/validator.md#attesting + /// + /// ## Detail + /// + /// The given `validator_duties` should already be filtered to only contain those that match + /// `slot` and `committee_index`. Critical errors will be logged if this is not the case. + fn publish_attestations_and_aggregates( + &self, + slot: Slot, + committee_index: CommitteeIndex, + validator_duties: Vec, + aggregate_production_instant: Instant, + ) -> Box + Send> { + // There's not need to produce `Attestation` or `SignedAggregateAndProof` if we do not have + // any validators for the given `slot` and `committee_index`. + if validator_duties.is_empty() { + return Box::new(future::ok(())); + } + + let service_1 = self.clone(); + let log_1 = self.context.log.clone(); + let validator_duties_1 = Arc::new(validator_duties); + let validator_duties_2 = validator_duties_1.clone(); + + Box::new( + // Step 1. + // + // Download, sign and publish an `Attestation` for each validator. + self.produce_and_publish_attestations(slot, committee_index, validator_duties_1) + .and_then::<_, Box + Send>>( + move |attestation_opt| { + if let Some(attestation) = attestation_opt { + Box::new( + // Step 2. (Only if step 1 produced an attestation) + // + // First, wait until the `aggregation_production_instant` (2/3rds + // of the way though the slot). As verified in the + // `delay_triggers_when_in_the_past` test, this code will still run + // even if the instant has already elapsed. + // + // Then download, sign and publish a `SignedAggregateAndProof` for each + // validator that is elected to aggregate for this `slot` and + // `committee_index`. 
+ Delay::new(aggregate_production_instant) + .map_err(|e| { + format!( + "Unable to create aggregate production delay: {:?}", + e + ) + }) + .and_then(move |()| { + service_1.produce_and_publish_aggregates( + attestation, + validator_duties_2, + ) + }), + ) + } else { + // If `produce_and_publish_attestations` did not download any + // attestations then there is no need to produce any + // `SignedAggregateAndProof`. + Box::new(future::ok(())) + } + }, + ) + .map_err(move |e| { + crit!( + log_1, + "Error during attestation routine"; + "error" => format!("{:?}", e), + "committee_index" => committee_index, + "slot" => slot.as_u64(), + ) + }), + ) + } + + /// Performs the first step of the attesting process: downloading `Attestation` objects, + /// signing them and returning them to the validator. + /// + /// https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/validator.md#attesting + /// + /// ## Detail + /// + /// The given `validator_duties` should already be filtered to only contain those that match + /// `slot` and `committee_index`. Critical errors will be logged if this is not the case. + /// + /// Only one `Attestation` is downloaded from the BN. It is then cloned and signed by each + /// validator and the list of individually-signed `Attestation` objects is returned to the BN. + fn produce_and_publish_attestations( + &self, + slot: Slot, + committee_index: CommitteeIndex, + validator_duties: Arc>, + ) -> Box>, Error = String> + Send> { + if validator_duties.is_empty() { + return Box::new(future::ok(None)); + } + + let service = self.clone(); + + Box::new( + self.beacon_node + .http + .validator() + .produce_attestation(slot, committee_index) + .map_err(|e| format!("Failed to produce attestation: {:?}", e)) + .and_then::<_, Box + Send>>(move |attestation| { + let log = service.context.log.clone(); + + // For each validator in `validator_duties`, clone the `attestation` and add + // their signature. 
+ // + // If any validator is unable to sign, they are simply skipped. + let signed_attestations = validator_duties + .iter() + .filter_map(|duty| { + let log = service.context.log.clone(); + + // Ensure that all required fields are present in the validator duty. + let (duty_slot, duty_committee_index, validator_committee_position, _) = + if let Some(tuple) = duty.attestation_duties() { + tuple + } else { + crit!( + log, + "Missing validator duties when signing"; + "duties" => format!("{:?}", duty) + ); + return None; + }; + + // Ensure that the attestation matches the duties. + if duty_slot != attestation.data.slot + || duty_committee_index != attestation.data.index + { + crit!( + log, + "Inconsistent validator duties during signing"; + "validator" => format!("{:?}", duty.validator_pubkey()), + "duty_slot" => duty_slot, + "attestation_slot" => attestation.data.slot, + "duty_index" => duty_committee_index, + "attestation_index" => attestation.data.index, + ); + return None; + } + + let mut attestation = attestation.clone(); + + if service + .validator_store + .sign_attestation( + duty.validator_pubkey(), + validator_committee_position, + &mut attestation, + ) + .is_none() + { + crit!( + log, + "Attestation signing refused"; + "validator" => format!("{:?}", duty.validator_pubkey()), + "slot" => attestation.data.slot, + "index" => attestation.data.index, + ); + None + } else { + Some(attestation) + } + }) + .collect::>(); + + // If there are any signed attestations, publish them to the BN. Otherwise, + // just return early. 
+ if let Some(attestation) = signed_attestations.first().cloned() { + let num_attestations = signed_attestations.len(); + let beacon_block_root = attestation.data.beacon_block_root; + + Box::new( + service + .beacon_node + .http + .validator() + .publish_attestations(signed_attestations) + .map_err(|e| format!("Failed to publish attestation: {:?}", e)) + .map(move |publish_status| match publish_status { + PublishStatus::Valid => info!( + log, + "Successfully published attestations"; + "count" => num_attestations, + "head_block" => format!("{:?}", beacon_block_root), + "committee_index" => committee_index, + "slot" => slot.as_u64(), + ), + PublishStatus::Invalid(msg) => crit!( + log, + "Published attestation was invalid"; + "message" => msg, + "committee_index" => committee_index, + "slot" => slot.as_u64(), + ), + PublishStatus::Unknown => { + crit!(log, "Unknown condition when publishing attestation") + } + }) + .map(|()| Some(attestation)), + ) + } else { + debug!( + log, + "No attestations to publish"; + "committee_index" => committee_index, + "slot" => slot.as_u64(), + ); + Box::new(future::ok(None)) + } + }), + ) + } + + /// Performs the second step of the attesting process: downloading an aggregated `Attestation`, + /// converting it into a `SignedAggregateAndProof` and returning it to the BN. + /// + /// https://github.com/ethereum/eth2.0-specs/blob/v0.11.0/specs/phase0/validator.md#broadcast-aggregate + /// + /// ## Detail + /// + /// The given `validator_duties` should already be filtered to only contain those that match + /// `slot` and `committee_index`. Critical errors will be logged if this is not the case. + /// + /// Only one aggregated `Attestation` is downloaded from the BN. It is then cloned and signed + /// by each validator and the list of individually-signed `SignedAggregateAndProof` objects is + /// returned to the BN. 
+    fn produce_and_publish_aggregates(
+        &self,
+        attestation: Attestation<E>,
+        validator_duties: Arc<Vec<DutyAndState>>,
+    ) -> impl Future<Item = (), Error = String> {
+        let service_1 = self.clone();
+        let log_1 = self.context.log.clone();
+
+        self.beacon_node
+            .http
+            .validator()
+            .produce_aggregate_attestation(&attestation.data)
+            .map_err(|e| format!("Failed to produce an aggregate attestation: {:?}", e))
+            .and_then::<_, Box<dyn Future<Item = (), Error = String> + Send>>(
+                move |aggregated_attestation| {
+                    // For each validator, clone the `aggregated_attestation` and convert it into
+                    // a `SignedAggregateAndProof`
+                    let signed_aggregate_and_proofs = validator_duties
+                        .iter()
+                        .filter_map(|duty_and_state| {
+                            // Do not produce a signed aggregator for validators that are not
+                            // subscribed aggregators.
+                            //
+                            // Note: this function returns `false` if the validator is required to
+                            // be an aggregator but has not yet subscribed.
+                            if !duty_and_state.is_aggregator() {
+                                return None;
+                            }
+
+                            let (duty_slot, duty_committee_index, _, validator_index) =
+                                duty_and_state.attestation_duties().or_else(|| {
+                                    crit!(log_1, "Missing duties when signing aggregate");
+                                    None
+                                })?;
+
+                            let pubkey = &duty_and_state.duty.validator_pubkey;
+                            let slot = attestation.data.slot;
+                            let committee_index = attestation.data.index;
+
+                            if duty_slot != slot || duty_committee_index != committee_index {
+                                crit!(log_1, "Inconsistent validator duties during signing");
+                                return None;
+                            }
+
+                            if let Some(signed_aggregate_and_proof) = service_1
+                                .validator_store
+                                .produce_signed_aggregate_and_proof(
+                                    pubkey,
+                                    validator_index,
+                                    aggregated_attestation.clone(),
+                                )
+                            {
+                                Some(signed_aggregate_and_proof)
+                            } else {
+                                crit!(log_1, "Failed to sign attestation");
+                                None
+                            }
+                        })
+                        .collect::<Vec<_>>();
+
+                    // If any signed aggregates and proofs were produced, publish them to the
+                    // BN.
+ if let Some(first) = signed_aggregate_and_proofs.first().cloned() { + let attestation = first.message.aggregate; + + Box::new(service_1 + .beacon_node + .http + .validator() + .publish_aggregate_and_proof(signed_aggregate_and_proofs) + .map(|publish_status| (attestation, publish_status)) + .map_err(|e| format!("Failed to publish aggregate and proofs: {:?}", e)) + .map(move |(attestation, publish_status)| match publish_status { + PublishStatus::Valid => info!( + log_1, + "Successfully published aggregate attestations"; + "signatures" => attestation.aggregation_bits.num_set_bits(), + "head_block" => format!("{}", attestation.data.beacon_block_root), + "committee_index" => attestation.data.index, + "slot" => attestation.data.slot.as_u64(), + ), + PublishStatus::Invalid(msg) => crit!( + log_1, + "Published attestation was invalid"; + "message" => msg, + "committee_index" => attestation.data.index, + "slot" => attestation.data.slot.as_u64(), + ), + PublishStatus::Unknown => { + crit!(log_1, "Unknown condition when publishing attestation") + } + })) + } else { + debug!( + log_1, + "No signed aggregates to publish"; + "committee_index" => attestation.data.index, + "slot" => attestation.data.slot.as_u64(), + ); + Box::new(future::ok(())) + } + }, + ) + } } -fn attestation_duties(duty: &ValidatorDuty) -> Option<(Slot, CommitteeIndex, usize)> { - Some(( - duty.attestation_slot?, - duty.attestation_committee_index?, - duty.attestation_committee_position?, - )) +#[cfg(test)] +mod tests { + use super::*; + use parking_lot::RwLock; + use tokio::runtime::Builder as RuntimeBuilder; + + /// This test is to ensure that a `tokio_timer::Delay` with an instant in the past will still + /// trigger. 
+ #[test] + fn delay_triggers_when_in_the_past() { + let in_the_past = Instant::now() - Duration::from_secs(2); + let state_1 = Arc::new(RwLock::new(in_the_past)); + let state_2 = state_1.clone(); + + let future = Delay::new(in_the_past) + .map_err(|_| panic!("Failed to create duration")) + .map(move |()| *state_1.write() = Instant::now()); + + let mut runtime = RuntimeBuilder::new() + .core_threads(1) + .build() + .expect("failed to start runtime"); + + runtime.block_on(future).expect("failed to complete future"); + + assert!( + *state_2.read() > in_the_past, + "state should have been updated" + ); + } } diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index bbbe04592..3a9f73790 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -4,6 +4,7 @@ use exit_future::Signal; use futures::{future, Future, IntoFuture, Stream}; use parking_lot::RwLock; use remote_beacon_node::RemoteBeaconNode; +use rest_types::{ValidatorDuty, ValidatorDutyBytes}; use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use std::collections::HashMap; @@ -12,7 +13,7 @@ use std::ops::Deref; use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::timer::Interval; -use types::{ChainSpec, CommitteeIndex, Epoch, EthSpec, PublicKey, Slot}; +use types::{ChainSpec, CommitteeIndex, Epoch, EthSpec, PublicKey, SelectionProof, Slot}; /// Delay this period of time after the slot starts. This allows the node to process the new slot. const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(100); @@ -20,35 +21,85 @@ const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(100); /// Remove any duties where the `duties_epoch < current_epoch - PRUNE_DEPTH`. const PRUNE_DEPTH: u64 = 4; -type BaseHashMap = HashMap>; +type BaseHashMap = HashMap>; -/// Stores the duties for some validator for an epoch. 
-#[derive(PartialEq, Debug, Clone)]
-pub struct ValidatorDuty {
-    /// The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._
-    pub validator_pubkey: PublicKey,
-    /// The slot at which the validator must attest.
-    pub attestation_slot: Option<Slot>,
-    /// The index of the committee within `slot` of which the validator is a member.
-    pub attestation_committee_index: Option<CommitteeIndex>,
-    /// The position of the validator in the committee.
-    pub attestation_committee_position: Option<usize>,
-    /// The slots in which a validator must propose a block (can be empty).
-    pub block_proposal_slots: Vec<Slot>,
+#[derive(Debug, Clone)]
+pub enum DutyState {
+    /// This duty has not been subscribed to the beacon node.
+    NotSubscribed,
+    /// The duty has been subscribed and the validator is an aggregator for this duty. The
+    /// selection proof is provided to construct the `AggregateAndProof` struct.
+    SubscribedAggregator(SelectionProof),
 }
 
-impl TryInto<ValidatorDuty> for remote_beacon_node::ValidatorDuty {
+#[derive(Debug, Clone)]
+pub struct DutyAndState {
+    /// The validator duty.
+    pub duty: ValidatorDuty,
+    /// The current state of the validator duty.
+    state: DutyState,
+}
+
+impl DutyAndState {
+    /// Returns true if the duty is an aggregation duty (the validator must aggregate all
+    /// attestations).
+    pub fn is_aggregator(&self) -> bool {
+        match self.state {
+            DutyState::NotSubscribed => false,
+            DutyState::SubscribedAggregator(_) => true,
+        }
+    }
+
+    /// Returns the selection proof if the duty is an aggregation duty.
+    pub fn selection_proof(&self) -> Option<SelectionProof> {
+        match &self.state {
+            DutyState::SubscribedAggregator(proof) => Some(proof.clone()),
+            _ => None,
+        }
+    }
+
+    /// Returns true if this duty has been subscribed with the beacon node.
+ pub fn is_subscribed(&self) -> bool { + match self.state { + DutyState::NotSubscribed => false, + DutyState::SubscribedAggregator(_) => true, + } + } + + /// Returns the information required for an attesting validator, if they are scheduled to + /// attest. + pub fn attestation_duties(&self) -> Option<(Slot, CommitteeIndex, usize, u64)> { + Some(( + self.duty.attestation_slot?, + self.duty.attestation_committee_index?, + self.duty.attestation_committee_position?, + self.duty.validator_index?, + )) + } + + pub fn validator_pubkey(&self) -> &PublicKey { + &self.duty.validator_pubkey + } +} + +impl TryInto for ValidatorDutyBytes { type Error = String; - fn try_into(self) -> Result { - Ok(ValidatorDuty { + fn try_into(self) -> Result { + let duty = ValidatorDuty { validator_pubkey: (&self.validator_pubkey) .try_into() .map_err(|e| format!("Invalid pubkey bytes from server: {:?}", e))?, + validator_index: self.validator_index, attestation_slot: self.attestation_slot, attestation_committee_index: self.attestation_committee_index, attestation_committee_position: self.attestation_committee_position, block_proposal_slots: self.block_proposal_slots, + aggregator_modulo: self.aggregator_modulo, + }; + Ok(DutyAndState { + duty, + state: DutyState::NotSubscribed, }) } } @@ -82,7 +133,7 @@ impl DutiesStore { .filter(|(_validator_pubkey, validator_map)| { validator_map .get(&epoch) - .map(|duties| !duties.block_proposal_slots.is_empty()) + .map(|duties| !duties.duty.block_proposal_slots.is_empty()) .unwrap_or_else(|| false) }) .count() @@ -96,7 +147,7 @@ impl DutiesStore { .filter(|(_validator_pubkey, validator_map)| { validator_map .get(&epoch) - .map(|duties| duties.attestation_slot.is_some()) + .map(|duties| duties.duty.attestation_slot.is_some()) .unwrap_or_else(|| false) }) .count() @@ -112,8 +163,8 @@ impl DutiesStore { let epoch = slot.epoch(slots_per_epoch); validator_map.get(&epoch).and_then(|duties| { - if duties.block_proposal_slots.contains(&slot) { - 
Some(duties.validator_pubkey.clone()) + if duties.duty.block_proposal_slots.contains(&slot) { + Some(duties.duty.validator_pubkey.clone()) } else { None } @@ -122,7 +173,49 @@ impl DutiesStore { .collect() } - fn attesters(&self, slot: Slot, slots_per_epoch: u64) -> Vec { + /// Gets a list of validator duties for an epoch that have not yet been subscribed + /// to the beacon node. + // Note: Potentially we should modify the data structure to store the unsubscribed epoch duties for validator clients with a large number of validators. This currently adds an O(N) search each slot. + fn unsubscribed_epoch_duties(&self, epoch: &Epoch) -> Vec { + self.store + .read() + .iter() + .filter_map(|(_validator_pubkey, validator_map)| { + validator_map.get(epoch).and_then(|duty_and_state| { + if !duty_and_state.is_subscribed() { + Some(duty_and_state) + } else { + None + } + }) + }) + .cloned() + .collect() + } + + /// Marks a duty as being subscribed to the beacon node. This is called by the attestation + /// service once it has been sent. 
+ fn set_duty_state( + &self, + validator: &PublicKey, + slot: Slot, + state: DutyState, + slots_per_epoch: u64, + ) { + let epoch = slot.epoch(slots_per_epoch); + + let mut store = self.store.write(); + if let Some(map) = store.get_mut(validator) { + if let Some(duty) = map.get_mut(&epoch) { + if duty.duty.attestation_slot == Some(slot) { + // set the duty state + duty.state = state; + } + } + } + } + + fn attesters(&self, slot: Slot, slots_per_epoch: u64) -> Vec { self.store .read() .iter() @@ -132,7 +225,7 @@ impl DutiesStore { let epoch = slot.epoch(slots_per_epoch); validator_map.get(&epoch).and_then(|duties| { - if duties.attestation_slot == Some(slot) { + if duties.duty.attestation_slot == Some(slot) { Some(duties) } else { None @@ -143,16 +236,16 @@ impl DutiesStore { .collect() } - fn insert(&self, epoch: Epoch, duties: ValidatorDuty, slots_per_epoch: u64) -> InsertOutcome { + fn insert(&self, epoch: Epoch, duties: DutyAndState, slots_per_epoch: u64) -> InsertOutcome { let mut store = self.store.write(); - if !duties_match_epoch(&duties, epoch, slots_per_epoch) { + if !duties_match_epoch(&duties.duty, epoch, slots_per_epoch) { return InsertOutcome::Invalid; } - if let Some(validator_map) = store.get_mut(&duties.validator_pubkey) { + if let Some(validator_map) = store.get_mut(&duties.duty.validator_pubkey) { if let Some(known_duties) = validator_map.get_mut(&epoch) { - if *known_duties == duties { + if known_duties.duty == duties.duty { InsertOutcome::Identical } else { *known_duties = duties; @@ -164,7 +257,7 @@ impl DutiesStore { InsertOutcome::NewEpoch } } else { - let validator_pubkey = duties.validator_pubkey.clone(); + let validator_pubkey = duties.duty.validator_pubkey.clone(); let mut validator_map = HashMap::new(); validator_map.insert(epoch, duties); @@ -315,10 +408,29 @@ impl DutiesService { } /// Returns all `ValidatorDuty` for the given `slot`. 
- pub fn attesters(&self, slot: Slot) -> Vec { + pub fn attesters(&self, slot: Slot) -> Vec { self.store.attesters(slot, E::slots_per_epoch()) } + /// Returns all `ValidatorDuty` that have not been registered with the beacon node. + pub fn unsubscribed_epoch_duties(&self, epoch: &Epoch) -> Vec { + self.store.unsubscribed_epoch_duties(epoch) + } + + /// Marks the duty as being subscribed to the beacon node. + /// + /// If the duty is to be marked as an aggregator duty, a selection proof is also provided. + pub fn subscribe_duty(&self, duty: &ValidatorDuty, proof: SelectionProof) { + if let Some(slot) = duty.attestation_slot { + self.store.set_duty_state( + &duty.validator_pubkey, + slot, + DutyState::SubscribedAggregator(proof), + E::slots_per_epoch(), + ) + } + } + /// Start the service that periodically polls the beacon node for validator duties. pub fn start_update_service(&self, spec: &ChainSpec) -> Result { let log = self.context.log.clone(); @@ -477,7 +589,7 @@ impl DutiesService { let mut invalid = 0; all_duties.into_iter().try_for_each::<_, Result<_, String>>(|remote_duties| { - let duties: ValidatorDuty = remote_duties.try_into()?; + let duties: DutyAndState = remote_duties.try_into()?; match service_2 .store @@ -487,9 +599,9 @@ impl DutiesService { debug!( log, "First duty assignment for validator"; - "proposal_slots" => format!("{:?}", &duties.block_proposal_slots), - "attestation_slot" => format!("{:?}", &duties.attestation_slot), - "validator" => format!("{:?}", &duties.validator_pubkey) + "proposal_slots" => format!("{:?}", &duties.duty.block_proposal_slots), + "attestation_slot" => format!("{:?}", &duties.duty.attestation_slot), + "validator" => format!("{:?}", &duties.duty.validator_pubkey) ); new_validator += 1 } diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 6dfa29e40..ec7e2a743 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -105,7 +105,7 @@ impl ProductionValidatorClient { 
.duration_since(UNIX_EPOCH) .into_future() .map_err(|e| format!("Unable to read system time: {:?}", e)) - .and_then(move |now| { + .and_then::<_, Box + Send>>(move |now| { let log = log_3.clone(); let genesis = Duration::from_secs(genesis_time); @@ -114,9 +114,7 @@ impl ProductionValidatorClient { // // If the validator client starts before genesis, it will get errors from // the slot clock. - let box_future: Box + Send> = if now - < genesis - { + if now < genesis { info!( log, "Starting node prior to genesis"; @@ -138,9 +136,7 @@ impl ProductionValidatorClient { ); Box::new(future::ok((beacon_node, remote_eth2_config, genesis_time))) - }; - - box_future + } }) }) .and_then(|(beacon_node, eth2_config, genesis_time)| { diff --git a/validator_client/src/validator_directory.rs b/validator_client/src/validator_directory.rs index f904e7f67..197e1cb44 100644 --- a/validator_client/src/validator_directory.rs +++ b/validator_client/src/validator_directory.rs @@ -1,5 +1,6 @@ use bls::get_withdrawal_credentials; -use deposit_contract::encode_eth1_tx_data; +use deposit_contract::{encode_eth1_tx_data, DEPOSIT_GAS}; +use futures::{Future, IntoFuture}; use hex; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -12,6 +13,10 @@ use types::{ test_utils::generate_deterministic_keypair, ChainSpec, DepositData, Hash256, Keypair, PublicKey, SecretKey, Signature, }; +use web3::{ + types::{Address, TransactionRequest, U256}, + Transport, Web3, +}; const VOTING_KEY_PREFIX: &str = "voting"; const WITHDRAWAL_KEY_PREFIX: &str = "withdrawal"; @@ -241,7 +246,7 @@ impl ValidatorDirectoryBuilder { Ok(()) } - pub fn write_eth1_data_file(mut self) -> Result { + fn get_deposit_data(&self) -> Result<(Vec, u64), String> { let voting_keypair = self .voting_keypair .as_ref() @@ -254,30 +259,35 @@ impl ValidatorDirectoryBuilder { .amount .ok_or_else(|| "write_eth1_data_file requires an amount")?; let spec = self.spec.as_ref().ok_or_else(|| "build requires a spec")?; + + let 
withdrawal_credentials = Hash256::from_slice(&get_withdrawal_credentials( + &withdrawal_keypair.pk, + spec.bls_withdrawal_prefix_byte, + )); + + let mut deposit_data = DepositData { + pubkey: voting_keypair.pk.clone().into(), + withdrawal_credentials, + amount, + signature: Signature::empty_signature().into(), + }; + + deposit_data.signature = deposit_data.create_signature(&voting_keypair.sk, &spec); + + let deposit_data = encode_eth1_tx_data(&deposit_data) + .map_err(|e| format!("Unable to encode eth1 deposit tx data: {:?}", e))?; + + Ok((deposit_data, amount)) + } + + pub fn write_eth1_data_file(mut self) -> Result { let path = self .directory .as_ref() .map(|directory| directory.join(ETH1_DEPOSIT_DATA_FILE)) .ok_or_else(|| "write_eth1_data_filer requires a directory")?; - let deposit_data = { - let withdrawal_credentials = Hash256::from_slice(&get_withdrawal_credentials( - &withdrawal_keypair.pk, - spec.bls_withdrawal_prefix_byte, - )); - - let mut deposit_data = DepositData { - pubkey: voting_keypair.pk.clone().into(), - withdrawal_credentials, - amount, - signature: Signature::empty_signature().into(), - }; - - deposit_data.signature = deposit_data.create_signature(&voting_keypair.sk, &spec); - - encode_eth1_tx_data(&deposit_data) - .map_err(|e| format!("Unable to encode eth1 deposit tx data: {:?}", e))? 
- }; + let (deposit_data, _) = self.get_deposit_data()?; if path.exists() { return Err(format!("Eth1 data file already exists at: {:?}", path)); @@ -293,6 +303,31 @@ impl ValidatorDirectoryBuilder { Ok(self) } + pub fn submit_eth1_deposit( + self, + web3: Web3, + from: Address, + deposit_contract: Address, + ) -> impl Future { + self.get_deposit_data() + .into_future() + .and_then(move |(deposit_data, deposit_amount)| { + web3.eth() + .send_transaction(TransactionRequest { + from, + to: Some(deposit_contract), + gas: Some(DEPOSIT_GAS.into()), + gas_price: None, + value: Some(from_gwei(deposit_amount)), + data: Some(deposit_data.into()), + nonce: None, + condition: None, + }) + .map_err(|e| format!("Failed to send transaction: {:?}", e)) + }) + .map(|tx| (self, tx)) + } + pub fn build(self) -> Result { Ok(ValidatorDirectory { directory: self.directory.ok_or_else(|| "build requires a directory")?, @@ -303,6 +338,11 @@ impl ValidatorDirectoryBuilder { } } +/// Converts gwei to wei. +fn from_gwei(gwei: u64) -> U256 { + U256::from(gwei) * U256::exp10(9) +} + #[cfg(test)] mod tests { use super::*; diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index d1d2a1841..5846e7363 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -13,7 +13,7 @@ use std::sync::Arc; use tempdir::TempDir; use types::{ Attestation, BeaconBlock, ChainSpec, Domain, Epoch, EthSpec, Fork, Hash256, PublicKey, - Signature, SignedBeaconBlock, SignedRoot, + SelectionProof, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedRoot, Slot, }; #[derive(Clone)] @@ -214,4 +214,46 @@ impl ValidatorStore { Some(()) }) } + + /// Signs an `AggregateAndProof` for a given validator. + /// + /// The resulting `SignedAggregateAndProof` is sent on the aggregation channel and cannot be + /// modified by actors other than the signing validator. 
+ pub fn produce_signed_aggregate_and_proof( + &self, + validator_pubkey: &PublicKey, + validator_index: u64, + aggregate: Attestation, + ) -> Option> { + let validators = self.validators.read(); + let voting_keypair = validators.get(validator_pubkey)?.voting_keypair.as_ref()?; + + Some(SignedAggregateAndProof::from_aggregate( + validator_index, + aggregate, + &voting_keypair.sk, + &self.fork()?, + self.genesis_validators_root, + &self.spec, + )) + } + + /// Produces a `SelectionProof` for the `slot`, signed by with corresponding secret key to + /// `validator_pubkey`. + pub fn produce_selection_proof( + &self, + validator_pubkey: &PublicKey, + slot: Slot, + ) -> Option { + let validators = self.validators.read(); + let voting_keypair = validators.get(validator_pubkey)?.voting_keypair.as_ref()?; + + Some(SelectionProof::new::( + slot, + &voting_keypair.sk, + &self.fork()?, + self.genesis_validators_root, + &self.spec, + )) + } }