diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 3f0bd6f9b..0cda0fd76 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -5,7 +5,6 @@ on:
     branches:
       - unstable
       - stable
-      - capella
       - eip4844
     tags:
       - v*
@@ -36,11 +35,6 @@ jobs:
         run: |
           echo "VERSION=latest" >> $GITHUB_ENV
           echo "VERSION_SUFFIX=-unstable" >> $GITHUB_ENV
-      - name: Extract version (if capella)
-        if: github.event.ref == 'refs/heads/capella'
-        run: |
-          echo "VERSION=capella" >> $GITHUB_ENV
-          echo "VERSION_SUFFIX=" >> $GITHUB_ENV
       - name: Extract version (if eip4844)
         if: github.event.ref == 'refs/heads/eip4844'
         run: |
diff --git a/.github/workflows/linkcheck.yml b/.github/workflows/linkcheck.yml
index 4d4e92ae1..8428c0a3b 100644
--- a/.github/workflows/linkcheck.yml
+++ b/.github/workflows/linkcheck.yml
@@ -7,6 +7,7 @@ on:
   pull_request:
     paths:
       - 'book/**'
+  merge_group:
 
 jobs:
   linkcheck:
diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml
index 8b6728c79..9223c40e1 100644
--- a/.github/workflows/local-testnet.yml
+++ b/.github/workflows/local-testnet.yml
@@ -6,6 +6,7 @@ on:
     branches:
       - unstable
   pull_request:
+  merge_group:
 
 jobs:
   run-local-testnet:
@@ -24,25 +25,23 @@ jobs:
         uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - name: Install ganache
-        run: npm install ganache@latest --global
-      - name: Install geth
+      - name: Install geth (ubuntu)
+        if: matrix.os == 'ubuntu-22.04'
         run: |
           sudo add-apt-repository -y ppa:ethereum/ethereum
           sudo apt-get update
           sudo apt-get install ethereum
-        if: matrix.os == 'ubuntu-22.04'
+      - name: Install geth (mac)
+        if: matrix.os == 'macos-12'
         run: |
           brew tap ethereum/ethereum
           brew install ethereum
-        if: matrix.os == 'macos-12'
       - name: Install GNU sed & GNU grep
+        if: matrix.os == 'macos-12'
         run: |
           brew install gnu-sed grep
           echo "$(brew --prefix)/opt/gnu-sed/libexec/gnubin" >> $GITHUB_PATH
           echo "$(brew --prefix)/opt/grep/libexec/gnubin" >> $GITHUB_PATH
-        if: matrix.os == 'macos-12'
 
-      # https://github.com/actions/cache/blob/main/examples.md#rust---cargo
       - uses: actions/cache@v3
         id: cache-cargo
@@ -59,7 +58,7 @@
         run: make && make install-lcli
 
       - name: Start local testnet
-        run: ./start_local_testnet.sh && sleep 60
+        run: ./start_local_testnet.sh genesis.json && sleep 60
         working-directory: scripts/local_testnet
 
       - name: Print logs
@@ -75,7 +74,7 @@
         working-directory: scripts/local_testnet
 
       - name: Start local testnet with blinded block production
-        run: ./start_local_testnet.sh -p && sleep 60
+        run: ./start_local_testnet.sh -p genesis.json && sleep 60
         working-directory: scripts/local_testnet
 
       - name: Print logs for blinded block testnet
@@ -84,4 +83,4 @@
 
       - name: Stop local testnet with blinded block production
         run: ./stop_local_testnet.sh
-        working-directory: scripts/local_testnet
+        working-directory: scripts/local_testnet
\ No newline at end of file
diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml
index aab5dafe4..1683fe7ef 100644
--- a/.github/workflows/test-suite.yml
+++ b/.github/workflows/test-suite.yml
@@ -8,6 +8,7 @@ on:
       - trying
       - 'pr/*'
   pull_request:
+  merge_group:
 env:
   # Deny warnings in CI
   # Disable debug info (see https://github.com/sigp/lighthouse/issues/4005)
@@ -21,7 +22,7 @@ jobs:
   target-branch-check:
     name: target-branch-check
     runs-on: ubuntu-latest
-    if: github.event_name == 'pull_request'
+    if: github.event_name == 'pull_request' || github.event_name == 'merge_group'
     steps:
       - name: Check that the pull request is not targeting the stable branch
         run: test ${{ github.base_ref }} != "stable"
@@ -58,8 +59,8 @@ jobs:
         uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - name: Install ganache
-        run: sudo npm install -g ganache
+      - name: Install anvil
+        run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
       - name: Run tests in release
         run: make test-release
   release-tests-windows:
@@ -78,8 +79,8 @@
         run: |
           choco install python protoc visualstudio2019-workload-vctools -y
           npm config set msvs_version 2019
-      - name: Install ganache
-        run: npm install -g ganache --loglevel verbose
+      - name: Install anvil
+        run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
       - name: Install make
         run: choco install -y make
       - uses: KyleMayes/install-llvm-action@v1
@@ -140,8 +141,8 @@
         uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - name: Install ganache
-        run: sudo npm install -g ganache
+      - name: Install anvil
+        run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
       - name: Run tests in debug
         run: make test-debug
   state-transition-vectors-ubuntu:
@@ -196,8 +197,8 @@
         uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - name: Install ganache
-        run: sudo npm install -g ganache
+      - name: Install anvil
+        run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
       - name: Run the beacon chain sim that starts from an eth1 contract
         run: cargo run --release --bin simulator eth1-sim
   merge-transition-ubuntu:
@@ -212,8 +213,8 @@
         uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - name: Install ganache
-        run: sudo npm install -g ganache
+      - name: Install anvil
+        run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
       - name: Run the beacon chain sim and go through the merge transition
         run: cargo run --release --bin simulator eth1-sim --post-merge
   no-eth1-simulator-ubuntu:
@@ -228,8 +229,6 @@
         uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - name: Install ganache
-        run: sudo npm install -g ganache
       - name: Run the beacon chain sim without an eth1 connection
         run: cargo run --release --bin simulator no-eth1-sim
   syncing-simulator-ubuntu:
@@ -244,8 +243,8 @@
         uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - name: Install ganache
-        run: sudo npm install -g ganache
+      - name: Install anvil
+        run: cargo install --git https://github.com/foundry-rs/foundry --locked anvil
       - name: Run the syncing simulator
         run: cargo run --release --bin simulator syncing-sim
   doppelganger-protection-test:
@@ -269,14 +268,14 @@
         run: |
           make
           make install-lcli
-      - name: Run the doppelganger protection success test script
-        run: |
-          cd scripts/tests
-          ./doppelganger_protection.sh success genesis.json
       - name: Run the doppelganger protection failure test script
         run: |
           cd scripts/tests
           ./doppelganger_protection.sh failure genesis.json
+      - name: Run the doppelganger protection success test script
+        run: |
+          cd scripts/tests
+          ./doppelganger_protection.sh success genesis.json
   execution-engine-integration-ubuntu:
     name: execution-engine-integration-ubuntu
     runs-on: ubuntu-latest
diff --git a/CONTRIBUTING.md
b/CONTRIBUTING.md index ef23e1ed5..a408fcdd5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -45,8 +45,8 @@ questions. 2. **Work in a feature branch** of your personal fork (github.com/YOUR_NAME/lighthouse) of the main repository (github.com/sigp/lighthouse). -3. Once you feel you have addressed the issue, **create a pull-request** to merge - your changes into the main repository. +3. Once you feel you have addressed the issue, **create a pull-request** with + `unstable` as the base branch to merge your changes into the main repository. 4. Wait for the repository maintainers to **review your changes** to ensure the issue is addressed satisfactorily. Optionally, mention your PR on [discord](https://discord.gg/cyAszAh). diff --git a/Cargo.lock b/Cargo.lock index 9a9bebab8..b8486344a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,16 @@ # It is not intended for manual editing. version = 3 +[[package]] +name = "Inflector" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" +dependencies = [ + "lazy_static", + "regex", +] + [[package]] name = "account_manager" version = "0.3.5" @@ -149,9 +159,9 @@ dependencies = [ [[package]] name = "aes-gcm" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e1366e0c69c9f927b1fa5ce2c7bf9eafc8f9268c0b9800729e8b267612447c" +checksum = "209b47e8954a928e1d72e86eca7000ebb6655fe1436d33eefc2201cad027e237" dependencies = [ "aead 0.5.2", "aes 0.8.2", @@ -187,16 +197,27 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", + "once_cell", + "version_check", +] + +[[package]] +name = "ahash" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +dependencies = [ + "cfg-if", "once_cell", "version_check", ] [[package]] name = "aho-corasick" -version = "0.7.20" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" +checksum = "67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04" dependencies = [ "memchr", ] @@ -226,9 +247,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.70" +version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4" +checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8" [[package]] name = "arbitrary" @@ -269,7 +290,7 @@ dependencies = [ "num-traits", "rusticata-macros", "thiserror", - "time 0.3.20", + "time 0.3.21", ] [[package]] @@ -285,7 +306,7 @@ dependencies = [ "num-traits", "rusticata-macros", "thiserror", - "time 0.3.20", + "time 0.3.21", ] [[package]] @@ -325,9 +346,9 @@ dependencies = [ [[package]] name = "asn1_der" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e22d1f4b888c298a027c99dc9048015fac177587de20fc30232a057dfbe24a21" +checksum = "155a5a185e42c6b77ac7b88a15143d930a9e9727a5b7b77eed417404ab15c247" [[package]] name = "async-io" @@ -377,7 +398,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.13", + "syn 2.0.16", ] [[package]] @@ -388,7 +409,7 @@ checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn 2.0.13", + "syn 2.0.16", ] [[package]] @@ -417,19 +438,20 @@ dependencies = [ [[package]] name = "atomic-waker" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "debc29dde2e69f9e47506b525f639ed42300fc014a3e007832592448fa8e4599" +checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" [[package]] name = "attohttpc" -version = "0.10.1" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf13118df3e3dce4b5ac930641343b91b656e4e72c8f8325838b01a4b1c9d45" +checksum = "fdb8867f378f33f78a811a8eb9bf108ad99430d7aad43315dd9319c827ef6247" dependencies = [ "http", "log", "url", + "wildmatch", ] [[package]] @@ -445,9 +467,9 @@ dependencies = [ [[package]] name = "auto_impl" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a8c1df849285fbacd587de7818cc7d13be6cd2cbcd47a04fb1801b0e2706e33" +checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89" dependencies = [ "proc-macro-error", "proc-macro2", @@ -527,7 +549,7 @@ dependencies = [ "cc", "cfg-if", "libc", - "miniz_oxide", + "miniz_oxide 0.6.2", "object", "rustc-demangle", ] @@ -552,9 +574,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.0" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" +checksum = "3f1e31e207a6b8fb791a38ea3105e6cb541f55e4d029902d3039a4ad07cc4105" [[package]] name = "base64ct" @@ -590,11 +612,10 @@ dependencies = [ "environment", "eth1", "eth2", - "eth2_hashing", "eth2_network_config", - "eth2_ssz", - "eth2_ssz_derive", - "eth2_ssz_types", + "ethereum_hashing", + "ethereum_ssz", + "ethereum_ssz_derive", "execution_layer", "exit-future", "fork_choice", @@ -628,6 +649,7 @@ dependencies = [ "sloggers", "slot_clock", "smallvec", + "ssz_types", "state_processing", "store", "strum", @@ -643,7 +665,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "4.1.0" +version = "4.2.0" dependencies = [ "beacon_chain", "clap", @@ -721,7 +743,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.13", + "syn 2.0.16", "which", ] @@ -761,7 +783,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -805,10 +827,10 @@ version = "0.2.0" dependencies = [ "arbitrary", "blst", - "eth2_hashing", - "eth2_serde_utils", - "eth2_ssz", "ethereum-types 0.14.1", + "ethereum_hashing", + "ethereum_serde_utils", + "ethereum_ssz", "hex", "milagro_bls", "rand 0.7.3", @@ -844,13 +866,13 @@ dependencies = [ [[package]] name = "boot_node" -version = "4.1.0" +version = "4.2.0" dependencies = [ "beacon_node", "clap", "clap_utils", "eth2_network_config", - "eth2_ssz", + "ethereum_ssz", "hex", "lighthouse_network", "log", @@ -898,9 +920,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.12.0" +version = "3.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" +checksum = "3c6ed94e98ecff0c12dd1b04c15ec0d7d9458ca8fe806cea6f12954efe74c63b" [[package]] name = "byte-slice-cast" @@ -961,17 +983,49 @@ dependencies = [ name = "cached_tree_hash" version = "0.1.0" dependencies = [ - "eth2_hashing", - "eth2_ssz", - "eth2_ssz_derive", - "eth2_ssz_types", "ethereum-types 0.14.1", + "ethereum_hashing", + "ethereum_ssz", + "ethereum_ssz_derive", "quickcheck", "quickcheck_macros", "smallvec", + "ssz_types", "tree_hash", ] +[[package]] +name = "camino" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c530edf18f37068ac2d977409ed5cd50d53d73bc653c7647b48eb78976ac9ae2" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo-platform" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbdb825da8a5df079a43676dbe042702f1707b1109f713a01420fbb4cc71fa27" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo_metadata" +version = "0.15.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" +dependencies = [ + "camino", + "cargo-platform", + "semver 1.0.17", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "cast" version = "0.3.0" @@ -1112,8 +1166,8 @@ dependencies = [ "clap", "dirs", "eth2_network_config", - "eth2_ssz", "ethereum-types 0.14.1", + "ethereum_ssz", "hex", "serde", "serde_json", @@ -1156,7 +1210,7 @@ dependencies = [ "state_processing", "store", "task_executor", - "time 0.3.20", + "time 0.3.21", "timer", "tokio", "types", @@ -1171,16 +1225,6 @@ dependencies = [ "cc", ] -[[package]] -name = "codespan-reporting" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" -dependencies = [ - "termcolor", - "unicode-width", -] - [[package]] name = "compare_fields" version = "0.2.0" @@ -1198,23 +1242,13 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c278839b831783b70278b14df4d45e1beb1aad306c07bb796637de9a0e323e8e" +checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" dependencies = [ "crossbeam-utils", ] -[[package]] -name = "console_error_panic_hook" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc" -dependencies = [ - "cfg-if", - "wasm-bindgen", -] - [[package]] name = "const-oid" version = "0.9.2" @@ -1254,9 +1288,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "280a9f2d8b3a38871a3c8a46fb80db65e5e5ed97da80c4d08bf27fb63e35e181" +checksum = "3e4c1eaa2012c47becbbad2ab175484c2a84d1185b566fb2cc5b8707343dfe58" dependencies = [ "libc", ] @@ -1323,9 +1357,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ "cfg-if", "crossbeam-utils", @@ -1454,12 +1488,12 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.2.5" 
+version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbcf33c2a618cbe41ee43ae6e9f2e48368cd9f9db2896f10167d8d762679f639" +checksum = "04d778600249295e82b6ab12e291ed9029407efee0cfb7baf67157edc65964df" dependencies = [ "nix 0.26.2", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -1477,9 +1511,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.0.0-rc.2" +version = "4.0.0-rc.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03d928d978dbec61a1167414f5ec534f24bea0d7a0d24dd9b6233d3d8223e585" +checksum = "8d4ba9852b42210c7538b75484f9daa0655e9a3ac04f693747bb0f02cf3cfe16" dependencies = [ "cfg-if", "fiat-crypto", @@ -1489,50 +1523,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "cxx" -version = "1.0.94" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f61f1b6389c3fe1c316bf8a4dccc90a38208354b330925bce1f74a6c4756eb93" -dependencies = [ - "cc", - "cxxbridge-flags", - "cxxbridge-macro", - "link-cplusplus", -] - -[[package]] -name = "cxx-build" -version = "1.0.94" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12cee708e8962df2aeb38f594aae5d827c022b6460ac71a7a3e2c3c2aae5a07b" -dependencies = [ - "cc", - "codespan-reporting", - "once_cell", - "proc-macro2", - "quote", - "scratch", - "syn 2.0.13", -] - -[[package]] -name = "cxxbridge-flags" -version = "1.0.94" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7944172ae7e4068c533afbb984114a56c46e9ccddda550499caa222902c7f7bb" - -[[package]] -name = "cxxbridge-macro" -version = "1.0.94" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.13", -] - [[package]] name = "darling" version = "0.13.4" @@ -1625,15 +1615,15 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d8666cb01533c39dde32bcbab8e227b4ed6679b2c925eba05feabea39508fb" +checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" [[package]] name = "data-encoding-macro" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86927b7cd2fe88fa698b87404b287ab98d1a0063a34071d92e575b72d3029aca" +checksum = "c904b33cc60130e1aeea4956ab803d08a3f4a0ca82d64ed757afac3891f2bb99" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -1641,9 +1631,9 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5bbed42daaa95e780b60a50546aa345b8413a1e46f9a40a12907d3598f038db" +checksum = "8fdf3fce3ce863539ec1d7fd1b6dcc3c645663376b43ed376bbf887733e4f772" dependencies = [ "data-encoding", "syn 1.0.109", @@ -1680,15 +1670,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4355c25cbf99edcb6b4a0e906f6bdc6956eda149e84455bea49696429b2f8e8" dependencies = [ "futures", - "tokio-util 0.7.7", + "tokio-util 0.7.8", ] [[package]] name = "deposit_contract" version = "0.2.0" dependencies = [ - "eth2_ssz", "ethabi 16.0.0", + "ethereum_ssz", "hex", "reqwest", "serde_json", @@ -1803,9 +1793,9 @@ dependencies = [ [[package]] name = "diesel" -version = "2.0.3" +version = "2.0.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4391a22b19c916e50bec4d6140f29bdda3e3bb187223fe6e3ea0b6e4d1021c04" +checksum = "72eb77396836a4505da85bae0712fa324b74acfe1876d7c2f7e694ef3d0ee373" dependencies = [ "bitflags", "byteorder", @@ -1849,9 +1839,9 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", "crypto-common", @@ -1944,13 +1934,13 @@ dependencies = [ [[package]] name = "displaydoc" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bf95dc3f046b9da4f2d51833c0d3547d8564ef6910f5c1ed130306a75b92886" +checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.16", ] [[package]] @@ -1959,6 +1949,12 @@ version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "65d09067bfacaa79114679b279d7f5885b53295b1e2cfb4e79c8e4bd3d633169" +[[package]] +name = "dunce" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" + [[package]] name = "ecdsa" version = "0.14.8" @@ -2005,10 +2001,10 @@ dependencies = [ "compare_fields_derive", "derivative", "eth2_network_config", - "eth2_serde_utils", - "eth2_ssz", - "eth2_ssz_derive", "ethereum-types 0.14.1", + "ethereum_serde_utils", + "ethereum_ssz", + "ethereum_ssz_derive", "execution_layer", "fork_choice", "fs2", @@ -2044,7 +2040,7 @@ dependencies = [ "base16ct", "crypto-bigint", "der", - "digest 0.10.6", + "digest 0.10.7", "ff", "generic-array", "group", @@ -2081,7 +2077,7 @@ dependencies = [ "rand 0.8.5", "rlp", "serde", - "sha3 0.10.6", + "sha3 0.10.8", "zeroize", ] @@ -2101,7 +2097,7 @@ dependencies = [ "rand 0.8.5", "rlp", "serde", - "sha3 0.10.6", + "sha3 0.10.8", "zeroize", ] @@ -2164,13 +2160,13 @@ dependencies = [ [[package]] name = "errno" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d6a0976c999d473fe89ad888d5a284e55366d9dc9038b1ba2aa15128c4afa0" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ "errno-dragonfly", "libc", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -2200,8 +2196,8 @@ dependencies = [ "environment", "eth1_test_rig", "eth2", - "eth2_ssz", - "eth2_ssz_derive", + "ethereum_ssz", + "ethereum_ssz_derive", "execution_layer", "futures", "hex", @@ -2222,7 +2218,6 @@ dependencies = [ "tokio", "tree_hash", "types", - "web3", ] [[package]] @@ -2230,11 +2225,14 @@ name = "eth1_test_rig" version = "0.2.0" dependencies = [ "deposit_contract", + "ethers-contract", + "ethers-core", + "ethers-providers", + "hex", "serde_json", "tokio", "types", "unused_port", - "web3", ] [[package]] @@ -2244,9 +2242,9 @@ dependencies = [ "account_utils", "bytes", "eth2_keystore", - "eth2_serde_utils", - "eth2_ssz", - "eth2_ssz_derive", + "ethereum_serde_utils", + "ethereum_ssz", + "ethereum_ssz_derive", "futures", "futures-util", "libsecp256k1", @@ -2273,25 +2271,13 @@ dependencies = [ "types", ] -[[package]] -name = "eth2_hashing" -version = "0.3.0" -dependencies = [ - "cpufeatures", - "lazy_static", - "ring", - 
"rustc-hex", - "sha2 0.10.6", - "wasm-bindgen-test", -] - [[package]] name = "eth2_interop_keypairs" version = "0.2.0" dependencies = [ "base64 0.13.1", "bls", - "eth2_hashing", + "ethereum_hashing", "hex", "lazy_static", "num-bigint", @@ -2340,7 +2326,7 @@ version = "0.2.0" dependencies = [ "discv5", "eth2_config", - "eth2_ssz", + "ethereum_ssz", "kzg", "serde_json", "serde_yaml", @@ -2349,55 +2335,6 @@ dependencies = [ "zip", ] -[[package]] -name = "eth2_serde_utils" -version = "0.1.1" -dependencies = [ - "ethereum-types 0.14.1", - "hex", - "serde", - "serde_derive", - "serde_json", -] - -[[package]] -name = "eth2_ssz" -version = "0.4.1" -dependencies = [ - "eth2_ssz_derive", - "ethereum-types 0.14.1", - "itertools", - "smallvec", -] - -[[package]] -name = "eth2_ssz_derive" -version = "0.3.1" -dependencies = [ - "darling 0.13.4", - "eth2_ssz", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "eth2_ssz_types" -version = "0.2.2" -dependencies = [ - "arbitrary", - "derivative", - "eth2_serde_utils", - "eth2_ssz", - "serde", - "serde_derive", - "serde_json", - "smallvec", - "tree_hash", - "tree_hash_derive", - "typenum", -] - [[package]] name = "eth2_wallet" version = "0.1.0" @@ -2450,7 +2387,7 @@ dependencies = [ "regex", "serde", "serde_json", - "sha3 0.10.6", + "sha3 0.10.8", "thiserror", "uint", ] @@ -2536,6 +2473,112 @@ dependencies = [ "uint", ] +[[package]] +name = "ethereum_hashing" +version = "1.0.0-beta.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "233dc6f434ce680dbabf4451ee3380cec46cb3c45d66660445a435619710dd35" +dependencies = [ + "cpufeatures", + "lazy_static", + "ring", + "sha2 0.10.6", +] + +[[package]] +name = "ethereum_serde_utils" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f8cb04ea380a33e9c269fa5f8df6f2d63dee19728235f3e639e7674e038686a" +dependencies = [ + "ethereum-types 0.14.1", + "hex", + "serde", + "serde_derive", + "serde_json", +] + +[[package]] +name = "ethereum_ssz" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32749e96305376af40d7a7ee8ea4c4c64c68d09ff94a81ab78c8d9bc7153c221" +dependencies = [ + "ethereum-types 0.14.1", + "itertools", + "smallvec", +] + +[[package]] +name = "ethereum_ssz_derive" +version = "0.5.2" +source = "git+https://github.com/jimmygchen/ethereum_ssz?rev=231aa8c840262da694e024235dbc638a2980c545#231aa8c840262da694e024235dbc638a2980c545" +dependencies = [ + "darling 0.13.4", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ethers-contract" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9c3c3e119a89f0a9a1e539e7faecea815f74ddcf7c90d0b00d1f524db2fdc9c" +dependencies = [ + "ethers-contract-abigen", + "ethers-contract-derive", + "ethers-core", + "ethers-providers", + "futures-util", + "hex", + "once_cell", + "pin-project", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "ethers-contract-abigen" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d4e5ad46aede34901f71afdb7bb555710ed9613d88d644245c657dc371aa228" +dependencies = [ + "Inflector", + "cfg-if", + "dunce", + "ethers-core", + "eyre", + "getrandom 0.2.9", + "hex", + "proc-macro2", + "quote", + "regex", + "reqwest", + "serde", + "serde_json", + "syn 1.0.109", + "toml", + "url", + "walkdir", +] + +[[package]] +name = "ethers-contract-derive" +version = "1.0.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f192e8e4cf2b038318aae01e94e7644e0659a76219e94bcd3203df744341d61f" +dependencies = [ + "ethers-contract-abigen", + "ethers-core", + "hex", + "proc-macro2", + "quote", + "serde_json", + "syn 1.0.109", +] + [[package]] name = "ethers-core" version = "1.0.2" @@ -2544,12 +2587,14 @@ checksum = "ade3e9c97727343984e1ceada4fdab11142d2ee3472d2c67027d56b1251d4f15" dependencies = [ "arrayvec", "bytes", + "cargo_metadata", "chrono", "elliptic-curve", "ethabi 18.0.0", "generic-array", "hex", "k256", + "once_cell", "open-fastrlp", "rand 0.8.5", "rlp", @@ -2557,6 +2602,7 @@ dependencies = [ "serde", "serde_json", "strum", + "syn 1.0.109", "thiserror", "tiny-keccak", "unicode-xid", @@ -2576,7 +2622,7 @@ dependencies = [ "futures-core", "futures-timer", "futures-util", - "getrandom 0.2.8", + "getrandom 0.2.9", "hashers", "hex", "http", @@ -2638,10 +2684,9 @@ dependencies = [ "bytes", "environment", "eth2", - "eth2_serde_utils", - "eth2_ssz", - "eth2_ssz_types", "ethereum-consensus", + "ethereum_serde_utils", + "ethereum_ssz", "ethers-core", "exit-future", "fork_choice", @@ -2665,6 +2710,7 @@ dependencies = [ "slog", "slot_clock", "ssz-rs", + "ssz_types", "state_processing", "strum", "superstruct 0.6.0", @@ -2689,6 +2735,16 @@ dependencies = [ "futures", ] +[[package]] +name = "eyre" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c2b6b5a29c02cdc822728b7d7b8ae1bab3e3b05d44522770ddd49722eeac7eb" +dependencies = [ + "indenter", + "once_cell", +] + [[package]] name = "fallible-iterator" version = "0.2.0" @@ -2783,13 +2839,13 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.25" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" +checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" dependencies = [ "crc32fast", "libz-sys", - "miniz_oxide", + "miniz_oxide 0.7.1", ] [[package]] @@ -2818,8 +2874,8 @@ name = "fork_choice" version = "0.1.0" dependencies = [ "beacon_chain", - "eth2_ssz", - "eth2_ssz_derive", + "ethereum_ssz", + "ethereum_ssz_derive", "proto_array", "slog", "state_processing", @@ -2910,9 +2966,9 @@ checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-lite" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" dependencies = [ "fastrand", "futures-core", @@ -2931,7 +2987,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.13", + "syn 2.0.16", ] [[package]] @@ -3007,8 +3063,8 @@ dependencies = [ "environment", "eth1", "eth1_test_rig", - "eth2_hashing", - "eth2_ssz", + "ethereum_hashing", + "ethereum_ssz", "futures", "int_to_bytes", "merkle_proof", @@ -3036,9 +3092,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" dependencies = [ "cfg-if", "js-sys", @@ -3114,9 +3170,9 @@ 
dependencies = [ [[package]] name = "h2" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21" +checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782" dependencies = [ "bytes", "fnv", @@ -3127,7 +3183,7 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util 0.7.7", + "tokio-util 0.7.8", "tracing", ] @@ -3158,7 +3214,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" dependencies = [ - "ahash", + "ahash 0.7.6", ] [[package]] @@ -3167,7 +3223,16 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash", + "ahash 0.7.6", +] + +[[package]] +name = "hashbrown" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash 0.8.3", ] [[package]] @@ -3190,11 +3255,11 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69fe1fcf8b4278d860ad0548329f892a3631fb63f82574df68275f34cdbe0ffa" +checksum = "0761a1b9491c4f2e3d66aa0f62d0fba0af9a0e2852e4d48ea506632a4b56e6aa" dependencies = [ - "hashbrown 0.12.3", + "hashbrown 0.13.2", ] [[package]] @@ -3299,7 +3364,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -3362,8 +3427,8 @@ dependencies = [ "environment", "eth1", "eth2", - "eth2_serde_utils", - "eth2_ssz", + "ethereum_serde_utils", + "ethereum_ssz", "execution_layer", "futures", "genesis", @@ -3439,9 +3504,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.25" +version = "0.14.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc5e554ff619822309ffd57d8734d77cd5ce6238bc956f037ea06c58238c9899" +checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" dependencies = [ "bytes", "futures-channel", @@ -3463,15 +3528,15 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.23.2" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" +checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7" dependencies = [ "http", "hyper", - "rustls 0.20.8", + "rustls 0.21.1", "tokio", - "tokio-rustls 0.23.4", + "tokio-rustls 0.24.0", ] [[package]] @@ -3503,12 +3568,11 @@ dependencies = [ [[package]] name = "iana-time-zone-haiku" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ - "cxx", - "cxx-build", + "cc", ] [[package]] @@ -3590,13 +3654,13 @@ dependencies = [ [[package]] name = "igd" -version = "0.11.1" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2fd32c880165b2f776af0b38d206d1cabaebcf46c166ac6ae004a5d45f7d48ef" +checksum = "556b5a75cd4adb7c4ea21c64af1c48cefb2ce7d43dc4352c720a1fe47c21f355" dependencies = [ "attohttpc", "log", - "rand 0.7.3", + "rand 0.8.5", "url", "xmltree", ] @@ -3616,7 +3680,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.4.0", + "parity-scale-codec 3.5.0", ] [[package]] @@ -3657,6 +3721,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "indenter" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" + [[package]] name = "indexmap" version = "1.9.3" @@ -3802,35 +3872,20 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.61" +version = "0.3.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" +checksum = "2f37a4a5928311ac501dee68b3c7613a1037d0edb30c8e5427bd832d55d1b790" dependencies = [ "wasm-bindgen", ] -[[package]] -name = "jsonrpc-core" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" -dependencies = [ - "futures", - "futures-executor", - "futures-util", - "log", - "serde", - "serde_derive", - "serde_json", -] - [[package]] name = "jsonwebtoken" version = "8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ - "base64 0.21.0", + "base64 0.21.1", "pem", "ring", "serde", @@ -3848,14 +3903,14 @@ dependencies = [ "ecdsa", "elliptic-curve", "sha2 0.10.6", - "sha3 0.10.6", + "sha3 0.10.8", ] [[package]] name = "keccak" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3afef3b6eff9ce9d8ff9b3601125eec7f0c8cbac7abd14f355d053fa56c98768" +checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" dependencies = [ "cpufeatures", ] @@ -3877,10 +3932,10 @@ dependencies = [ "arbitrary", "c-kzg", "derivative", - "eth2_hashing", - "eth2_serde_utils", - "eth2_ssz", - "eth2_ssz_derive", + "ethereum_hashing", + "ethereum_serde_utils", + "ethereum_ssz", + "ethereum_ssz_derive", "hex", "serde", "serde_derive", @@ -3904,7 +3959,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "4.1.0" +version = "4.2.0" dependencies = [ "account_utils", "beacon_chain", @@ -3917,10 +3972,10 @@ dependencies = [ "environment", "eth1_test_rig", "eth2", - "eth2_hashing", "eth2_network_config", - "eth2_ssz", "eth2_wallet", + "ethereum_hashing", + "ethereum_ssz", "genesis", "int_to_bytes", "kzg", @@ -3928,6 +3983,7 @@ dependencies = [ "lighthouse_version", "log", "malloc_utils", + "rayon", "sensitive_url", "serde", "serde_json", @@ -3938,7 +3994,6 @@ dependencies = [ "tree_hash", "types", "validator_dir", - "web3", ] [[package]] @@ -3966,15 +4021,15 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.141" +version = "0.2.144" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3304a64d199bb964be99741b7a14d26972741915b3649639149b2479bb46f4b5" +checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1" [[package]] name = "libflate" -version = 
"1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97822bf791bd4d5b403713886a5fbe8bf49520fe78e323b0dc480ca1a03e50b0" +checksum = "5ff4ae71b685bbad2f2f391fe74f6b7659a34871c08b210fdc039e43bee07d18" dependencies = [ "adler32", "crc32fast", @@ -4008,9 +4063,9 @@ checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a" [[package]] name = "libm" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" +checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" [[package]] name = "libmdbx" @@ -4036,7 +4091,7 @@ dependencies = [ "bytes", "futures", "futures-timer", - "getrandom 0.2.8", + "getrandom 0.2.9", "instant", "libp2p-core 0.38.0", "libp2p-dns", @@ -4132,9 +4187,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.39.1" +version = "0.39.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7f8b7d65c070a5a1b5f8f0510648189da08f787b8963f8e21219e0710733af" +checksum = "3c1df63c0b582aa434fb09b2d86897fa2b419ffeccf934b36f87fcedc8e835c2" dependencies = [ "either", "fnv", @@ -4225,18 +4280,18 @@ dependencies = [ [[package]] name = "libp2p-identity" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a8ea433ae0cea7e3315354305237b9897afe45278b2118a7a57ca744e70fd27" +checksum = "9e2d584751cecb2aabaa56106be6be91338a60a0f4e420cf2af639204f596fc1" dependencies = [ "bs58", "ed25519-dalek", "log", "multiaddr 0.17.1", "multihash 0.17.0", - "prost", "quick-protobuf", "rand 0.8.5", + "sha2 0.10.6", "thiserror", "zeroize", ] @@ -4410,7 +4465,7 @@ checksum = "ff08d13d0dc66e5e9ba6279c1de417b84fa0d0adc3b03e5732928c180ec02781" dependencies = [ "futures", "futures-rustls", - "libp2p-core 0.39.1", + "libp2p-core 0.39.2", "libp2p-identity", "rcgen 0.10.0", "ring", @@ -4448,7 +4503,7 @@ dependencies = [ "thiserror", "tinytemplate", "tokio", - "tokio-util 0.7.7", + "tokio-util 0.7.8", "webrtc", ] @@ -4546,9 +4601,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" +checksum = "56ee889ecc9568871456d42f603d6a0ce59ff328d291063a45cbdf0036baf6db" dependencies = [ "cc", "pkg-config", @@ -4557,7 +4612,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "4.1.0" +version = "4.2.0" dependencies = [ "account_manager", "account_utils", @@ -4571,8 +4626,8 @@ dependencies = [ "env_logger 0.9.3", "environment", "eth1", - "eth2_hashing", "eth2_network_config", + "ethereum_hashing", "futures", "lazy_static", "lighthouse_metrics", @@ -4612,9 +4667,8 @@ dependencies = [ "dirs", "discv5", "error-chain", - "eth2_ssz", - "eth2_ssz_derive", - "eth2_ssz_types", + "ethereum_ssz", + "ethereum_ssz_derive", "exit-future", "fnv", "futures", @@ -4639,6 +4693,7 @@ dependencies = [ "slog-term", "smallvec", "snap", + "ssz_types", "strum", "superstruct 0.5.0", "task_executor", @@ -4664,15 +4719,6 @@ dependencies = [ "target_info", ] -[[package]] -name = "link-cplusplus" -version = "1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" -dependencies = [ - "cc", -] - [[package]] name = "linked-hash-map" version = "0.5.6" @@ -4681,9 +4727,9 @@ 
checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.3.1" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59d8c75012853d2e872fb56bc8a2e53718e2cafe1a4c823143141c6d90c322f" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "lmdb-rkv" @@ -4737,11 +4783,18 @@ dependencies = [ name = "logging" version = "0.2.0" dependencies = [ + "chrono", "lazy_static", "lighthouse_metrics", + "parking_lot 0.12.1", + "serde", + "serde_json", "slog", + "slog-async", "slog-term", "sloggers", + "take_mut", + "tokio", ] [[package]] @@ -4838,7 +4891,7 @@ version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -4880,8 +4933,8 @@ dependencies = [ name = "merkle_proof" version = "0.2.0" dependencies = [ - "eth2_hashing", "ethereum-types 0.14.1", + "ethereum_hashing", "lazy_static", "quickcheck", "quickcheck_macros", @@ -4992,6 +5045,15 @@ dependencies = [ "adler", ] +[[package]] +name = "miniz_oxide" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +dependencies = [ + "adler", +] + [[package]] name = "mio" version = "0.8.6" @@ -5103,7 +5165,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c346cf9999c631f002d8f977c4eaeaa0e6386f16007202308d0b3757522c2cc" dependencies = [ "core2", - "digest 0.10.6", + "digest 0.10.7", "multihash-derive", "sha2 0.10.6", "unsigned-varint 0.7.1", @@ -5116,9 +5178,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "835d6ff01d610179fbce3de1694d007e500bf33a7f29689838941d6bf783ae40" dependencies = [ "core2", - "digest 0.10.6", "multihash-derive", - "sha2 0.10.6", "unsigned-varint 0.7.1", ] @@ -5281,9 +5341,8 @@ dependencies = [ "derivative", "environment", "error-chain", - "eth2_ssz", - "eth2_ssz_types", "ethereum-types 0.14.1", + "ethereum_ssz", "execution_layer", "exit-future", "fnv", @@ -5309,6 +5368,7 @@ dependencies = [ "sloggers", "slot_clock", "smallvec", + "ssz_types", "store", "strum", "task_executor", @@ -5394,9 +5454,9 @@ dependencies = [ [[package]] name = "ntapi" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc51db7b362b205941f71232e56c625156eb9a929f8cf74a428fd5bc094a4afc" +checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4" dependencies = [ "winapi", ] @@ -5431,7 +5491,7 @@ dependencies = [ "autocfg 0.1.8", "byteorder", "lazy_static", - "libm 0.2.6", + "libm 0.2.7", "num-integer", "num-iter", "num-traits", @@ -5569,9 +5629,9 @@ dependencies = [ [[package]] name = "openssl" -version = "0.10.49" +version = "0.10.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d2f106ab837a24e03672c59b1239669a0596406ff657c3c0835b6b7f0f35a33" +checksum = "01b8574602df80f7b85fdfc5392fa884a4e3b3f4f35402c070ab34c3d3f78d56" dependencies = [ "bitflags", "cfg-if", @@ -5590,7 +5650,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.13", + "syn 2.0.16", ] [[package]] @@ -5601,18 +5661,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] 
name = "openssl-src" -version = "111.25.2+1.1.1t" +version = "111.25.3+1.1.1t" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "320708a054ad9b3bf314688b5db87cf4d6683d64cfc835e2337924ae62bf4431" +checksum = "924757a6a226bf60da5f7dd0311a34d2b52283dd82ddeb103208ddc66362f80c" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.84" +version = "0.9.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a20eace9dc2d82904039cb76dcf50fb1a0bba071cfd1629720b5d6f1ddba0fa" +checksum = "8e17f59264b2809d77ae94f0e1ebabc434773f370d6ca667bd223ea10e06cc7e" dependencies = [ "cc", "libc", @@ -5628,8 +5688,8 @@ dependencies = [ "beacon_chain", "bitvec 1.0.1", "derivative", - "eth2_ssz", - "eth2_ssz_derive", + "ethereum_ssz", + "ethereum_ssz_derive", "itertools", "lazy_static", "lighthouse_metrics", @@ -5699,9 +5759,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.4.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "637935964ff85a605d114591d4d2c13c5d1ba2806dae97cea6bf180238a749ac" +checksum = "5ddb756ca205bd108aee3c62c6d3c994e1df84a59b9d6d4a5ea42ee1fd5a9a28" dependencies = [ "arrayvec", "bitvec 1.0.1", @@ -5737,9 +5797,9 @@ dependencies = [ [[package]] name = "parking" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" +checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" [[package]] name = "parking_lot" @@ -5883,22 +5943,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.12" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.16", ] [[package]] @@ -5931,9 +5991,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "platforms" @@ -5977,9 +6037,9 @@ dependencies = [ [[package]] name = "polling" -version = "2.6.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e1f879b2998099c2d69ab9605d145d5b661195627eccc680002c4918a7fb6fa" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" dependencies = [ "autocfg 1.1.0", "bitflags", @@ -5988,7 +6048,7 @@ dependencies = [ "libc", "log", "pin-project-lite 0.2.9", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -6023,7 +6083,7 @@ dependencies = [ "cfg-if", "cpufeatures", "opaque-debug", - "universal-hash 0.5.0", + "universal-hash 0.5.1", ] [[package]] @@ -6032,7 +6092,7 @@ version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"78b7fa9f396f51dffd61546fd8573ee20592287996568e6175ceb0f8699ad75d" dependencies = [ - "base64 0.21.0", + "base64 0.21.1", "byteorder", "bytes", "fallible-iterator", @@ -6063,9 +6123,9 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "pq-sys" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b845d6d8ec554f972a2c5298aad68953fd64e7441e846075450b44656a016d1" +checksum = "31c0052426df997c0cbd30789eb44ca097e3541717a7b8fa36b1c464ee7edebd" dependencies = [ "vcpkg", ] @@ -6149,9 +6209,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.56" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" +checksum = "fa1fb82fc0c281dd9671101b66b771ebbe1eaf967b96ac8740dcba4b70005ca8" dependencies = [ "unicode-ident", ] @@ -6208,9 +6268,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e48e50df39172a3e7eb17e14642445da64996989bc212b583015435d39a58537" +checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" dependencies = [ "bytes", "prost-derive", @@ -6218,9 +6278,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c828f93f5ca4826f97fedcbd3f9a536c16b12cff3dbbb4a007f932bbad95b12" +checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", "heck", @@ -6253,9 +6313,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ea9b0f8cbe5e15a8a042d030bd96668db28ecb567ec37d691971ff5731d2b1b" +checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools", @@ -6266,9 +6326,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "379119666929a1afd7a043aa6cf96fa67a6dce9af60c88095a4686dbce4c9c88" +checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" dependencies = [ "prost", ] @@ -6277,12 +6337,13 @@ dependencies = [ name = "proto_array" version = "0.2.0" dependencies = [ - "eth2_ssz", - "eth2_ssz_derive", + "ethereum_ssz", + "ethereum_ssz_derive", "safe_arith", "serde", "serde_derive", "serde_yaml", + "superstruct 0.5.0", "types", ] @@ -6380,9 +6441,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" +checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" dependencies = [ "proc-macro2", ] @@ -6479,7 +6540,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", ] [[package]] @@ -6530,7 +6591,7 @@ checksum = "6413f3de1edee53342e6138e75b56d32e7bc6e332b3bd62d497b1929d4cfbcdd" dependencies = [ "pem", "ring", - "time 0.3.20", + "time 0.3.21", "x509-parser 
0.13.2", "yasna", ] @@ -6543,7 +6604,7 @@ checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ "pem", "ring", - "time 0.3.20", + "time 0.3.21", "yasna", ] @@ -6571,20 +6632,20 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", "redox_syscall 0.2.16", "thiserror", ] [[package]] name = "regex" -version = "1.7.3" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d" +checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-syntax 0.7.1", ] [[package]] @@ -6593,7 +6654,7 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ - "regex-syntax", + "regex-syntax 0.6.29", ] [[package]] @@ -6603,12 +6664,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] -name = "reqwest" -version = "0.11.16" +name = "regex-syntax" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27b71749df584b7f4cac2c426c127a7c785a5106cc98f7a8feb044115f0fa254" +checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c" + +[[package]] +name = "reqwest" +version = "0.11.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55" dependencies = [ - "base64 0.21.0", + "base64 0.21.1", "bytes", "encoding_rs", "futures-core", @@ -6627,15 +6694,15 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite 0.2.9", - "rustls 0.20.8", + "rustls 0.21.1", "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "tokio", "tokio-native-tls", - "tokio-rustls 0.23.4", - "tokio-util 0.7.7", + "tokio-rustls 0.24.0", + "tokio-util 0.7.8", "tower-service", "url", "wasm-bindgen", @@ -6768,16 +6835,16 @@ dependencies = [ "bitflags", "fallible-iterator", "fallible-streaming-iterator", - "hashlink 0.8.1", + "hashlink 0.8.2", "libsqlite3-sys", "smallvec", ] [[package]] name = "rustc-demangle" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4a36c42d1873f9a77c53bde094f9664d9891bc604a45b4798fd2c389ed12e5b" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc-hash" @@ -6820,16 +6887,16 @@ dependencies = [ [[package]] name = "rustix" -version = "0.37.7" +version = "0.37.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aae838e49b3d63e9274e1c01833cc8139d3fec468c3b84688c628f44b1ae11d" +checksum = "acf8729d8542766f1b2cf77eb034d52f40d375bb8b615d0b147089946e16613d" dependencies = [ "bitflags", "errno", "io-lifetimes", "libc", "linux-raw-sys", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -6857,13 +6924,35 @@ dependencies = [ "webpki 0.22.0", ] +[[package]] +name = "rustls" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c911ba11bc8433e811ce56fde130ccf32f5127cab0e0194e9c68c5a5b671791e" +dependencies = [ + "log", + "ring", + 
"rustls-webpki", + "sct 0.7.0", +] + [[package]] name = "rustls-pemfile" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ - "base64 0.21.0", + "base64 0.21.1", +] + +[[package]] +name = "rustls-webpki" +version = "0.100.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b" +dependencies = [ + "ring", + "untrusted", ] [[package]] @@ -6919,21 +7008,21 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.5.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cfdffd972d76b22f3d7f81c8be34b2296afd3a25e0a547bd9abe340a4dbbe97" +checksum = "b569c32c806ec3abdf3b5869fb8bf1e0d275a7c1c9b0b05603d9464632649edf" dependencies = [ "cfg-if", "derive_more", - "parity-scale-codec 3.4.0", + "parity-scale-codec 3.5.0", "scale-info-derive", ] [[package]] name = "scale-info-derive" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61fa974aea2d63dd18a4ec3a49d59af9f34178c73a4f56d2f18205628d00681e" +checksum = "53012eae69e5aa5c14671942a5dd47de59d4cdcff8532a6dd0e081faf1119482" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -6971,12 +7060,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" -[[package]] -name = "scratch" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" - [[package]] name = "scrypt" version = "0.7.0" @@ -7035,29 +7118,11 @@ dependencies = [ "zeroize", ] -[[package]] -name = "secp256k1" -version = "0.21.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c42e6f1735c5f00f51e43e28d6634141f2bcad10931b2609ddd74a86d751260" -dependencies = [ - "secp256k1-sys", -] - -[[package]] -name = "secp256k1-sys" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957da2573cde917463ece3570eab4a0b3f19de6f1646cde62e6fd3868f566036" -dependencies = [ - "cc", -] - [[package]] name = "security-framework" -version = "2.8.2" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" +checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" dependencies = [ "bitflags", "core-foundation", @@ -7068,9 +7133,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" +checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" dependencies = [ "core-foundation-sys", "libc", @@ -7090,6 +7155,9 @@ name = "semver" version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" +dependencies = [ + "serde", +] [[package]] name = "semver-parser" @@ -7113,9 +7181,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.159" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3c04e8343c3daeec41f58990b9d77068df31209f2af111e059e9fe9646693065" +checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2" dependencies = [ "serde_derive", ] @@ -7142,20 +7210,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.159" +version = "1.0.163" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c614d17805b093df4b147b51339e7e44bf05ef59fba1e45d83500bcfb4d8585" +checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.13", + "syn 2.0.16", ] [[package]] name = "serde_json" -version = "1.0.95" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d721eca97ac802aa7777b701877c8004d950fc142651367300d21c1cc0194744" +checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" dependencies = [ "itoa", "ryu", @@ -7170,7 +7238,7 @@ checksum = "bcec881020c684085e55a25f7fd888954d56609ef363479dc5a1305eb0d40cab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.13", + "syn 2.0.16", ] [[package]] @@ -7240,7 +7308,7 @@ checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -7251,7 +7319,7 @@ checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -7275,7 +7343,7 @@ checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.6", + "digest 0.10.7", ] [[package]] @@ -7292,11 +7360,11 @@ dependencies = [ [[package]] name = "sha3" -version = "0.10.6" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "keccak", ] @@ -7330,7 +7398,7 @@ version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ - "digest 0.10.6", + "digest 0.10.7", "rand_core 0.6.4", ] @@ -7343,7 +7411,7 @@ dependencies = [ "num-bigint", "num-traits", "thiserror", - "time 0.3.20", + "time 0.3.21", ] [[package]] @@ -7385,8 +7453,8 @@ version = "0.1.0" dependencies = [ "bincode", "byteorder", - "eth2_ssz", - "eth2_ssz_derive", + "ethereum_ssz", + "ethereum_ssz_derive", "filesystem", "flate2", "lazy_static", @@ -7434,7 +7502,7 @@ name = "slashing_protection" version = "0.1.0" dependencies = [ "arbitrary", - "eth2_serde_utils", + "ethereum_serde_utils", "filesystem", "lazy_static", "r2d2", @@ -7475,7 +7543,7 @@ dependencies = [ "serde", "serde_json", "slog", - "time 0.3.20", + "time 0.3.21", ] [[package]] @@ -7520,7 +7588,7 @@ dependencies = [ "slog", "term", "thread_local", - "time 0.3.20", + "time 0.3.21", ] [[package]] @@ -7571,14 +7639,14 @@ checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" [[package]] name = "snow" -version = "0.9.0" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "774d05a3edae07ce6d68ea6984f3c05e9bba8927e3dd591e3b479e5b03213d0d" +checksum = "5ccba027ba85743e09d15c03296797cad56395089b832b48b5a5217880f57733" dependencies = [ "aes-gcm 0.9.4", "blake2", "chacha20poly1305", - 
"curve25519-dalek 4.0.0-rc.2", + "curve25519-dalek 4.0.0-rc.1", "rand_core 0.6.4", "ring", "rustc_version 0.4.0", @@ -7598,12 +7666,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc8d618c6641ae355025c449427f9e96b98abf99a772be3cef6708d15c77147a" +checksum = "2538b18701741680e0322a2302176d3253a35388e2e62f172f64f4f16605f877" dependencies = [ "libc", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -7662,6 +7730,23 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "ssz_types" +version = "0.5.3" +source = "git+https://github.com/sigp/ssz_types?rev=63a80d04286c8561d5c211230a21bf1299d66059#63a80d04286c8561d5c211230a21bf1299d66059" +dependencies = [ + "arbitrary", + "derivative", + "ethereum_serde_utils", + "ethereum_ssz", + "itertools", + "serde", + "serde_derive", + "smallvec", + "tree_hash", + "typenum", +] + [[package]] name = "state_processing" version = "0.2.0" @@ -7671,10 +7756,9 @@ dependencies = [ "bls", "derivative", "env_logger 0.9.3", - "eth2_hashing", - "eth2_ssz", - "eth2_ssz_derive", - "eth2_ssz_types", + "ethereum_hashing", + "ethereum_ssz", + "ethereum_ssz_derive", "int_to_bytes", "integer-sqrt", "itertools", @@ -7684,6 +7768,7 @@ dependencies = [ "rayon", "safe_arith", "smallvec", + "ssz_types", "tokio", "tree_hash", "types", @@ -7694,7 +7779,7 @@ name = "state_transition_vectors" version = "0.1.0" dependencies = [ "beacon_chain", - "eth2_ssz", + "ethereum_ssz", "lazy_static", "state_processing", "tokio", @@ -7714,8 +7799,8 @@ dependencies = [ "beacon_chain", "db-key", "directory", - "eth2_ssz", - "eth2_ssz_derive", + "ethereum_ssz", + "ethereum_ssz_derive", "itertools", "lazy_static", "leveldb", @@ -7843,8 +7928,8 @@ name = "swap_or_not_shuffle" version = "0.2.0" dependencies = [ "criterion", - "eth2_hashing", "ethereum-types 0.14.1", + "ethereum_hashing", ] [[package]] @@ -7860,9 +7945,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.13" +version = "2.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c9da457c5285ac1f936ebd076af6dac17a61cfe7826f2076b4d015cf47bc8ec" +checksum = "a6f671d4b5ffdb8eadec19c0ae67fe2639df8684bd7bc4b83d986b8db549cf01" dependencies = [ "proc-macro2", "quote", @@ -7904,9 +7989,9 @@ dependencies = [ [[package]] name = "system-configuration" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d75182f12f490e953596550b65ee31bda7c8e043d9386174b353bda50838c3fd" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags", "core-foundation", @@ -8066,7 +8151,7 @@ checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.13", + "syn 2.0.16", ] [[package]] @@ -8101,9 +8186,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.20" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" +checksum = "8f3403384eaacbca9923fa06940178ac13e4edb725486d70e8e15881d0c836cc" dependencies = [ "itoa", "libc", @@ -8115,15 +8200,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" +checksum = 
"7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" +checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" dependencies = [ "time-core", ] @@ -8194,9 +8279,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.27.0" +version = "1.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0de47a4eecbe11f498978a9b29d792f0d2692d1dd003650c24c76510e3bc001" +checksum = "0aa32867d44e6f2ce3385e89dceb990188b8bb0fb25b0cf576647a6f98ac5105" dependencies = [ "autocfg 1.1.0", "bytes", @@ -8208,7 +8293,7 @@ dependencies = [ "signal-hook-registry", "socket2 0.4.9", "tokio-macros", - "windows-sys 0.45.0", + "windows-sys 0.48.0", ] [[package]] @@ -8223,13 +8308,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61a573bdc87985e9d6ddeed1b3d864e8a302c847e40d647746df2f1de209d1ce" +checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.13", + "syn 2.0.16", ] [[package]] @@ -8261,9 +8346,9 @@ dependencies = [ "pin-project-lite 0.2.9", "postgres-protocol", "postgres-types", - "socket2 0.5.1", + "socket2 0.5.3", "tokio", - "tokio-util 0.7.7", + "tokio-util 0.7.8", ] [[package]] @@ -8289,15 +8374,25 @@ dependencies = [ ] [[package]] -name = "tokio-stream" -version = "0.1.12" +name = "tokio-rustls" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb52b74f05dbf495a8fba459fdc331812b96aa086d9eb78101fa0d4569c3313" +checksum = "e0d409377ff5b1e3ca6437aa86c1eb7d40c134bfec254e44c830defa92669db5" +dependencies = [ + "rustls 0.21.1", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", "pin-project-lite 0.2.9", "tokio", - "tokio-util 0.7.7", + "tokio-util 0.7.8", ] [[package]] @@ -8347,9 +8442,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" +checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" dependencies = [ "bytes", "futures-core", @@ -8432,20 +8527,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" +checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.16", ] [[package]] name = "tracing-core" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ "once_cell", "valuable", @@ -8474,9 +8569,9 @@ dependencies = [ [[package]] name = 
"tracing-subscriber" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" +checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" dependencies = [ "matchers", "nu-ansi-term", @@ -8492,9 +8587,9 @@ dependencies = [ [[package]] name = "trackable" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "017e2a1a93718e4e8386d037cfb8add78f1d690467f4350fb582f55af1203167" +checksum = "b15bd114abb99ef8cee977e517c8f37aee63f184f2d08e3e6ceca092373369ae" dependencies = [ "trackable_derive", ] @@ -8511,22 +8606,19 @@ dependencies = [ [[package]] name = "tree_hash" -version = "0.4.1" +version = "0.5.1" +source = "git+https://github.com/sigp/tree_hash?rev=a2471f3b240f407a0ec7436cff11f03e5ec8c706#a2471f3b240f407a0ec7436cff11f03e5ec8c706" dependencies = [ - "beacon_chain", - "eth2_hashing", - "eth2_ssz", - "eth2_ssz_derive", "ethereum-types 0.14.1", - "rand 0.8.5", + "ethereum_hashing", "smallvec", - "tree_hash_derive", - "types", ] [[package]] name = "tree_hash_derive" -version = "0.4.0" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83baa26594d96889e5fef7638dfb0f41e16070301a5cf6da99b9a6a0804cec89" dependencies = [ "darling 0.13.4", "quote", @@ -8681,13 +8773,12 @@ dependencies = [ "compare_fields_derive", "criterion", "derivative", - "eth2_hashing", "eth2_interop_keypairs", - "eth2_serde_utils", - "eth2_ssz", - "eth2_ssz_derive", - "eth2_ssz_types", "ethereum-types 0.14.1", + "ethereum_hashing", + "ethereum_serde_utils", + "ethereum_ssz", + "ethereum_ssz_derive", "hex", "int_to_bytes", "itertools", @@ -8711,6 +8802,7 @@ dependencies = [ "serde_yaml", "slog", "smallvec", + "ssz_types", "state_processing", "superstruct 0.6.0", "swap_or_not_shuffle", @@ -8794,9 +8886,9 @@ dependencies = [ [[package]] name = "universal-hash" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d3160b73c9a19f7e2939a2fdad446c57c1bbbbf4d919d3213ff1267a580d8b5" +checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" dependencies = [ "crypto-common", "subtle", @@ -8860,17 +8952,17 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", "serde", ] [[package]] name = "uuid" -version = "1.3.0" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1674845326ee10d37ca60470760d4288a6f80f304007d92e5c53bab78c9cfd79" +checksum = "345444e32442451b267fc254ae85a209c64be56d2890e601a0c37ff0c3c5ecd2" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", ] [[package]] @@ -8888,7 +8980,7 @@ dependencies = [ "environment", "eth2", "eth2_keystore", - "eth2_serde_utils", + "ethereum_serde_utils", "exit-future", "filesystem", "futures", @@ -8920,6 +9012,7 @@ dependencies = [ "task_executor", "tempfile", "tokio", + "tokio-stream", "tree_hash", "types", "url", @@ -9078,9 +9171,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.84" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" +checksum = 
"5bba0e8cb82ba49ff4e229459ff22a191bbe9a1cb3a341610c9c33efc27ddf73" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -9088,24 +9181,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.84" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" +checksum = "19b04bc93f9d6bdee709f6bd2118f57dd6679cf1176a1af464fca3ab0d66d8fb" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.16", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.34" +version = "0.4.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454" +checksum = "2d1985d03709c53167ce907ff394f5316aa22cb4e12761295c5dc57dacb6297e" dependencies = [ "cfg-if", "js-sys", @@ -9115,9 +9208,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.84" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" +checksum = "14d6b024f1a526bb0234f52840389927257beb670610081360e5a03c5df9c258" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -9125,46 +9218,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.84" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" +checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.16", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.84" +version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" - -[[package]] -name = "wasm-bindgen-test" -version = "0.3.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db36fc0f9fb209e88fb3642590ae0205bb5a56216dabd963ba15879fe53a30b" -dependencies = [ - "console_error_panic_hook", - "js-sys", - "scoped-tls", - "wasm-bindgen", - "wasm-bindgen-futures", - "wasm-bindgen-test-macro", -] - -[[package]] -name = "wasm-bindgen-test-macro" -version = "0.3.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0734759ae6b3b1717d661fe4f016efcfb9828f5edb4520c18eaee05af3b43be9" -dependencies = [ - "proc-macro2", - "quote", -] +checksum = "ed9d5b4305409d1fc9482fee2d7f9bcbf24b3972bf59817ef757e23982242a93" [[package]] name = "wasm-streams" @@ -9229,61 +9298,14 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.61" +version = "0.3.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" +checksum = "3bdd9ef4e984da1187bf8110c5cf5b845fbc87a23602cdf912386a76fcd3a7c2" dependencies = [ "js-sys", "wasm-bindgen", ] -[[package]] -name = "web3" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44f258e254752d210b84fe117b31f1e3cc9cbf04c0d747eb7f8cf7cf5e370f6d" -dependencies = [ - "arrayvec", - "base64 0.13.1", - "bytes", - "derive_more", - "ethabi 16.0.0", - "ethereum-types 0.12.1", - "futures", - "futures-timer", - "headers", - "hex", - "idna 0.2.3", - "jsonrpc-core", - 
"log", - "once_cell", - "parking_lot 0.12.1", - "pin-project", - "reqwest", - "rlp", - "secp256k1", - "serde", - "serde_json", - "soketto", - "tiny-keccak", - "tokio", - "tokio-util 0.6.10", - "url", - "web3-async-native-tls", -] - -[[package]] -name = "web3-async-native-tls" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f6d8d1636b2627fe63518d5a9b38a569405d9c9bc665c43c9c341de57227ebb" -dependencies = [ - "native-tls", - "thiserror", - "tokio", - "url", -] - [[package]] name = "web3signer_tests" version = "0.1.0" @@ -9366,7 +9388,7 @@ dependencies = [ "sha2 0.10.6", "stun", "thiserror", - "time 0.3.20", + "time 0.3.21", "tokio", "turn", "url", @@ -9403,7 +9425,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "942be5bd85f072c3128396f6e5a9bfb93ca8c1939ded735d177b7bcba9a13d05" dependencies = [ "aes 0.6.0", - "aes-gcm 0.10.1", + "aes-gcm 0.10.2", "async-trait", "bincode", "block-modes", @@ -9433,7 +9455,7 @@ dependencies = [ "tokio", "webpki 0.21.4", "webrtc-util", - "x25519-dalek 2.0.0-rc.2", + "x25519-dalek 2.0.0-pre.1", "x509-parser 0.13.2", ] @@ -9455,7 +9477,7 @@ dependencies = [ "tokio", "turn", "url", - "uuid 1.3.0", + "uuid 1.3.3", "waitgroup", "webrtc-mdns", "webrtc-util", @@ -9476,18 +9498,15 @@ dependencies = [ [[package]] name = "webrtc-media" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2a3c157a040324e5049bcbd644ffc9079e6738fa2cfab2bcff64e5cc4c00d7" +checksum = "f72e1650a8ae006017d1a5280efb49e2610c19ccc3c0905b03b648aee9554991" dependencies = [ "byteorder", "bytes", - "derive_builder", - "displaydoc", "rand 0.8.5", "rtp", "thiserror", - "webrtc-util", ] [[package]] @@ -9575,6 +9594,12 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17882f045410753661207383517a6f62ec3dbeb6a4ed2acce01f0728238d1983" +[[package]] +name = "wildmatch" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f44b95f62d34113cf558c93511ac93027e03e9c29a60dd0fd70e6e025c7270a" + [[package]] name = "winapi" version = "0.3.9" @@ -9873,13 +9898,12 @@ dependencies = [ [[package]] name = "x25519-dalek" -version = "2.0.0-rc.2" +version = "2.0.0-pre.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fabd6e16dd08033932fc3265ad4510cc2eab24656058a6dcb107ffe274abcc95" +checksum = "e5da623d8af10a62342bcbbb230e33e58a63255a58012f8653c578e54bab48df" dependencies = [ - "curve25519-dalek 4.0.0-rc.2", + "curve25519-dalek 3.2.0", "rand_core 0.6.4", - "serde", "zeroize", ] @@ -9899,7 +9923,7 @@ dependencies = [ "ring", "rusticata-macros", "thiserror", - "time 0.3.20", + "time 0.3.21", ] [[package]] @@ -9917,14 +9941,14 @@ dependencies = [ "oid-registry 0.6.1", "rusticata-macros", "thiserror", - "time 0.3.20", + "time 0.3.21", ] [[package]] name = "xml-rs" -version = "0.8.4" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d7d3948613f75c98fd9328cfdcc45acc4d360655289d0a7d4ec931392200a3" +checksum = "1690519550bfa95525229b9ca2350c63043a4857b3b0013811b2ccf4a2420b01" [[package]] name = "xmltree" @@ -9960,11 +9984,11 @@ dependencies = [ [[package]] name = "yasna" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aed2e7a52e3744ab4d0c05c20aa065258e84c49fd4226f5191b2ed29712710b4" +checksum = 
"e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" dependencies = [ - "time 0.3.20", + "time 0.3.21", ] [[package]] @@ -9984,7 +10008,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.13", + "syn 2.0.16", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index b83175af5..092ccf32f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -53,22 +53,14 @@ members = [ "consensus/fork_choice", "consensus/proto_array", "consensus/safe_arith", - "consensus/ssz", - "consensus/ssz_derive", - "consensus/ssz_types", - "consensus/serde_utils", "consensus/state_processing", "consensus/swap_or_not_shuffle", - "consensus/tree_hash", - "consensus/tree_hash_derive", "crypto/bls", "crypto/kzg", - "crypto/eth2_hashing", "crypto/eth2_key_derivation", "crypto/eth2_keystore", "crypto/eth2_wallet", - "crypto/kzg", "lcli", @@ -97,14 +89,10 @@ resolver = "2" [patch] [patch.crates-io] warp = { git = "https://github.com/macladson/warp", rev="7e75acc368229a46a236a8c991bf251fe7fe50ef" } -eth2_ssz = { path = "consensus/ssz" } -eth2_ssz_derive = { path = "consensus/ssz_derive" } -eth2_ssz_types = { path = "consensus/ssz_types" } -eth2_hashing = { path = "crypto/eth2_hashing" } -tree_hash = { path = "consensus/tree_hash" } -tree_hash_derive = { path = "consensus/tree_hash_derive" } -eth2_serde_utils = { path = "consensus/serde_utils" } arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="f002b99989b561ddce62e4cf2887b0f8860ae991" } +tree_hash = { git = "https://github.com/sigp/tree_hash", rev="a2471f3b240f407a0ec7436cff11f03e5ec8c706" } +ssz_types = { git = "https://github.com/sigp/ssz_types", rev="63a80d04286c8561d5c211230a21bf1299d66059" } +ethereum_ssz_derive = { git = "https://github.com/jimmygchen/ethereum_ssz", rev="231aa8c840262da694e024235dbc638a2980c545"} [patch."https://github.com/ralexstokes/mev-rs"] mev-rs = { git = "https://github.com/ralexstokes//mev-rs", rev = "7813d4a4a564e0754e9aaab2d95520ba437c3889" } diff --git a/Dockerfile b/Dockerfile index 6f44ae124..cc0241116 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,9 @@ FROM rust:1.68.2-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake clang libclang-dev protobuf-compiler COPY . 
lighthouse ARG FEATURES +ARG PROFILE=release ENV FEATURES $FEATURES +ENV PROFILE $PROFILE RUN cd lighthouse && make FROM ubuntu:22.04 diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index 9e5b57a29..5755a355f 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -27,7 +27,6 @@ pub const PASSWORD_PROMPT: &str = "Enter the keystore password"; pub const DEFAULT_BEACON_NODE: &str = "http://localhost:5052/"; pub const CONFIRMATION_PHRASE: &str = "Exit my validator"; pub const WEBSITE_URL: &str = "https://lighthouse-book.sigmaprime.io/voluntary-exit.html"; -pub const PROMPT: &str = "WARNING: WITHDRAWING STAKED ETH IS NOT CURRENTLY POSSIBLE"; pub fn cli_app<'a, 'b>() -> App<'a, 'b> { App::new("exit") @@ -161,7 +160,6 @@ async fn publish_voluntary_exit( ); if !no_confirmation { eprintln!("WARNING: THIS IS AN IRREVERSIBLE OPERATION\n"); - eprintln!("{}\n", PROMPT); eprintln!( "PLEASE VISIT {} TO MAKE SURE YOU UNDERSTAND THE IMPLICATIONS OF A VOLUNTARY EXIT.", WEBSITE_URL diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 1fb9dc11a..8f4bbc076 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "4.1.0" +version = "4.2.0" authors = ["Paul Hauner ", "Age Manning { pub slasher: Option>>, /// Provides monitoring of a set of explicitly defined validators. pub validator_monitor: RwLock>, + /// The slot at which blocks are downloaded back to. + pub genesis_backfill_slot: Slot, pub proposal_blob_cache: BlobCache, pub data_availability_checker: DataAvailabilityChecker, pub kzg: Option>, @@ -599,7 +602,7 @@ impl BeaconChain { /// Persists `self.eth1_chain` and its caches to disk. pub fn persist_eth1_cache(&self) -> Result<(), Error> { - let _timer = metrics::start_timer(&metrics::PERSIST_OP_POOL); + let _timer = metrics::start_timer(&metrics::PERSIST_ETH1_CACHE); if let Some(eth1_chain) = self.eth1_chain.as_ref() { self.store @@ -4891,6 +4894,7 @@ impl BeaconChain { &mut state, &block, signature_strategy, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &self.spec, @@ -5805,6 +5809,7 @@ impl BeaconChain { let shuffling_id = BlockShufflingIds { current: head_block.current_epoch_shuffling_id.clone(), next: head_block.next_epoch_shuffling_id.clone(), + previous: None, block_root: head_block.root, } .id_for_epoch(shuffling_epoch) diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 71160fcb6..9b2edbd8b 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -218,7 +218,6 @@ where finalized_checkpoint: self.finalized_checkpoint, justified_checkpoint: self.justified_checkpoint, justified_balances: self.justified_balances.effective_balances.clone(), - best_justified_checkpoint: JUNK_BEST_JUSTIFIED_CHECKPOINT, unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, proposer_boost_root: self.proposer_boost_root, @@ -355,24 +354,62 @@ where } } +pub type PersistedForkChoiceStore = PersistedForkChoiceStoreV17; + /// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database. 
-#[superstruct(variants(V11), variant_attributes(derive(Encode, Decode)), no_enum)] +#[superstruct( + variants(V11, V17), + variant_attributes(derive(Encode, Decode)), + no_enum +)] pub struct PersistedForkChoiceStore { - #[superstruct(only(V11))] + #[superstruct(only(V11, V17))] pub balances_cache: BalancesCacheV8, pub time: Slot, pub finalized_checkpoint: Checkpoint, pub justified_checkpoint: Checkpoint, pub justified_balances: Vec, + #[superstruct(only(V11))] pub best_justified_checkpoint: Checkpoint, - #[superstruct(only(V11))] + #[superstruct(only(V11, V17))] pub unrealized_justified_checkpoint: Checkpoint, - #[superstruct(only(V11))] + #[superstruct(only(V11, V17))] pub unrealized_finalized_checkpoint: Checkpoint, - #[superstruct(only(V11))] + #[superstruct(only(V11, V17))] pub proposer_boost_root: Hash256, - #[superstruct(only(V11))] + #[superstruct(only(V11, V17))] pub equivocating_indices: BTreeSet, } -pub type PersistedForkChoiceStore = PersistedForkChoiceStoreV11; +impl Into for PersistedForkChoiceStoreV11 { + fn into(self) -> PersistedForkChoiceStore { + PersistedForkChoiceStore { + balances_cache: self.balances_cache, + time: self.time, + finalized_checkpoint: self.finalized_checkpoint, + justified_checkpoint: self.justified_checkpoint, + justified_balances: self.justified_balances, + unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, + proposer_boost_root: self.proposer_boost_root, + equivocating_indices: self.equivocating_indices, + } + } +} + +impl Into for PersistedForkChoiceStore { + fn into(self) -> PersistedForkChoiceStoreV11 { + PersistedForkChoiceStoreV11 { + balances_cache: self.balances_cache, + time: self.time, + finalized_checkpoint: self.finalized_checkpoint, + justified_checkpoint: self.justified_checkpoint, + justified_balances: self.justified_balances, + best_justified_checkpoint: JUNK_BEST_JUSTIFIED_CHECKPOINT, + unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, + proposer_boost_root: self.proposer_boost_root, + equivocating_indices: self.equivocating_indices, + } + } +} diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 2107fbf69..5306df846 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -84,7 +84,7 @@ use state_processing::{ per_block_processing, per_slot_processing, state_advance::partial_state_advance, BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError, - VerifyBlockRoot, + StateProcessingStrategy, VerifyBlockRoot, }; use std::borrow::Cow; use std::fs; @@ -1615,6 +1615,7 @@ impl ExecutionPendingBlock { block.as_block(), // Signatures were verified earlier in this function. 
BlockSignatureStrategy::NoVerification, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut consensus_context, &chain.spec, diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 78f39e358..056714417 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -8,7 +8,7 @@ use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_bound use crate::head_tracker::HeadTracker; use crate::migrate::{BackgroundMigrator, MigratorConfig}; use crate::persisted_beacon_chain::PersistedBeaconChain; -use crate::shuffling_cache::ShufflingCache; +use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; use crate::snapshot_cache::{SnapshotCache, DEFAULT_SNAPSHOT_CACHE_SIZE}; use crate::timeout_rw_lock::TimeoutRwLock; use crate::validator_monitor::ValidatorMonitor; @@ -710,6 +710,8 @@ where )?; } + let head_shuffling_ids = BlockShufflingIds::try_from_head(head_block_root, &head_state)?; + let mut head_snapshot = BeaconSnapshot { beacon_block_root: head_block_root, beacon_block: Arc::new(head_block), @@ -791,6 +793,29 @@ where let canonical_head = CanonicalHead::new(fork_choice, Arc::new(head_snapshot)); let shuffling_cache_size = self.chain_config.shuffling_cache_size; + // Calculate the weak subjectivity point in which to backfill blocks to. + let genesis_backfill_slot = if self.chain_config.genesis_backfill { + Slot::new(0) + } else { + let backfill_epoch_range = (self.spec.min_validator_withdrawability_delay + + self.spec.churn_limit_quotient) + .as_u64() + / 2; + match slot_clock.now() { + Some(current_slot) => { + let genesis_backfill_epoch = current_slot + .epoch(TEthSpec::slots_per_epoch()) + .saturating_sub(backfill_epoch_range); + genesis_backfill_epoch.start_slot(TEthSpec::slots_per_epoch()) + } + None => { + // The slot clock cannot derive the current slot. We therefore assume we are + // at or prior to genesis and backfill should sync all the way to genesis. + Slot::new(0) + } + } + }; + let beacon_chain = BeaconChain { spec: self.spec.clone(), config: self.chain_config, @@ -845,7 +870,11 @@ where DEFAULT_SNAPSHOT_CACHE_SIZE, head_for_snapshot_cache, )), - shuffling_cache: TimeoutRwLock::new(ShufflingCache::new(shuffling_cache_size)), + shuffling_cache: TimeoutRwLock::new(ShufflingCache::new( + shuffling_cache_size, + head_shuffling_ids, + log.clone(), + )), eth1_finalization_cache: TimeoutRwLock::new(Eth1FinalizationCache::new(log.clone())), beacon_proposer_cache: <_>::default(), block_times_cache: <_>::default(), @@ -860,6 +889,7 @@ where graffiti: self.graffiti, slasher: self.slasher.clone(), validator_monitor: RwLock::new(validator_monitor), + genesis_backfill_slot, //TODO(sean) should we move kzg solely to the da checker? data_availability_checker: DataAvailabilityChecker::new( slot_clock, @@ -1036,7 +1066,7 @@ mod test { use super::*; use crate::test_utils::EphemeralHarnessType; use crate::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; - use eth2_hashing::hash; + use ethereum_hashing::hash; use genesis::{ generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH, }; diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index e3adca9ca..35a701a54 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -31,7 +31,9 @@ //! the head block root. This is unacceptable for fast-responding functions like the networking //! stack. 
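For context on the new argument threaded through the `per_block_processing` call sites above: every caller touched in this diff passes `StateProcessingStrategy::Accurate`. The rough sketch below shows the assumed shape of the enum; the relaxed variant name `Inconsistent` is recalled from the block-replay code, not taken from this diff, and the real definition lives in the `state_processing` crate.

// Sketch only; see `state_processing` for the real definition.
pub enum StateProcessingStrategy {
    // Recompute state roots and run the full set of consistency checks. This is
    // what every call site changed in this diff requests.
    Accurate,
    // Assumed second variant: skip the expensive state-root bookkeeping when
    // replaying blocks that have already been verified elsewhere.
    Inconsistent,
}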
+use crate::beacon_chain::ATTESTATION_CACHE_LOCK_TIMEOUT; use crate::persisted_fork_choice::PersistedForkChoice; +use crate::shuffling_cache::BlockShufflingIds; use crate::{ beacon_chain::{ BeaconForkChoice, BeaconStore, OverrideForkchoiceUpdate, @@ -850,6 +852,35 @@ impl BeaconChain { ); }); + match BlockShufflingIds::try_from_head( + new_snapshot.beacon_block_root, + &new_snapshot.beacon_state, + ) { + Ok(head_shuffling_ids) => { + self.shuffling_cache + .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT) + .map(|mut shuffling_cache| { + shuffling_cache.update_head_shuffling_ids(head_shuffling_ids) + }) + .unwrap_or_else(|| { + error!( + self.log, + "Failed to obtain cache write lock"; + "lock" => "shuffling_cache", + "task" => "update head shuffling decision root" + ); + }); + } + Err(e) => { + error!( + self.log, + "Failed to get head shuffling ids"; + "error" => ?e, + "head_block_root" => ?new_snapshot.beacon_block_root + ); + } + } + observe_head_block_delays( &mut self.block_times_cache.write(), &new_head_proto_block, diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index 992143531..a74fdced1 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -73,6 +73,9 @@ pub struct ChainConfig { pub optimistic_finalized_sync: bool, /// The size of the shuffling cache, pub shuffling_cache_size: usize, + /// If using a weak-subjectivity sync, whether we should download blocks all the way back to + /// genesis. + pub genesis_backfill: bool, /// Whether to send payload attributes every slot, regardless of connected proposers. /// /// This is useful for block builders and testing. @@ -106,6 +109,7 @@ impl Default for ChainConfig { // This value isn't actually read except in tests. 
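To put a rough number on the backfill horizon computed in the `builder.rs` hunk above: with `genesis_backfill` left at its new default of `false`, a checkpoint-synced node only backfills roughly the weak subjectivity period's worth of blocks. The figures below assume the mainnet preset (`MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256` epochs, `CHURN_LIMIT_QUOTIENT = 65_536`), which is not stated in the diff itself; as the `historical_blocks.rs` hunk below notes, historic state reconstruction still requires backfilling all the way to slot 0.

// Standalone arithmetic check of the default backfill horizon (sketch, mainnet
// preset values assumed).
fn main() {
    let min_validator_withdrawability_delay: u64 = 256; // epochs
    let churn_limit_quotient: u64 = 65_536;
    let backfill_epoch_range = (min_validator_withdrawability_delay + churn_limit_quotient) / 2;
    assert_eq!(backfill_epoch_range, 32_896);

    // 32 slots per epoch, 12 seconds per slot.
    let seconds = backfill_epoch_range * 32 * 12;
    println!("backfill horizon ≈ {} days", seconds / 86_400); // prints ≈ 146 days
}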
optimistic_finalized_sync: true, shuffling_cache_size: crate::shuffling_cache::DEFAULT_CACHE_SIZE, + genesis_backfill: false, always_prepare_payload: false, enable_backfill_rate_limiting: true, } diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index f820622e5..8b6c6b374 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -1,7 +1,7 @@ use crate::metrics; use eth1::{Config as Eth1Config, Eth1Block, Service as HttpService}; use eth2::lighthouse::Eth1SyncStatusData; -use eth2_hashing::hash; +use ethereum_hashing::hash; use int_to_bytes::int_to_bytes32; use slog::{debug, error, trace, Logger}; use ssz::{Decode, Encode}; diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs index ef23248ab..ccd17af24 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -5,7 +5,7 @@ use slog::{info, warn, Logger}; use state_processing::state_advance::complete_state_advance; use state_processing::{ per_block_processing, per_block_processing::BlockSignatureStrategy, ConsensusContext, - VerifyBlockRoot, + StateProcessingStrategy, VerifyBlockRoot, }; use std::sync::Arc; use std::time::Duration; @@ -177,6 +177,7 @@ pub fn reset_fork_choice_to_finalization, Cold: It &mut state, &block, BlockSignatureStrategy::NoVerification, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, spec, diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index 85a9ec8fb..657415ac8 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -193,13 +193,17 @@ impl BeaconChain { oldest_block_parent: expected_block_root, ..anchor_info }; - let backfill_complete = new_anchor.block_backfill_complete(); + let backfill_complete = new_anchor.block_backfill_complete(self.genesis_backfill_slot); self.store .compare_and_set_anchor_info_with_write(Some(anchor_info), Some(new_anchor))?; // If backfill has completed and the chain is configured to reconstruct historic states, // send a message to the background migrator instructing it to begin reconstruction. - if backfill_complete && self.config.reconstruct_historic_states { + // This can only happen if we have backfilled all the way to genesis. + if backfill_complete + && self.genesis_backfill_slot == Slot::new(0) + && self.config.reconstruct_historic_states + { self.store_migrator.process_reconstruction(); } diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index a8fdc0abd..40f24af77 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -876,6 +876,14 @@ lazy_static! 
{ "beacon_sync_committee_message_gossip_verification_seconds", "Full runtime of sync contribution gossip verification" ); + pub static ref SYNC_MESSAGE_EQUIVOCATIONS: Result = try_create_int_counter( + "sync_message_equivocations_total", + "Number of sync messages with the same validator index for different blocks" + ); + pub static ref SYNC_MESSAGE_EQUIVOCATIONS_TO_HEAD: Result = try_create_int_counter( + "sync_message_equivocations_to_head_total", + "Number of sync message which conflict with a previous message but elect the head" + ); /* * Sync Committee Contribution Verification diff --git a/beacon_node/beacon_chain/src/observed_attesters.rs b/beacon_node/beacon_chain/src/observed_attesters.rs index ed22beaec..59c67bd1b 100644 --- a/beacon_node/beacon_chain/src/observed_attesters.rs +++ b/beacon_node/beacon_chain/src/observed_attesters.rs @@ -20,7 +20,7 @@ use std::collections::{HashMap, HashSet}; use std::hash::Hash; use std::marker::PhantomData; use types::slot_data::SlotData; -use types::{Epoch, EthSpec, Slot, Unsigned}; +use types::{Epoch, EthSpec, Hash256, Slot, Unsigned}; /// The maximum capacity of the `AutoPruningEpochContainer`. /// @@ -39,10 +39,10 @@ pub const MAX_CACHED_EPOCHS: u64 = 3; pub type ObservedAttesters = AutoPruningEpochContainer; pub type ObservedSyncContributors = - AutoPruningSlotContainer, E>; + AutoPruningSlotContainer, E>; pub type ObservedAggregators = AutoPruningEpochContainer; pub type ObservedSyncAggregators = - AutoPruningSlotContainer; + AutoPruningSlotContainer; #[derive(Debug, PartialEq)] pub enum Error { @@ -62,7 +62,7 @@ pub enum Error { } /// Implemented on an item in an `AutoPruningContainer`. -pub trait Item { +pub trait Item { /// Instantiate `Self` with the given `capacity`. fn with_capacity(capacity: usize) -> Self; @@ -75,11 +75,11 @@ pub trait Item { /// Returns the number of validators that have been observed by `self`. fn validator_count(&self) -> usize; - /// Store `validator_index` in `self`. - fn insert(&mut self, validator_index: usize) -> bool; + /// Store `validator_index` and `value` in `self`. + fn insert(&mut self, validator_index: usize, value: T) -> bool; - /// Returns `true` if `validator_index` has been stored in `self`. - fn contains(&self, validator_index: usize) -> bool; + /// Returns `Some(T)` if there is an entry for `validator_index`. 
+ fn get(&self, validator_index: usize) -> Option; } /// Stores a `BitVec` that represents which validator indices have attested or sent sync committee @@ -88,7 +88,7 @@ pub struct EpochBitfield { bitfield: BitVec, } -impl Item for EpochBitfield { +impl Item<()> for EpochBitfield { fn with_capacity(capacity: usize) -> Self { Self { bitfield: BitVec::with_capacity(capacity), @@ -108,7 +108,7 @@ impl Item for EpochBitfield { self.bitfield.iter().filter(|bit| **bit).count() } - fn insert(&mut self, validator_index: usize) -> bool { + fn insert(&mut self, validator_index: usize, _value: ()) -> bool { self.bitfield .get_mut(validator_index) .map(|mut bit| { @@ -129,8 +129,11 @@ impl Item for EpochBitfield { }) } - fn contains(&self, validator_index: usize) -> bool { - self.bitfield.get(validator_index).map_or(false, |bit| *bit) + fn get(&self, validator_index: usize) -> Option<()> { + self.bitfield + .get(validator_index) + .map_or(false, |bit| *bit) + .then_some(()) } } @@ -140,7 +143,7 @@ pub struct EpochHashSet { set: HashSet, } -impl Item for EpochHashSet { +impl Item<()> for EpochHashSet { fn with_capacity(capacity: usize) -> Self { Self { set: HashSet::with_capacity(capacity), @@ -163,27 +166,27 @@ impl Item for EpochHashSet { /// Inserts the `validator_index` in the set. Returns `true` if the `validator_index` was /// already in the set. - fn insert(&mut self, validator_index: usize) -> bool { + fn insert(&mut self, validator_index: usize, _value: ()) -> bool { !self.set.insert(validator_index) } /// Returns `true` if the `validator_index` is in the set. - fn contains(&self, validator_index: usize) -> bool { - self.set.contains(&validator_index) + fn get(&self, validator_index: usize) -> Option<()> { + self.set.contains(&validator_index).then_some(()) } } /// Stores a `HashSet` of which validator indices have created a sync aggregate during a /// slot. pub struct SyncContributorSlotHashSet { - set: HashSet, + map: HashMap, phantom: PhantomData, } -impl Item for SyncContributorSlotHashSet { +impl Item for SyncContributorSlotHashSet { fn with_capacity(capacity: usize) -> Self { Self { - set: HashSet::with_capacity(capacity), + map: HashMap::with_capacity(capacity), phantom: PhantomData, } } @@ -194,22 +197,24 @@ impl Item for SyncContributorSlotHashSet { } fn len(&self) -> usize { - self.set.len() + self.map.len() } fn validator_count(&self) -> usize { - self.set.len() + self.map.len() } /// Inserts the `validator_index` in the set. Returns `true` if the `validator_index` was /// already in the set. - fn insert(&mut self, validator_index: usize) -> bool { - !self.set.insert(validator_index) + fn insert(&mut self, validator_index: usize, beacon_block_root: Hash256) -> bool { + self.map + .insert(validator_index, beacon_block_root) + .is_some() } /// Returns `true` if the `validator_index` is in the set. - fn contains(&self, validator_index: usize) -> bool { - self.set.contains(&validator_index) + fn get(&self, validator_index: usize) -> Option { + self.map.get(&validator_index).copied() } } @@ -219,7 +224,7 @@ pub struct SyncAggregatorSlotHashSet { set: HashSet, } -impl Item for SyncAggregatorSlotHashSet { +impl Item<()> for SyncAggregatorSlotHashSet { fn with_capacity(capacity: usize) -> Self { Self { set: HashSet::with_capacity(capacity), @@ -241,13 +246,13 @@ impl Item for SyncAggregatorSlotHashSet { /// Inserts the `validator_index` in the set. Returns `true` if the `validator_index` was /// already in the set. 
- fn insert(&mut self, validator_index: usize) -> bool { + fn insert(&mut self, validator_index: usize, _value: ()) -> bool { !self.set.insert(validator_index) } /// Returns `true` if the `validator_index` is in the set. - fn contains(&self, validator_index: usize) -> bool { - self.set.contains(&validator_index) + fn get(&self, validator_index: usize) -> Option<()> { + self.set.contains(&validator_index).then_some(()) } } @@ -275,7 +280,7 @@ impl Default for AutoPruningEpochContainer { } } -impl AutoPruningEpochContainer { +impl, E: EthSpec> AutoPruningEpochContainer { /// Observe that `validator_index` has produced attestation `a`. Returns `Ok(true)` if `a` has /// previously been observed for `validator_index`. /// @@ -293,7 +298,7 @@ impl AutoPruningEpochContainer { self.prune(epoch); if let Some(item) = self.items.get_mut(&epoch) { - Ok(item.insert(validator_index)) + Ok(item.insert(validator_index, ())) } else { // To avoid re-allocations, try and determine a rough initial capacity for the new item // by obtaining the mean size of all items in earlier epoch. @@ -309,7 +314,7 @@ impl AutoPruningEpochContainer { let initial_capacity = sum.checked_div(count).unwrap_or_else(T::default_capacity); let mut item = T::with_capacity(initial_capacity); - item.insert(validator_index); + item.insert(validator_index, ()); self.items.insert(epoch, item); Ok(false) @@ -333,7 +338,7 @@ impl AutoPruningEpochContainer { let exists = self .items .get(&epoch) - .map_or(false, |item| item.contains(validator_index)); + .map_or(false, |item| item.get(validator_index).is_some()); Ok(exists) } @@ -392,7 +397,7 @@ impl AutoPruningEpochContainer { pub fn index_seen_at_epoch(&self, index: usize, epoch: Epoch) -> bool { self.items .get(&epoch) - .map(|item| item.contains(index)) + .map(|item| item.get(index).is_some()) .unwrap_or(false) } } @@ -405,23 +410,63 @@ impl AutoPruningEpochContainer { /// sync contributions with an epoch prior to `data.slot - 3` will be cleared from the cache. /// /// `V` should be set to a `SyncAggregatorSlotHashSet` or a `SyncContributorSlotHashSet`. -pub struct AutoPruningSlotContainer { +pub struct AutoPruningSlotContainer { lowest_permissible_slot: Slot, items: HashMap, - _phantom: PhantomData, + _phantom_e: PhantomData, + _phantom_s: PhantomData, } -impl Default for AutoPruningSlotContainer { +impl Default for AutoPruningSlotContainer { fn default() -> Self { Self { lowest_permissible_slot: Slot::new(0), items: HashMap::new(), - _phantom: PhantomData, + _phantom_e: PhantomData, + _phantom_s: PhantomData, } } } -impl AutoPruningSlotContainer { +impl, E: EthSpec> + AutoPruningSlotContainer +{ + /// Observes the given `value` for the given `validator_index`. + /// + /// The `override_observation` function is supplied `previous_observation` + /// and `value`. If it returns `true`, then any existing observation will be + /// overridden. + /// + /// This function returns `Some` if: + /// - An observation already existed for the validator, AND, + /// - The `override_observation` function returned `false`. + /// + /// Alternatively, it returns `None` if: + /// - An observation did not already exist for the given validator, OR, + /// - The `override_observation` function returned `true`. + pub fn observe_validator_with_override( + &mut self, + key: K, + validator_index: usize, + value: S, + override_observation: F, + ) -> Result, Error> + where + F: Fn(&S, &S) -> bool, + { + if let Some(prev_observation) = self.observation_for_validator(key, validator_index)? 
{ + if override_observation(&prev_observation, &value) { + self.observe_validator(key, validator_index, value)?; + Ok(None) + } else { + Ok(Some(prev_observation)) + } + } else { + self.observe_validator(key, validator_index, value)?; + Ok(None) + } + } + /// Observe that `validator_index` has produced a sync committee message. Returns `Ok(true)` if /// the sync committee message has previously been observed for `validator_index`. /// @@ -429,14 +474,19 @@ impl AutoPruningSlotContainer Result { + pub fn observe_validator( + &mut self, + key: K, + validator_index: usize, + value: S, + ) -> Result { let slot = key.get_slot(); self.sanitize_request(slot, validator_index)?; self.prune(slot); if let Some(item) = self.items.get_mut(&key) { - Ok(item.insert(validator_index)) + Ok(item.insert(validator_index, value)) } else { // To avoid re-allocations, try and determine a rough initial capacity for the new item // by obtaining the mean size of all items in earlier slot. @@ -452,32 +502,45 @@ impl AutoPruningSlotContainer Result { + self.observation_for_validator(key, validator_index) + .map(|observation| observation.is_some()) + } + + /// Returns `Ok(Some)` if the `validator_index` has already produced a + /// conflicting sync committee message. + /// + /// ## Errors + /// + /// - `validator_index` is higher than `VALIDATOR_REGISTRY_LIMIT`. + /// - `key.slot` is earlier than `self.lowest_permissible_slot`. + pub fn observation_for_validator( + &self, + key: K, + validator_index: usize, + ) -> Result, Error> { self.sanitize_request(key.get_slot(), validator_index)?; - let exists = self + let observation = self .items .get(&key) - .map_or(false, |item| item.contains(validator_index)); + .and_then(|item| item.get(validator_index)); - Ok(exists) + Ok(observation) } /// Returns the number of validators that have been observed at the given `slot`. Returns @@ -561,6 +624,116 @@ mod tests { type E = types::MainnetEthSpec; + #[test] + fn value_storage() { + type Container = AutoPruningSlotContainer, E>; + + let mut store: Container = <_>::default(); + let key = Slot::new(0); + let validator_index = 0; + let value = Hash256::zero(); + + // Assert there is no entry. + assert!(store + .observation_for_validator(key, validator_index) + .unwrap() + .is_none()); + assert!(!store + .validator_has_been_observed(key, validator_index) + .unwrap()); + + // Add an entry. + assert!(!store + .observe_validator(key, validator_index, value) + .unwrap()); + + // Assert there is a correct entry. + assert_eq!( + store + .observation_for_validator(key, validator_index) + .unwrap(), + Some(value) + ); + assert!(store + .validator_has_been_observed(key, validator_index) + .unwrap()); + + let alternate_value = Hash256::from_low_u64_be(1); + + // Assert that override false does not override. + assert_eq!( + store + .observe_validator_with_override(key, validator_index, alternate_value, |_, _| { + false + }) + .unwrap(), + Some(value) + ); + + // Assert that override true overrides and acts as if there was never an + // entry there. + assert_eq!( + store + .observe_validator_with_override(key, validator_index, alternate_value, |_, _| { + true + }) + .unwrap(), + None + ); + assert_eq!( + store + .observation_for_validator(key, validator_index) + .unwrap(), + Some(alternate_value) + ); + + // Reset the store. 
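Stepping back from the test above (which continues below this aside): the substantive change in `observed_attesters.rs` is that the slot-keyed containers now record a value per validator, namely the voted block root, rather than a bare seen/not-seen bit, and that is what makes the sync-message equivocation counters added in `metrics.rs` possible. A minimal sketch of the behaviour at the `Item` trait level follows, using only items defined in this hunk; the generic parameters are written out here even though the diff rendering elides some of them.

// Sketch, not part of the diff: the per-slot "hash set" now acts as a map from
// validator index to the block root that validator voted for.
fn item_sketch() {
    use types::{Hash256, MainnetEthSpec};

    let mut item =
        <SyncContributorSlotHashSet<MainnetEthSpec> as Item<Hash256>>::with_capacity(4);

    let root_a = Hash256::from_low_u64_be(1);
    let root_b = Hash256::from_low_u64_be(2);

    // First observation for validator 0: nothing was stored previously.
    assert!(!item.insert(0, root_a));
    // A conflicting second observation reports that an entry already existed...
    assert!(item.insert(0, root_b));
    // ...and replaces the stored root, so callers can compare the old and new votes.
    assert_eq!(item.get(0), Some(root_b));
}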
+ let mut store: Container = <_>::default(); + + // Asset that a new entry with override = false is inserted + assert_eq!( + store + .observation_for_validator(key, validator_index) + .unwrap(), + None + ); + assert_eq!( + store + .observe_validator_with_override(key, validator_index, value, |_, _| { false }) + .unwrap(), + None, + ); + assert_eq!( + store + .observation_for_validator(key, validator_index) + .unwrap(), + Some(value) + ); + + // Reset the store. + let mut store: Container = <_>::default(); + + // Asset that a new entry with override = true is inserted + assert_eq!( + store + .observation_for_validator(key, validator_index) + .unwrap(), + None + ); + assert_eq!( + store + .observe_validator_with_override(key, validator_index, value, |_, _| { true }) + .unwrap(), + None, + ); + assert_eq!( + store + .observation_for_validator(key, validator_index) + .unwrap(), + Some(value) + ); + } + macro_rules! test_suite_epoch { ($mod_name: ident, $type: ident) => { #[cfg(test)] @@ -722,7 +895,7 @@ mod tests { test_suite_epoch!(observed_aggregators, ObservedAggregators); macro_rules! test_suite_slot { - ($mod_name: ident, $type: ident) => { + ($mod_name: ident, $type: ident, $value: expr) => { #[cfg(test)] mod $mod_name { use super::*; @@ -737,7 +910,7 @@ mod tests { "should indicate an unknown item is unknown" ); assert_eq!( - store.observe_validator(key, i), + store.observe_validator(key, i, $value), Ok(false), "should observe new item" ); @@ -750,7 +923,7 @@ mod tests { "should indicate a known item is known" ); assert_eq!( - store.observe_validator(key, i), + store.observe_validator(key, i, $value), Ok(true), "should acknowledge an existing item" ); @@ -997,6 +1170,10 @@ mod tests { } }; } - test_suite_slot!(observed_sync_contributors, ObservedSyncContributors); - test_suite_slot!(observed_sync_aggregators, ObservedSyncAggregators); + test_suite_slot!( + observed_sync_contributors, + ObservedSyncContributors, + Hash256::zero() + ); + test_suite_slot!(observed_sync_aggregators, ObservedSyncAggregators, ()); } diff --git a/beacon_node/beacon_chain/src/persisted_fork_choice.rs b/beacon_node/beacon_chain/src/persisted_fork_choice.rs index 829dc2a8a..8297ea934 100644 --- a/beacon_node/beacon_chain/src/persisted_fork_choice.rs +++ b/beacon_node/beacon_chain/src/persisted_fork_choice.rs @@ -1,17 +1,41 @@ -use crate::beacon_fork_choice_store::PersistedForkChoiceStoreV11; +use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV11, PersistedForkChoiceStoreV17}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use store::{DBColumn, Error, StoreItem}; use superstruct::superstruct; // If adding a new version you should update this type alias and fix the breakages. 
-pub type PersistedForkChoice = PersistedForkChoiceV11; +pub type PersistedForkChoice = PersistedForkChoiceV17; -#[superstruct(variants(V11), variant_attributes(derive(Encode, Decode)), no_enum)] +#[superstruct( + variants(V11, V17), + variant_attributes(derive(Encode, Decode)), + no_enum +)] pub struct PersistedForkChoice { pub fork_choice: fork_choice::PersistedForkChoice, #[superstruct(only(V11))] pub fork_choice_store: PersistedForkChoiceStoreV11, + #[superstruct(only(V17))] + pub fork_choice_store: PersistedForkChoiceStoreV17, +} + +impl Into for PersistedForkChoiceV11 { + fn into(self) -> PersistedForkChoice { + PersistedForkChoice { + fork_choice: self.fork_choice, + fork_choice_store: self.fork_choice_store.into(), + } + } +} + +impl Into for PersistedForkChoice { + fn into(self) -> PersistedForkChoiceV11 { + PersistedForkChoiceV11 { + fork_choice: self.fork_choice, + fork_choice_store: self.fork_choice_store.into(), + } + } } macro_rules! impl_store_item { @@ -33,3 +57,4 @@ macro_rules! impl_store_item { } impl_store_item!(PersistedForkChoiceV11); +impl_store_item!(PersistedForkChoiceV17); diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 5808e648a..7b398db2f 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -4,6 +4,7 @@ mod migration_schema_v13; mod migration_schema_v14; mod migration_schema_v15; mod migration_schema_v16; +mod migration_schema_v17; use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY}; use crate::eth1_chain::SszEth1; @@ -141,6 +142,14 @@ pub fn migrate_schema( let ops = migration_schema_v16::downgrade_from_v16::(db.clone(), log)?; db.store_schema_version_atomically(to, ops) } + (SchemaVersion(16), SchemaVersion(17)) => { + let ops = migration_schema_v17::upgrade_to_v17::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(17), SchemaVersion(16)) => { + let ops = migration_schema_v17::downgrade_from_v17::(db.clone(), log)?; + db.store_schema_version_atomically(to, ops) + } // Anything else is an error. 
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v17.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v17.rs new file mode 100644 index 000000000..770cbb8ab --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v17.rs @@ -0,0 +1,88 @@ +use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; +use crate::persisted_fork_choice::{PersistedForkChoiceV11, PersistedForkChoiceV17}; +use proto_array::core::{SszContainerV16, SszContainerV17}; +use slog::{debug, Logger}; +use ssz::{Decode, Encode}; +use std::sync::Arc; +use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; + +pub fn upgrade_fork_choice( + mut fork_choice: PersistedForkChoiceV11, +) -> Result { + let ssz_container_v16 = SszContainerV16::from_ssz_bytes( + &fork_choice.fork_choice.proto_array_bytes, + ) + .map_err(|e| { + Error::SchemaMigrationError(format!( + "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", + e + )) + })?; + + let ssz_container_v17: SszContainerV17 = ssz_container_v16.try_into().map_err(|e| { + Error::SchemaMigrationError(format!( + "Missing checkpoint during schema migration: {:?}", + e + )) + })?; + fork_choice.fork_choice.proto_array_bytes = ssz_container_v17.as_ssz_bytes(); + + Ok(fork_choice.into()) +} + +pub fn downgrade_fork_choice( + mut fork_choice: PersistedForkChoiceV17, +) -> Result { + let ssz_container_v17 = SszContainerV17::from_ssz_bytes( + &fork_choice.fork_choice.proto_array_bytes, + ) + .map_err(|e| { + Error::SchemaMigrationError(format!( + "Failed to decode ProtoArrayForkChoice during schema migration: {:?}", + e + )) + })?; + + let ssz_container_v16: SszContainerV16 = ssz_container_v17.into(); + fork_choice.fork_choice.proto_array_bytes = ssz_container_v16.as_ssz_bytes(); + + Ok(fork_choice.into()) +} + +pub fn upgrade_to_v17( + db: Arc>, + log: Logger, +) -> Result, Error> { + // Get persisted_fork_choice. + let v11 = db + .get_item::(&FORK_CHOICE_DB_KEY)? + .ok_or_else(|| Error::SchemaMigrationError("fork choice missing from database".into()))?; + + let v17 = upgrade_fork_choice(v11)?; + + debug!( + log, + "Removing unused best_justified_checkpoint from fork choice store." + ); + + Ok(vec![v17.as_kv_store_op(FORK_CHOICE_DB_KEY)]) +} + +pub fn downgrade_from_v17( + db: Arc>, + log: Logger, +) -> Result, Error> { + // Get persisted_fork_choice. + let v17 = db + .get_item::(&FORK_CHOICE_DB_KEY)? + .ok_or_else(|| Error::SchemaMigrationError("fork choice missing from database".into()))?; + + let v11 = downgrade_fork_choice(v17)?; + + debug!( + log, + "Adding junk best_justified_checkpoint to fork choice store." + ); + + Ok(vec![v11.as_kv_store_op(FORK_CHOICE_DB_KEY)]) +} diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index 91a1e24d8..086e1c094 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -1,10 +1,18 @@ -use crate::{metrics, BeaconChainError}; -use lru::LruCache; -use oneshot_broadcast::{oneshot, Receiver, Sender}; +use std::collections::HashMap; use std::sync::Arc; -use types::{beacon_state::CommitteeCache, AttestationShufflingId, Epoch, Hash256}; -/// The size of the LRU cache that stores committee caches for quicker verification. 
+use itertools::Itertools; +use slog::{debug, Logger}; + +use oneshot_broadcast::{oneshot, Receiver, Sender}; +use types::{ + beacon_state::CommitteeCache, AttestationShufflingId, BeaconState, Epoch, EthSpec, Hash256, + RelativeEpoch, +}; + +use crate::{metrics, BeaconChainError}; + +/// The size of the cache that stores committee caches for quicker verification. /// /// Each entry should be `8 + 800,000 = 800,008` bytes in size with 100k validators. (8-byte hash + /// 100k indices). Therefore, this cache should be approx `16 * 800,008 = 12.8 MB`. (Note: this @@ -45,18 +53,24 @@ impl CacheItem { } } -/// Provides an LRU cache for `CommitteeCache`. +/// Provides a cache for `CommitteeCache`. /// /// It has been named `ShufflingCache` because `CommitteeCacheCache` is a bit weird and looks like /// a find/replace error. pub struct ShufflingCache { - cache: LruCache, + cache: HashMap, + cache_size: usize, + head_shuffling_ids: BlockShufflingIds, + logger: Logger, } impl ShufflingCache { - pub fn new(cache_size: usize) -> Self { + pub fn new(cache_size: usize, head_shuffling_ids: BlockShufflingIds, logger: Logger) -> Self { Self { - cache: LruCache::new(cache_size), + cache: HashMap::new(), + cache_size, + head_shuffling_ids, + logger, } } @@ -76,7 +90,7 @@ impl ShufflingCache { metrics::inc_counter(&metrics::SHUFFLING_CACHE_PROMISE_HITS); metrics::inc_counter(&metrics::SHUFFLING_CACHE_HITS); let ready = CacheItem::Committee(committee); - self.cache.put(key.clone(), ready.clone()); + self.insert_cache_item(key.clone(), ready.clone()); Some(ready) } // The promise has not yet been resolved. Return the promise so the caller can await @@ -93,13 +107,12 @@ impl ShufflingCache { // It's worth noting that this is the only place where we removed unresolved // promises from the cache. This means unresolved promises will only be removed if // we try to access them again. This is OK, since the promises don't consume much - // memory and the nature of the LRU cache means that future, relevant entries will - // still be added to the cache. We expect that *all* promises should be resolved, - // unless there is a programming or database error. + // memory. We expect that *all* promises should be resolved, unless there is a + // programming or database error. Err(oneshot_broadcast::Error::SenderDropped) => { metrics::inc_counter(&metrics::SHUFFLING_CACHE_PROMISE_FAILS); metrics::inc_counter(&metrics::SHUFFLING_CACHE_MISSES); - self.cache.pop(key); + self.cache.remove(key); None } }, @@ -112,13 +125,13 @@ impl ShufflingCache { } pub fn contains(&self, key: &AttestationShufflingId) -> bool { - self.cache.contains(key) + self.cache.contains_key(key) } - pub fn insert_committee_cache( + pub fn insert_committee_cache( &mut self, key: AttestationShufflingId, - committee_cache: &T, + committee_cache: &C, ) { if self .cache @@ -127,13 +140,55 @@ impl ShufflingCache { // worth two in the promise-bush! .map_or(true, CacheItem::is_promise) { - self.cache.put( + self.insert_cache_item( key, CacheItem::Committee(committee_cache.to_arc_committee_cache()), ); } } + /// Prunes the cache first before inserting a new cache item. + fn insert_cache_item(&mut self, key: AttestationShufflingId, cache_item: CacheItem) { + self.prune_cache(); + self.cache.insert(key, cache_item); + } + + /// Prunes the `cache` to keep the size below the `cache_size` limit, based on the following + /// preferences: + /// - Entries from more recent epochs are preferred over older ones. 
+ /// - Entries with shuffling ids matching the head's previous, current, and future epochs must + /// not be pruned. + fn prune_cache(&mut self) { + let target_cache_size = self.cache_size.saturating_sub(1); + if let Some(prune_count) = self.cache.len().checked_sub(target_cache_size) { + let shuffling_ids_to_prune = self + .cache + .keys() + .sorted_by_key(|key| key.shuffling_epoch) + .filter(|shuffling_id| { + Some(shuffling_id) + != self + .head_shuffling_ids + .id_for_epoch(shuffling_id.shuffling_epoch) + .as_ref() + .as_ref() + }) + .take(prune_count) + .cloned() + .collect::>(); + + for shuffling_id in shuffling_ids_to_prune.iter() { + debug!( + self.logger, + "Removing old shuffling from cache"; + "shuffling_epoch" => shuffling_id.shuffling_epoch, + "shuffling_decision_block" => ?shuffling_id.shuffling_decision_block + ); + self.cache.remove(shuffling_id); + } + } + } + pub fn create_promise( &mut self, key: AttestationShufflingId, @@ -148,9 +203,17 @@ impl ShufflingCache { } let (sender, receiver) = oneshot(); - self.cache.put(key, CacheItem::Promise(receiver)); + self.insert_cache_item(key, CacheItem::Promise(receiver)); Ok(sender) } + + /// Inform the cache that the shuffling decision roots for the head has changed. + /// + /// The shufflings for the head's previous, current, and future epochs will never be ejected from + /// the cache during `Self::insert_cache_item`. + pub fn update_head_shuffling_ids(&mut self, head_shuffling_ids: BlockShufflingIds) { + self.head_shuffling_ids = head_shuffling_ids; + } } /// A helper trait to allow lazy-cloning of the committee cache when inserting into the cache. @@ -170,26 +233,29 @@ impl ToArcCommitteeCache for Arc { } } -impl Default for ShufflingCache { - fn default() -> Self { - Self::new(DEFAULT_CACHE_SIZE) - } -} - /// Contains the shuffling IDs for a beacon block. +#[derive(Clone)] pub struct BlockShufflingIds { pub current: AttestationShufflingId, pub next: AttestationShufflingId, + pub previous: Option, pub block_root: Hash256, } impl BlockShufflingIds { /// Returns the shuffling ID for the given epoch. /// - /// Returns `None` if `epoch` is prior to `self.current.shuffling_epoch`. + /// Returns `None` if `epoch` is prior to `self.previous?.shuffling_epoch` or + /// `self.current.shuffling_epoch` (if `previous` is `None`). pub fn id_for_epoch(&self, epoch: Epoch) -> Option { if epoch == self.current.shuffling_epoch { Some(self.current.clone()) + } else if self + .previous + .as_ref() + .map_or(false, |id| id.shuffling_epoch == epoch) + { + self.previous.clone() } else if epoch == self.next.shuffling_epoch { Some(self.next.clone()) } else if epoch > self.next.shuffling_epoch { @@ -201,18 +267,57 @@ impl BlockShufflingIds { None } } + + pub fn try_from_head( + head_block_root: Hash256, + head_state: &BeaconState, + ) -> Result { + let get_shuffling_id = |relative_epoch| { + AttestationShufflingId::new(head_block_root, head_state, relative_epoch).map_err(|e| { + format!( + "Unable to get attester shuffling decision slot for the epoch {:?}: {:?}", + relative_epoch, e + ) + }) + }; + + Ok(Self { + current: get_shuffling_id(RelativeEpoch::Current)?, + next: get_shuffling_id(RelativeEpoch::Next)?, + previous: Some(get_shuffling_id(RelativeEpoch::Previous)?), + block_root: head_block_root, + }) + } } // Disable tests in debug since the beacon chain harness is slow unless in release. 
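// --- Illustrative sketch (not part of the diff) ------------------------------------
// A minimal sketch of how a caller is expected to drive the new cache, assuming the
// `ShufflingCache`, `BlockShufflingIds` and `AttestationShufflingId` types above plus a
// slog `Logger`; the helper name and the cache size of 16 are illustrative only. The
// head's previous/current/next shuffling ids are supplied at construction and refreshed
// on head changes, so `prune_cache` never evicts them.
fn example_shuffling_cache_usage<E: EthSpec>(
    head_block_root: Hash256,
    head_state: &BeaconState<E>,
    logger: Logger,
) -> Result<ShufflingCache, String> {
    // Protect the shufflings the current head needs for attestation verification.
    let head_shuffling_ids = BlockShufflingIds::try_from_head(head_block_root, head_state)?;
    let mut cache = ShufflingCache::new(16, head_shuffling_ids, logger);

    // Any non-head entry beyond the size limit is pruned, oldest shuffling epoch first.
    let id = AttestationShufflingId::new(head_block_root, head_state, RelativeEpoch::Current)
        .map_err(|e| format!("{:?}", e))?;
    cache.insert_committee_cache(id, &Arc::new(CommitteeCache::default()));
    Ok(cache)
}
// ------------------------------------------------------------------------------------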
#[cfg(not(debug_assertions))] #[cfg(test)] mod test { - use super::*; - use crate::test_utils::EphemeralHarnessType; + use task_executor::test_utils::null_logger; use types::*; - type BeaconChainHarness = - crate::test_utils::BeaconChainHarness>; + use crate::test_utils::EphemeralHarnessType; + + use super::*; + + type E = MinimalEthSpec; + type TestBeaconChainType = EphemeralHarnessType; + type BeaconChainHarness = crate::test_utils::BeaconChainHarness; + const TEST_CACHE_SIZE: usize = 5; + + // Creates a new shuffling cache for testing + fn new_shuffling_cache() -> ShufflingCache { + let current_epoch = 8; + let head_shuffling_ids = BlockShufflingIds { + current: shuffling_id(current_epoch), + next: shuffling_id(current_epoch + 1), + previous: Some(shuffling_id(current_epoch - 1)), + block_root: Hash256::from_low_u64_le(0), + }; + let logger = null_logger().unwrap(); + ShufflingCache::new(TEST_CACHE_SIZE, head_shuffling_ids, logger) + } /// Returns two different committee caches for testing. fn committee_caches() -> (Arc, Arc) { @@ -249,7 +354,7 @@ mod test { fn resolved_promise() { let (committee_a, _) = committee_caches(); let id_a = shuffling_id(1); - let mut cache = ShufflingCache::default(); + let mut cache = new_shuffling_cache(); // Create a promise. let sender = cache.create_promise(id_a.clone()).unwrap(); @@ -276,7 +381,7 @@ mod test { #[test] fn unresolved_promise() { let id_a = shuffling_id(1); - let mut cache = ShufflingCache::default(); + let mut cache = new_shuffling_cache(); // Create a promise. let sender = cache.create_promise(id_a.clone()).unwrap(); @@ -301,7 +406,7 @@ mod test { fn two_promises() { let (committee_a, committee_b) = committee_caches(); let (id_a, id_b) = (shuffling_id(1), shuffling_id(2)); - let mut cache = ShufflingCache::default(); + let mut cache = new_shuffling_cache(); // Create promise A. 
let sender_a = cache.create_promise(id_a.clone()).unwrap(); @@ -355,7 +460,7 @@ mod test { #[test] fn too_many_promises() { - let mut cache = ShufflingCache::default(); + let mut cache = new_shuffling_cache(); for i in 0..MAX_CONCURRENT_PROMISES { cache.create_promise(shuffling_id(i as u64)).unwrap(); @@ -375,4 +480,105 @@ mod test { "the cache should have two entries" ); } + + #[test] + fn should_insert_committee_cache() { + let mut cache = new_shuffling_cache(); + let id_a = shuffling_id(1); + let committee_cache_a = Arc::new(CommitteeCache::default()); + cache.insert_committee_cache(id_a.clone(), &committee_cache_a); + assert!( + matches!(cache.get(&id_a).unwrap(), CacheItem::Committee(committee_cache) if committee_cache == committee_cache_a), + "should insert committee cache" + ); + } + + #[test] + fn should_prune_committee_cache_with_lowest_epoch() { + let mut cache = new_shuffling_cache(); + let shuffling_id_and_committee_caches = (0..(TEST_CACHE_SIZE + 1)) + .map(|i| (shuffling_id(i as u64), Arc::new(CommitteeCache::default()))) + .collect::>(); + + for (shuffling_id, committee_cache) in shuffling_id_and_committee_caches.iter() { + cache.insert_committee_cache(shuffling_id.clone(), committee_cache); + } + + for i in 1..(TEST_CACHE_SIZE + 1) { + assert!( + cache.contains(&shuffling_id_and_committee_caches.get(i).unwrap().0), + "should contain recent epoch shuffling ids" + ); + } + + assert!( + !cache.contains(&shuffling_id_and_committee_caches.get(0).unwrap().0), + "should not contain oldest epoch shuffling id" + ); + assert_eq!( + cache.cache.len(), + cache.cache_size, + "should limit cache size" + ); + } + + #[test] + fn should_retain_head_state_shufflings() { + let mut cache = new_shuffling_cache(); + let current_epoch = 10; + let committee_cache = Arc::new(CommitteeCache::default()); + + // Insert a few entries for next the epoch with different decision roots. + for i in 0..TEST_CACHE_SIZE { + let shuffling_id = AttestationShufflingId { + shuffling_epoch: (current_epoch + 1).into(), + shuffling_decision_block: Hash256::from_low_u64_be(current_epoch + i as u64), + }; + cache.insert_committee_cache(shuffling_id, &committee_cache); + } + + // Now, update the head shuffling ids + let head_shuffling_ids = BlockShufflingIds { + current: shuffling_id(current_epoch), + next: shuffling_id(current_epoch + 1), + previous: Some(shuffling_id(current_epoch - 1)), + block_root: Hash256::from_low_u64_le(42), + }; + cache.update_head_shuffling_ids(head_shuffling_ids.clone()); + + // Insert head state shuffling ids. Should not be overridden by other shuffling ids. + cache.insert_committee_cache(head_shuffling_ids.current.clone(), &committee_cache); + cache.insert_committee_cache(head_shuffling_ids.next.clone(), &committee_cache); + cache.insert_committee_cache( + head_shuffling_ids.previous.clone().unwrap(), + &committee_cache, + ); + + // Insert a few entries for older epochs. + for i in 0..TEST_CACHE_SIZE { + let shuffling_id = AttestationShufflingId { + shuffling_epoch: Epoch::from(i), + shuffling_decision_block: Hash256::from_low_u64_be(i as u64), + }; + cache.insert_committee_cache(shuffling_id, &committee_cache); + } + + assert!( + cache.contains(&head_shuffling_ids.current), + "should retain head shuffling id for the current epoch." + ); + assert!( + cache.contains(&head_shuffling_ids.next), + "should retain head shuffling id for the next epoch." + ); + assert!( + cache.contains(&head_shuffling_ids.previous.unwrap()), + "should retain head shuffling id for previous epoch." 
+ ); + assert_eq!( + cache.cache.len(), + cache.cache_size, + "should limit cache size" + ); + } } diff --git a/beacon_node/beacon_chain/src/sync_committee_verification.rs b/beacon_node/beacon_chain/src/sync_committee_verification.rs index 4b4228e71..14cdc2400 100644 --- a/beacon_node/beacon_chain/src/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/src/sync_committee_verification.rs @@ -153,7 +153,21 @@ pub enum Error { /// It's unclear if this sync message is valid, however we have already observed a /// signature from this validator for this slot and should not observe /// another. - PriorSyncCommitteeMessageKnown { validator_index: u64, slot: Slot }, + PriorSyncCommitteeMessageKnown { + validator_index: u64, + slot: Slot, + prev_root: Hash256, + new_root: Hash256, + }, + /// We have already observed a contribution for the aggregator and refuse to + /// process another. + /// + /// ## Peer scoring + /// + /// It's unclear if this sync message is valid, however we have already observed a + /// signature from this validator for this slot and should not observe + /// another. + PriorSyncContributionMessageKnown { validator_index: u64, slot: Slot }, /// The sync committee message was received on an invalid sync committee message subnet. /// /// ## Peer scoring @@ -378,10 +392,10 @@ impl VerifiedSyncContribution { if chain .observed_sync_aggregators .write() - .observe_validator(observed_key, aggregator_index as usize) + .observe_validator(observed_key, aggregator_index as usize, ()) .map_err(BeaconChainError::from)? { - return Err(Error::PriorSyncCommitteeMessageKnown { + return Err(Error::PriorSyncContributionMessageKnown { validator_index: aggregator_index, slot: contribution.slot, }); @@ -450,19 +464,40 @@ impl VerifiedSyncCommitteeMessage { // The sync committee message is the first valid message received for the participating validator // for the slot, sync_message.slot. let validator_index = sync_message.validator_index; - if chain + let head_root = chain.canonical_head.cached_head().head_block_root(); + let new_root = sync_message.beacon_block_root; + let should_override_prev = |prev_root: &Hash256, new_root: &Hash256| { + let roots_differ = new_root != prev_root; + let new_elects_head = new_root == &head_root; + + if roots_differ { + // Track sync committee messages that differ from each other. + metrics::inc_counter(&metrics::SYNC_MESSAGE_EQUIVOCATIONS); + if new_elects_head { + // Track sync committee messages that swap from an old block to a new block. + metrics::inc_counter(&metrics::SYNC_MESSAGE_EQUIVOCATIONS_TO_HEAD); + } + } + + roots_differ && new_elects_head + }; + if let Some(prev_root) = chain .observed_sync_contributors .read() - .validator_has_been_observed( + .observation_for_validator( SlotSubcommitteeIndex::new(sync_message.slot, subnet_id.into()), validator_index as usize, ) .map_err(BeaconChainError::from)? { - return Err(Error::PriorSyncCommitteeMessageKnown { - validator_index, - slot: sync_message.slot, - }); + if !should_override_prev(&prev_root, &new_root) { + return Err(Error::PriorSyncCommitteeMessageKnown { + validator_index, + slot: sync_message.slot, + prev_root, + new_root, + }); + } } // The aggregate signature of the sync committee message is valid. @@ -474,18 +509,22 @@ impl VerifiedSyncCommitteeMessage { // It's important to double check that the sync committee message still hasn't been observed, since // there can be a race-condition if we receive two sync committee messages at the same time and // process them in different threads. 
- if chain + if let Some(prev_root) = chain .observed_sync_contributors .write() - .observe_validator( + .observe_validator_with_override( SlotSubcommitteeIndex::new(sync_message.slot, subnet_id.into()), validator_index as usize, + sync_message.beacon_block_root, + should_override_prev, ) .map_err(BeaconChainError::from)? { return Err(Error::PriorSyncCommitteeMessageKnown { validator_index, slot: sync_message.slot, + prev_root, + new_root, }); } diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index db502fcda..1d28f1c0a 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,4 +1,5 @@ use crate::blob_verification::{AsBlock, BlockWrapper}; +use crate::observed_operations::ObservationOutcome; pub use crate::persisted_beacon_chain::PersistedBeaconChain; pub use crate::{ beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}, @@ -29,6 +30,7 @@ pub use genesis::{interop_genesis_state_with_eth1, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; use kzg::{Kzg, TrustedSetup}; use merkle_proof::MerkleTree; +use operation_pool::ReceivedPreCapella; use parking_lot::Mutex; use parking_lot::RwLockWriteGuard; use rand::rngs::StdRng; @@ -43,7 +45,7 @@ use slot_clock::{SlotClock, TestingSlotClock}; use state_processing::per_block_processing::compute_timestamp_at_slot; use state_processing::{ state_advance::{complete_state_advance, partial_state_advance}, - StateRootStrategy, + StateProcessingStrategy, }; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; @@ -67,7 +69,7 @@ const FORK_NAME_ENV_VAR: &str = "FORK_NAME"; // // You should mutate the `ChainSpec` prior to initialising the harness if you would like to use // a different value. -pub const DEFAULT_TARGET_AGGREGATORS: u64 = u64::max_value(); +pub const DEFAULT_TARGET_AGGREGATORS: u64 = u64::MAX; pub type BaseHarnessType = Witness, TEthSpec, THotStore, TColdStore>; @@ -88,7 +90,7 @@ pub type AddBlocksResult = ( BeaconState, ); -/// Deprecated: Indicates how the `BeaconChainHarness` should produce blocks. +/// Indicates how the `BeaconChainHarness` should produce blocks. #[derive(Clone, Copy, Debug)] pub enum BlockStrategy { /// Produce blocks upon the canonical head (normal case). @@ -104,7 +106,7 @@ pub enum BlockStrategy { }, } -/// Deprecated: Indicates how the `BeaconChainHarness` should produce attestations. +/// Indicates how the `BeaconChainHarness` should produce attestations. #[derive(Clone, Debug)] pub enum AttestationStrategy { /// All validators attest to whichever block the `BeaconChainHarness` has produced. @@ -744,7 +746,7 @@ where pub fn get_hot_state(&self, state_hash: BeaconStateHash) -> Option> { self.chain .store - .load_hot_state(&state_hash.into(), StateRootStrategy::Accurate) + .load_hot_state(&state_hash.into(), StateProcessingStrategy::Accurate) .unwrap() } @@ -767,6 +769,7 @@ where state.get_block_root(slot).unwrap() == state.get_block_root(slot - 1).unwrap() } + /// Returns a newly created block, signed by the proposer for the given slot. 
pub async fn make_block( &self, mut state: BeaconState, @@ -1003,31 +1006,31 @@ where head_block_root: SignedBeaconBlockHash, attestation_slot: Slot, ) -> Vec> { - self.make_unaggregated_attestations_with_limit( + let fork = self + .spec + .fork_at_epoch(attestation_slot.epoch(E::slots_per_epoch())); + self.make_unaggregated_attestations_with_opts( attesting_validators, state, state_root, head_block_root, attestation_slot, - None, + MakeAttestationOptions { limit: None, fork }, ) .0 } - pub fn make_unaggregated_attestations_with_limit( + pub fn make_unaggregated_attestations_with_opts( &self, attesting_validators: &[usize], state: &BeaconState, state_root: Hash256, head_block_root: SignedBeaconBlockHash, attestation_slot: Slot, - limit: Option, + opts: MakeAttestationOptions, ) -> (Vec>, Vec) { + let MakeAttestationOptions { limit, fork } = opts; let committee_count = state.get_committee_count_at_slot(state.slot()).unwrap(); - let fork = self - .spec - .fork_at_epoch(attestation_slot.epoch(E::slots_per_epoch())); - let attesters = Mutex::new(vec![]); let attestations = state @@ -1160,8 +1163,6 @@ where .collect() } - /// Deprecated: Use make_unaggregated_attestations() instead. - /// /// A list of attestations for each committee for the given slot. /// /// The first layer of the Vec is organised per committee. For example, if the return value is @@ -1219,16 +1220,35 @@ where slot: Slot, limit: Option, ) -> (HarnessAttestations, Vec) { - let (unaggregated_attestations, attesters) = self - .make_unaggregated_attestations_with_limit( - attesting_validators, - state, - state_root, - block_hash, - slot, - limit, - ); let fork = self.spec.fork_at_epoch(slot.epoch(E::slots_per_epoch())); + self.make_attestations_with_opts( + attesting_validators, + state, + state_root, + block_hash, + slot, + MakeAttestationOptions { limit, fork }, + ) + } + + pub fn make_attestations_with_opts( + &self, + attesting_validators: &[usize], + state: &BeaconState, + state_root: Hash256, + block_hash: SignedBeaconBlockHash, + slot: Slot, + opts: MakeAttestationOptions, + ) -> (HarnessAttestations, Vec) { + let MakeAttestationOptions { fork, .. } = opts; + let (unaggregated_attestations, attesters) = self.make_unaggregated_attestations_with_opts( + attesting_validators, + state, + state_root, + block_hash, + slot, + opts, + ); let aggregated_attestations: Vec>> = unaggregated_attestations @@ -1560,6 +1580,26 @@ where .sign(sk, &fork, genesis_validators_root, &self.chain.spec) } + pub fn add_bls_to_execution_change( + &self, + validator_index: u64, + address: Address, + ) -> Result<(), String> { + let signed_bls_change = self.make_bls_to_execution_change(validator_index, address); + if let ObservationOutcome::New(verified_bls_change) = self + .chain + .verify_bls_to_execution_change_for_gossip(signed_bls_change) + .expect("should verify BLS to execution change for gossip") + { + self.chain + .import_bls_to_execution_change(verified_bls_change, ReceivedPreCapella::No) + .then_some(()) + .ok_or("should import BLS to execution change to the op pool".to_string()) + } else { + Err("should observe new BLS to execution change".to_string()) + } + } + pub fn make_bls_to_execution_change( &self, validator_index: u64, @@ -2077,9 +2117,6 @@ where .collect() } - /// Deprecated: Do not modify the slot clock manually; rely on add_attested_blocks_at_slots() - /// instead - /// /// Advance the slot of the `BeaconChain`. /// /// Does not produce blocks or attestations. 
@@ -2093,18 +2130,6 @@ where self.chain.slot_clock.set_current_time(time); } - /// Deprecated: Use make_block() instead - /// - /// Returns a newly created block, signed by the proposer for the given slot. - pub async fn build_block( - &self, - state: BeaconState, - slot: Slot, - _block_strategy: BlockStrategy, - ) -> (BlockContentsTuple>, BeaconState) { - self.make_block(state, slot).await - } - /// Uses `Self::extend_chain` to build the chain out to the `target_slot`. pub async fn extend_to_slot(&self, target_slot: Slot) -> Hash256 { if self.chain.slot().unwrap() == self.chain.canonical_head.cached_head().head_slot() { @@ -2140,8 +2165,6 @@ where .await } - /// Deprecated: Use add_attested_blocks_at_slots() instead - /// /// Extend the `BeaconChain` with some blocks and attestations. Returns the root of the /// last-produced block (the head of the chain). /// @@ -2296,6 +2319,13 @@ impl fmt::Debug for BeaconChainHarness { } } +pub struct MakeAttestationOptions { + /// Produce exactly `limit` attestations. + pub limit: Option, + /// Fork to use for signing attestations. + pub fork: Fork, +} + pub fn build_log(level: slog::Level, enabled: bool) -> Logger { let decorator = TermDecorator::new().build(); let drain = FullFormat::new(decorator).build().fuse(); diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index d79a56df6..396aac71b 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -199,6 +199,7 @@ pub struct ValidatorMetrics { pub attestation_head_misses: u64, pub attestation_target_hits: u64, pub attestation_target_misses: u64, + pub latest_attestation_inclusion_distance: u64, } impl ValidatorMetrics { @@ -225,6 +226,10 @@ impl ValidatorMetrics { pub fn increment_head_misses(&mut self) { self.attestation_head_misses += 1; } + + pub fn set_latest_inclusion_distance(&mut self, distance: u64) { + self.latest_attestation_inclusion_distance = distance; + } } /// A validator that is being monitored by the `ValidatorMonitor`. @@ -568,7 +573,6 @@ impl ValidatorMonitor { } else { validator_metrics.increment_misses() } - drop(validator_metrics); // Indicates if any attestation made it on-chain. // @@ -693,8 +697,10 @@ impl ValidatorMonitor { &[id], inclusion_delay as i64, ); + validator_metrics.set_latest_inclusion_distance(inclusion_delay); } } + drop(validator_metrics); // Indicates the number of sync committee signatures that made it into // a sync aggregate in the current_epoch (state.epoch - 1). 
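// --- Illustrative sketch (not part of the diff) ------------------------------------
// What the new `MakeAttestationOptions` is for: a test can sign attestations with an
// explicitly chosen fork (e.g. to simulate a peer signing with a stale fork), which is
// exactly what the Capella attestation-verification tests below do. The helper name and
// the exact return type are illustrative; `harness`, `state`, etc. are assumed to come
// from the test utilities above.
fn attest_with_explicit_fork<E: EthSpec>(
    harness: &BeaconChainHarness<EphemeralHarnessType<E>>,
    validators: &[usize],
    state: &BeaconState<E>,
    state_root: Hash256,
    head: SignedBeaconBlockHash,
    slot: Slot,
    fork: Fork,
) -> Vec<Vec<(Attestation<E>, SubnetId)>> {
    // Per-committee `(Attestation, SubnetId)` pairs; the attester indices are ignored here.
    let (attestations, _attesters) = harness.make_unaggregated_attestations_with_opts(
        validators,
        state,
        state_root,
        head,
        slot,
        MakeAttestationOptions { limit: None, fork },
    );
    attestations
}
// ------------------------------------------------------------------------------------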
diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 6a9e60479..1040521e5 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -1,5 +1,9 @@ #![cfg(not(debug_assertions))] +use beacon_chain::attestation_verification::{ + batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations, Error, +}; +use beacon_chain::test_utils::{MakeAttestationOptions, HARNESS_GENESIS_TIME}; use beacon_chain::{ attestation_verification::Error as AttnError, test_utils::{ @@ -7,6 +11,7 @@ use beacon_chain::{ }, BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped, }; +use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use int_to_bytes::int_to_bytes32; use lazy_static::lazy_static; use state_processing::{ @@ -14,9 +19,9 @@ use state_processing::{ }; use tree_hash::TreeHash; use types::{ - test_utils::generate_deterministic_keypair, AggregateSignature, Attestation, BeaconStateError, - BitList, Epoch, EthSpec, Hash256, Keypair, MainnetEthSpec, SecretKey, SelectionProof, - SignedAggregateAndProof, Slot, SubnetId, Unsigned, + test_utils::generate_deterministic_keypair, Address, AggregateSignature, Attestation, + BeaconStateError, BitList, ChainSpec, Epoch, EthSpec, ForkName, Hash256, Keypair, + MainnetEthSpec, SecretKey, SelectionProof, SignedAggregateAndProof, Slot, SubnetId, Unsigned, }; pub type E = MainnetEthSpec; @@ -25,6 +30,8 @@ pub type E = MainnetEthSpec; /// have committees where _some_ validators are aggregators but not _all_. pub const VALIDATOR_COUNT: usize = 256; +pub const CAPELLA_FORK_EPOCH: usize = 1; + lazy_static! { /// A cached set of keys. static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); @@ -50,6 +57,50 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness (BeaconChainHarness>, ChainSpec) { + let mut spec = E::default_spec(); + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(CAPELLA_FORK_EPOCH as u64)); + + let validator_keypairs = KEYPAIRS[0..validator_count].to_vec(); + let genesis_state = interop_genesis_state( + &validator_keypairs, + HARNESS_GENESIS_TIME, + Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), + None, + &spec, + ) + .unwrap(); + + let harness = BeaconChainHarness::builder(MainnetEthSpec) + .spec(spec.clone()) + .keypairs(validator_keypairs) + .withdrawal_keypairs( + KEYPAIRS[0..validator_count] + .iter() + .cloned() + .map(Some) + .collect(), + ) + .genesis_state_ephemeral_store(genesis_state) + .mock_execution_layer() + .build(); + + harness + .execution_block_generator() + .move_to_terminal_block() + .unwrap(); + + harness.advance_slot(); + + (harness, spec) +} + /// Returns an attestation that is valid for some slot in the given `chain`. /// /// Also returns some info about who created it. @@ -998,6 +1049,100 @@ async fn attestation_that_skips_epochs() { .expect("should gossip verify attestation that skips slots"); } +/// Ensures that an attestation can be processed when a validator receives proposer reward +/// in an epoch _and_ is scheduled for a withdrawal. This is a regression test for a scenario where +/// inconsistent state lookup could cause withdrawal root mismatch. 
+#[tokio::test] +async fn attestation_validator_receive_proposer_reward_and_withdrawals() { + let (harness, _) = get_harness_capella_spec(VALIDATOR_COUNT); + + // Advance to a Capella block. Make sure the blocks have attestations. + let two_thirds = (VALIDATOR_COUNT / 3) * 2; + let attesters = (0..two_thirds).collect(); + harness + .extend_chain( + // To trigger the bug we need the proposer attestation reward to be signed at a block + // that isn't the first in the epoch. + MainnetEthSpec::slots_per_epoch() as usize + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(attesters), + ) + .await; + + // Add BLS change for the block proposer at slot 33. This sets up a withdrawal for the block proposer. + let proposer_index = harness + .chain + .block_at_slot(harness.get_current_slot(), WhenSlotSkipped::None) + .expect("should not error getting block at slot") + .expect("should find block at slot") + .message() + .proposer_index(); + harness + .add_bls_to_execution_change(proposer_index, Address::from_low_u64_be(proposer_index)) + .unwrap(); + + // Apply two blocks: one to process the BLS change, and another to process the withdrawal. + harness.advance_slot(); + harness + .extend_chain( + 2, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; + let earlier_slot = harness.get_current_slot(); + let earlier_block = harness + .chain + .block_at_slot(earlier_slot, WhenSlotSkipped::None) + .expect("should not error getting block at slot") + .expect("should find block at slot"); + + // Extend the chain out a few epochs so we have some chain depth to play with. + harness.advance_slot(); + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * 2, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; + + let current_slot = harness.get_current_slot(); + let mut state = harness + .chain + .get_state(&earlier_block.state_root(), Some(earlier_slot)) + .expect("should not error getting state") + .expect("should find state"); + + while state.slot() < current_slot { + per_slot_processing(&mut state, None, &harness.spec).expect("should process slot"); + } + + let state_root = state.update_tree_hash_cache().unwrap(); + + // Get an attestation pointed to an old block (where we do not have its shuffling cached). + // Verifying the attestation triggers an inconsistent state replay. + let remaining_attesters = (two_thirds..VALIDATOR_COUNT).collect(); + let (attestation, subnet_id) = harness + .get_unaggregated_attestations( + &AttestationStrategy::SomeValidators(remaining_attesters), + &state, + state_root, + earlier_block.canonical_root(), + current_slot, + ) + .first() + .expect("should have at least one committee") + .first() + .cloned() + .expect("should have at least one attestation in committee"); + + harness + .chain + .verify_unaggregated_attestation_for_gossip(&attestation, Some(subnet_id)) + .expect("should gossip verify attestation without checking withdrawals root"); +} + #[tokio::test] async fn attestation_to_finalized_block() { let harness = get_harness(VALIDATOR_COUNT); @@ -1189,3 +1334,198 @@ async fn verify_attestation_for_gossip_doppelganger_detection() { .validator_has_been_observed(epoch, index) .expect("should check if gossip aggregator was observed")); } + +#[tokio::test] +async fn attestation_verification_use_head_state_fork() { + let (harness, spec) = get_harness_capella_spec(VALIDATOR_COUNT); + + // Advance to last block of the pre-Capella fork epoch. 
Capella is at slot 32. + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * CAPELLA_FORK_EPOCH - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; + + // Assert our head is a block at slot 31 in the pre-Capella fork epoch. + let pre_capella_slot = harness.get_current_slot(); + let pre_capella_block = harness + .chain + .block_at_slot(pre_capella_slot, WhenSlotSkipped::Prev) + .expect("should not error getting block at slot") + .expect("should find block at slot"); + assert_eq!(pre_capella_block.fork_name(&spec).unwrap(), ForkName::Merge); + + // Advance slot clock to Capella fork. + harness.advance_slot(); + let first_capella_slot = harness.get_current_slot(); + assert_eq!( + spec.fork_name_at_slot::(first_capella_slot), + ForkName::Capella + ); + + let (state, state_root) = harness.get_current_state_and_root(); + + // Scenario 1: other node signed attestation using the Capella fork epoch. + { + let attesters = (0..VALIDATOR_COUNT / 2).collect::>(); + let capella_fork = spec.fork_for_name(ForkName::Capella).unwrap(); + let committee_attestations = harness + .make_unaggregated_attestations_with_opts( + attesters.as_slice(), + &state, + state_root, + pre_capella_block.canonical_root().into(), + first_capella_slot, + MakeAttestationOptions { + fork: capella_fork, + limit: None, + }, + ) + .0 + .first() + .cloned() + .expect("should have at least one committee"); + let attestations_and_subnets = committee_attestations + .iter() + .map(|(attestation, subnet_id)| (attestation, Some(*subnet_id))); + + assert!( + batch_verify_unaggregated_attestations(attestations_and_subnets, &harness.chain).is_ok(), + "should accept attestations with `data.slot` >= first capella slot signed using the Capella fork" + ); + } + + // Scenario 2: other node forgot to update their node and signed attestations using bellatrix fork + { + let attesters = (VALIDATOR_COUNT / 2..VALIDATOR_COUNT).collect::>(); + let merge_fork = spec.fork_for_name(ForkName::Merge).unwrap(); + let committee_attestations = harness + .make_unaggregated_attestations_with_opts( + attesters.as_slice(), + &state, + state_root, + pre_capella_block.canonical_root().into(), + first_capella_slot, + MakeAttestationOptions { + fork: merge_fork, + limit: None, + }, + ) + .0 + .first() + .cloned() + .expect("should have at least one committee"); + let attestations_and_subnets = committee_attestations + .iter() + .map(|(attestation, subnet_id)| (attestation, Some(*subnet_id))); + + let results = + batch_verify_unaggregated_attestations(attestations_and_subnets, &harness.chain) + .expect("should return attestation results"); + let error = results + .into_iter() + .collect::, _>>() + .err() + .expect("should return an error"); + assert!( + matches!(error, Error::InvalidSignature), + "should reject attestations with `data.slot` >= first capella slot signed using the pre-Capella fork" + ); + } +} + +#[tokio::test] +async fn aggregated_attestation_verification_use_head_state_fork() { + let (harness, spec) = get_harness_capella_spec(VALIDATOR_COUNT); + + // Advance to last block of the pre-Capella fork epoch. Capella is at slot 32. + harness + .extend_chain( + MainnetEthSpec::slots_per_epoch() as usize * CAPELLA_FORK_EPOCH - 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(vec![]), + ) + .await; + + // Assert our head is a block at slot 31 in the pre-Capella fork epoch. 
+ let pre_capella_slot = harness.get_current_slot(); + let pre_capella_block = harness + .chain + .block_at_slot(pre_capella_slot, WhenSlotSkipped::Prev) + .expect("should not error getting block at slot") + .expect("should find block at slot"); + assert_eq!(pre_capella_block.fork_name(&spec).unwrap(), ForkName::Merge); + + // Advance slot clock to Capella fork. + harness.advance_slot(); + let first_capella_slot = harness.get_current_slot(); + assert_eq!( + spec.fork_name_at_slot::(first_capella_slot), + ForkName::Capella + ); + + let (state, state_root) = harness.get_current_state_and_root(); + + // Scenario 1: other node signed attestation using the Capella fork epoch. + { + let attesters = (0..VALIDATOR_COUNT / 2).collect::>(); + let capella_fork = spec.fork_for_name(ForkName::Capella).unwrap(); + let aggregates = harness + .make_attestations_with_opts( + attesters.as_slice(), + &state, + state_root, + pre_capella_block.canonical_root().into(), + first_capella_slot, + MakeAttestationOptions { + fork: capella_fork, + limit: None, + }, + ) + .0 + .into_iter() + .map(|(_, aggregate)| aggregate.expect("should have signed aggregate and proof")) + .collect::>(); + + assert!( + batch_verify_aggregated_attestations(aggregates.iter(), &harness.chain).is_ok(), + "should accept aggregates with `data.slot` >= first capella slot signed using the Capella fork" + ); + } + + // Scenario 2: other node forgot to update their node and signed attestations using bellatrix fork + { + let attesters = (VALIDATOR_COUNT / 2..VALIDATOR_COUNT).collect::>(); + let merge_fork = spec.fork_for_name(ForkName::Merge).unwrap(); + let aggregates = harness + .make_attestations_with_opts( + attesters.as_slice(), + &state, + state_root, + pre_capella_block.canonical_root().into(), + first_capella_slot, + MakeAttestationOptions { + fork: merge_fork, + limit: None, + }, + ) + .0 + .into_iter() + .map(|(_, aggregate)| aggregate.expect("should have signed aggregate and proof")) + .collect::>(); + + let results = batch_verify_aggregated_attestations(aggregates.iter(), &harness.chain) + .expect("should return attestation results"); + let error = results + .into_iter() + .collect::, _>>() + .err() + .expect("should return an error"); + assert!( + matches!(error, Error::InvalidSignature), + "should reject aggregates with `data.slot` >= first capella slot signed using the pre-Capella fork" + ); + } +} diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 4b6d5b241..ce0ee940e 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -13,7 +13,8 @@ use slasher::{Config as SlasherConfig, Slasher}; use state_processing::{ common::get_indexed_attestation, per_block_processing::{per_block_processing, BlockSignatureStrategy}, - per_slot_processing, BlockProcessingError, ConsensusContext, VerifyBlockRoot, + per_slot_processing, BlockProcessingError, ConsensusContext, StateProcessingStrategy, + VerifyBlockRoot, }; use std::marker::PhantomData; use std::sync::Arc; @@ -1191,6 +1192,7 @@ async fn add_base_block_to_altair_chain() { &mut state, &base_block, BlockSignatureStrategy::NoVerification, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &harness.chain.spec, @@ -1329,6 +1331,7 @@ async fn add_altair_block_to_base_chain() { &mut state, &altair_block, BlockSignatureStrategy::NoVerification, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &harness.chain.spec, 
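// --- Illustrative sketch (not part of the diff) ------------------------------------
// The block-replay call sites above now pass a `StateProcessingStrategy` (the renamed
// `StateRootStrategy`) alongside the signature strategy. A caller replaying a block
// outside these tests would look roughly like this; the helper name is illustrative and
// `ConsensusContext::new` is assumed to take the block's slot, as at other call sites.
fn replay_block_sketch<E: EthSpec>(
    state: &mut BeaconState<E>,
    block: &SignedBeaconBlock<E>,
    spec: &ChainSpec,
) -> Result<(), BlockProcessingError> {
    let mut ctxt = ConsensusContext::new(block.slot());
    per_block_processing(
        state,
        block,
        BlockSignatureStrategy::NoVerification,
        StateProcessingStrategy::Accurate,
        VerifyBlockRoot::True,
        &mut ctxt,
        spec,
    )
}
// ------------------------------------------------------------------------------------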
diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index e85b021f5..4faa66b75 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -916,6 +916,9 @@ async fn invalid_after_optimistic_sync() { .await, ); + // EL status should still be online, no errors. + assert!(!rig.execution_layer().is_offline_or_erroring().await); + // Running fork choice is necessary since a block has been invalidated. rig.recompute_head().await; diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index 239f55e7d..4204a5121 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -5,12 +5,16 @@ use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType, Relativ use int_to_bytes::int_to_bytes32; use lazy_static::lazy_static; use safe_arith::SafeArith; +use state_processing::{ + per_block_processing::{altair::sync_committee::process_sync_aggregate, VerifySignatures}, + state_advance::complete_state_advance, +}; use store::{SignedContributionAndProof, SyncCommitteeMessage}; use tree_hash::TreeHash; use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use types::{ AggregateSignature, Epoch, EthSpec, Hash256, Keypair, MainnetEthSpec, SecretKey, Slot, - SyncSelectionProof, SyncSubnetId, Unsigned, + SyncContributionData, SyncSelectionProof, SyncSubnetId, Unsigned, }; pub type E = MainnetEthSpec; @@ -47,10 +51,29 @@ fn get_valid_sync_committee_message( relative_sync_committee: RelativeSyncCommittee, message_index: usize, ) -> (SyncCommitteeMessage, usize, SecretKey, SyncSubnetId) { - let head_state = harness.chain.head_beacon_state_cloned(); let head_block_root = harness.chain.head_snapshot().beacon_block_root; + get_valid_sync_committee_message_for_block( + harness, + slot, + relative_sync_committee, + message_index, + head_block_root, + ) +} + +/// Returns a sync message that is valid for some slot in the given `chain`. +/// +/// Also returns some info about who created it. 
+fn get_valid_sync_committee_message_for_block( + harness: &BeaconChainHarness>, + slot: Slot, + relative_sync_committee: RelativeSyncCommittee, + message_index: usize, + block_root: Hash256, +) -> (SyncCommitteeMessage, usize, SecretKey, SyncSubnetId) { + let head_state = harness.chain.head_beacon_state_cloned(); let (signature, _) = harness - .make_sync_committee_messages(&head_state, head_block_root, slot, relative_sync_committee) + .make_sync_committee_messages(&head_state, block_root, slot, relative_sync_committee) .get(0) .expect("sync messages should exist") .get(message_index) @@ -119,7 +142,7 @@ fn get_non_aggregator( subcommittee.iter().find_map(|pubkey| { let validator_index = harness .chain - .validator_index(&pubkey) + .validator_index(pubkey) .expect("should get validator index") .expect("pubkey should exist in beacon chain"); @@ -376,7 +399,7 @@ async fn aggregated_gossip_verification() { SyncCommitteeError::AggregatorNotInCommittee { aggregator_index } - if aggregator_index == valid_aggregate.message.aggregator_index as u64 + if aggregator_index == valid_aggregate.message.aggregator_index ); /* @@ -472,7 +495,7 @@ async fn aggregated_gossip_verification() { assert_invalid!( "sync contribution created with incorrect sync committee", - next_valid_contribution.clone(), + next_valid_contribution, SyncCommitteeError::InvalidSignature | SyncCommitteeError::AggregatorNotInCommittee { .. } ); } @@ -496,6 +519,30 @@ async fn unaggregated_gossip_verification() { let (valid_sync_committee_message, expected_validator_index, validator_sk, subnet_id) = get_valid_sync_committee_message(&harness, current_slot, RelativeSyncCommittee::Current, 0); + let parent_root = harness.chain.head_snapshot().beacon_block.parent_root(); + let (valid_sync_committee_message_to_parent, _, _, _) = + get_valid_sync_committee_message_for_block( + &harness, + current_slot, + RelativeSyncCommittee::Current, + 0, + parent_root, + ); + + assert_eq!( + valid_sync_committee_message.slot, valid_sync_committee_message_to_parent.slot, + "test pre-condition: same slot" + ); + assert_eq!( + valid_sync_committee_message.validator_index, + valid_sync_committee_message_to_parent.validator_index, + "test pre-condition: same validator index" + ); + assert!( + valid_sync_committee_message.beacon_block_root + != valid_sync_committee_message_to_parent.beacon_block_root, + "test pre-condition: differing roots" + ); macro_rules! assert_invalid { ($desc: tt, $attn_getter: expr, $subnet_getter: expr, $($error: pat_param) |+ $( if $guard: expr )?) => { @@ -602,28 +649,130 @@ async fn unaggregated_gossip_verification() { SyncCommitteeError::InvalidSignature ); + let head_root = valid_sync_committee_message.beacon_block_root; + let parent_root = valid_sync_committee_message_to_parent.beacon_block_root; + + let verifed_message_to_parent = harness + .chain + .verify_sync_committee_message_for_gossip( + valid_sync_committee_message_to_parent.clone(), + subnet_id, + ) + .expect("valid sync message to parent should be verified"); + // Add the aggregate to the pool. harness .chain - .verify_sync_committee_message_for_gossip(valid_sync_committee_message.clone(), subnet_id) - .expect("valid sync message should be verified"); + .add_to_naive_sync_aggregation_pool(verifed_message_to_parent) + .unwrap(); /* * The following test ensures that: * - * There has been no other valid sync committee message for the declared slot for the - * validator referenced by sync_committee_message.validator_index. 
+ * A sync committee message from the same validator to the same block will + * be rejected. */ assert_invalid!( - "sync message that has already been seen", - valid_sync_committee_message, + "sync message to parent block that has already been seen", + valid_sync_committee_message_to_parent.clone(), subnet_id, SyncCommitteeError::PriorSyncCommitteeMessageKnown { validator_index, slot, + prev_root, + new_root } - if validator_index == expected_validator_index as u64 && slot == current_slot + if validator_index == expected_validator_index as u64 && slot == current_slot && prev_root == parent_root && new_root == parent_root ); + let verified_message_to_head = harness + .chain + .verify_sync_committee_message_for_gossip(valid_sync_committee_message.clone(), subnet_id) + .expect("valid sync message to the head should be verified"); + // Add the aggregate to the pool. + harness + .chain + .add_to_naive_sync_aggregation_pool(verified_message_to_head) + .unwrap(); + + /* + * The following test ensures that: + * + * A sync committee message from the same validator to the same block will + * be rejected. + */ + assert_invalid!( + "sync message to the head that has already been seen", + valid_sync_committee_message.clone(), + subnet_id, + SyncCommitteeError::PriorSyncCommitteeMessageKnown { + validator_index, + slot, + prev_root, + new_root + } + if validator_index == expected_validator_index as u64 && slot == current_slot && prev_root == head_root && new_root == head_root + ); + + /* + * The following test ensures that: + * + * A sync committee message from the same validator to a non-head block will + * be rejected. + */ + assert_invalid!( + "sync message to parent after message to head has already been seen", + valid_sync_committee_message_to_parent.clone(), + subnet_id, + SyncCommitteeError::PriorSyncCommitteeMessageKnown { + validator_index, + slot, + prev_root, + new_root + } + if validator_index == expected_validator_index as u64 && slot == current_slot && prev_root == head_root && new_root == parent_root + ); + + // Ensure that the sync aggregates in the op pool for both the parent block and head block are valid. + let chain = &harness.chain; + let check_sync_aggregate = |root: Hash256| async move { + // Generate an aggregate sync message from the naive aggregation pool. + let aggregate = chain + .get_aggregated_sync_committee_contribution(&SyncContributionData { + // It's a test pre-condition that both sync messages have the same slot. + slot: valid_sync_committee_message.slot, + beacon_block_root: root, + subcommittee_index: subnet_id.into(), + }) + .unwrap() + .unwrap(); + + // Insert the aggregate into the op pool. + chain.op_pool.insert_sync_contribution(aggregate).unwrap(); + + // Load the block and state for the given root. + let block = chain.get_block(&root).await.unwrap().unwrap(); + let mut state = chain.get_state(&block.state_root(), None).unwrap().unwrap(); + + // Advance the state to simulate a pre-state for block production. + let slot = valid_sync_committee_message.slot + 1; + complete_state_advance(&mut state, Some(block.state_root()), slot, &chain.spec).unwrap(); + + // Get an aggregate that would be included in a block. + let aggregate_for_inclusion = chain.op_pool.get_sync_aggregate(&state).unwrap().unwrap(); + + // Validate the retrieved aggregate against the state. 
+ process_sync_aggregate( + &mut state, + &aggregate_for_inclusion, + 0, + VerifySignatures::True, + &chain.spec, + ) + .unwrap(); + }; + check_sync_aggregate(valid_sync_committee_message.beacon_block_root).await; + check_sync_aggregate(valid_sync_committee_message_to_parent.beacon_block_root).await; + /* * The following test ensures that: * @@ -649,7 +798,7 @@ async fn unaggregated_gossip_verification() { assert_invalid!( "sync message on incorrect subnet", - next_valid_sync_committee_message.clone(), + next_valid_sync_committee_message, next_subnet_id, SyncCommitteeError::InvalidSubnetId { received, diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 876458eea..d39bb2e3e 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -6,7 +6,6 @@ edition = "2021" [dev-dependencies] serde_yaml = "0.8.13" -logging = { path = "../../common/logging" } state_processing = { path = "../../consensus/state_processing" } operation_pool = { path = "../operation_pool" } tokio = "1.14.0" @@ -17,6 +16,7 @@ store = { path = "../store" } network = { path = "../network" } timer = { path = "../timer" } lighthouse_network = { path = "../lighthouse_network" } +logging = { path = "../../common/logging" } parking_lot = "0.12.0" types = { path = "../../consensus/types" } eth2_config = { path = "../../common/eth2_config" } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index c977746c7..70157b05f 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -259,6 +259,12 @@ where genesis_state_bytes, } => { info!(context.log(), "Starting checkpoint sync"); + if config.chain.genesis_backfill { + info!( + context.log(), + "Blocks will downloaded all the way back to genesis" + ); + } let anchor_state = BeaconState::from_ssz_bytes(&anchor_state_bytes, &spec) .map_err(|e| format!("Unable to parse weak subj state SSZ: {:?}", e))?; @@ -280,6 +286,12 @@ where "Starting checkpoint sync"; "remote_url" => %url, ); + if config.chain.genesis_backfill { + info!( + context.log(), + "Blocks will be downloaded all the way back to genesis" + ); + } let remote = BeaconNodeHttpClient::new( url, @@ -475,6 +487,7 @@ where network_globals: None, eth1_service: Some(genesis_service.eth1_service.clone()), log: context.log().clone(), + sse_logging_components: runtime_context.sse_logging_components.clone(), }); // Discard the error from the oneshot. @@ -695,6 +708,7 @@ where network_senders: self.network_senders.clone(), network_globals: self.network_globals.clone(), eth1_service: self.eth1_service.clone(), + sse_logging_components: runtime_context.sse_logging_components.clone(), log: log.clone(), }); diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 1105bc41f..1ff469fe3 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -142,7 +142,8 @@ pub fn spawn_notifier( .get_anchor_info() .map(|ai| ai.oldest_block_slot) { - sync_distance = current_anchor_slot; + sync_distance = current_anchor_slot + .saturating_sub(beacon_chain.genesis_backfill_slot); speedo // For backfill sync use a fake slot which is the distance we've progressed from the starting `oldest_block_slot`. 
.observe( @@ -207,14 +208,14 @@ pub fn spawn_notifier( "Downloading historical blocks"; "distance" => distance, "speed" => sync_speed_pretty(speed), - "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(original_anchor_slot.unwrap_or(current_slot))), + "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(original_anchor_slot.unwrap_or(current_slot).saturating_sub(beacon_chain.genesis_backfill_slot))), ); } else { info!( log, "Downloading historical blocks"; "distance" => distance, - "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(original_anchor_slot.unwrap_or(current_slot))), + "est_time" => estimated_time_pretty(speedo.estimated_time_till_slot(original_anchor_slot.unwrap_or(current_slot).saturating_sub(beacon_chain.genesis_backfill_slot))), ); } } else if !is_backfilling && last_backfill_log_slot.is_some() { diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index e0dd797bf..cc982aee0 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -7,7 +7,6 @@ edition = "2021" [dev-dependencies] eth1_test_rig = { path = "../../testing/eth1_test_rig" } serde_yaml = "0.8.13" -web3 = { version = "0.18.0", default-features = false, features = ["http-tls", "signing", "ws-tls-tokio"] } sloggers = { version = "2.1.1", features = ["json"] } environment = { path = "../../lighthouse/environment" } @@ -20,9 +19,9 @@ serde = { version = "1.0.116", features = ["derive"] } hex = "0.4.2" types = { path = "../../consensus/types"} merkle_proof = { path = "../../consensus/merkle_proof"} -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" -tree_hash = "0.4.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" +tree_hash = "0.5.0" parking_lot = "0.12.0" slog = "2.5.2" superstruct = "0.5.0" diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs index cd680478c..505e4a479 100644 --- a/beacon_node/eth1/tests/test.rs +++ b/beacon_node/eth1/tests/test.rs @@ -2,7 +2,7 @@ use environment::{Environment, EnvironmentBuilder}; use eth1::{Config, Eth1Endpoint, Service}; use eth1::{DepositCache, DEFAULT_CHAIN_ID}; -use eth1_test_rig::GanacheEth1Instance; +use eth1_test_rig::{AnvilEth1Instance, Http, Middleware, Provider}; use execution_layer::http::{deposit_methods::*, HttpJsonRpc, Log}; use merkle_proof::verify_merkle_proof; use sensitive_url::SensitiveUrl; @@ -12,7 +12,6 @@ use std::ops::Range; use std::time::Duration; use tree_hash::TreeHash; use types::{DepositData, EthSpec, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, Signature}; -use web3::{transports::Http, Web3}; const DEPOSIT_CONTRACT_TREE_DEPTH: usize = 32; @@ -53,7 +52,7 @@ fn random_deposit_data() -> DepositData { /// Blocking operation to get the deposit logs from the `deposit_contract`. async fn blocking_deposit_logs( client: &HttpJsonRpc, - eth1: &GanacheEth1Instance, + eth1: &AnvilEth1Instance, range: Range, ) -> Vec { client @@ -65,7 +64,7 @@ async fn blocking_deposit_logs( /// Blocking operation to get the deposit root from the `deposit_contract`. async fn blocking_deposit_root( client: &HttpJsonRpc, - eth1: &GanacheEth1Instance, + eth1: &AnvilEth1Instance, block_number: u64, ) -> Option { client @@ -77,7 +76,7 @@ async fn blocking_deposit_root( /// Blocking operation to get the deposit count from the `deposit_contract`. 
async fn blocking_deposit_count( client: &HttpJsonRpc, - eth1: &GanacheEth1Instance, + eth1: &AnvilEth1Instance, block_number: u64, ) -> Option { client @@ -86,16 +85,16 @@ async fn blocking_deposit_count( .expect("should get deposit count") } -async fn get_block_number(web3: &Web3) -> u64 { - web3.eth() - .block_number() +async fn get_block_number(client: &Provider) -> u64 { + client + .get_block_number() .await .map(|v| v.as_u64()) .expect("should get block number") } -async fn new_ganache_instance() -> Result { - GanacheEth1Instance::new(DEFAULT_CHAIN_ID.into()).await +async fn new_anvil_instance() -> Result { + AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into()).await } mod eth1_cache { @@ -108,13 +107,13 @@ mod eth1_cache { let log = null_logger(); for follow_distance in 0..3 { - let eth1 = new_ganache_instance() + let eth1 = new_anvil_instance() .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let anvil_client = eth1.json_rpc_client(); - let initial_block_number = get_block_number(&web3).await; + let initial_block_number = get_block_number(&anvil_client).await; let config = Config { endpoint: Eth1Endpoint::NoAuth( @@ -146,7 +145,7 @@ mod eth1_cache { }; for _ in 0..blocks { - eth1.ganache.evm_mine().await.expect("should mine block"); + eth1.anvil.evm_mine().await.expect("should mine block"); } service @@ -189,11 +188,11 @@ mod eth1_cache { async { let log = null_logger(); - let eth1 = new_ganache_instance() + let eth1 = new_anvil_instance() .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let anvil_client = eth1.json_rpc_client(); let cache_len = 4; @@ -203,7 +202,7 @@ mod eth1_cache { SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), ), deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: get_block_number(&web3).await, + lowest_cached_block_number: get_block_number(&anvil_client).await, follow_distance: 0, block_cache_truncation: Some(cache_len), ..Config::default() @@ -216,7 +215,7 @@ mod eth1_cache { let blocks = cache_len * 2; for _ in 0..blocks { - eth1.ganache.evm_mine().await.expect("should mine block") + eth1.anvil.evm_mine().await.expect("should mine block") } service @@ -244,11 +243,11 @@ mod eth1_cache { async { let log = null_logger(); - let eth1 = new_ganache_instance() + let eth1 = new_anvil_instance() .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let anvil_client = eth1.json_rpc_client(); let cache_len = 4; @@ -258,7 +257,7 @@ mod eth1_cache { SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), ), deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: get_block_number(&web3).await, + lowest_cached_block_number: get_block_number(&anvil_client).await, follow_distance: 0, block_cache_truncation: Some(cache_len), ..Config::default() @@ -270,7 +269,7 @@ mod eth1_cache { for _ in 0..4u8 { for _ in 0..cache_len / 2 { - eth1.ganache.evm_mine().await.expect("should mine block") + eth1.anvil.evm_mine().await.expect("should mine block") } service .update_deposit_cache(None) @@ -298,11 +297,11 @@ mod eth1_cache { let n = 16; - let eth1 = new_ganache_instance() + let eth1 = new_anvil_instance() .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let anvil_client = eth1.json_rpc_client(); let service = Service::new( Config { @@ -310,7 
+309,7 @@ mod eth1_cache { SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), ), deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: get_block_number(&web3).await, + lowest_cached_block_number: get_block_number(&anvil_client).await, follow_distance: 0, ..Config::default() }, @@ -320,7 +319,7 @@ mod eth1_cache { .unwrap(); for _ in 0..n { - eth1.ganache.evm_mine().await.expect("should mine block") + eth1.anvil.evm_mine().await.expect("should mine block") } futures::try_join!( @@ -341,6 +340,7 @@ mod eth1_cache { } mod deposit_tree { + use super::*; #[tokio::test] @@ -350,13 +350,13 @@ mod deposit_tree { let n = 4; - let eth1 = new_ganache_instance() + let eth1 = new_anvil_instance() .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let anvil_client = eth1.json_rpc_client(); - let start_block = get_block_number(&web3).await; + let start_block = get_block_number(&anvil_client).await; let service = Service::new( Config { @@ -431,13 +431,13 @@ mod deposit_tree { let n = 8; - let eth1 = new_ganache_instance() + let eth1 = new_anvil_instance() .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let anvil_client = eth1.json_rpc_client(); - let start_block = get_block_number(&web3).await; + let start_block = get_block_number(&anvil_client).await; let service = Service::new( Config { @@ -484,11 +484,12 @@ mod deposit_tree { let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); - let eth1 = new_ganache_instance() + let eth1 = new_anvil_instance() .await .expect("should start eth1 environment"); + let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let anvil_client = eth1.json_rpc_client(); let mut deposit_roots = vec![]; let mut deposit_counts = vec![]; @@ -502,7 +503,7 @@ mod deposit_tree { .deposit(deposit.clone()) .await .expect("should perform a deposit"); - let block_number = get_block_number(&web3).await; + let block_number = get_block_number(&anvil_client).await; deposit_roots.push( blocking_deposit_root(&client, ð1, block_number) .await @@ -518,7 +519,7 @@ mod deposit_tree { let mut tree = DepositCache::default(); // Pull all the deposit logs from the contract. - let block_number = get_block_number(&web3).await; + let block_number = get_block_number(&anvil_client).await; let logs: Vec<_> = blocking_deposit_logs(&client, ð1, 0..block_number) .await .iter() @@ -593,15 +594,15 @@ mod http { #[tokio::test] async fn incrementing_deposits() { async { - let eth1 = new_ganache_instance() + let eth1 = new_anvil_instance() .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let anvil_client = eth1.json_rpc_client(); let client = HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap(); - let block_number = get_block_number(&web3).await; + let block_number = get_block_number(&anvil_client).await; let logs = blocking_deposit_logs(&client, ð1, 0..block_number).await; assert_eq!(logs.len(), 0); @@ -616,10 +617,10 @@ mod http { ); for i in 1..=8 { - eth1.ganache + eth1.anvil .increase_time(1) .await - .expect("should be able to increase time on ganache"); + .expect("should be able to increase time on anvil"); deposit_contract .deposit(random_deposit_data()) @@ -627,7 +628,7 @@ mod http { .expect("should perform a deposit"); // Check the logs. 
- let block_number = get_block_number(&web3).await; + let block_number = get_block_number(&anvil_client).await; let logs = blocking_deposit_logs(&client, ð1, 0..block_number).await; assert_eq!(logs.len(), i, "the number of logs should be as expected"); @@ -690,13 +691,13 @@ mod fast { async { let log = null_logger(); - let eth1 = new_ganache_instance() + let eth1 = new_anvil_instance() .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let anvil_client = eth1.json_rpc_client(); - let now = get_block_number(&web3).await; + let now = get_block_number(&anvil_client).await; let spec = MainnetEthSpec::default_spec(); let service = Service::new( Config { @@ -724,7 +725,7 @@ mod fast { .await .expect("should perform a deposit"); // Mine an extra block between deposits to test for corner cases - eth1.ganache.evm_mine().await.expect("should mine block"); + eth1.anvil.evm_mine().await.expect("should mine block"); } service @@ -737,7 +738,7 @@ mod fast { "should have imported n deposits" ); - for block_num in 0..=get_block_number(&web3).await { + for block_num in 0..=get_block_number(&anvil_client).await { let expected_deposit_count = blocking_deposit_count(&client, ð1, block_num).await; let expected_deposit_root = blocking_deposit_root(&client, ð1, block_num).await; @@ -773,13 +774,13 @@ mod persist { async { let log = null_logger(); - let eth1 = new_ganache_instance() + let eth1 = new_anvil_instance() .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let anvil_client = eth1.json_rpc_client(); - let now = get_block_number(&web3).await; + let now = get_block_number(&anvil_client).await; let config = Config { endpoint: Eth1Endpoint::NoAuth( SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index d001a482d..f561c972f 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -13,7 +13,7 @@ slog = "2.5.2" futures = "0.3.7" sensitive_url = { path = "../../common/sensitive_url" } reqwest = { version = "0.11.0", features = ["json","stream"] } -eth2_serde_utils = "0.1.1" +ethereum_serde_utils = "0.5.0" serde_json = "1.0.58" serde = { version = "1.0.116", features = ["derive"] } warp = { version = "0.3.2", features = ["tls"] } @@ -22,16 +22,16 @@ environment = { path = "../../lighthouse/environment" } bytes = "1.1.0" task_executor = { path = "../../common/task_executor" } hex = "0.4.2" -eth2_ssz = "0.4.1" -eth2_ssz_types = "0.2.2" +ethereum_ssz = "0.5.0" +ssz_types = "0.5.0" eth2 = { path = "../../common/eth2" } kzg = { path = "../../crypto/kzg" } state_processing = { path = "../../consensus/state_processing" } superstruct = "0.6.0" lru = "0.7.1" exit-future = "0.2.0" -tree_hash = "0.4.1" -tree_hash_derive = { path = "../../consensus/tree_hash_derive"} +tree_hash = "0.5.0" +tree_hash_derive = "0.5.0" parking_lot = "0.12.0" slot_clock = { path = "../../common/slot_clock" } tempfile = "3.1.0" diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index cb09d3a0b..0a5d155f5 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -142,11 +142,11 @@ pub enum BlockByNumberQuery<'a> { pub struct ExecutionBlock { #[serde(rename = "hash")] pub block_hash: ExecutionBlockHash, - #[serde(rename = "number", with = 
"eth2_serde_utils::u64_hex_be")] + #[serde(rename = "number", with = "serde_utils::u64_hex_be")] pub block_number: u64, pub parent_hash: ExecutionBlockHash, pub total_difficulty: Uint256, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub timestamp: u64, } @@ -172,13 +172,13 @@ pub struct ExecutionBlockWithTransactions { pub logs_bloom: FixedVector, #[serde(alias = "mixHash")] pub prev_randao: Hash256, - #[serde(rename = "number", with = "eth2_serde_utils::u64_hex_be")] + #[serde(rename = "number", with = "serde_utils::u64_hex_be")] pub block_number: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub gas_limit: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub gas_used: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, @@ -189,7 +189,7 @@ pub struct ExecutionBlockWithTransactions { #[superstruct(only(Capella, Deneb))] pub withdrawals: Vec, #[superstruct(only(Deneb))] - #[serde(with = "eth2_serde_utils::u256_hex_be")] + #[serde(with = "serde_utils::u256_hex_be")] pub excess_data_gas: Uint256, } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 137ba5318..8e403b2be 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -993,7 +993,7 @@ impl HttpJsonRpc { ) -> Result>>, Error> { #[derive(Serialize)] #[serde(transparent)] - struct Quantity(#[serde(with = "eth2_serde_utils::u64_hex_be")] u64); + struct Quantity(#[serde(with = "serde_utils::u64_hex_be")] u64); let params = json!([Quantity(start), Quantity(count)]); let response: Vec>> = self diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 6f35b5285..d541107d2 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -37,7 +37,7 @@ pub struct JsonResponseBody { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(transparent)] -pub struct TransparentJsonPayloadId(#[serde(with = "eth2_serde_utils::bytes_8_hex")] pub PayloadId); +pub struct TransparentJsonPayloadId(#[serde(with = "serde_utils::bytes_8_hex")] pub PayloadId); impl From for TransparentJsonPayloadId { fn from(id: PayloadId) -> Self { @@ -58,7 +58,7 @@ pub type JsonPayloadIdRequest = TransparentJsonPayloadId; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct JsonPayloadIdResponse { - #[serde(with = "eth2_serde_utils::bytes_8_hex")] + #[serde(with = "serde_utils::bytes_8_hex")] pub payload_id: PayloadId, } @@ -81,17 +81,17 @@ pub struct JsonExecutionPayload { #[serde(with = "serde_logs_bloom")] pub logs_bloom: FixedVector, pub prev_randao: Hash256, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub block_number: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub gas_limit: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub gas_used: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub timestamp: u64, 
#[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, - #[serde(with = "eth2_serde_utils::u256_hex_be")] + #[serde(with = "serde_utils::u256_hex_be")] pub base_fee_per_gas: Uint256, pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] @@ -99,7 +99,7 @@ pub struct JsonExecutionPayload { #[superstruct(only(V2, V3))] pub withdrawals: VariableList, #[superstruct(only(V3))] - #[serde(with = "eth2_serde_utils::u256_hex_be")] + #[serde(with = "serde_utils::u256_hex_be")] pub excess_data_gas: Uint256, } @@ -289,7 +289,7 @@ pub struct JsonGetPayloadResponse { pub execution_payload: JsonExecutionPayloadV2, #[superstruct(only(V3), partial_getter(rename = "execution_payload_v3"))] pub execution_payload: JsonExecutionPayloadV3, - #[serde(with = "eth2_serde_utils::u256_hex_be")] + #[serde(with = "serde_utils::u256_hex_be")] pub block_value: Uint256, #[superstruct(only(V3))] pub blobs_bundle: JsonBlobsBundleV1, @@ -324,12 +324,12 @@ impl From> for GetPayloadResponse { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct JsonWithdrawal { - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub index: u64, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub validator_index: u64, pub address: Address, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub amount: u64, } @@ -367,7 +367,7 @@ impl From for Withdrawal { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(untagged)] pub struct JsonPayloadAttributes { - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub timestamp: u64, pub prev_randao: Hash256, pub suggested_fee_recipient: Address, @@ -620,18 +620,18 @@ impl From> for ExecutionPayloadBodyV1< #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct TransitionConfigurationV1 { - #[serde(with = "eth2_serde_utils::u256_hex_be")] + #[serde(with = "serde_utils::u256_hex_be")] pub terminal_total_difficulty: Uint256, pub terminal_block_hash: ExecutionBlockHash, - #[serde(with = "eth2_serde_utils::u64_hex_be")] + #[serde(with = "serde_utils::u64_hex_be")] pub terminal_block_number: u64, } /// Serializes the `logs_bloom` field of an `ExecutionPayload`. pub mod serde_logs_bloom { use super::*; - use eth2_serde_utils::hex::PrefixedHexVisitor; use serde::{Deserializer, Serializer}; + use serde_utils::hex::PrefixedHexVisitor; pub fn serialize(bytes: &FixedVector, serializer: S) -> Result where diff --git a/beacon_node/execution_layer/src/engines.rs b/beacon_node/execution_layer/src/engines.rs index ce413cb11..362f5b0b2 100644 --- a/beacon_node/execution_layer/src/engines.rs +++ b/beacon_node/execution_layer/src/engines.rs @@ -238,6 +238,11 @@ impl Engine { **self.state.read().await == EngineStateInternal::Synced } + /// Returns `true` if the engine has a status other than synced or syncing. + pub async fn is_offline(&self) -> bool { + EngineState::from(**self.state.read().await) == EngineState::Offline + } + /// Run the `EngineApi::upcheck` function if the node's last known state is not synced. This /// might be used to recover the node if offline. 
pub async fn upcheck(&self) { diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 0e1fddfad..1033c743e 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -307,6 +307,11 @@ struct Inner { builder_profit_threshold: Uint256, log: Logger, always_prefer_builder_payload: bool, + /// Track whether the last `newPayload` call errored. + /// + /// This is used *only* in the informational sync status endpoint, so that a VC using this + /// node can prefer another node with a healthier EL. + last_new_payload_errored: RwLock, } #[derive(Debug, Default, Clone, Serialize, Deserialize)] @@ -413,7 +418,7 @@ impl ExecutionLayer { info!( log, - "Connected to external block builder"; + "Using external block builder"; "builder_url" => ?url, "builder_profit_threshold" => builder_profit_threshold, "local_user_agent" => builder_client.get_user_agent(), @@ -435,6 +440,7 @@ impl ExecutionLayer { builder_profit_threshold: Uint256::from(builder_profit_threshold), log, always_prefer_builder_payload, + last_new_payload_errored: RwLock::new(false), }; Ok(Self { @@ -627,6 +633,15 @@ impl ExecutionLayer { synced } + /// Return `true` if the execution layer is offline or returning errors on `newPayload`. + /// + /// This function should never be used to prevent any operation in the beacon node, but can + /// be used to give an indication on the HTTP API that the node's execution layer is struggling, + /// which can in turn be used by the VC. + pub async fn is_offline_or_erroring(&self) -> bool { + self.engine().is_offline().await || *self.inner.last_new_payload_errored.read().await + } + /// Updates the proposer preparation data provided by validators pub async fn update_proposer_preparation( &self, @@ -1192,18 +1207,6 @@ impl ExecutionLayer { } /// Maps to the `engine_newPayload` JSON-RPC call. - /// - /// ## Fallback Behaviour - /// - /// The request will be broadcast to all nodes, simultaneously. It will await a response (or - /// failure) from all nodes and then return based on the first of these conditions which - /// returns true: - /// - /// - Error::ConsensusFailure if some nodes return valid and some return invalid - /// - Valid, if any nodes return valid. - /// - Invalid, if any nodes return invalid. - /// - Syncing, if any nodes return syncing. - /// - An error, if all nodes return an error. pub async fn notify_new_payload( &self, execution_payload: &ExecutionPayload, @@ -1232,12 +1235,18 @@ impl ExecutionLayer { &["new_payload", status.status.into()], ); } + *self.inner.last_new_payload_errored.write().await = result.is_err(); process_payload_status(execution_payload.block_hash(), result, self.log()) .map_err(Box::new) .map_err(Error::EngineError) } + /// Update engine sync status. + pub async fn upcheck(&self) { + self.engine().upcheck().await; + } + /// Register that the given `validator_index` is going to produce a block at `slot`. /// /// The block will be built atop `head_block_root` and the EL will need to prepare an @@ -1297,18 +1306,6 @@ impl ExecutionLayer { } /// Maps to the `engine_consensusValidated` JSON-RPC call. - /// - /// ## Fallback Behaviour - /// - /// The request will be broadcast to all nodes, simultaneously. It will await a response (or - /// failure) from all nodes and then return based on the first of these conditions which - /// returns true: - /// - /// - Error::ConsensusFailure if some nodes return valid and some return invalid - /// - Valid, if any nodes return valid. 
- /// - Invalid, if any nodes return invalid. - /// - Syncing, if any nodes return syncing. - /// - An error, if all nodes return an error. pub async fn notify_forkchoice_updated( &self, head_block_hash: ExecutionBlockHash, @@ -2273,7 +2270,7 @@ fn ethers_tx_to_bytes( .ok_or(BlobTxConversionError::VersionedHashesMissing)? .iter() .map(|versioned_hash| { - let hash_bytes = eth2_serde_utils::hex::decode( + let hash_bytes = serde_utils::hex::decode( versioned_hash .as_str() .ok_or(BlobTxConversionError::VersionedHashesMissing)?, diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 6122f28dc..cd2024797 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -30,7 +30,12 @@ pub async fn handle_rpc( .map_err(|s| (s, GENERIC_ERROR_CODE))?; match method { - ETH_SYNCING => Ok(JsonValue::Bool(false)), + ETH_SYNCING => ctx + .syncing_response + .lock() + .clone() + .map(JsonValue::Bool) + .map_err(|message| (message, GENERIC_ERROR_CODE)), ETH_GET_BLOCK_BY_NUMBER => { let tag = params .get(0) @@ -180,7 +185,9 @@ pub async fn handle_rpc( // Canned responses set by block hash take priority. if let Some(status) = ctx.get_new_payload_status(request.block_hash()) { - return Ok(serde_json::to_value(JsonPayloadStatusV1::from(status)).unwrap()); + return status + .map(|status| serde_json::to_value(JsonPayloadStatusV1::from(status)).unwrap()) + .map_err(|message| (message, GENERIC_ERROR_CODE)); } let (static_response, should_import) = @@ -398,11 +405,15 @@ pub async fn handle_rpc( // Canned responses set by block hash take priority. if let Some(status) = ctx.get_fcu_payload_status(&head_block_hash) { - let response = JsonForkchoiceUpdatedV1Response { - payload_status: JsonPayloadStatusV1::from(status), - payload_id: None, - }; - return Ok(serde_json::to_value(response).unwrap()); + return status + .map(|status| { + let response = JsonForkchoiceUpdatedV1Response { + payload_status: JsonPayloadStatusV1::from(status), + payload_id: None, + }; + serde_json::to_value(response).unwrap() + }) + .map_err(|message| (message, GENERIC_ERROR_CODE)); } let mut response = ctx @@ -440,7 +451,7 @@ pub async fn handle_rpc( ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1 => { #[derive(Deserialize)] #[serde(transparent)] - struct Quantity(#[serde(with = "eth2_serde_utils::u64_hex_be")] pub u64); + struct Quantity(#[serde(with = "serde_utils::u64_hex_be")] pub u64); let start = get_param::(params, 0) .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))? 
diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index ef728722d..72cd0e81e 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -140,6 +140,7 @@ impl MockServer { hook: <_>::default(), new_payload_statuses: <_>::default(), fcu_payload_statuses: <_>::default(), + syncing_response: Arc::new(Mutex::new(Ok(false))), engine_capabilities: Arc::new(RwLock::new(DEFAULT_ENGINE_CAPABILITIES)), _phantom: PhantomData, }); @@ -433,14 +434,25 @@ impl MockServer { self.ctx .new_payload_statuses .lock() - .insert(block_hash, status); + .insert(block_hash, Ok(status)); } pub fn set_fcu_payload_status(&self, block_hash: ExecutionBlockHash, status: PayloadStatusV1) { self.ctx .fcu_payload_statuses .lock() - .insert(block_hash, status); + .insert(block_hash, Ok(status)); + } + + pub fn set_new_payload_error(&self, block_hash: ExecutionBlockHash, error: String) { + self.ctx + .new_payload_statuses + .lock() + .insert(block_hash, Err(error)); + } + + pub fn set_syncing_response(&self, res: Result) { + *self.ctx.syncing_response.lock() = res; } } @@ -497,8 +509,11 @@ pub struct Context { // // This is a more flexible and less stateful alternative to `static_new_payload_response` // and `preloaded_responses`. - pub new_payload_statuses: Arc>>, - pub fcu_payload_statuses: Arc>>, + pub new_payload_statuses: + Arc>>>, + pub fcu_payload_statuses: + Arc>>>, + pub syncing_response: Arc>>, pub engine_capabilities: Arc>, pub _phantom: PhantomData, @@ -508,14 +523,14 @@ impl Context { pub fn get_new_payload_status( &self, block_hash: &ExecutionBlockHash, - ) -> Option { + ) -> Option> { self.new_payload_statuses.lock().get(block_hash).cloned() } pub fn get_fcu_payload_status( &self, block_hash: &ExecutionBlockHash, - ) -> Option { + ) -> Option> { self.fcu_payload_statuses.lock().get(block_hash).cloned() } } diff --git a/beacon_node/genesis/Cargo.toml b/beacon_node/genesis/Cargo.toml index 87c56d360..8a7d22496 100644 --- a/beacon_node/genesis/Cargo.toml +++ b/beacon_node/genesis/Cargo.toml @@ -16,9 +16,9 @@ eth1 = { path = "../eth1"} rayon = "1.4.1" state_processing = { path = "../../consensus/state_processing" } merkle_proof = { path = "../../consensus/merkle_proof" } -eth2_ssz = "0.4.1" -eth2_hashing = "0.3.0" -tree_hash = "0.4.1" +ethereum_ssz = "0.5.0" +ethereum_hashing = "1.0.0-beta.2" +tree_hash = "0.5.0" tokio = { version = "1.14.0", features = ["full"] } slog = "2.5.2" int_to_bytes = { path = "../../consensus/int_to_bytes" } diff --git a/beacon_node/genesis/src/interop.rs b/beacon_node/genesis/src/interop.rs index 122ca8eda..d01298343 100644 --- a/beacon_node/genesis/src/interop.rs +++ b/beacon_node/genesis/src/interop.rs @@ -1,5 +1,5 @@ use crate::common::genesis_deposits; -use eth2_hashing::hash; +use ethereum_hashing::hash; use rayon::prelude::*; use ssz::Encode; use state_processing::initialize_beacon_state_from_eth1; diff --git a/beacon_node/genesis/tests/tests.rs b/beacon_node/genesis/tests/tests.rs index aaf6a7bea..f99fcb55b 100644 --- a/beacon_node/genesis/tests/tests.rs +++ b/beacon_node/genesis/tests/tests.rs @@ -1,11 +1,11 @@ -//! NOTE: These tests will not pass unless ganache is running on `ENDPOINT` (see below). +//! NOTE: These tests will not pass unless an anvil is running on `ENDPOINT` (see below). //! -//! You can start a suitable instance using the `ganache_test_node.sh` script in the `scripts` +//! 
You can start a suitable instance using the `anvil_test_node.sh` script in the `scripts` //! dir in the root of the `lighthouse` repo. #![cfg(test)] use environment::{Environment, EnvironmentBuilder}; use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID}; -use eth1_test_rig::{DelayThenDeposit, GanacheEth1Instance}; +use eth1_test_rig::{AnvilEth1Instance, DelayThenDeposit, Middleware}; use genesis::{Eth1Config, Eth1GenesisService}; use sensitive_url::SensitiveUrl; use state_processing::is_valid_genesis_state; @@ -29,15 +29,14 @@ fn basic() { let mut spec = env.eth2_config().spec.clone(); env.runtime().block_on(async { - let eth1 = GanacheEth1Instance::new(DEFAULT_CHAIN_ID.into()) + let eth1 = AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into()) .await .expect("should start eth1 environment"); let deposit_contract = ð1.deposit_contract; - let web3 = eth1.web3(); + let client = eth1.json_rpc_client(); - let now = web3 - .eth() - .block_number() + let now = client + .get_block_number() .await .map(|v| v.as_u64()) .expect("should get block number"); @@ -89,7 +88,7 @@ fn basic() { .map(|(_, state)| state) .expect("should finish waiting for genesis"); - // Note: using ganache these deposits are 1-per-block, therefore we know there should only be + // Note: using anvil these deposits are 1-per-block, therefore we know there should only be // the minimum number of validators. assert_eq!( state.validators().len(), diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index e251b0485..2b117b26c 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -24,7 +24,7 @@ lighthouse_metrics = { path = "../../common/lighthouse_metrics" } lazy_static = "1.4.0" warp_utils = { path = "../../common/warp_utils" } slot_clock = { path = "../../common/slot_clock" } -eth2_ssz = "0.4.1" +ethereum_ssz = "0.5.0" bs58 = "0.4.0" futures = "0.3.8" execution_layer = {path = "../execution_layer"} @@ -32,15 +32,15 @@ parking_lot = "0.12.0" safe_arith = {path = "../../consensus/safe_arith"} task_executor = { path = "../../common/task_executor" } lru = "0.7.7" -tree_hash = "0.4.1" +tree_hash = "0.5.0" sysinfo = "0.26.5" system_health = { path = "../../common/system_health" } directory = { path = "../../common/directory" } -eth2_serde_utils = "0.1.1" +logging = { path = "../../common/logging" } +ethereum_serde_utils = "0.5.0" operation_pool = { path = "../operation_pool" } sensitive_url = { path = "../../common/sensitive_url" } unused_port = {path = "../../common/unused_port"} -logging = { path = "../../common/logging" } store = { path = "../store" } [dev-dependencies] @@ -51,4 +51,4 @@ genesis = { path = "../genesis" } [[test]] name = "bn_http_api_tests" -path = "tests/main.rs" +path = "tests/main.rs" \ No newline at end of file diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 9f92ef2e9..a29e41f41 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -37,6 +37,7 @@ use eth2::types::{ }; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; +use logging::SSELoggingComponents; use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage}; use operation_pool::ReceivedPreCapella; use parking_lot::RwLock; @@ -109,6 +110,7 @@ pub struct Context { pub network_senders: Option>, pub network_globals: Option>>, pub eth1_service: Option, + pub sse_logging_components: Option, pub log: Logger, } @@ -449,6 +451,9 @@ pub fn serve( let 
inner_ctx = ctx.clone(); let log_filter = warp::any().map(move || inner_ctx.log.clone()); + let inner_components = ctx.sse_logging_components.clone(); + let sse_component_filter = warp::any().map(move || inner_components.clone()); + // Create a `warp` filter that provides access to local system information. let system_info = Arc::new(RwLock::new(sysinfo::System::new())); { @@ -2238,12 +2243,8 @@ pub fn serve( .parent .and_then(|index| proto_array.nodes.get(index)) .map(|parent| parent.root), - justified_epoch: node - .justified_checkpoint - .map(|checkpoint| checkpoint.epoch), - finalized_epoch: node - .finalized_checkpoint - .map(|checkpoint| checkpoint.epoch), + justified_epoch: node.justified_checkpoint.epoch, + finalized_epoch: node.finalized_checkpoint.epoch, weight: node.weight, validity: execution_status, execution_block_hash: node @@ -2325,28 +2326,40 @@ pub fn serve( .and(chain_filter.clone()) .and_then( |network_globals: Arc>, chain: Arc>| { - blocking_json_task(move || { - let head_slot = chain.canonical_head.cached_head().head_slot(); - let current_slot = chain.slot_clock.now_or_genesis().ok_or_else(|| { - warp_utils::reject::custom_server_error("Unable to read slot clock".into()) - })?; - - // Taking advantage of saturating subtraction on slot. - let sync_distance = current_slot - head_slot; - - let is_optimistic = chain - .is_optimistic_or_invalid_head() - .map_err(warp_utils::reject::beacon_chain_error)?; - - let syncing_data = api_types::SyncingData { - is_syncing: network_globals.sync_state.read().is_syncing(), - is_optimistic: Some(is_optimistic), - head_slot, - sync_distance, + async move { + let el_offline = if let Some(el) = &chain.execution_layer { + el.is_offline_or_erroring().await + } else { + true }; - Ok(api_types::GenericResponse::from(syncing_data)) - }) + blocking_json_task(move || { + let head_slot = chain.canonical_head.cached_head().head_slot(); + let current_slot = chain.slot_clock.now_or_genesis().ok_or_else(|| { + warp_utils::reject::custom_server_error( + "Unable to read slot clock".into(), + ) + })?; + + // Taking advantage of saturating subtraction on slot. 
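+                        // (i.e. if the head slot is ahead of the wall-clock slot, the distance clamps to zero rather than underflowing)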
+ let sync_distance = current_slot - head_slot; + + let is_optimistic = chain + .is_optimistic_or_invalid_head() + .map_err(warp_utils::reject::beacon_chain_error)?; + + let syncing_data = api_types::SyncingData { + is_syncing: network_globals.sync_state.read().is_syncing(), + is_optimistic: Some(is_optimistic), + el_offline: Some(el_offline), + head_slot, + sync_distance, + }; + + Ok(api_types::GenericResponse::from(syncing_data)) + }) + .await + } }, ); @@ -3760,6 +3773,44 @@ pub fn serve( }, ); + // Subscribe to logs via Server Side Events + // /lighthouse/logs + let lighthouse_log_events = warp::path("lighthouse") + .and(warp::path("logs")) + .and(warp::path::end()) + .and(sse_component_filter) + .and_then(|sse_component: Option| { + blocking_response_task(move || { + if let Some(logging_components) = sse_component { + // Build a JSON stream + let s = + BroadcastStream::new(logging_components.sender.subscribe()).map(|msg| { + match msg { + Ok(data) => { + // Serialize to json + match data.to_json_string() { + // Send the json as a Server Side Event + Ok(json) => Ok(Event::default().data(json)), + Err(e) => Err(warp_utils::reject::server_sent_event_error( + format!("Unable to serialize to JSON {}", e), + )), + } + } + Err(e) => Err(warp_utils::reject::server_sent_event_error( + format!("Unable to receive event {}", e), + )), + } + }); + + Ok::<_, warp::Rejection>(warp::sse::reply(warp::sse::keep_alive().stream(s))) + } else { + Err(warp_utils::reject::custom_server_error( + "SSE Logging is not enabled".to_string(), + )) + } + }) + }); + // Define the ultimate set of routes that will be provided to the server. // Use `uor` rather than `or` in order to simplify types (see `UnifyingOrFilter`). let routes = warp::get() @@ -3828,6 +3879,7 @@ pub fn serve( .uor(get_lighthouse_block_packing_efficiency) .uor(get_lighthouse_merge_readiness) .uor(get_events) + .uor(lighthouse_log_events.boxed()) .recover(warp_utils::reject::handle_rejection), ) .boxed() diff --git a/beacon_node/http_api/src/sync_committees.rs b/beacon_node/http_api/src/sync_committees.rs index a6acf308f..c728fbeb1 100644 --- a/beacon_node/http_api/src/sync_committees.rs +++ b/beacon_node/http_api/src/sync_committees.rs @@ -199,10 +199,14 @@ pub fn process_sync_committee_signatures( Err(SyncVerificationError::PriorSyncCommitteeMessageKnown { validator_index, slot, + prev_root, + new_root, }) => { debug!( log, "Ignoring already-known sync message"; + "new_root" => ?new_root, + "prev_root" => ?prev_root, "slot" => slot, "validator_index" => validator_index, ); diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index f995cf8f7..2371096e5 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -195,6 +195,7 @@ pub async fn create_api_server_on_port( network_senders: Some(network_senders), network_globals: Some(network_globals), eth1_service: Some(eth1_service), + sse_logging_components: None, log, }); diff --git a/beacon_node/http_api/src/ui.rs b/beacon_node/http_api/src/ui.rs index e8280a796..616745dbe 100644 --- a/beacon_node/http_api/src/ui.rs +++ b/beacon_node/http_api/src/ui.rs @@ -75,15 +75,15 @@ pub fn get_validator_count( #[derive(PartialEq, Serialize, Deserialize)] pub struct ValidatorInfoRequestData { - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] indices: Vec, } #[derive(PartialEq, Serialize, Deserialize)] pub struct ValidatorInfoValues { - #[serde(with = 
"eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] epoch: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] total_balance: u64, } @@ -165,6 +165,7 @@ pub struct ValidatorMetrics { attestation_target_hits: u64, attestation_target_misses: u64, attestation_target_hit_percentage: f64, + latest_attestation_inclusion_distance: u64, } #[derive(PartialEq, Serialize, Deserialize)] @@ -210,6 +211,8 @@ pub fn post_validator_monitor_metrics( let attestation_head_misses = val_metrics.attestation_head_misses; let attestation_target_hits = val_metrics.attestation_target_hits; let attestation_target_misses = val_metrics.attestation_target_misses; + let latest_attestation_inclusion_distance = + val_metrics.latest_attestation_inclusion_distance; drop(val_metrics); let attestations = attestation_hits + attestation_misses; @@ -242,6 +245,7 @@ pub fn post_validator_monitor_metrics( attestation_target_hits, attestation_target_misses, attestation_target_hit_percentage, + latest_attestation_inclusion_distance, }; validators.insert(id.clone(), metrics); diff --git a/beacon_node/http_api/tests/main.rs b/beacon_node/http_api/tests/main.rs index 342b72cc7..f5916d850 100644 --- a/beacon_node/http_api/tests/main.rs +++ b/beacon_node/http_api/tests/main.rs @@ -2,4 +2,5 @@ pub mod fork_tests; pub mod interactive_tests; +pub mod status_tests; pub mod tests; diff --git a/beacon_node/http_api/tests/status_tests.rs b/beacon_node/http_api/tests/status_tests.rs new file mode 100644 index 000000000..04df2379b --- /dev/null +++ b/beacon_node/http_api/tests/status_tests.rs @@ -0,0 +1,151 @@ +//! Tests related to the beacon node's sync status +use beacon_chain::{ + test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, + BlockError, +}; +use execution_layer::{PayloadStatusV1, PayloadStatusV1Status}; +use http_api::test_utils::InteractiveTester; +use types::{EthSpec, ExecPayload, ForkName, MinimalEthSpec, Slot}; + +type E = MinimalEthSpec; + +/// Create a new test environment that is post-merge with `chain_depth` blocks. +async fn post_merge_tester(chain_depth: u64, validator_count: u64) -> InteractiveTester { + // Test using latest fork so that we simulate conditions as similar to mainnet as possible. + // TODO(jimmy): We should change this back to `latest()`. These tests currently fail on Deneb because: + // 1. KZG library doesn't support Minimal spec, changing to Mainnet spec fixes some tests; BUT + // 2. `harness.process_block_result` in the test below panics due to + // `AvailabilityProcessingStatus::PendingBlobs`, and there seems to be some race + // condition going on, because the test passes if I step through the code in debug. + let mut spec = ForkName::Capella.make_genesis_spec(E::default_spec()); + spec.terminal_total_difficulty = 1.into(); + + let tester = InteractiveTester::::new(Some(spec), validator_count as usize).await; + let harness = &tester.harness; + let mock_el = harness.mock_execution_layer.as_ref().unwrap(); + let execution_ctx = mock_el.server.ctx.clone(); + + // Move to terminal block. + mock_el.server.all_payloads_valid(); + execution_ctx + .execution_block_generator + .write() + .move_to_terminal_block() + .unwrap(); + + // Create some chain depth. 
+    harness.advance_slot(); + harness + .extend_chain_with_sync( + chain_depth as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + SyncCommitteeStrategy::AllValidators, + ) + .await; + tester +} + +/// Check `syncing` endpoint when the EL is syncing. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn el_syncing_then_synced() { + let num_blocks = E::slots_per_epoch() / 2; + let num_validators = E::slots_per_epoch(); + let tester = post_merge_tester(num_blocks, num_validators).await; + let harness = &tester.harness; + let mock_el = harness.mock_execution_layer.as_ref().unwrap(); + + // EL syncing + mock_el.server.set_syncing_response(Ok(true)); + mock_el.el.upcheck().await; + + let api_response = tester.client.get_node_syncing().await.unwrap().data; + assert_eq!(api_response.el_offline, Some(false)); + assert_eq!(api_response.is_optimistic, Some(false)); + assert_eq!(api_response.is_syncing, false); + + // EL synced + mock_el.server.set_syncing_response(Ok(false)); + mock_el.el.upcheck().await; + + let api_response = tester.client.get_node_syncing().await.unwrap().data; + assert_eq!(api_response.el_offline, Some(false)); + assert_eq!(api_response.is_optimistic, Some(false)); + assert_eq!(api_response.is_syncing, false); +} + +/// Check `syncing` endpoint when the EL is offline (errors on upcheck). +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn el_offline() { + let num_blocks = E::slots_per_epoch() / 2; + let num_validators = E::slots_per_epoch(); + let tester = post_merge_tester(num_blocks, num_validators).await; + let harness = &tester.harness; + let mock_el = harness.mock_execution_layer.as_ref().unwrap(); + + // EL offline + mock_el.server.set_syncing_response(Err("offline".into())); + mock_el.el.upcheck().await; + + let api_response = tester.client.get_node_syncing().await.unwrap().data; + assert_eq!(api_response.el_offline, Some(true)); + assert_eq!(api_response.is_optimistic, Some(false)); + assert_eq!(api_response.is_syncing, false); +} + +/// Check `syncing` endpoint when the EL errors on newPayload but is not fully offline. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn el_error_on_new_payload() { + let num_blocks = E::slots_per_epoch() / 2; + let num_validators = E::slots_per_epoch(); + let tester = post_merge_tester(num_blocks, num_validators).await; + let harness = &tester.harness; + let mock_el = harness.mock_execution_layer.as_ref().unwrap(); + + // Make a block. + let pre_state = harness.get_current_state(); + let (block_contents, _) = harness + .make_block(pre_state, Slot::new(num_blocks + 1)) + .await; + let block = block_contents.0; + let block_hash = block + .message() + .body() + .execution_payload() + .unwrap() + .block_hash(); + + // Make sure `newPayload` errors for the new block. + mock_el + .server + .set_new_payload_error(block_hash, "error".into()); + + // Attempt to process the block, which should error. + harness.advance_slot(); + assert!(matches!( + harness.process_block_result(block.clone()).await, + Err(BlockError::ExecutionPayloadError(_)) + )); + + // The EL should now be *offline* according to the API. + let api_response = tester.client.get_node_syncing().await.unwrap().data; + assert_eq!(api_response.el_offline, Some(true)); + assert_eq!(api_response.is_optimistic, Some(false)); + assert_eq!(api_response.is_syncing, false); + + // Processing a block successfully should remove the status.
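+    // (a successful `newPayload` call resets `last_new_payload_errored`, so the API reports `el_offline: false` again)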
+ mock_el.server.set_new_payload_status( + block_hash, + PayloadStatusV1 { + status: PayloadStatusV1Status::Valid, + latest_valid_hash: Some(block_hash), + validation_error: None, + }, + ); + harness.process_block_result(block).await.unwrap(); + + let api_response = tester.client.get_node_syncing().await.unwrap().data; + assert_eq!(api_response.el_offline, Some(false)); + assert_eq!(api_response.is_optimistic, Some(false)); + assert_eq!(api_response.is_syncing, false); +} diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 739bab285..491c55845 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1729,6 +1729,8 @@ impl ApiTester { let expected = SyncingData { is_syncing: false, is_optimistic: Some(false), + // these tests run without the Bellatrix fork enabled + el_offline: Some(true), head_slot, sync_distance, }; @@ -1964,8 +1966,8 @@ impl ApiTester { .parent .and_then(|index| expected_proto_array.nodes.get(index)) .map(|parent| parent.root), - justified_epoch: node.justified_checkpoint.map(|checkpoint| checkpoint.epoch), - finalized_epoch: node.finalized_checkpoint.map(|checkpoint| checkpoint.epoch), + justified_epoch: node.justified_checkpoint.epoch, + finalized_epoch: node.finalized_checkpoint.epoch, weight: node.weight, validity: execution_status, execution_block_hash: node diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index dda797187..c1b4d7217 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -8,13 +8,13 @@ edition = "2021" discv5 = { version = "0.2.2", features = ["libp2p"] } unsigned-varint = { version = "0.6.0", features = ["codec"] } types = { path = "../../consensus/types" } -eth2_ssz_types = "0.2.2" +ssz_types = "0.5.0" serde = { version = "1.0.116", features = ["derive"] } serde_derive = "1.0.116" -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.0" -tree_hash = "0.4.1" -tree_hash_derive = "0.4.0" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" +tree_hash = "0.5.0" +tree_hash_derive = "0.5.0" slog = { version = "2.5.2", features = ["max_level_trace"] } lighthouse_version = { path = "../../common/lighthouse_version" } tokio = { version = "1.14.0", features = ["time", "macros"] } diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 95935e0f7..139974082 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -134,6 +134,9 @@ pub struct Config { /// List of extra topics to initially subscribe to as strings. pub topics: Vec, + /// Whether we are running a block proposer only node. + pub proposer_only: bool, + /// Whether metrics are enabled. pub metrics_enabled: bool, @@ -142,6 +145,9 @@ pub struct Config { /// Configuration for the outbound rate limiter (requests made by this node). pub outbound_rate_limiter_config: Option, + + /// Configures if/where invalid blocks should be stored. 
+ pub invalid_block_storage: Option, } impl Config { @@ -322,9 +328,11 @@ impl Default for Config { import_all_attestations: false, shutdown_after_sync: false, topics: Vec::new(), + proposer_only: false, metrics_enabled: false, enable_light_client_server: false, outbound_rate_limiter_config: None, + invalid_block_storage: None, } } } diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 8052d2a4f..2650d084d 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -952,6 +952,10 @@ impl PeerManager { /// MIN_SYNC_COMMITTEE_PEERS /// number should be set low as an absolute lower bound to maintain peers on the sync /// committees. + /// - Do not prune trusted peers. NOTE: This means if a user has more trusted peers than the + /// excess peer limit, all of the following logic is subverted as we will not prune any peers. + /// Also, the more trusted peers a user has, the less room Lighthouse has to efficiently manage + /// its peers across the subnets. /// /// Prune peers in the following order: /// 1. Remove worst scoring peers @@ -982,7 +986,9 @@ impl PeerManager { .read() .worst_connected_peers() .iter() - .filter(|(_, info)| !info.has_future_duty() && $filter(*info)) + .filter(|(_, info)| { + !info.has_future_duty() && !info.is_trusted() && $filter(*info) + }) { if peers_to_prune.len() >= connected_peer_count.saturating_sub(self.target_peers) @@ -1032,8 +1038,8 @@ impl PeerManager { > = HashMap::new(); for (peer_id, info) in self.network_globals.peers.read().connected_peers() { - // Ignore peers we are already pruning - if peers_to_prune.contains(peer_id) { + // Ignore peers we trust or that we are already pruning + if info.is_trusted() || peers_to_prune.contains(peer_id) { continue; } @@ -1330,25 +1336,47 @@ mod tests { ..Default::default() }; let log = build_log(slog::Level::Debug, false); - let globals = NetworkGlobals::new_test_globals(&log); + let globals = NetworkGlobals::new_test_globals(vec![], &log); + PeerManager::new(config, Arc::new(globals), &log).unwrap() + } + + async fn build_peer_manager_with_trusted_peers( + trusted_peers: Vec, + target_peer_count: usize, + ) -> PeerManager { + let config = config::Config { + target_peer_count, + discovery_enabled: false, + ..Default::default() + }; + let log = build_log(slog::Level::Debug, false); + let globals = NetworkGlobals::new_test_globals(trusted_peers, &log); PeerManager::new(config, Arc::new(globals), &log).unwrap() } #[tokio::test] async fn test_peer_manager_disconnects_correctly_during_heartbeat() { - let mut peer_manager = build_peer_manager(3).await; - - // Create 5 peers to connect to. + // Create 6 peers to connect to with a target of 3. // 2 will be outbound-only, and have the lowest score. + // 1 will be a trusted peer. + // The other 3 will be ingoing peers. + + // We expect this test to disconnect from 3 peers. 1 from the outbound peer (the other must + // remain due to the outbound peer limit) and 2 from the ingoing peers (the trusted peer + // should remain connected). 
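+        // The 3 peers that remain connected should therefore be the trusted peer, one outbound-only peer and one of the other ingoing peers.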
let peer0 = PeerId::random(); let peer1 = PeerId::random(); let peer2 = PeerId::random(); let outbound_only_peer1 = PeerId::random(); let outbound_only_peer2 = PeerId::random(); + let trusted_peer = PeerId::random(); + + let mut peer_manager = build_peer_manager_with_trusted_peers(vec![trusted_peer], 3).await; peer_manager.inject_connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap(), None); peer_manager.inject_connect_ingoing(&peer1, "/ip4/0.0.0.0".parse().unwrap(), None); peer_manager.inject_connect_ingoing(&peer2, "/ip4/0.0.0.0".parse().unwrap(), None); + peer_manager.inject_connect_ingoing(&trusted_peer, "/ip4/0.0.0.0".parse().unwrap(), None); peer_manager.inject_connect_outgoing( &outbound_only_peer1, "/ip4/0.0.0.0".parse().unwrap(), @@ -1378,7 +1406,7 @@ mod tests { .add_to_score(-2.0); // Check initial connected peers. - assert_eq!(peer_manager.network_globals.connected_or_dialing_peers(), 5); + assert_eq!(peer_manager.network_globals.connected_or_dialing_peers(), 6); peer_manager.heartbeat(); @@ -1397,8 +1425,22 @@ mod tests { .read() .is_connected(&outbound_only_peer2)); + // The trusted peer remains connected + assert!(peer_manager + .network_globals + .peers + .read() + .is_connected(&trusted_peer)); + peer_manager.heartbeat(); + // The trusted peer remains connected, even after subsequent heartbeats. + assert!(peer_manager + .network_globals + .peers + .read() + .is_connected(&trusted_peer)); + // Check that if we are at target number of peers, we do not disconnect any. assert_eq!(peer_manager.network_globals.connected_or_dialing_peers(), 3); } @@ -2143,7 +2185,7 @@ mod tests { #[cfg(test)] mod property_based_tests { use crate::peer_manager::config::DEFAULT_TARGET_PEERS; - use crate::peer_manager::tests::build_peer_manager; + use crate::peer_manager::tests::build_peer_manager_with_trusted_peers; use crate::rpc::MetaData; use libp2p::PeerId; use quickcheck::{Arbitrary, Gen, TestResult}; @@ -2154,10 +2196,12 @@ mod tests { #[derive(Clone, Debug)] struct PeerCondition { + peer_id: PeerId, outgoing: bool, attestation_net_bitfield: Vec, sync_committee_net_bitfield: Vec, score: f64, + trusted: bool, gossipsub_score: f64, } @@ -2182,10 +2226,12 @@ mod tests { }; PeerCondition { + peer_id: PeerId::random(), outgoing: bool::arbitrary(g), attestation_net_bitfield, sync_committee_net_bitfield, score: f64::arbitrary(g), + trusted: bool::arbitrary(g), gossipsub_score: f64::arbitrary(g), } } @@ -2197,26 +2243,36 @@ mod tests { if peer_conditions.len() < target_peer_count { return TestResult::discard(); } + let trusted_peers: Vec<_> = peer_conditions + .iter() + .filter_map(|p| if p.trusted { Some(p.peer_id) } else { None }) + .collect(); + // If we have a high percentage of trusted peers, it is very difficult to reason about + // the expected results of the pruning. + if trusted_peers.len() > peer_conditions.len() / 3_usize { + return TestResult::discard(); + } let rt = Runtime::new().unwrap(); rt.block_on(async move { - let mut peer_manager = build_peer_manager(target_peer_count).await; + // Collect all the trusted peers + let mut peer_manager = + build_peer_manager_with_trusted_peers(trusted_peers, target_peer_count).await; // Create peers based on the randomly generated conditions. 
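+                // Each condition carries its own pre-generated peer id and trusted flag, so the assertions below can verify that no trusted peer was pruned.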
for condition in &peer_conditions { - let peer = PeerId::random(); let mut attnets = crate::types::EnrAttestationBitfield::::new(); let mut syncnets = crate::types::EnrSyncCommitteeBitfield::::new(); if condition.outgoing { peer_manager.inject_connect_outgoing( - &peer, + &condition.peer_id, "/ip4/0.0.0.0".parse().unwrap(), None, ); } else { peer_manager.inject_connect_ingoing( - &peer, + &condition.peer_id, "/ip4/0.0.0.0".parse().unwrap(), None, ); @@ -2237,22 +2293,51 @@ mod tests { }; let mut peer_db = peer_manager.network_globals.peers.write(); - let peer_info = peer_db.peer_info_mut(&peer).unwrap(); + let peer_info = peer_db.peer_info_mut(&condition.peer_id).unwrap(); peer_info.set_meta_data(MetaData::V2(metadata)); peer_info.set_gossipsub_score(condition.gossipsub_score); peer_info.add_to_score(condition.score); for subnet in peer_info.long_lived_subnets() { - peer_db.add_subscription(&peer, subnet); + peer_db.add_subscription(&condition.peer_id, subnet); } } // Perform the heartbeat. peer_manager.heartbeat(); - TestResult::from_bool( + // The minimum number of connected peers cannot be less than the target peer count + // or submitted peers. + + let expected_peer_count = target_peer_count.min(peer_conditions.len()); + // Trusted peers could make this larger however. + let no_of_trusted_peers = peer_conditions + .iter() + .filter(|condition| condition.trusted) + .count(); + let expected_peer_count = expected_peer_count.max(no_of_trusted_peers); + + let target_peer_condition = peer_manager.network_globals.connected_or_dialing_peers() - == target_peer_count.min(peer_conditions.len()), + == expected_peer_count; + + // It could be that we reach our target outbound limit and are unable to prune any + // extra, which violates the target_peer_condition. + let outbound_peers = peer_manager.network_globals.connected_outbound_only_peers(); + let hit_outbound_limit = outbound_peers == peer_manager.target_outbound_peers(); + + // No trusted peers should be disconnected + let trusted_peer_disconnected = peer_conditions.iter().any(|condition| { + condition.trusted + && !peer_manager + .network_globals + .peers + .read() + .is_connected(&condition.peer_id) + }); + + TestResult::from_bool( + (target_peer_condition || hit_outbound_limit) && !trusted_peer_disconnected, ) }) } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 208706568..52f0bbd9d 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -1062,7 +1062,7 @@ impl PeerDB { if let Some(to_drop) = self .peers .iter() - .filter(|(_, info)| info.is_disconnected()) + .filter(|(_, info)| info.is_disconnected() && !info.is_trusted()) .filter_map(|(id, info)| match info.connection_status() { PeerConnectionStatus::Disconnected { since } => Some((id, since)), _ => None, diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index 43e8ebd76..295616f36 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -129,7 +129,10 @@ impl NetworkGlobals { } /// TESTING ONLY. Build a dummy NetworkGlobals instance. 
- pub fn new_test_globals(log: &slog::Logger) -> NetworkGlobals { + pub fn new_test_globals( + trusted_peers: Vec, + log: &slog::Logger, + ) -> NetworkGlobals { use crate::CombinedKeyExt; let keypair = libp2p::identity::Keypair::generate_secp256k1(); let enr_key: discv5::enr::CombinedKey = @@ -144,7 +147,7 @@ impl NetworkGlobals { attnets: Default::default(), syncnets: Default::default(), }), - vec![], + trusted_peers, false, log, ) diff --git a/beacon_node/lighthouse_network/src/types/sync_state.rs b/beacon_node/lighthouse_network/src/types/sync_state.rs index 5f09aec27..b82e63bd9 100644 --- a/beacon_node/lighthouse_network/src/types/sync_state.rs +++ b/beacon_node/lighthouse_network/src/types/sync_state.rs @@ -13,7 +13,7 @@ pub enum SyncState { /// The node is undertaking a backfill sync. This occurs when a user has specified a trusted /// state. The node first syncs "forward" by downloading blocks up to the current head as /// specified by its peers. Once completed, the node enters this sync state and attempts to - /// download all required historical blocks to complete its chain. + /// download all required historical blocks. BackFillSyncing { completed: usize, remaining: usize }, /// The node has completed syncing a finalized chain and is in the process of re-evaluating /// which sync state to progress to. diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index 40cc77b8b..fbcd7f604 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -126,36 +126,6 @@ pub fn get_enr(node: &LibP2PService) -> Enr { node.local_enr() } -// Returns `n` libp2p peers in fully connected topology. -#[allow(dead_code)] -/* -pub async fn build_full_mesh( - rt: Weak, - log: slog::Logger, - n: usize, - fork_name: ForkName, -) -> Vec { - let mut nodes = Vec::with_capacity(n); - for _ in 0..n { - nodes.push(build_libp2p_instance(rt.clone(), vec![], log.clone(), fork_name).await); - } - let multiaddrs: Vec = nodes - .iter() - .map(|x| get_enr(x).multiaddr()[1].clone()) - .collect(); - - for (i, node) in nodes.iter_mut().enumerate().take(n) { - for (j, multiaddr) in multiaddrs.iter().enumerate().skip(i) { - if i != j { - match libp2p::Swarm::dial(&mut node.swarm, multiaddr.clone()) { - Ok(()) => debug!(log, "Connected"), - Err(_) => error!(log, "Failed to connect"), - }; - } - } - } - nodes -}*/ // Constructs a pair of nodes with separate loggers. The sender dials the receiver. // This returns a (sender, receiver) pair. #[allow(dead_code)] diff --git a/beacon_node/lighthouse_network/tests/gossipsub_tests.rs b/beacon_node/lighthouse_network/tests/gossipsub_tests.rs deleted file mode 100644 index c5b661cf7..000000000 --- a/beacon_node/lighthouse_network/tests/gossipsub_tests.rs +++ /dev/null @@ -1,171 +0,0 @@ -/* These are temporarily disabled due to their non-deterministic behaviour and impending update to - * gossipsub 1.1. We leave these here as a template for future test upgrades - - -#![cfg(test)] -use crate::types::GossipEncoding; -use ::types::{BeaconBlock, EthSpec, MinimalEthSpec, Signature, SignedBeaconBlock}; -use lighthouse_network::*; -use slog::{debug, Level}; - -type E = MinimalEthSpec; - -mod common; - -/* Gossipsub tests */ -// Note: The aim of these tests is not to test the robustness of the gossip network -// but to check if the gossipsub implementation is behaving according to the specifications. - -// Test if gossipsub message are forwarded by nodes with a simple linear topology. 
-// -// Topology used in test -// -// node1 <-> node2 <-> node3 ..... <-> node(n-1) <-> node(n) - -#[tokio::test] -async fn test_gossipsub_forward() { - // set up the logging. The level and enabled or not - let log = common::build_log(Level::Info, false); - - let num_nodes = 20; - let mut nodes = common::build_linear(log.clone(), num_nodes); - let mut received_count = 0; - let spec = E::default_spec(); - let empty_block = BeaconBlock::empty(&spec); - let signed_block = SignedBeaconBlock { - message: empty_block, - signature: Signature::empty_signature(), - }; - let pubsub_message = PubsubMessage::BeaconBlock(Box::new(signed_block)); - let publishing_topic: String = pubsub_message - .topics(GossipEncoding::default(), [0, 0, 0, 0]) - .first() - .unwrap() - .clone() - .into(); - let mut subscribed_count = 0; - let fut = async move { - for node in nodes.iter_mut() { - loop { - match node.next_event().await { - Libp2pEvent::Behaviour(b) => match b { - BehaviourEvent::PubsubMessage { - topics, - message, - source, - id, - } => { - assert_eq!(topics.len(), 1); - // Assert topic is the published topic - assert_eq!( - topics.first().unwrap(), - &TopicHash::from_raw(publishing_topic.clone()) - ); - // Assert message received is the correct one - assert_eq!(message, pubsub_message.clone()); - received_count += 1; - // Since `propagate_message` is false, need to propagate manually - node.swarm.propagate_message(&source, id); - // Test should succeed if all nodes except the publisher receive the message - if received_count == num_nodes - 1 { - debug!(log.clone(), "Received message at {} nodes", num_nodes - 1); - return; - } - } - BehaviourEvent::PeerSubscribed(_, topic) => { - // Publish on beacon block topic - if topic == TopicHash::from_raw(publishing_topic.clone()) { - subscribed_count += 1; - // Every node except the corner nodes are connected to 2 nodes. - if subscribed_count == (num_nodes * 2) - 2 { - node.swarm.publish(vec![pubsub_message.clone()]); - } - } - } - _ => break, - }, - _ => break, - } - } - } - }; - - tokio::select! { - _ = fut => {} - _ = tokio::time::delay_for(tokio::time::Duration::from_millis(800)) => { - panic!("Future timed out"); - } - } -} - -// Test publishing of a message with a full mesh for the topic -// Not very useful but this is the bare minimum functionality. -#[tokio::test] -async fn test_gossipsub_full_mesh_publish() { - // set up the logging. The level and enabled or not - let log = common::build_log(Level::Debug, false); - - // Note: This test does not propagate gossipsub messages. - // Having `num_nodes` > `mesh_n_high` may give inconsistent results - // as nodes may get pruned out of the mesh before the gossipsub message - // is published to them. - let num_nodes = 12; - let mut nodes = common::build_full_mesh(log, num_nodes); - let mut publishing_node = nodes.pop().unwrap(); - let spec = E::default_spec(); - let empty_block = BeaconBlock::empty(&spec); - let signed_block = SignedBeaconBlock { - message: empty_block, - signature: Signature::empty_signature(), - }; - let pubsub_message = PubsubMessage::BeaconBlock(Box::new(signed_block)); - let publishing_topic: String = pubsub_message - .topics(GossipEncoding::default(), [0, 0, 0, 0]) - .first() - .unwrap() - .clone() - .into(); - let mut subscribed_count = 0; - let mut received_count = 0; - let fut = async move { - for node in nodes.iter_mut() { - while let Libp2pEvent::Behaviour(BehaviourEvent::PubsubMessage { - topics, - message, - .. 
- }) = node.next_event().await - { - assert_eq!(topics.len(), 1); - // Assert topic is the published topic - assert_eq!( - topics.first().unwrap(), - &TopicHash::from_raw(publishing_topic.clone()) - ); - // Assert message received is the correct one - assert_eq!(message, pubsub_message.clone()); - received_count += 1; - if received_count == num_nodes - 1 { - return; - } - } - } - while let Libp2pEvent::Behaviour(BehaviourEvent::PeerSubscribed(_, topic)) = - publishing_node.next_event().await - { - // Publish on beacon block topic - if topic == TopicHash::from_raw(publishing_topic.clone()) { - subscribed_count += 1; - if subscribed_count == num_nodes - 1 { - publishing_node.swarm.publish(vec![pubsub_message.clone()]); - } - } - } - }; - tokio::select! { - _ = fut => {} - _ = tokio::time::delay_for(tokio::time::Duration::from_millis(800)) => { - panic!("Future timed out"); - } - } -} -*/ diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index d068a2007..a234165d1 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -21,8 +21,8 @@ types = { path = "../../consensus/types" } slot_clock = { path = "../../common/slot_clock" } slog = { version = "2.5.2", features = ["max_level_trace"] } hex = "0.4.2" -eth2_ssz = "0.4.1" -eth2_ssz_types = "0.2.2" +ethereum_ssz = "0.5.0" +ssz_types = "0.5.0" futures = "0.3.7" error-chain = "0.12.4" tokio = { version = "1.14.0", features = ["full"] } @@ -35,7 +35,7 @@ lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } logging = { path = "../../common/logging" } task_executor = { path = "../../common/task_executor" } -igd = "0.11.1" +igd = "0.12.1" itertools = "0.10.0" num_cpus = "1.13.0" lru_cache = { path = "../../common/lru_cache" } diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index edacb0d80..1f66dc7ad 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -56,6 +56,7 @@ use logging::TimeLatch; use slog::{crit, debug, error, trace, warn, Logger}; use std::collections::VecDeque; use std::future::Future; +use std::path::PathBuf; use std::pin::Pin; use std::sync::{Arc, Weak}; use std::task::Context; @@ -1069,6 +1070,13 @@ impl Stream for InboundEvents { } } +/// Defines if and where we will store the SSZ files of invalid blocks. +#[derive(Clone)] +pub enum InvalidBlockStorage { + Enabled(PathBuf), + Disabled, +} + /// A mutli-threaded processor for messages received on the network /// that need to be processed by the `BeaconChain` /// @@ -1082,6 +1090,7 @@ pub struct BeaconProcessor { pub max_workers: usize, pub current_workers: usize, pub importing_blocks: DuplicateCache, + pub invalid_block_storage: InvalidBlockStorage, pub log: Logger, } @@ -1783,19 +1792,23 @@ impl BeaconProcessor { peer_client, block, seen_timestamp, - } => task_spawner.spawn_async(async move { - worker - .process_gossip_block( - message_id, - peer_id, - peer_client, - block.into(), - work_reprocessing_tx, - duplicate_cache, - seen_timestamp, - ) - .await - }), + } => { + let invalid_block_storage = self.invalid_block_storage.clone(); + task_spawner.spawn_async(async move { + worker + .process_gossip_block( + message_id, + peer_id, + peer_client, + block.into(), + work_reprocessing_tx, + duplicate_cache, + invalid_block_storage, + seen_timestamp, + ) + .await + }) + } /* * Verification for blobs sidecars received on gossip. 
*/ @@ -1825,12 +1838,16 @@ impl BeaconProcessor { peer_id, block, seen_timestamp, - } => task_spawner.spawn_async(worker.process_gossip_verified_block( - peer_id, - *block, - work_reprocessing_tx, - seen_timestamp, - )), + } => { + let invalid_block_storage = self.invalid_block_storage.clone(); + task_spawner.spawn_async(worker.process_gossip_verified_block( + peer_id, + *block, + work_reprocessing_tx, + invalid_block_storage, + seen_timestamp, + )) + } /* * Voluntary exits received on gossip. */ diff --git a/beacon_node/network/src/beacon_processor/tests.rs b/beacon_node/network/src/beacon_processor/tests.rs index 0f434fdc3..fe2eaae57 100644 --- a/beacon_node/network/src/beacon_processor/tests.rs +++ b/beacon_node/network/src/beacon_processor/tests.rs @@ -203,6 +203,7 @@ impl TestRig { max_workers: cmp::max(1, num_cpus::get()), current_workers: 0, importing_blocks: duplicate_cache.clone(), + invalid_block_storage: InvalidBlockStorage::Disabled, log: log.clone(), } .spawn_manager(beacon_processor_rx, Some(work_journal_tx)); diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index c3298d870..6d8cba105 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -14,17 +14,20 @@ use beacon_chain::{ }; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use operation_pool::ReceivedPreCapella; -use slog::{crit, debug, error, info, trace, warn}; +use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; +use std::fs; +use std::io::Write; +use std::path::PathBuf; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; use types::{ Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate, - LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBlobSidecar, - SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, - SyncCommitteeMessage, SyncSubnetId, + LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, + SignedBlobSidecar, SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, + Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, }; use super::{ @@ -34,7 +37,7 @@ use super::{ }, Worker, }; -use crate::beacon_processor::DuplicateCache; +use crate::beacon_processor::{DuplicateCache, InvalidBlockStorage}; /// Set to `true` to introduce stricter penalties for peers who send some types of late consensus /// messages. @@ -796,6 +799,7 @@ impl Worker { block: BlockWrapper, reprocess_tx: mpsc::Sender>, duplicate_cache: DuplicateCache, + invalid_block_storage: InvalidBlockStorage, seen_duration: Duration, ) { if let Some(gossip_verified_block) = self @@ -816,6 +820,7 @@ impl Worker { peer_id, gossip_verified_block, reprocess_tx, + invalid_block_storage, seen_duration, ) .await; @@ -1082,13 +1087,14 @@ impl Worker { peer_id: PeerId, verified_block: GossipVerifiedBlock, reprocess_tx: mpsc::Sender>, + invalid_block_storage: InvalidBlockStorage, // This value is not used presently, but it might come in handy for debugging. 
_seen_duration: Duration, ) { let block = verified_block.block.block_cloned(); let block_root = verified_block.block_root; - match self + let result = self .chain .process_block( block_root, @@ -1096,14 +1102,15 @@ impl Worker { CountUnrealized::True, NotifyExecutionLayer::Yes, ) - .await - { + .await; + + match &result { Ok(AvailabilityProcessingStatus::Imported(block_root)) => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); if reprocess_tx .try_send(ReprocessQueueMessage::BlockImported { - block_root, + block_root: *block_root, parent_root: block.message().parent_root(), }) .is_err() @@ -1137,7 +1144,7 @@ impl Worker { // make rpc request for blob self.send_sync_message(SyncMessage::UnknownBlobHash { peer_id, - pending_blobs, + pending_blobs: pending_blobs.to_vec(), }); } Err(BlockError::AvailabilityCheck(_)) => { @@ -1151,7 +1158,11 @@ impl Worker { "Block with unknown parent attempted to be processed"; "peer_id" => %peer_id ); - self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block, block_root)); + self.send_sync_message(SyncMessage::UnknownBlock( + peer_id, + block.clone(), + block_root, + )); } Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => { debug!( @@ -1180,6 +1191,16 @@ impl Worker { ); } }; + + if let Err(e) = &result { + self.maybe_store_invalid_block( + &invalid_block_storage, + block_root, + &block, + e, + &self.log, + ); + } } pub fn process_gossip_voluntary_exit( @@ -2487,6 +2508,25 @@ impl Worker { "peer_id" => %peer_id, "type" => ?message_type, ); + + // Do not penalize the peer. + + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore); + + return; + } + SyncCommitteeError::PriorSyncContributionMessageKnown { .. } => { + /* + * We have already seen a sync contribution message from this validator for this epoch. + * + * The peer is not necessarily faulty. + */ + debug!( + self.log, + "Prior sync contribution message known"; + "peer_id" => %peer_id, + "type" => ?message_type, + ); // We still penalize the peer slightly. We don't want this to be a recurring // behaviour. self.gossip_penalize_peer( @@ -2651,4 +2691,62 @@ impl Worker { self.propagate_if_timely(is_timely, message_id, peer_id) } + + /// Stores a block as a SSZ file, if and where `invalid_block_storage` dictates. + fn maybe_store_invalid_block( + &self, + invalid_block_storage: &InvalidBlockStorage, + block_root: Hash256, + block: &SignedBeaconBlock, + error: &BlockError, + log: &Logger, + ) { + if let InvalidBlockStorage::Enabled(base_dir) = invalid_block_storage { + let block_path = base_dir.join(format!("{}_{:?}.ssz", block.slot(), block_root)); + let error_path = base_dir.join(format!("{}_{:?}.error", block.slot(), block_root)); + + let write_file = |path: PathBuf, bytes: &[u8]| { + // No need to write the same file twice. For the error file, + // this means that we'll remember the first error message but + // forget the rest. + if path.exists() { + return; + } + + // Write to the file. + let write_result = fs::OpenOptions::new() + // Only succeed if the file doesn't already exist. We should + // have checked for this earlier. 
+ .create_new(true) + .write(true) + .open(&path) + .map_err(|e| format!("Failed to open file: {:?}", e)) + .map(|mut file| { + file.write_all(bytes) + .map_err(|e| format!("Failed to write file: {:?}", e)) + }); + if let Err(e) = write_result { + error!( + log, + "Failed to store invalid block/error"; + "error" => e, + "path" => ?path, + "root" => ?block_root, + "slot" => block.slot(), + ) + } else { + info!( + log, + "Stored invalid block/error "; + "path" => ?path, + "root" => ?block_root, + "slot" => block.slot(), + ) + } + }; + + write_file(block_path, &block.as_ssz_bytes()); + write_file(error_path, error.to_string().as_bytes()); + } + } } diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index a497a0c04..e8dcf747b 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -116,10 +116,26 @@ impl Worker { } }; + // Returns `true` if the block is already known to fork choice. Notably, + // this will return `false` for blocks that we've already imported but + // ancestors of the finalized checkpoint. That should not be an issue + // for our use here since finalized blocks will always be late and won't + // be requeued anyway. + let block_is_already_known = || { + self.chain + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + }; + // If we've already seen a block from this proposer *and* the block // arrived before the attestation deadline, requeue it to ensure it is // imported late enough that it won't receive a proposer boost. - if !block_is_late && proposal_already_known() { + // + // Don't requeue blocks if they're already known to fork choice, just + // push them through to block processing so they can be handled through + // the normal channels. 
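A minimal usage sketch of the invalid-block storage wired up above, assuming the `--invalid-gossip-verified-blocks-path` flag introduced later in this diff and a directory that already exists (the writer above only creates the files, not the directory):

```bash
# Keep SSZ dumps of blocks that pass gossip validation but fail full
# validation. The directory is never pruned, so watch disk usage.
mkdir -p /var/lib/lighthouse/invalid-blocks

lighthouse bn \
  --network mainnet \
  --invalid-gossip-verified-blocks-path /var/lib/lighthouse/invalid-blocks

# Each offending block yields two files named after its slot and root:
#   <slot>_<block_root>.ssz    - the SSZ-encoded block
#   <slot>_<block_root>.error  - the first error message recorded for it
ls /var/lib/lighthouse/invalid-blocks
```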
+ if !block_is_late && proposal_already_known() && !block_is_already_known() { debug!( self.log, "Delaying processing of duplicate RPC block"; diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index fed799988..087087777 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -6,7 +6,7 @@ #![allow(clippy::unit_arg)] use crate::beacon_processor::{ - BeaconProcessor, WorkEvent as BeaconWorkEvent, MAX_WORK_EVENT_QUEUE_LEN, + BeaconProcessor, InvalidBlockStorage, WorkEvent as BeaconWorkEvent, MAX_WORK_EVENT_QUEUE_LEN, }; use crate::error; use crate::service::{NetworkMessage, RequestId}; @@ -80,6 +80,7 @@ impl Router { network_globals: Arc>, network_send: mpsc::UnboundedSender>, executor: task_executor::TaskExecutor, + invalid_block_storage: InvalidBlockStorage, log: slog::Logger, ) -> error::Result>> { let message_handler_log = log.new(o!("service"=> "router")); @@ -111,6 +112,7 @@ impl Router { max_workers: cmp::max(1, num_cpus::get()), current_workers: 0, importing_blocks: Default::default(), + invalid_block_storage, log: log.clone(), } .spawn_manager(beacon_processor_receive, None); diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 265a41189..bb2af785b 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -1,4 +1,5 @@ use super::sync::manager::RequestId as SyncId; +use crate::beacon_processor::InvalidBlockStorage; use crate::persisted_dht::{clear_dht, load_dht, persist_dht}; use crate::router::{Router, RouterMessage}; use crate::subnet_service::SyncCommitteeService; @@ -13,6 +14,7 @@ use futures::future::OptionFuture; use futures::prelude::*; use futures::StreamExt; use lighthouse_network::service::Network; +use lighthouse_network::types::GossipKind; use lighthouse_network::{prometheus_client::registry::Registry, MessageAcceptance}; use lighthouse_network::{ rpc::{GoodbyeReason, RPCResponseErrorCode}, @@ -23,7 +25,7 @@ use lighthouse_network::{ MessageId, NetworkEvent, NetworkGlobals, PeerId, }; use slog::{crit, debug, error, info, o, trace, warn}; -use std::{net::SocketAddr, pin::Pin, sync::Arc, time::Duration}; +use std::{collections::HashSet, net::SocketAddr, pin::Pin, sync::Arc, time::Duration}; use store::HotColdDB; use strum::IntoStaticStr; use task_executor::ShutdownReason; @@ -294,6 +296,12 @@ impl NetworkService { } } + let invalid_block_storage = config + .invalid_block_storage + .clone() + .map(InvalidBlockStorage::Enabled) + .unwrap_or(InvalidBlockStorage::Disabled); + // launch derived network services // router task @@ -302,6 +310,7 @@ impl NetworkService { network_globals.clone(), network_senders.network_send(), executor.clone(), + invalid_block_storage, network_log.clone(), )?; @@ -672,6 +681,10 @@ impl NetworkService { source, } => self.libp2p.goodbye_peer(&peer_id, reason, source), NetworkMessage::SubscribeCoreTopics => { + if self.subscribed_core_topics() { + return; + } + if self.shutdown_after_sync { if let Err(e) = shutdown_sender .send(ShutdownReason::Success( @@ -912,6 +925,16 @@ impl NetworkService { crit!(self.log, "Unknown new enr fork id"; "new_fork_id" => ?new_enr_fork_id); } } + + fn subscribed_core_topics(&self) -> bool { + let core_topics = core_topics_to_subscribe::(self.fork_context.current_fork()); + let core_topics: HashSet<&GossipKind> = HashSet::from_iter(&core_topics); + let subscriptions = self.network_globals.gossipsub_subscriptions.read(); + let subscribed_topics: HashSet<&GossipKind> = + 
subscriptions.iter().map(|topic| topic.kind()).collect(); + + core_topics.is_subset(&subscribed_topics) + } } /// Returns a `Sleep` that triggers after the next change in the beacon chain fork version. diff --git a/beacon_node/network/src/subnet_service/attestation_subnets.rs b/beacon_node/network/src/subnet_service/attestation_subnets.rs index 70ba1c817..e46a52cfb 100644 --- a/beacon_node/network/src/subnet_service/attestation_subnets.rs +++ b/beacon_node/network/src/subnet_service/attestation_subnets.rs @@ -112,6 +112,9 @@ pub struct AttestationService { #[cfg(feature = "deterministic_long_lived_attnets")] next_long_lived_subscription_event: Pin>, + /// Whether this node is a block proposer-only node. + proposer_only: bool, + /// The logger for the attestation service. log: slog::Logger, } @@ -155,6 +158,7 @@ impl AttestationService { known_validators: HashSetDelay::new(last_seen_val_timeout), waker: None, discovery_disabled: config.disable_discovery, + proposer_only: config.proposer_only, subscribe_all_subnets: config.subscribe_all_subnets, long_lived_subnet_subscription_slots, log, @@ -256,6 +260,11 @@ impl AttestationService { &mut self, subscriptions: Vec, ) -> Result<(), String> { + // If the node is in a proposer-only state, we ignore all subnet subscriptions. + if self.proposer_only { + return Ok(()); + } + // Maps each subnet_id subscription to it's highest slot let mut subnets_to_discover: HashMap = HashMap::new(); for subscription in subscriptions { @@ -450,6 +459,10 @@ impl AttestationService { subnet: SubnetId, attestation: &Attestation, ) -> bool { + // Proposer-only mode does not need to process attestations + if self.proposer_only { + return false; + } self.aggregate_validators_on_subnet .as_ref() .map(|tracked_vals| { diff --git a/beacon_node/network/src/subnet_service/sync_subnets.rs b/beacon_node/network/src/subnet_service/sync_subnets.rs index 0b27ff527..eda7ce8ef 100644 --- a/beacon_node/network/src/subnet_service/sync_subnets.rs +++ b/beacon_node/network/src/subnet_service/sync_subnets.rs @@ -54,6 +54,9 @@ pub struct SyncCommitteeService { /// We are always subscribed to all subnets. subscribe_all_subnets: bool, + /// Whether this node is a block proposer-only node. + proposer_only: bool, + /// The logger for the attestation service. log: slog::Logger, } @@ -82,6 +85,7 @@ impl SyncCommitteeService { waker: None, subscribe_all_subnets: config.subscribe_all_subnets, discovery_disabled: config.disable_discovery, + proposer_only: config.proposer_only, log, } } @@ -110,6 +114,11 @@ impl SyncCommitteeService { &mut self, subscriptions: Vec, ) -> Result<(), String> { + // A proposer-only node does not subscribe to any sync-committees + if self.proposer_only { + return Ok(()); + } + let mut subnets_to_discover = Vec::new(); for subscription in subscriptions { metrics::inc_counter(&metrics::SYNC_COMMITTEE_SUBSCRIPTION_REQUESTS); diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index c2dc31cc6..4efe4fb9e 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -160,20 +160,20 @@ impl BackFillSync { // If, for some reason a backfill has already been completed (or we've used a trusted // genesis root) then backfill has been completed. 
- let (state, current_start) = if let Some(anchor_info) = beacon_chain.store.get_anchor_info() - { - if anchor_info.block_backfill_complete() { - (BackFillState::Completed, Epoch::new(0)) - } else { - ( - BackFillState::Paused, - anchor_info - .oldest_block_slot - .epoch(T::EthSpec::slots_per_epoch()), - ) + let (state, current_start) = match beacon_chain.store.get_anchor_info() { + Some(anchor_info) => { + if anchor_info.block_backfill_complete(beacon_chain.genesis_backfill_slot) { + (BackFillState::Completed, Epoch::new(0)) + } else { + ( + BackFillState::Paused, + anchor_info + .oldest_block_slot + .epoch(T::EthSpec::slots_per_epoch()), + ) + } } - } else { - (BackFillState::NotRequired, Epoch::new(0)) + None => (BackFillState::NotRequired, Epoch::new(0)), }; let bfs = BackFillSync { @@ -288,6 +288,7 @@ impl BackFillSync { remaining: self .current_start .start_slot(T::EthSpec::slots_per_epoch()) + .saturating_sub(self.beacon_chain.genesis_backfill_slot) .as_usize(), }) } @@ -1096,7 +1097,12 @@ impl BackFillSync { match self.batches.entry(batch_id) { Entry::Occupied(_) => { // this batch doesn't need downloading, let this same function decide the next batch - if batch_id == 0 { + if batch_id + == self + .beacon_chain + .genesis_backfill_slot + .epoch(T::EthSpec::slots_per_epoch()) + { self.last_batch_downloaded = true; } @@ -1112,7 +1118,12 @@ impl BackFillSync { BACKFILL_EPOCHS_PER_BATCH, batch_type, )); - if batch_id == 0 { + if batch_id + == self + .beacon_chain + .genesis_backfill_slot + .epoch(T::EthSpec::slots_per_epoch()) + { self.last_batch_downloaded = true; } self.to_be_downloaded = self @@ -1129,7 +1140,7 @@ impl BackFillSync { /// not required. fn reset_start_epoch(&mut self) -> Result<(), ResetEpochError> { if let Some(anchor_info) = self.beacon_chain.store.get_anchor_info() { - if anchor_info.block_backfill_complete() { + if anchor_info.block_backfill_complete(self.beacon_chain.genesis_backfill_slot) { Err(ResetEpochError::SyncCompleted) } else { self.current_start = anchor_info @@ -1144,12 +1155,17 @@ impl BackFillSync { /// Checks with the beacon chain if backfill sync has completed. 
fn check_completed(&mut self) -> bool { - if self.current_start == 0 { + if self.current_start + == self + .beacon_chain + .genesis_backfill_slot + .epoch(T::EthSpec::slots_per_epoch()) + { // Check that the beacon chain agrees if let Some(anchor_info) = self.beacon_chain.store.get_anchor_info() { // Conditions that we have completed a backfill sync - if anchor_info.block_backfill_complete() { + if anchor_info.block_backfill_complete(self.beacon_chain.genesis_backfill_slot) { return true; } else { error!(self.log, "Backfill out of sync with beacon chain"); diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index d7eca40fc..e491d5c84 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -56,7 +56,7 @@ impl TestRig { }; let bl = BlockLookups::new(log.new(slog::o!("component" => "block_lookups"))); let cx = { - let globals = Arc::new(NetworkGlobals::new_test_globals(&log)); + let globals = Arc::new(NetworkGlobals::new_test_globals(Vec::new(), &log)); SyncNetworkContext::new( network_tx, globals, diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index b53fa872d..9afbed96a 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -599,7 +599,7 @@ mod tests { log.new(o!("component" => "range")), ); let (network_tx, network_rx) = mpsc::unbounded_channel(); - let globals = Arc::new(NetworkGlobals::new_test_globals(&log)); + let globals = Arc::new(NetworkGlobals::new_test_globals(Vec::new(), &log)); let cx = SyncNetworkContext::new( network_tx, globals.clone(), diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index cc4eacde8..fdbecb656 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -12,8 +12,8 @@ lighthouse_metrics = { path = "../../common/lighthouse_metrics" } parking_lot = "0.12.0" types = { path = "../../consensus/types" } state_processing = { path = "../../consensus/state_processing" } -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" rayon = "1.5.0" serde = "1.0.116" serde_derive = "1.0.116" diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 31a3cab36..d734b58d2 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -130,7 +130,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { Arg::with_name("target-peers") .long("target-peers") .help("The target number of peers.") - .default_value("80") .takes_value(true), ) .arg( @@ -255,6 +254,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("One or more comma-delimited trusted peer ids which always have the highest score according to the peer scoring system.") .takes_value(true), ) + .arg( + Arg::with_name("genesis-backfill") + .long("genesis-backfill") + .help("Attempts to download blocks all the way back to genesis when checkpoint syncing.") + .takes_value(false), + ) .arg( Arg::with_name("enable-private-discovery") .long("enable-private-discovery") @@ -276,6 +281,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .min_values(0) .hidden(true) ) + .arg( + Arg::with_name("proposer-only") + .long("proposer-only") + .help("Sets this beacon node at be a block proposer only node. \ + This will run the beacon node in a minimal configuration that is sufficient for block publishing only. 
This flag should be used \ + for a beacon node being referenced by validator client using the --proposer-node flag. This configuration is for enabling more secure setups.") + .takes_value(false), + ) + .arg( Arg::with_name("disable-backfill-rate-limiting") .long("disable-backfill-rate-limiting") @@ -518,6 +532,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("Specifies how many blocks the database should cache in memory [default: 5]") .takes_value(true) ) + .arg( + Arg::with_name("historic-state-cache-size") + .long("historic-state-cache-size") + .value_name("SIZE") + .help("Specifies how many states from the freezer database should cache in memory [default: 1]") + .takes_value(true) + ) /* * Execution Layer Integration */ @@ -858,7 +879,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("reconstruct-historic-states") .long("reconstruct-historic-states") - .help("After a checkpoint sync, reconstruct historic states in the database.") + .help("After a checkpoint sync, reconstruct historic states in the database. This requires syncing all the way back to genesis.") .takes_value(false) ) .arg( @@ -1102,7 +1123,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .long("gui") .hidden(true) .help("Enable the graphical user interface and all its requirements. \ - This is equivalent to --http and --validator-monitor-auto.") + This enables --http and --validator-monitor-auto and enables SSE logging.") .takes_value(false) ) .arg( @@ -1114,4 +1135,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { // always using the builder. .conflicts_with("builder-profit-threshold") ) + .arg( + Arg::with_name("invalid-gossip-verified-blocks-path") + .long("invalid-gossip-verified-blocks-path") + .value_name("PATH") + .help("If a block succeeds gossip validation whilst failing full validation, store \ + the block SSZ as a file at this path. This feature is only recommended for \ + developers. 
This directory is not pruned, users should be careful to avoid \ + filling up their disks.") + ) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 6ec109945..bcdf28131 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -404,6 +404,12 @@ pub fn get_config( .map_err(|_| "block-cache-size is not a valid integer".to_string())?; } + if let Some(historic_state_cache_size) = cli_args.value_of("historic-state-cache-size") { + client_config.store.historic_state_cache_size = historic_state_cache_size + .parse() + .map_err(|_| "historic-state-cache-size is not a valid integer".to_string())?; + } + client_config.store.compact_on_init = cli_args.is_present("compact-db"); if let Some(compact_on_prune) = cli_args.value_of("auto-compact-db") { client_config.store.compact_on_prune = compact_on_prune @@ -539,6 +545,7 @@ pub fn get_config( if cli_args.is_present("reconstruct-historic-states") { client_config.chain.reconstruct_historic_states = true; + client_config.chain.genesis_backfill = true; } let raw_graffiti = if let Some(graffiti) = cli_args.value_of("graffiti") { @@ -811,6 +818,9 @@ pub fn get_config( client_config.chain.optimistic_finalized_sync = !cli_args.is_present("disable-optimistic-finalized-sync"); + if cli_args.is_present("genesis-backfill") { + client_config.chain.genesis_backfill = true; + } // Payload selection configs if cli_args.is_present("always-prefer-builder-payload") { client_config.always_prefer_builder_payload = true; @@ -820,6 +830,11 @@ pub fn get_config( client_config.chain.enable_backfill_rate_limiting = !cli_args.is_present("disable-backfill-rate-limiting"); + if let Some(path) = clap_utils::parse_optional(cli_args, "invalid-gossip-verified-blocks-path")? + { + client_config.network.invalid_block_storage = Some(path); + } + Ok(client_config) } @@ -1016,10 +1031,13 @@ pub fn set_network_config( config.set_listening_addr(parse_listening_addresses(cli_args, log)?); + // A custom target-peers command will overwrite the --proposer-only default. if let Some(target_peers_str) = cli_args.value_of("target-peers") { config.target_peers = target_peers_str .parse::() .map_err(|_| format!("Invalid number of target peers: {}", target_peers_str))?; + } else { + config.target_peers = 80; // default value } if let Some(value) = cli_args.value_of("network-load") { @@ -1078,6 +1096,9 @@ pub fn set_network_config( .map_err(|_| format!("Invalid trusted peer id: {}", peer_id)) }) .collect::, _>>()?; + if config.trusted_peers.len() >= config.target_peers { + slog::warn!(log, "More trusted peers than the target peer limit. This will prevent efficient peer selection criteria."; "target_peers" => config.target_peers, "trusted_peers" => config.trusted_peers.len()); + } } if let Some(enr_udp_port_str) = cli_args.value_of("enr-udp-port") { @@ -1255,6 +1276,20 @@ pub fn set_network_config( config.outbound_rate_limiter_config = Some(Default::default()); } + // Proposer-only mode overrides a number of previous configuration parameters. + // Specifically, we avoid subscribing to long-lived subnets and wish to maintain a minimal set + // of peers. 
+ if cli_args.is_present("proposer-only") { + config.subscribe_all_subnets = false; + + if cli_args.value_of("target-peers").is_none() { + // If a custom value is not set, change the default to 15 + config.target_peers = 15; + } + config.proposer_only = true; + warn!(log, "Proposer-only mode enabled"; "info"=> "Do not connect a validator client to this node unless via the --proposer-nodes flag"); + } + Ok(()) } diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 7ec2af9f9..a1c65bd26 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -13,8 +13,8 @@ db-key = "0.0.5" leveldb = { version = "0.8.6", default-features = false } parking_lot = "0.12.0" itertools = "0.10.0" -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" types = { path = "../../consensus/types" } state_processing = { path = "../../consensus/state_processing" } slog = "2.5.2" diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index ec5ee382b..460084653 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -7,6 +7,7 @@ use types::{EthSpec, MinimalEthSpec}; pub const PREV_DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048; pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 8192; pub const DEFAULT_BLOCK_CACHE_SIZE: usize = 5; +pub const DEFAULT_HISTORIC_STATE_CACHE_SIZE: usize = 1; pub const DEFAULT_BLOB_CACHE_SIZE: usize = 5; pub const DEFAULT_EPOCHS_PER_BLOB_PRUNE: u64 = 1; pub const DEFAULT_BLOB_PUNE_MARGIN_EPOCHS: u64 = 0; @@ -20,6 +21,8 @@ pub struct StoreConfig { pub slots_per_restore_point_set_explicitly: bool, /// Maximum number of blocks to store in the in-memory block cache. pub block_cache_size: usize, + /// Maximum number of states from freezer database to store in the in-memory state cache. + pub historic_state_cache_size: usize, /// Maximum number of blobs to store in the in-memory blob cache. pub blob_cache_size: usize, /// Whether to compact the database on initialization. @@ -55,6 +58,7 @@ impl Default for StoreConfig { slots_per_restore_point: MinimalEthSpec::slots_per_historical_root() as u64, slots_per_restore_point_set_explicitly: false, block_cache_size: DEFAULT_BLOCK_CACHE_SIZE, + historic_state_cache_size: DEFAULT_HISTORIC_STATE_CACHE_SIZE, blob_cache_size: DEFAULT_BLOB_CACHE_SIZE, compact_on_init: false, compact_on_prune: true, diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index a4cf263f1..dde8af8a6 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -30,7 +30,7 @@ use slog::{debug, error, info, trace, warn, Logger}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use state_processing::{ - BlockProcessingError, BlockReplayer, SlotProcessingError, StateRootStrategy, + BlockProcessingError, BlockReplayer, SlotProcessingError, StateProcessingStrategy, }; use std::cmp::min; use std::convert::TryInto; @@ -70,6 +70,8 @@ pub struct HotColdDB, Cold: ItemStore> { blob_cache: Mutex>>, /// LRU cache of deserialized blocks. Updated whenever a block is loaded. block_cache: Mutex>>, + /// LRU cache of replayed states. + state_cache: Mutex>>, /// Chain spec. pub(crate) spec: ChainSpec, /// Logger. 
@@ -143,6 +145,7 @@ impl HotColdDB, MemoryStore> { blobs_db: Some(MemoryStore::open()), hot_db: MemoryStore::open(), block_cache: Mutex::new(LruCache::new(config.block_cache_size)), + state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)), blob_cache: Mutex::new(LruCache::new(config.blob_cache_size)), config, spec, @@ -180,6 +183,7 @@ impl HotColdDB, LevelDB> { blobs_db: None, hot_db: LevelDB::open(hot_path)?, block_cache: Mutex::new(LruCache::new(config.block_cache_size)), + state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)), blob_cache: Mutex::new(LruCache::new(config.blob_cache_size)), config, spec, @@ -632,10 +636,10 @@ impl, Cold: ItemStore> HotColdDB // chain. This way we avoid returning a state that doesn't match `state_root`. self.load_cold_state(state_root) } else { - self.load_hot_state(state_root, StateRootStrategy::Accurate) + self.load_hot_state(state_root, StateProcessingStrategy::Accurate) } } else { - match self.load_hot_state(state_root, StateRootStrategy::Accurate)? { + match self.load_hot_state(state_root, StateProcessingStrategy::Accurate)? { Some(state) => Ok(Some(state)), None => self.load_cold_state(state_root), } @@ -673,7 +677,7 @@ impl, Cold: ItemStore> HotColdDB } .into()) } else { - self.load_hot_state(state_root, StateRootStrategy::Inconsistent) + self.load_hot_state(state_root, StateProcessingStrategy::Inconsistent) } } @@ -763,10 +767,13 @@ impl, Cold: ItemStore> HotColdDB { // NOTE: minor inefficiency here because we load an unnecessary hot state summary // - // `StateRootStrategy` should be irrelevant here since we never replay blocks for an epoch + // `StateProcessingStrategy` should be irrelevant here since we never replay blocks for an epoch // boundary state in the hot DB. let state = self - .load_hot_state(&epoch_boundary_state_root, StateRootStrategy::Accurate)? + .load_hot_state( + &epoch_boundary_state_root, + StateProcessingStrategy::Accurate, + )? .ok_or(HotColdDBError::MissingEpochBoundaryState( epoch_boundary_state_root, ))?; @@ -1026,7 +1033,7 @@ impl, Cold: ItemStore> HotColdDB pub fn load_hot_state( &self, state_root: &Hash256, - state_root_strategy: StateRootStrategy, + state_processing_strategy: StateProcessingStrategy, ) -> Result>, Error> { metrics::inc_counter(&metrics::BEACON_STATE_HOT_GET_COUNT); @@ -1059,7 +1066,7 @@ impl, Cold: ItemStore> HotColdDB blocks, slot, no_state_root_iter(), - state_root_strategy, + state_processing_strategy, )? }; @@ -1173,40 +1180,70 @@ impl, Cold: ItemStore> HotColdDB /// Load a frozen state that lies between restore points. fn load_cold_intermediate_state(&self, slot: Slot) -> Result, Error> { + if let Some(state) = self.state_cache.lock().get(&slot) { + return Ok(state.clone()); + } + // 1. Load the restore points either side of the intermediate state. let low_restore_point_idx = slot.as_u64() / self.config.slots_per_restore_point; let high_restore_point_idx = low_restore_point_idx + 1; + // Use low restore point as the base state. + let mut low_slot: Slot = + Slot::new(low_restore_point_idx * self.config.slots_per_restore_point); + let mut low_state: Option> = None; + + // Try to get a more recent state from the cache to avoid massive blocks replay. + for (s, state) in self.state_cache.lock().iter() { + if s.as_u64() / self.config.slots_per_restore_point == low_restore_point_idx + && *s < slot + && low_slot < *s + { + low_slot = *s; + low_state = Some(state.clone()); + } + } + + // If low_state is still None, use load_restore_point_by_index to load the state. 
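To illustrate the interaction being introduced here, a sketch of the two knobs involved (values are illustrative only): the freezer keeps one full state per `--slots-per-restore-point` interval, and the new cache keeps recently replayed states so that ascending-slot requests within the same interval can reuse earlier replay work.

```bash
# Illustrative tuning only. With 8192 slots per restore point, a cold state
# can require replaying thousands of blocks; caching a few replayed states
# lets subsequent requests at higher slots start from a cached state.
lighthouse beacon_node \
  --slots-per-restore-point 8192 \
  --historic-state-cache-size 4
```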
+ let low_state = match low_state { + Some(state) => state, + None => self.load_restore_point_by_index(low_restore_point_idx)?, + }; + // Acquire the read lock, so that the split can't change while this is happening. let split = self.split.read_recursive(); - let low_restore_point = self.load_restore_point_by_index(low_restore_point_idx)?; let high_restore_point = self.get_restore_point(high_restore_point_idx, &split)?; - // 2. Load the blocks from the high restore point back to the low restore point. + // 2. Load the blocks from the high restore point back to the low point. let blocks = self.load_blocks_to_replay( - low_restore_point.slot(), + low_slot, slot, self.get_high_restore_point_block_root(&high_restore_point, slot)?, )?; - // 3. Replay the blocks on top of the low restore point. + // 3. Replay the blocks on top of the low point. // Use a forwards state root iterator to avoid doing any tree hashing. // The state root of the high restore point should never be used, so is safely set to 0. let state_root_iter = self.forwards_state_roots_iterator_until( - low_restore_point.slot(), + low_slot, slot, || Ok((high_restore_point, Hash256::zero())), &self.spec, )?; - self.replay_blocks( - low_restore_point, + let state = self.replay_blocks( + low_state, blocks, slot, Some(state_root_iter), - StateRootStrategy::Accurate, - ) + StateProcessingStrategy::Accurate, + )?; + + // If state is not error, put it in the cache. + self.state_cache.lock().put(slot, state.clone()); + + Ok(state) } /// Get the restore point with the given index, or if it is out of bounds, the split state. @@ -1292,10 +1329,10 @@ impl, Cold: ItemStore> HotColdDB blocks: Vec>>, target_slot: Slot, state_root_iter: Option>>, - state_root_strategy: StateRootStrategy, + state_processing_strategy: StateProcessingStrategy, ) -> Result, Error> { let mut block_replayer = BlockReplayer::new(state, &self.spec) - .state_root_strategy(state_root_strategy) + .state_processing_strategy(state_processing_strategy) .no_signature_verification() .minimal_block_root_verification(); diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 6d27f0cc8..95e646ff5 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Checkpoint, Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(16); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(17); // All the keys that get stored under the `BeaconMeta` column. // @@ -100,8 +100,10 @@ pub struct AnchorInfo { impl AnchorInfo { /// Returns true if the block backfill has completed. - pub fn block_backfill_complete(&self) -> bool { - self.oldest_block_slot == 0 + /// This is a comparison between the oldest block slot and the target backfill slot (which is + /// likely to be the closest WSP). 
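A hedged sketch of how the new backfill target is driven from the CLI (the checkpoint sync URL is a placeholder): by default backfill stops at the node's genesis backfill slot (typically near the weak subjectivity point, per the comment above), while `--genesis-backfill`, or `--reconstruct-historic-states` which enables it in the `config.rs` hunk above, extends backfill all the way to genesis.

```bash
# Checkpoint sync URL is a placeholder. With --genesis-backfill the node
# downloads blocks all the way back to genesis instead of stopping at the
# default backfill target.
lighthouse bn \
  --checkpoint-sync-url https://example.com/ \
  --genesis-backfill

# --reconstruct-historic-states also enables genesis backfill (see the
# config.rs hunk above), since reconstructing states needs every block.
lighthouse bn \
  --checkpoint-sync-url https://example.com/ \
  --reconstruct-historic-states
```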
+ pub fn block_backfill_complete(&self, target_slot: Slot) -> bool { + self.oldest_block_slot <= target_slot } } diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index c399f1b45..cd50babdb 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -5,7 +5,7 @@ use itertools::{process_results, Itertools}; use slog::info; use state_processing::{ per_block_processing, per_slot_processing, BlockSignatureStrategy, ConsensusContext, - VerifyBlockRoot, + StateProcessingStrategy, VerifyBlockRoot, }; use std::sync::Arc; use types::{EthSpec, Hash256}; @@ -96,6 +96,7 @@ where &mut state, &block, BlockSignatureStrategy::NoVerification, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &self.spec, diff --git a/book/src/LaTeX/full-withdrawal.tex b/book/src/LaTeX/full-withdrawal.tex new file mode 100644 index 000000000..2447ba097 --- /dev/null +++ b/book/src/LaTeX/full-withdrawal.tex @@ -0,0 +1,66 @@ +% To compile the file using PdfLaTeX, you may use the latex+dvips+ps2pdf compilation. If you are using TeXstudio, this is builtin and you can choose this option by going to Options > Configure TeXstudio under Build & View, choose DVI -> PS -> PDF Chain + +% Alternatively, you may use XeLaTeX with --shell-escape command. To do so in TeXstuidio, go to Options > Configure TeXstudio > Build. Under "Add Commands", enter a user of your choice, and in the right empty space, insert: txs:///xelatex/[--shell-escape]. When compile, go to Tools > User and select the user you just inserted. + +\documentclass[]{article} +\usepackage{pst-all} +\pagestyle{empty} + + + +\begin{document} + + +\begin{figure} + \psscalebox{1.0 1.0} % Change this value to rescale the drawing. + { + \begin{pspicture}(0,-9.09)(11.8,6.13) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](7.3,6.13)(4.2,5.21) + \rput[bl](4.6,5.51){Voluntary exit} + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{-<}(5.8,5.21)(5.8,3.71)(5.8,3.81) + \psline[linecolor=black, linewidth=0.04](1.7,3.61)(9.8,3.61) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(1.7,3.61)(1.7,2.61) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](2.9,2.63)(0.8,1.55) + \rput[bl](1.0,1.91){Type 0x00} + \psframe[linecolor=black, linewidth=0.04, dimen=outer](10.7,2.63)(8.6,1.55) + \rput[bl](8.8,1.91){Type 0x01} + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(9.8,3.61)(9.8,2.61) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(1.7,1.51)(1.7,0.61) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](3.7,0.61)(0.0,-1.19) + \rput[bl](0.6,-0.19){Funds locked in} + \rput[bl](0.7,-0.79){Beacon chain} + \psframe[linecolor=black, linewidth=0.04, dimen=outer](11.8,0.73)(7.9,-1.39) + \rput[bl](9.0,-0.59){Exit queue} + \rput[bl](8.8,0.01){Varying time} + \rput[bl](8.3,-1.09){32 minutes to weeks} + \rput[bl](9.0,-2.89){Fixed time} + \rput[bl](9.0,-3.49){27.3 hours} + \rput[bl](8.8,-5.49){Varying time} + \rput[bl](8.7,-5.99){validator sweep} + \rput[bl](8.9,-6.59){up to 5 days} + \psframe[linecolor=black, linewidth=0.04, dimen=outer](11.6,-2.19)(8.0,-3.89) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](11.7,-4.79)(7.9,-6.89) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](3.7,-2.49)(0.0,-4.29) + \rput[bl](1.3,-3.29){BLS to} + 
\rput[bl](0.6,-3.89){execution change} + \psline[linecolor=black, linewidth=0.04, linestyle=dashed, dash=0.17638889cm 0.10583334cm, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(1.7,-1.19)(1.7,-2.49) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(9.8,1.51)(9.8,0.71) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(9.8,-1.39)(9.8,-2.19) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(9.8,-3.89)(9.8,-4.79) + \psline[linecolor=black, linewidth=0.04, linestyle=dotted, dotsep=0.10583334cm](3.7,-3.39)(5.8,-3.39) + \psline[linecolor=black, linewidth=0.04, linestyle=dotted, dotsep=0.10583334cm, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(5.8,-3.39)(5.8,-0.39)(7.9,-0.39) + \psline[linecolor=black, linewidth=0.04, linestyle=dotted, dotsep=0.10583334cm, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(5.8,-3.39)(8.0,-3.39) + \psline[linecolor=black, linewidth=0.04, linestyle=dotted, dotsep=0.10583334cm, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(5.8,-3.39)(5.8,-6.09)(7.9,-6.09) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](11.7,-7.79)(7.9,-9.09) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(9.8,-6.89)(9.8,-7.79) + \rput[bl](8.1,-8.59){\Large{Full withdrawal}} + \rput[bl](1.8,-2.09){\textit{\Large{anytime}}} + \rput[bl](4.0,-3.19){\textit{\Large{either}}} + \rput[bl](4.2,-3.89){\textit{\Large{one}}} + \end{pspicture} + } +\end{figure} + + + +\end{document} diff --git a/book/src/LaTeX/partial-withdrawal.tex b/book/src/LaTeX/partial-withdrawal.tex new file mode 100644 index 000000000..05db3b688 --- /dev/null +++ b/book/src/LaTeX/partial-withdrawal.tex @@ -0,0 +1,50 @@ +% To compile the file using PdfLaTeX, you may use the latex+dvips+ps2pdf compilation. If you are using TeXstudio, this is builtin and you can choose this option by going to Options > Configure TeXstudio under Build & View, choose DVI -> PS -> PDF Chain + +% Alternatively, you may use XeLaTeX with --shell-escape command. To do so in TeXstuidio, go to Options > Configure TeXstudio > Build. Under "Add Commands", enter a user of your choice, and in the right empty space, insert: txs:///xelatex/[--shell-escape]. When compile, go to Tools > User and select the user you just inserted. + + +\documentclass[]{article} +\usepackage{pst-all} +\pagestyle{empty} + + + +\begin{document} + +\begin{figure} + \psscalebox{1.0 1.0} % Change this value to rescale the drawing. 
+ { + \begin{pspicture}(0,-8.09)(10.7,5.53) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](7.14,5.53)(3.6,4.45) + \rput[bl](3.8,4.81){Partial withdrawals} + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{-<}(5.2,4.41)(5.2,2.91)(5.2,3.01) + \psline[linecolor=black, linewidth=0.04](1.8,2.81)(8.9,2.81) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(1.8,2.81)(1.8,1.81) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](2.7,1.83)(0.6,0.75) + \rput[bl](0.8,1.09){Type 0x00} + \psframe[linecolor=black, linewidth=0.04, dimen=outer](9.8,1.83)(7.7,0.75) + \rput[bl](7.92,1.07){Type 0x01} + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(8.9,2.81)(8.9,1.81) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(1.7,0.71)(1.7,-0.19) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](3.7,-0.19)(0.0,-1.99) + \rput[bl](0.66,-0.99){Funds locked in} + \rput[bl](0.9,-1.59){Beacon chain} + \psframe[linecolor=black, linewidth=0.04, dimen=outer](10.7,-3.29)(6.8,-5.09) + \rput[bl](7.6,-3.99){validator sweep} + \rput[bl](7.5,-4.69){$\sim$ every 5 days} + \psframe[linecolor=black, linewidth=0.04, dimen=outer](3.7,-3.29)(0.0,-5.09) + \rput[bl](1.3,-4.09){BLS to} + \rput[bl](0.5,-4.69){execution change} + \psline[linecolor=black, linewidth=0.04, linestyle=dashed, dash=0.17638889cm 0.10583334cm, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(1.7,-1.99)(1.7,-3.29) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(8.9,0.71)(8.9,-3.29) + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(3.7,-4.19)(6.7,-4.19) + \psframe[linecolor=black, linewidth=0.04, dimen=outer](10.7,-6.29)(6.9,-8.09) + \rput[bl](7.0,-6.99){Balance above 32 ETH} + \rput[bl](7.9,-7.59){withdrawn} + \psline[linecolor=black, linewidth=0.04, arrowsize=0.05291667cm 2.0,arrowlength=1.4,arrowinset=0.0]{->}(8.9,-5.09)(8.9,-6.29) + \rput[bl](1.8,-2.89){\textit{\Large{anytime}}} + \end{pspicture} + } +\end{figure} + +\end{document} \ No newline at end of file diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index ff5c1e980..bfd5a02a6 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -11,15 +11,10 @@ * [Update Priorities](./installation-priorities.md) * [Run a Node](./run_a_node.md) * [Become a Validator](./mainnet-validator.md) - * [Become a Testnet Validator](./testnet-validator.md) -* [Key Management](./key-management.md) - * [Create a wallet](./wallet-create.md) - * [Create a validator](./validator-create.md) - * [Key recovery](./key-recovery.md) * [Validator Management](./validator-management.md) - * [Importing from the Staking Launchpad](./validator-import-launchpad.md) * [Slashing Protection](./slashing-protection.md) * [Voluntary Exits](./voluntary-exit.md) + * [Partial Withdrawals](./partial-withdrawal.md) * [Validator Monitoring](./validator-monitoring.md) * [Doppelganger Protection](./validator-doppelganger.md) * [Suggested Fee Recipient](./suggested-fee-recipient.md) @@ -34,6 +29,7 @@ * [Prometheus Metrics](./advanced_metrics.md) * [Lighthouse UI (Siren)](./lighthouse-ui.md) * [Installation](./ui-installation.md) + * [Authentication](./ui-authentication.md) * [Configuration](./ui-configuration.md) * [Usage](./ui-usage.md) * [FAQs](./ui-faqs.md) @@ -41,9 +37,12 @@ * 
[Checkpoint Sync](./checkpoint-sync.md) * [Custom Data Directories](./advanced-datadir.md) * [Validator Graffiti](./graffiti.md) + * [Proposer Only Beacon Nodes](./advanced-proposer-only.md) * [Remote Signing with Web3Signer](./validator-web3signer.md) * [Database Configuration](./advanced_database.md) * [Database Migrations](./database-migrations.md) + * [Key Management](./key-management.md) + * [Key Recovery](./key-recovery.md) * [Advanced Networking](./advanced_networking.md) * [Running a Slasher](./slasher.md) * [Redundancy](./redundancy.md) diff --git a/book/src/advanced-proposer-only.md b/book/src/advanced-proposer-only.md new file mode 100644 index 000000000..c3347e044 --- /dev/null +++ b/book/src/advanced-proposer-only.md @@ -0,0 +1,71 @@ +# Advanced Proposer-Only Beacon Nodes + +Lighthouse allows for more exotic setups that can minimize attack vectors by +adding redundant beacon nodes and dividing the roles of attesting and block +production between them. + +The purpose of this is to minimize attack vectors +where malicious users obtain the network identities (IP addresses) of beacon +nodes corresponding to individual validators and subsequently perform Denial Of Service +attacks on the beacon nodes when they are due to produce a block on the +network. By splitting the duties of attestation and block production across +different beacon nodes, an attacker may not know which node is the block +production node, especially if the user rotates IP addresses of the block +production beacon node in between block proposals (this is in-frequent with +networks with large validator counts). + +## The Beacon Node + +A Lighthouse beacon node can be configured with the `--proposer-only` flag +(i.e. `lighthouse bn --proposer-only`). +Setting a beacon node with this flag will limit its use as a beacon node for +normal activities such as performing attestations, but it will make the node +harder to identify as a potential node to attack and will also consume less +resources. + +Specifically, this flag reduces the default peer count (to a safe minimal +number as maintaining peers on attestation subnets do not need to be considered), +prevents the node from subscribing to any attestation-subnets or +sync-committees which is a primary way for attackers to de-anonymize +validators. + +> Note: Beacon nodes that have set the `--proposer-only` flag should not be connected +> to validator clients unless via the `--proposer-nodes` flag. If connected as a +> normal beacon node, the validator may fail to handle its duties correctly and +> result in a loss of income. + + +## The Validator Client + +The validator client can be given a list of HTTP API endpoints representing +beacon nodes that will be solely used for block propagation on the network, via +the CLI flag `--proposer-nodes`. These nodes can be any working beacon nodes +and do not specifically have to be proposer-only beacon nodes that have been +executed with the `--proposer-only` (although we do recommend this flag for +these nodes for added security). + +> Note: The validator client still requires at least one other beacon node to +> perform its duties and must be specified in the usual `--beacon-nodes` flag. + +> Note: The validator client will attempt to get a block to propose from the +> beacon nodes specified in `--beacon-nodes` before trying `--proposer-nodes`. +> This is because the nodes subscribed to subnets have a higher chance of +> producing a more profitable block. 
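As a sketch of the split described in this section (addresses, ports and data directories are placeholders; in practice the two beacon nodes would typically run on separate machines with different IP addresses):

```bash
# Normal beacon node: handles attestations and all regular duties.
lighthouse bn --datadir ~/.lighthouse/attester \
  --http --http-port 5052

# Proposer-only beacon node: minimal configuration, reduced peer count,
# no attestation-subnet or sync-committee subscriptions.
lighthouse bn --datadir ~/.lighthouse/proposer \
  --proposer-only --port 9002 \
  --http --http-port 5062

# One validator client performs regular duties via the normal node and
# additionally publishes proposed blocks via the proposer-only node.
lighthouse vc \
  --beacon-nodes http://localhost:5052 \
  --proposer-nodes http://localhost:5062
```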
Any block builders should therefore be +> attached to the `--beacon-nodes` and not necessarily the `--proposer-nodes`. + + +## Setup Overview + +The intended set-up to take advantage of this mechanism is to run one (or more) +normal beacon nodes in conjunction with one (or more) proposer-only beacon +nodes. See the [Redundancy](./redundancy.md) section for more information about +setting up redundant beacon nodes. The proposer-only beacon nodes should be +setup to use a different IP address than the primary (non proposer-only) nodes. +For added security, the IP addresses of the proposer-only nodes should be +rotated occasionally such that a new IP-address is used per block proposal. + +A single validator client can then connect to all of the above nodes via the +`--beacon-nodes` and `--proposer-nodes` flags. The resulting setup will allow +the validator client to perform its regular duties on the standard beacon nodes +and when the time comes to propose a block, it will send this block via the +specified proposer-only nodes. diff --git a/book/src/advanced_database.md b/book/src/advanced_database.md index 397d9a28b..57e49531c 100644 --- a/book/src/advanced_database.md +++ b/book/src/advanced_database.md @@ -58,6 +58,16 @@ the `--slots-per-restore-point` flag: lighthouse beacon_node --slots-per-restore-point 32 ``` +### Historic state cache + +Lighthouse includes a cache to avoid repeatedly replaying blocks when loading historic states. Lighthouse will cache a limited number of reconstructed states and will re-use them when serving requests for subsequent states at higher slots. This greatly reduces the cost of requesting several states in order, and we recommend that applications like block explorers take advantage of this cache. + +The historical state cache size can be specified with the flag `--historic-state-cache-size` (default value is 1): + +```bash +lighthouse beacon_node --historic-state-cache-size 4 +``` + ## Glossary * _Freezer DB_: part of the database storing finalized states. States are stored in a sparser diff --git a/book/src/api-bn.md b/book/src/api-bn.md index 481c00169..b86e593bf 100644 --- a/book/src/api-bn.md +++ b/book/src/api-bn.md @@ -72,8 +72,7 @@ specification][OpenAPI]. Returns the block header at the head of the canonical chain. ```bash -curl -X GET "http://localhost:5052/eth/v1/beacon/headers/head" -H "accept: -application/json" +curl -X GET "http://localhost:5052/eth/v1/beacon/headers/head" -H "accept: application/json" | jq ``` ```json @@ -100,7 +99,7 @@ application/json" Shows the status of validator at index `1` at the `head` state. ```bash -curl -X GET "http://localhost:5052/eth/v1/beacon/states/head/validators/1" -H "accept: application/json" +curl -X GET "http://localhost:5052/eth/v1/beacon/states/head/validators/1" -H "accept: application/json" | jq ``` ```json @@ -159,8 +158,7 @@ The API is now being served at `https://localhost:5052`. To test connectivity, you can run the following: ```bash -curl -X GET "https://localhost:5052/eth/v1/node/version" -H "accept: application/json" --cacert cert.pem - +curl -X GET "https://localhost:5052/eth/v1/node/version" -H "accept: application/json" --cacert cert.pem | jq ``` ### Connecting a validator client In order to connect a validator client to a beacon node over TLS, the validator @@ -203,7 +201,7 @@ Ensure the `--http` flag has been supplied at the CLI. 
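Relating to the historic state cache section above, a hedged example of the access pattern it is designed for (slot numbers are placeholders, and it assumes the standard debug states endpoint is reachable on the node): requesting states in ascending slot order lets each request reuse the state replayed for the previous one.

```bash
# Fetch several historic states in ascending slot order; slots are examples.
for slot in 4200000 4200032 4200064; do
  curl -s -H "Accept: application/octet-stream" \
    "http://localhost:5052/eth/v2/debug/beacon/states/${slot}" \
    -o "state_${slot}.ssz"
done
```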
You can quickly check that the HTTP endpoint is up using `curl`: ```bash -curl -X GET "http://localhost:5052/eth/v1/node/version" -H "accept: application/json" +curl -X GET "http://localhost:5052/eth/v1/node/version" -H "accept: application/json" | jq ``` The beacon node should respond with its version: diff --git a/book/src/api-lighthouse.md b/book/src/api-lighthouse.md index 284818097..47fe62f50 100644 --- a/book/src/api-lighthouse.md +++ b/book/src/api-lighthouse.md @@ -141,7 +141,8 @@ curl -X POST "http://localhost:5052/lighthouse/ui/validator_metrics" -d '{"indic "attestation_head_hit_percentage": 100, "attestation_target_hits": 5, "attestation_target_misses": 5, - "attestation_target_hit_percentage": 50 + "attestation_target_hit_percentage": 50, + "latest_attestation_inclusion_distance": 1 } } } @@ -455,6 +456,7 @@ curl "http://localhost:5052/lighthouse/database/info" | jq "config": { "slots_per_restore_point": 2048, "block_cache_size": 5, + "historic_state_cache_size": 1, "compact_on_init": false, "compact_on_prune": true }, @@ -677,3 +679,31 @@ Caveats: This is because the state _prior_ to the `start_epoch` needs to be loaded from the database, and loading a state on a boundary is most efficient. + +### `/lighthouse/logs` + +This is a Server Side Event subscription endpoint. This allows a user to read +the Lighthouse logs directly from the HTTP API endpoint. This currently +exposes INFO and higher level logs. It is only enabled when the `--gui` flag is set in the CLI. + +Example: + +```bash +curl -N "http://localhost:5052/lighthouse/logs" +``` + +Should provide an output that emits log events as they occur: +```json +{ +"data": { + "time": "Mar 13 15:28:41", + "level": "INFO", + "msg": "Syncing", + "service": "slot_notifier", + "est_time": "1 hr 27 mins", + "speed": "5.33 slots/sec", + "distance": "28141 slots (3 days 21 hrs)", + "peers": "8" + } +} +``` diff --git a/book/src/api-vc-endpoints.md b/book/src/api-vc-endpoints.md index 80a14ae77..d5d76e4ef 100644 --- a/book/src/api-vc-endpoints.md +++ b/book/src/api-vc-endpoints.md @@ -578,3 +578,33 @@ The following fields may be omitted or nullified to obtain default values: ### Example Response Body *No data is included in the response body.* + +## `GET /lighthouse/logs` + +Provides a subscription to receive logs as Server Side Events. Currently the +logs emitted are INFO level or higher. 
+ +### HTTP Specification + +| Property | Specification | +|-------------------|--------------------------------------------| +| Path | `/lighthouse/logs` | +| Method | GET | +| Required Headers | None | +| Typical Responses | 200 | + +### Example Response Body + +```json +{ + "data": { + "time": "Mar 13 15:26:53", + "level": "INFO", + "msg": "Connected to beacon node(s)", + "service": "notifier", + "synced": 1, + "available": 1, + "total": 1 + } +} +``` \ No newline at end of file diff --git a/book/src/builders.md b/book/src/builders.md index f2a4b3936..fc42f9b74 100644 --- a/book/src/builders.md +++ b/book/src/builders.md @@ -178,7 +178,7 @@ You can check that your builder is configured correctly by looking for these log On start-up, the beacon node will log if a builder is configured: ``` -INFO Connected to external block builder +INFO Using external block builder ``` At regular intervals the validator client will log that it successfully registered its validators diff --git a/book/src/database-migrations.md b/book/src/database-migrations.md index d2b7b518d..5e0b89635 100644 --- a/book/src/database-migrations.md +++ b/book/src/database-migrations.md @@ -92,6 +92,7 @@ curl "http://localhost:5052/lighthouse/database/info" "slots_per_restore_point": 8192, "slots_per_restore_point_set_explicitly": true, "block_cache_size": 5, + "historic_state_cache_size": 1, "compact_on_init": false, "compact_on_prune": true } diff --git a/book/src/imgs/full-withdrawal.png b/book/src/imgs/full-withdrawal.png new file mode 100644 index 000000000..6fa2db6a9 Binary files /dev/null and b/book/src/imgs/full-withdrawal.png differ diff --git a/book/src/imgs/partial-withdrawal.png b/book/src/imgs/partial-withdrawal.png new file mode 100644 index 000000000..0bf90b91d Binary files /dev/null and b/book/src/imgs/partial-withdrawal.png differ diff --git a/book/src/imgs/ui-autoconnect-auth.png b/book/src/imgs/ui-autoconnect-auth.png new file mode 100644 index 000000000..4121f56ca Binary files /dev/null and b/book/src/imgs/ui-autoconnect-auth.png differ diff --git a/book/src/imgs/ui-exit.png b/book/src/imgs/ui-exit.png new file mode 100644 index 000000000..7061fab38 Binary files /dev/null and b/book/src/imgs/ui-exit.png differ diff --git a/book/src/imgs/ui-fail-auth.png b/book/src/imgs/ui-fail-auth.png new file mode 100644 index 000000000..dece7b707 Binary files /dev/null and b/book/src/imgs/ui-fail-auth.png differ diff --git a/book/src/imgs/ui-session-auth.png b/book/src/imgs/ui-session-auth.png new file mode 100644 index 000000000..c66b92af7 Binary files /dev/null and b/book/src/imgs/ui-session-auth.png differ diff --git a/book/src/installation.md b/book/src/installation.md index 627326d2a..4adaf8da7 100644 --- a/book/src/installation.md +++ b/book/src/installation.md @@ -29,6 +29,10 @@ After [The Merge](https://ethereum.org/en/roadmap/merge/) on 15th Sep * CPU: Quad-core AMD Ryzen, Intel Broadwell, ARMv8 or newer -* Memory: 16 GB RAM or more -* Storage: 2 TB solid state storage +* Memory: 32 GB RAM* +* Storage: 2 TB solid state drive * Network: 100 Mb/s download, 20 Mb/s upload broadband connection + +> *Note: 16 GB RAM is becoming rather limited due to the increased resources required. 16 GB RAM would likely result in out of memory errors in the case of a spike in computing demand (e.g., caused by a bug) or during periods of non-finality of the beacon chain. 
Users with 16 GB RAM also have a limited choice when it comes to selecting an execution client, which does not help with the [client diversity](https://clientdiversity.org/). We therefore recommend that users have at least 32 GB RAM for the long-term health of the node, while also giving them the flexibility to change clients should the need arise.
+
+Last update: April 2023
diff --git a/book/src/key-management.md b/book/src/key-management.md
index bb1751be1..084b1fbe4 100644
--- a/book/src/key-management.md
+++ b/book/src/key-management.md
@@ -3,12 +3,12 @@
[launchpad]: https://launchpad.ethereum.org/
>
-> **Note: we recommend using the [Staking launchpad][launchpad] to create validators.**
+> **Note: While Lighthouse is able to generate the validator keys and the deposit data file to submit to the deposit contract, we strongly recommend using the [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) to create validator keys and the deposit data file. This is because the [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) has the option to assign a withdrawal address during the key generation process, while the Lighthouse wallet will always generate keys with withdrawal credentials of type 0x00. This means that users who created keys using Lighthouse will have to update their withdrawal credentials in the future to enable withdrawals. In addition, Lighthouse generates the deposit data file in the form of `*.rlp`, which cannot be uploaded to the [Staking launchpad][launchpad], which accepts only `*.json` files. This means that users have to directly interact with the deposit contract to be able to submit the deposit if they were to generate the files using Lighthouse.**
Lighthouse uses a _hierarchical_ key management system for producing validator
keys. It is hierarchical because each validator key can be _derived_ from a
master key, making the validators keys _children_ of the master key. This
-scheme means that a single 24-word mnemonic can be used to backup all of your
+scheme means that a single 24-word mnemonic can be used to back up all of your
validator keys without providing any observable link between them (i.e., it is
privacy-retaining). Hierarchical key derivation schemes are common-place in
cryptocurrencies, they are already used by most hardware and software wallets
@@ -30,37 +30,63 @@ We defined some terms in the context of validator key management:
  keypair.
  - Defined in EIP-2335.
- **Voting Keypair**: a BLS public and private keypair which is used for
-  signing blocks, attestations and other messages on regular intervals,
-  whilst staking in Phase 0.
+  signing blocks, attestations and other messages at regular intervals in the beacon chain.
- **Withdrawal Keypair**: a BLS public and private keypair which will be
  required _after_ Phase 0 to manage ETH once a validator has exited.
-## Overview
+## Create a validator
+There are two steps involved in creating a validator key using Lighthouse:
+ 1. [Create a wallet](#step-1-create-a-wallet-and-record-the-mnemonic)
+ 1. [Create a validator](#step-2-create-a-validator)
-The key management system in Lighthouse involves moving down the above list of
-items, starting at one easy-to-backup mnemonic and ending with multiple
-keypairs. Creating a single validator looks like this:
+The following example demonstrates how to create a single validator key.
-1. Create a **wallet** and record the **mnemonic**:
-   - `lighthouse --network prater account wallet create --name wally --password-file wally.pass`
-1.
Create the voting and withdrawal **keystores** for one validator:
-   - `lighthouse --network prater account validator create --wallet-name wally --wallet-password wally.pass --count 1`
+### Step 1: Create a wallet and record the mnemonic
+A wallet allows for generating practically unlimited validators from an
+easy-to-remember 24-word string (a mnemonic). As long as that mnemonic is
+backed up, all validator keys can be trivially re-generated.
+
+Whilst the wallet stores the mnemonic, it does not store it in plain-text: the
+mnemonic is encrypted with a password. It is the responsibility of the user to
+define a strong password. The password is only required for interacting with
+the wallet; it is not required for recovering keys from a mnemonic.
+
+To create a wallet, use the `lighthouse account wallet` command. For example, if we wish to create a new wallet for the Goerli testnet named `wally` and save it in `~/.lighthouse/goerli/wallets` with a randomly generated password saved
+to `./wally.pass`:
+
+```bash
+lighthouse --network goerli account wallet create --name wally --password-file wally.pass
+```
+Using the above command, a wallet will be created in `~/.lighthouse/goerli/wallets` with the name
+`wally`. It is encrypted using the password defined in the
+`wally.pass` file.
+
+During the wallet creation process, a 24-word mnemonic will be displayed. Record the mnemonic because it allows you to recreate the files in the case of data loss.
+> Notes:
+> - When navigating to the directory `~/.lighthouse/goerli/wallets`, one will not see the wallet name `wally`, but a hexadecimal folder containing the wallet file. However, when interacting with `lighthouse` in the CLI, the name `wally` will be used.
+> - The password is not `wally.pass`, it is the _content_ of the
+>   `wally.pass` file.
+> - If `wally.pass` already exists, the wallet password will be set to the content
+>   of that file.
+
+### Step 2: Create a validator
+Validators are fundamentally represented by a BLS keypair. In Lighthouse, we use a wallet to generate these keypairs. Once a wallet exists, the `lighthouse account validator create` command can be used to generate the BLS keypair and all necessary information to submit a validator deposit. With the `wally` wallet created in [Step 1](#step-1-create-a-wallet-and-record-the-mnemonic), we can create a validator with the command:
+
+```bash
+lighthouse --network goerli account validator create --wallet-name wally --wallet-password wally.pass --count 1
+```
+This command will:
+
+- Derive a single new BLS keypair from wallet `wally` in `~/.lighthouse/goerli/wallets`, updating it so that it generates a new key next time.
+- Create a new directory `~/.lighthouse/goerli/validators` containing:
+  - An encrypted keystore file `voting-keystore.json` containing the validator's voting keypair.
+  - An `eth1_deposit_data.rlp` file assuming the default deposit amount (`32 ETH`) which can be submitted to the deposit
+  contract for the Goerli testnet. Other networks can be set via the
+  `--network` parameter.
+- Create a new directory `~/.lighthouse/goerli/secrets` which stores a password to the validator's voting keypair.
-In step (1), we created a wallet in `~/.lighthouse/{network}/wallets` with the name
-`wally`. We encrypted this using a pre-defined password in the
-`wally.pass` file.
Then, in step (2), we created one new validator in the -`~/.lighthouse/{network}/validators` directory using `wally` (unlocking it with -`wally.pass`) and storing the passwords to the validators voting key in -`~/.lighthouse/{network}/secrets`. - -Thanks to the hierarchical key derivation scheme, we can delete all of the -aforementioned directories and then regenerate them as long as we remembered -the 24-word mnemonic (we don't recommend doing this, though). - -Creating another validator is easy, it's just a matter of repeating step (2). -The wallet keeps track of how many validators it has generated and ensures that -a new validator is generated each time. +If you want to create another validator in the future, repeat [Step 2](#step-2-create-a-validator). The wallet keeps track of how many validators it has generated and ensures that a new validator is generated each time. The important thing is to keep the 24-word mnemonic safe so that it can be used to generate new validator keys if needed. ## Detail @@ -76,36 +102,17 @@ There are three important directories in Lighthouse validator key management: - Defaults to `~/.lighthouse/{network}/validators` - `secrets/`: since the validator signing keys are "hot", the validator process needs access to the passwords to decrypt the keystores in the validators - dir. These passwords are stored here. - - Defaults to `~/.lighthouse/{network}/secrets` where `network` is the name of the network passed in the `--network` parameter (default is `mainnet`). + directory. These passwords are stored here. + - Defaults to `~/.lighthouse/{network}/secrets` + +where `{network}` is the name of the network passed in the `--network` parameter. When the validator client boots, it searches the `validators/` for directories containing voting keystores. When it discovers a keystore, it searches the -`secrets/` dir for a file with the same name as the 0x-prefixed hex -representation of the keystore public key. If it finds this file, it attempts +`secrets/` directory for a file with the same name as the 0x-prefixed validator public key. If it finds this file, it attempts to decrypt the keystore using the contents of this file as the password. If it fails, it logs an error and moves onto the next keystore. The `validators/` and `secrets/` directories are kept separate to allow for ease-of-backup; you can safely backup `validators/` without worrying about leaking private key data. - -### Withdrawal Keypairs - -In Ethereum consensus Phase 0, withdrawal keypairs do not serve any immediate purpose. -However, they become very important _after_ Phase 0: they will provide the -ultimate control of the ETH of withdrawn validators. - -This presents an interesting key management scenario: withdrawal keys are very -important, but not right now. Considering this, Lighthouse has adopted a -strategy where **we do not save withdrawal keypairs to disk by default** (it is -opt-in). Instead, we assert that since the withdrawal keys can be regenerated -from a mnemonic, having them lying around on the file-system only presents risk -and complexity. - -At the time of writing, we do not expose the commands to regenerate keys from -mnemonics. However, key regeneration is tested on the public Lighthouse -repository and will be exposed prior to mainnet launch. - -So, in summary, withdrawal keypairs can be trivially regenerated from the -mnemonic via EIP-2333 so they are not saved to disk like the voting keypairs. 
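For illustration, after creating one validator on Goerli as described above, the data directory will look roughly like the sketch below. The hexadecimal names are illustrative; they are derived from the wallet and the validator's 0x-prefixed voting public key, so yours will differ:

```
~/.lighthouse/goerli
├── secrets
│   └── 0xa5e8...9e56            # password file, named after the voting public key
├── validators
│   └── 0xa5e8...9e56
│       ├── eth1_deposit_data.rlp
│       └── voting-keystore.json
└── wallets
    └── 4ab6...                  # encrypted wallet; the name "wally" is stored inside
```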
diff --git a/book/src/key-recovery.md b/book/src/key-recovery.md index 2474d123c..a996e95cb 100644 --- a/book/src/key-recovery.md +++ b/book/src/key-recovery.md @@ -1,8 +1,8 @@ -# Key recovery +# Key Recovery Generally, validator keystore files are generated alongside a *mnemonic*. If -the keystore and/or the keystore password are lost this mnemonic can +the keystore and/or the keystore password are lost, this mnemonic can regenerate a new, equivalent keystore with a new password. There are two ways to recover keys using the `lighthouse` CLI: @@ -48,7 +48,7 @@ which contains all the information necessary to run a validator using the `lighthouse vc` command. The password to this new keystore will be placed in the `--secrets-dir` (default `~/.lighthouse/{network}/secrets`). -where `network` is the name of the consensus layer network passed in the `--network` parameter (default is `mainnet`). +where `{network}` is the name of the consensus layer network passed in the `--network` parameter (default is `mainnet`). ## Recover a EIP-2386 wallet diff --git a/book/src/lighthouse-ui.md b/book/src/lighthouse-ui.md index 225f293f9..4182314da 100644 --- a/book/src/lighthouse-ui.md +++ b/book/src/lighthouse-ui.md @@ -11,7 +11,7 @@ _Documentation for Siren users and developers._ Siren is a user interface built for Lighthouse that connects to a Lighthouse Beacon Node and a Lighthouse Validator Client to monitor performance and display key validator -metrics. +metrics. The UI is currently in active development. Its resides in the [Siren](https://github.com/sigp/siren) repository. @@ -24,7 +24,8 @@ information: - [Installation Guide](./ui-installation.md) - Information to install and run the Lighthouse UI. - [Configuration Guide](./ui-configuration.md) - Explanation of how to setup and configure Siren. -- [Usage](./ui-usage.md) - Details various Siren components. +- [Authentication Guide](./ui-authentication.md) - Explanation of how Siren authentication works and protects validator actions. +- [Usage](./ui-usage.md) - Details various Siren components. - [FAQs](./ui-faqs.md) - Frequently Asked Questions. ## Contributing diff --git a/book/src/mainnet-validator.md b/book/src/mainnet-validator.md index 41735f85b..377e5ebaa 100644 --- a/book/src/mainnet-validator.md +++ b/book/src/mainnet-validator.md @@ -2,7 +2,6 @@ [launchpad]: https://launchpad.ethereum.org/ [lh-book]: https://lighthouse-book.sigmaprime.io/ -[testnet-validator]: ./testnet-validator.md [advanced-datadir]: ./advanced-datadir.md [license]: https://github.com/sigp/lighthouse/blob/stable/LICENSE [slashing]: ./slashing-protection.md @@ -12,25 +11,13 @@ Becoming an Ethereum consensus validator is rewarding, but it's not for the fain familiar with the rules of staking (e.g., rewards, penalties, etc.) and also configuring and managing servers. You'll also need at least 32 ETH! -For those with an understanding of Ethereum consensus and server maintenance, you'll find that running Lighthouse -is easy. Install it, start it, monitor it and keep it updated. You shouldn't need to interact -with it on a day-to-day basis. +Being educated is critical to a validator's success. Before submitting your mainnet deposit, we recommend: -Being educated is critical to validator success. Before submitting your mainnet deposit, we -recommend: - -- Thoroughly exploring the [Staking Launchpad][launchpad] website - - Try running through the deposit process *without* actually submitting a deposit. 
+- Thoroughly exploring the [Staking Launchpad][launchpad] website; try running through the deposit process using a testnet launchpad such as the [Goerli staking launchpad](https://goerli.launchpad.ethereum.org/en/).
+- Running a testnet validator.
- Reading through this documentation, especially the [Slashing Protection][slashing] section.
-- Running a [testnet validator][testnet-validator].
- Performing a web search and doing your own research.
-By far, the best technical learning experience is to run a [Testnet Validator][testnet-validator].
-You can get hands-on experience with all the tools and it's a great way to test your staking
-hardware. We recommend *all* mainnet validators to run a testnet validator initially; 32 ETH is a
-significant outlay and joining a testnet is a great way to "try before you buy".
-
-Remember, if you get stuck you can always reach out on our [Discord][discord].
>
> **Please note**: the Lighthouse team does not take any responsibility for losses or damages
@@ -40,116 +27,187 @@ Remember, if you get stuck you can always reach out on our [Discord][discord].
> due to the actions of other actors on the consensus layer or software bugs. See the
> [software license][license] for more detail on liability.
-## Using Lighthouse for Mainnet
-When using Lighthouse, the `--network` flag selects a network. E.g.,
+## Become a validator
-- `lighthouse` (no flag): Mainnet.
-- `lighthouse --network mainnet`: Mainnet.
-- `lighthouse --network prater`: Prater (testnet).
+There are five primary steps to becoming a validator:
-Using the correct `--network` flag is very important; using the wrong flag can
-result in penalties, slashings or lost deposits. As a rule of thumb, always
-provide a `--network` flag instead of relying on the default.
+1. [Create validator keys](#step-1-create-validator-keys)
+1. [Start an execution client and Lighthouse beacon node](#step-2-start-an-execution-client-and-lighthouse-beacon-node)
+1. [Import validator keys into Lighthouse](#step-3-import-validator-keys-to-lighthouse)
+1. [Start Lighthouse validator client](#step-4-start-lighthouse-validator-client)
+1. [Submit deposit](#step-5-submit-deposit-32eth-per-validator)
-## Joining a Testnet
+> **Important note**: The guide below contains both mainnet and testnet instructions. We highly recommend that *all* users **run a testnet validator** prior to staking mainnet ETH. By far, the best technical learning experience is to run a testnet validator. You can get hands-on experience with all the tools and it's a great way to test your staking
+hardware. 32 ETH is a significant outlay and joining a testnet is a great way to "try before you buy".
-There are five primary steps to become a testnet validator:
+
-1. Create validator keys and submit deposits.
-1. Start an execution client.
-1. Install Lighthouse.
-1. Import the validator keys into Lighthouse.
-1. Start Lighthouse.
-1. Leave Lighthouse running.
-Each of these primary steps has several intermediate steps, so we recommend
-setting aside one or two hours for this process.
+> **Never use real ETH to join a testnet!** Testnets such as the Goerli testnet use Goerli ETH, which is worthless. This allows experimentation without real-world costs.
### Step 1. Create validator keys
-The Ethereum Foundation provides a "Staking Launchpad" for creating validator keypairs and submitting
-deposits:
-
-- [Staking Launchpad][launchpad]
-
-Please follow the steps on the launch pad site to generate validator keys and submit deposits.
Make -sure you select "Lighthouse" as your client. - -Move to the next step once you have completed the steps on the launch pad, -including generating keys via the Python CLI and submitting gETH/ETH deposits. - -### Step 2. Start an execution client - -Since the consensus chain relies upon the execution chain for validator on-boarding, all consensus validators must have a -connection to an execution client. - -We provide instructions for using Geth, but you could use any client that implements the JSON RPC -via HTTP. A fast-synced node is sufficient. - -#### Installing Geth - -If you're using a Mac, follow the instructions [listed -here](https://github.com/ethereum/go-ethereum/wiki/Installation-Instructions-for-Mac) to install -geth. Otherwise [see here](https://github.com/ethereum/go-ethereum/wiki/Installing-Geth). - -#### Starting Geth - -Once you have geth installed, use this command to start your execution node: - +The Ethereum Foundation provides the [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli/releases) for creating validator keys. Download and run the `staking-deposit-cli` with the command: ```bash - geth --http +./deposit new-mnemonic +``` +and follow the instructions to generate the keys. When prompted for a network, select `mainnet` if you want to run a mainnet validator, or select `goerli` if you want to run a Goerli testnet validator. A new mnemonic will be generated in the process. + +> **Important note:** A mnemonic (or seed phrase) is a 24-word string randomly generated in the process. It is highly recommended to write down the mnemonic and keep it safe offline. It is important to ensure that the mnemonic is never stored in any digital form (computers, mobile phones, etc) connected to the internet. Please also make one or more backups of the mnemonic to ensure your ETH is not lost in the case of data loss. It is very important to keep your mnemonic private as it represents the ultimate control of your ETH. + +Upon completing this step, the files `deposit_data-*.json` and `keystore-m_*.json` will be created. The keys that are generated from staking-deposit-cli can be easily loaded into a Lighthouse validator client (`lighthouse vc`) in [Step 3](#step-3-import-validator-keys-to-lighthouse). In fact, both of these programs are designed to work with each other. + + +> Lighthouse also supports creating validator keys, see [Key management](./key-management.md) for more info. + +### Step 2. Start an execution client and Lighthouse beacon node + +Start an execution client and Lighthouse beacon node according to the [Run a Node](./run_a_node.md) guide. Make sure that both execution client and consensus client are synced. + +### Step 3. Import validator keys to Lighthouse + +In [Step 1](#step-1-create-validator-keys), the staking-deposit-cli will generate the validator keys into a `validator_keys` directory. Let's assume that +this directory is `$HOME/staking-deposit-cli/validator_keys`. Using the default `validators` directory in Lighthouse (`~/.lighthouse/mainnet/validators`), run the following command to import validator keys: + +Mainnet: +```bash +lighthouse --network mainnet account validator import --directory $HOME/staking-deposit-cli/validator_keys ``` -### Step 3. Install Lighthouse - -*Note: Lighthouse only supports Windows via WSL.* - -Follow the [Lighthouse Installation Instructions](./installation.md) to install -Lighthouse from one of the available options. 
- -Proceed to the next step once you've successfully installed Lighthouse and viewed -its `--version` info. - -> Note: Some of the instructions vary when using Docker, ensure you follow the -> appropriate sections later in this guide. - -### Step 4. Import validator keys to Lighthouse - -When Lighthouse is installed, follow the [Importing from the Ethereum Staking Launch -pad](./validator-import-launchpad.md) instructions so the validator client can -perform your validator duties. - -Proceed to the next step once you've successfully imported all validators. - -### Step 5. Start Lighthouse - -For staking, one needs to run two Lighthouse processes: - -- `lighthouse bn`: the "beacon node" which connects to the P2P network and - verifies blocks. -- `lighthouse vc`: the "validator client" which manages validators, using data - obtained from the beacon node via a HTTP API. - -Starting these processes is different for binary and docker users: - -#### Binary users - -Those using the pre- or custom-built binaries can start the two processes with: - +Goerli testnet: ```bash -lighthouse --network mainnet bn --staking +lighthouse --network goerli account validator import --directory $HOME/staking-deposit-cli/validator_keys ``` -```bash -lighthouse --network mainnet vc +> Note: The user must specify the consensus client network that they are importing the keys by using the `--network` flag. + +> Note: If the validator_keys directory is in a different location, modify the path accordingly. + +> Note: `~/.lighthouse/mainnet` is the default directory which contains the keys and database. To specify a custom directory, see [Custom Directories][advanced-datadir]. + +> Docker users should use the command from the [Docker](#docker-users) documentation. + + +The user will be prompted for a password for each keystore discovered: + +``` +Keystore found at "/home/{username}/staking-deposit-cli/validator_keys/keystore-m_12381_3600_0_0_0-1595406747.json": + + - Public key: 0xa5e8702533f6d66422e042a0bf3471ab9b302ce115633fa6fdc5643f804b6b4f1c33baf95f125ec21969a3b1e0dd9e56 + - UUID: 8ea4cf99-8719-43c5-9eda-e97b8a4e074f + +If you enter the password it will be stored as plain text in validator_definitions.yml so that it is not required each time the validator client starts. + +Enter the keystore password, or press enter to omit it: ``` -> Note: `~/.lighthouse/mainnet` is the default directory which contains the keys and databases. -> To specify a custom dir, see [Custom Directories][advanced-datadir]. +The user can choose whether or not they'd like to store the validator password +in the [`validator_definitions.yml`](./validator-management.md) file. If the +password is *not* stored here, the validator client (`lighthouse vc`) +application will ask for the password each time it starts. This might be nice +for some users from a security perspective (i.e., if it is a shared computer), +however it means that if the validator client restarts, the user will be subject +to offline penalties until they can enter the password. If the user trusts the +computer that is running the validator client and they are seeking maximum +validator rewards, we recommend entering a password at this point. -#### Docker users +Once the process is done the user will see: +``` +Successfully imported keystore. +Successfully updated validator_definitions.yml. + +Successfully imported 1 validators (0 skipped). + +WARNING: DO NOT USE THE ORIGINAL KEYSTORES TO VALIDATE WITH ANOTHER CLIENT, OR YOU WILL GET SLASHED. 
+```
+
+Once you see the above message, you have successfully imported the validator keys. You can now proceed to the next step to start the validator client.
+
+
+### Step 4. Start Lighthouse validator client
+
+After the keys are imported, the user can start performing their validator duties
+by starting the Lighthouse validator client `lighthouse vc`:
+
+Mainnet:
+
+```bash
+lighthouse vc --network mainnet --suggested-fee-recipient YourFeeRecipientAddress
+```
+
+Goerli testnet:
+```bash
+lighthouse vc --network goerli --suggested-fee-recipient YourFeeRecipientAddress
+```
+
+The validator client manages validators using data obtained from the beacon node via an HTTP API. We highly recommend that you set a fee recipient by changing `YourFeeRecipientAddress` to an Ethereum address under your control.
+
+When `lighthouse vc` starts, check that the validator public key appears
+as a `voting_pubkey` as shown below:
+
+```
+INFO Enabled validator voting_pubkey: 0xa5e8702533f6d66422e042a0bf3471ab9b302ce115633fa6fdc5643f804b6b4f1c33baf95f125ec21969a3b1e0dd9e56
+```
+
+Once this log appears (and there are no errors), the `lighthouse vc` application
+will ensure that the validator starts performing its duties and being rewarded
+by the protocol.
+
+### Step 5: Submit deposit (32ETH per validator)
+
+After you have successfully run and synced the execution client, beacon node and validator client, you can proceed to submit the deposit. Go to the mainnet [Staking launchpad](https://launchpad.ethereum.org/en/) (or [Goerli staking launchpad](https://goerli.launchpad.ethereum.org/en/) for a testnet validator) and carefully go through the steps to becoming a validator. Once you are ready, you can submit the deposit by sending 32 ETH per validator to the deposit contract. Upload the `deposit_data-*.json` file generated in [Step 1](#step-1-create-validator-keys) to the Staking launchpad.
+
+> **Important note:** Double check that the deposit contract for mainnet is `0x00000000219ab540356cBB839Cbe05303d7705Fa` before you confirm the transaction.
+
+Once the deposit transaction is confirmed, it will take a minimum of ~16 hours to a few days/weeks for the beacon chain to process and activate your validator, depending on the queue. Refer to our [FAQ - Why does it take so long for a validator to be activated](./faq.md#why-does-it-take-so-long-for-a-validator-to-be-activated) for more info.
+
+Once your validator is activated, the validator client will start to publish attestations each epoch:
+
+```
+Dec 03 08:49:40.053 INFO Successfully published attestation slot: 98, committee_index: 0, head_block: 0xa208…7fd5,
+```
+
+If you propose a block, the log will look like:
+
+```
+Dec 03 08:49:36.225 INFO Successfully published block slot: 98, attestations: 2, deposits: 0, service: block
+```
+
+Congratulations! Your validator is now performing its duties and you will receive rewards for securing the Ethereum network.
+
+### What is next?
+After the validator is running and performing its duties, it is important to keep the validator online to continue accumulating rewards. However, there could be problems with the computer, the internet or other factors that cause the validator to be offline. For this, it is best to subscribe to notifications, e.g., via [beaconcha.in](https://beaconcha.in/), which will send notifications about missed attestations and/or proposals. You will be notified about the validator's offline status and will be able to react promptly.
+ +The next important thing is to stay up to date with updates to Lighthouse and the execution client. Updates are released from time to time, typically once or twice a month. For Lighthouse updates, you can subscribe to notifications on [Github](https://github.com/sigp/lighthouse) by clicking on `Watch`. If you only want to receive notification on new releases, select `Custom`, then `Releases`. You could also join [Lighthouse Discord](https://discord.gg/cyAszAh) where we will make an announcement when there is a new release. + +You may also want to try out [Siren](./lighthouse-ui.md), a UI developed by Lighthouse to monitor validator performance. + +Once you are familiar with running a validator and server maintenance, you'll find that running Lighthouse is easy. Install it, start it, monitor it and keep it updated. You shouldn't need to interact with it on a day-to-day basis. Happy staking! + +## Docker users + +### Import validator keys + +The `import` command is a little more complex for Docker users, but the example +in this document can be substituted with: + +```bash +docker run -it \ + -v $HOME/.lighthouse:/root/.lighthouse \ + -v $(pwd)/validator_keys:/root/validator_keys \ + sigp/lighthouse \ + lighthouse --network mainnet account validator import --directory /root/validator_keys +``` + +Here we use two `-v` volumes to attach: + +- `~/.lighthouse` on the host to `/root/.lighthouse` in the Docker container. +- The `validator_keys` directory in the present working directory of the host + to the `/root/validator_keys` directory of the Docker container. + +### Start Lighthouse beacon node and validator client Those using Docker images can start the processes with: ```bash @@ -167,29 +225,8 @@ $ docker run \ lighthouse --network mainnet vc ``` -### Step 6. Leave Lighthouse running -Leave your beacon node and validator client running and you'll see logs as the -beacon node stays synced with the network while the validator client produces -blocks and attestations. - -It will take 4-8+ hours for the beacon chain to process and activate your -validator, however you'll know you're active when the validator client starts -successfully publishing attestations each epoch: - -``` -Dec 03 08:49:40.053 INFO Successfully published attestation slot: 98, committee_index: 0, head_block: 0xa208…7fd5, -``` - -Although you'll produce an attestation each epoch, it's less common to produce a -block. Watch for the block production logs too: - -``` -Dec 03 08:49:36.225 INFO Successfully published block slot: 98, attestations: 2, deposits: 0, service: block -``` - -If you see any `ERRO` (error) logs, please reach out on -[Discord](https://discord.gg/cyAszAh) or [create an +If you get stuck you can always reach out on our [Discord][discord] or [create an issue](https://github.com/sigp/lighthouse/issues/new). -Happy staking! + diff --git a/book/src/partial-withdrawal.md b/book/src/partial-withdrawal.md new file mode 100644 index 000000000..db722d729 --- /dev/null +++ b/book/src/partial-withdrawal.md @@ -0,0 +1,23 @@ +# Partial Withdrawals + +After the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12th April 2023: + + - if a validator has a withdrawal credential type `0x00`, the rewards will continue to accumulate and will be locked in the beacon chain. + - if a validator has a withdrawal credential type `0x01`, any rewards above 32ETH will be periodically withdrawn to the withdrawal address. 
This is also known as the "validator sweep", i.e., once the "validator sweep" reaches your validator's index, your rewards will be withdrawn to the withdrawal address. At the time of writing, with 560,000+ validators on the Ethereum mainnet, you can expect to receive the rewards approximately every 5 days.
+
+### FAQ
+1. How do I know if I have withdrawal credentials of type `0x00` or `0x01`?
+
+   Refer to [this FAQ](./voluntary-exit.md#1-how-to-know-if-i-have-the-withdrawal-credentials-type-0x01).
+
+2. My validator has withdrawal credentials of type `0x00`; is there a deadline to update my withdrawal credentials?
+
+   No. You can update your withdrawal credentials **anytime**. The catch is that as long as you do not update your withdrawal credentials, your rewards will continue to be locked in the beacon chain. Only after you update the withdrawal credentials will the rewards be withdrawn to the withdrawal address.
+
+3. Do I have to do anything to get my rewards after I update the withdrawal credentials to type `0x01`?
+
+   No. The "validator sweep" occurs automatically and you can expect to receive the rewards every few days.
+
+   The figure below summarizes partial withdrawals.
+
+   ![partial](./imgs/partial-withdrawal.png)
\ No newline at end of file
diff --git a/book/src/run_a_node.md b/book/src/run_a_node.md
index fb112c367..a31aedf78 100644
--- a/book/src/run_a_node.md
+++ b/book/src/run_a_node.md
@@ -1,77 +1,43 @@
# Run a Node
-This document provides detail for users who want to run a Lighthouse beacon node.
+This section provides details for users who want to run a Lighthouse beacon node. You should have completed one [Installation](./installation.md) method of your choice before continuing with the following steps:
-1. Set up an [execution node](#step-1-set-up-an-execution-node);
-1. Enable [checkpoint sync](#step-2-choose-a-checkpoint-sync-provider);
-1. Run [Lighthouse](#step-3-run-lighthouse);
-1. [Check logs](#step-4-check-logs); and
-1. [Further readings](#step-5-further-readings).
+1. Create a [JWT secret file](#step-1-create-a-jwt-secret-file)
+1. Set up an [execution node](#step-2-set-up-an-execution-node);
+1. Set up a [beacon node](#step-3-set-up-a-beacon-node-using-lighthouse);
+1. [Check logs for sync status](#step-4-check-logs-for-sync-status);
-Checkpoint sync is *optional*; however, we recommend it since it is substantially faster
-than syncing from genesis while still providing the same functionality.
-## Step 1: Set up an execution node
-The Lighthouse beacon node *must* connect to an execution engine in order to validate the transactions
-present in blocks. Two flags are used to configure this connection:
+## Step 1: Create a JWT secret file
+A JWT secret file is used to secure the communication between the execution client and the consensus client. In this step, we will create a JWT secret file which will be used in later steps.
-- `--execution-endpoint`: the *URL* of the execution engine API. Often this will be
-  `http://localhost:8551`.
-- `--execution-jwt`: the *path* to the file containing the JWT secret shared by Lighthouse and the
-  execution engine. This is a mandatory form of authentication that ensures that Lighthouse
-has authority to control the execution engine.
+```bash
+sudo mkdir -p /secrets
+openssl rand -hex 32 | tr -d "\n" | sudo tee /secrets/jwt.hex
+```
+
-Each execution engine has its own flags for configuring the engine API and JWT.
-Please consult the relevant page of your execution engine for the required flags: +## Step 2: Set up an execution node -- [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/getting-started/consensus-clients) -- [Nethermind: Running Nethermind & CL](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge) -- [Besu: Connect to Mainnet](https://besu.hyperledger.org/en/stable/public-networks/get-started/connect/mainnet/) -- [Erigon: Beacon Chain (Consensus Layer)](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer) +The Lighthouse beacon node *must* connect to an execution engine in order to validate the transactions present in blocks. The execution engine connection must be *exclusive*, i.e. you must have one execution node +per beacon node. The reason for this is that the beacon node _controls_ the execution node. Select an execution client from the list below and run it: -The execution engine connection must be *exclusive*, i.e. you must have one execution node -per beacon node. The reason for this is that the beacon node _controls_ the execution node. -## Step 2: Choose a checkpoint sync provider +- [Nethermind](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge) +- [Besu](https://besu.hyperledger.org/en/stable/public-networks/get-started/connect/mainnet/) +- [Erigon](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer) +- [Geth](https://geth.ethereum.org/docs/getting-started/consensus-clients) -Lighthouse supports fast sync from a recent finalized checkpoint. -The checkpoint sync is done using a [public endpoint](#use-a-community-checkpoint-sync-endpoint) -provided by the Ethereum community. -In [step 3](#step-3-run-lighthouse), when running Lighthouse, -we will enable checkpoint sync by providing the URL to the `--checkpoint-sync-url` flag. +> Note: Each execution engine has its own flags for configuring the engine API and JWT secret to connect to a beacon node. Please consult the relevant page of your execution engine as above for the required flags. -### Use a community checkpoint sync endpoint -The Ethereum community provides various [public endpoints](https://eth-clients.github.io/checkpoint-sync-endpoints/) for you to choose from for your initial checkpoint state. Select one for your network and use it as the URL. +Once the execution client is up, just let it continue running. The execution client will start syncing when it connects to a beacon node. Depending on the execution client and computer hardware specifications, syncing can take from a few hours to a few days. You can safely proceed to Step 3 to set up a beacon node while the execution client is still syncing. -For example, the URL for Sigma Prime's checkpoint sync server for mainnet is `https://mainnet.checkpoint.sigp.io`, -which we will use in [step 3](#step-3-run-lighthouse). +## Step 3: Set up a beacon node using Lighthouse -## Step 3: Run Lighthouse - -To run Lighthouse, we use the three flags from the steps above: -- `--execution-endpoint`; -- `--execution-jwt`; and -- `--checkpoint-sync-url`. - -Additionally, we run Lighthouse with the `--network` flag, which selects a network: - -- `lighthouse` (no flag): Mainnet. -- `lighthouse --network mainnet`: Mainnet. -- `lighthouse --network goerli`: Goerli (testnet). - -Using the correct `--network` flag is very important; using the wrong flag can -result in penalties, slashings or lost deposits. 
As a rule of thumb, *always* -provide a `--network` flag instead of relying on the default. - -For the testnets we support [Goerli](https://goerli.net/) (`--network goerli`), -[Sepolia](https://sepolia.dev/) (`--network sepolia`), and [Gnosis chain](https://www.gnosis.io/) (`--network gnosis`). - -Minor modifications depend on if you want to run your node while [staking](#staking) or [non-staking](#non-staking). -In the following, we will provide examples of what a Lighthouse setup could look like. +In this step, we will set up a beacon node. Use the following command to start a beacon node that connects to the execution node: ### Staking @@ -84,9 +50,30 @@ lighthouse bn \ --http ``` -A Lighthouse beacon node can be configured to expose an HTTP server by supplying the `--http` flag. -The default listen address is `127.0.0.1:5052`. -The HTTP API is required for the beacon node to accept connections from the *validator client*, which manages keys. +> Note: If you download the binary file, you need to navigate to the directory of the binary file to run the above command. + +Notable flags: +- `--network` flag, which selects a network: + - `lighthouse` (no flag): Mainnet. + - `lighthouse --network mainnet`: Mainnet. + - `lighthouse --network goerli`: Goerli (testnet). + - `lighthouse --network sepolia`: Sepolia (testnet). + - `lighthouse --network gnosis`: Gnosis chain + + > Note: Using the correct `--network` flag is very important; using the wrong flag can +result in penalties, slashings or lost deposits. As a rule of thumb, *always* +provide a `--network` flag instead of relying on the default. +- `--execution-endpoint`: the URL of the execution engine API. If the execution engine is running on the same computer with the default port, this will be + `http://localhost:8551`. +- `--execution-jwt`: the path to the JWT secret file shared by Lighthouse and the + execution engine. This is a mandatory form of authentication which ensures that Lighthouse has the authority to control the execution engine. +- `--checkpoint-sync-url`: Lighthouse supports fast sync from a recent finalized checkpoint. Checkpoint sync is *optional*; however, we **highly recommend** it since it is substantially faster than syncing from genesis while still providing the same functionality. The checkpoint sync is done using [public endpoints](https://eth-clients.github.io/checkpoint-sync-endpoints/) provided by the Ethereum community. For example, in the above command, we use the URL for Sigma Prime's checkpoint sync server for mainnet `https://mainnet.checkpoint.sigp.io`. +- `--http`: to expose an HTTP server of the beacon chain. The default listening address is `http://localhost:5052`. The HTTP API is required for the beacon node to accept connections from the *validator client*, which manages keys. + + + +If you intend to run the beacon node without running the validator client (e.g., for non-staking purposes such as supporting the network), you can modify the above command so that the beacon node is configured for non-staking purposes: + ### Non-staking @@ -99,17 +86,19 @@ lighthouse bn \ --disable-deposit-contract-sync ``` -Since we are not staking, we can use the `--disable-deposit-contract-sync` flag. +Since we are not staking, we can use the `--disable-deposit-contract-sync` flag to disable syncing of deposit logs from the execution node. + ---- Once Lighthouse runs, we can monitor the logs to see if it is syncing correctly. 
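In addition to the logs, you can optionally query the beacon node's standard API to check the sync status, assuming the HTTP API has been enabled with `--http` and is listening on the default port:

```bash
curl -X GET "http://localhost:5052/eth/v1/node/syncing" -H "accept: application/json" | jq
```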
-## Step 4: Check logs + + +## Step 4: Check logs for sync status Several logs help you identify if Lighthouse is running correctly. ### Logs - Checkpoint sync -Lighthouse will print a message to indicate that checkpoint sync is being used: +If you run Lighthouse with the flag `--checkpoint-sync-url`, Lighthouse will print a message to indicate that checkpoint sync is being used: ``` INFO Starting checkpoint sync remote_url: http://remote-bn:8000/, service: beacon @@ -122,16 +111,17 @@ loaded from the remote beacon node: INFO Loaded checkpoint block and state state_root: 0xe8252c68784a8d5cc7e5429b0e95747032dd1dcee0d1dc9bdaf6380bf90bc8a6, block_root: 0x5508a20147299b1a7fe9dbea1a8b3bf979f74c52e7242039bd77cbff62c0695a, slot: 2034720, service: beacon ``` -Once the checkpoint is loaded Lighthouse will sync forwards to the head of the chain. +Once the checkpoint is loaded, Lighthouse will sync forwards to the head of the chain. -If a validator client is connected to the node then it will be able to start completing its duties -as soon as forwards sync completes. +If a validator client is connected to the beacon node it will be able to start its duties as soon as forwards sync completes, which typically takes 1-2 minutes. + +> Note: If you have an existing Lighthouse database, you will need to delete the database by using the `--purge-db` flag or manually delete the database with `sudo rm -r /path_to_database/beacon`. If you do use a `--purge-db` flag, once checkpoint sync is complete, you can remove the flag upon a restart. > **Security Note**: You should cross-reference the `block_root` and `slot` of the loaded checkpoint > against a trusted source like another [public endpoint](https://eth-clients.github.io/checkpoint-sync-endpoints/), > a friend's node, or a block explorer. -#### Backfilling Blocks +### Backfilling Blocks Once forwards sync completes, Lighthouse will commence a "backfill sync" to download the blocks from the checkpoint back to genesis. @@ -156,16 +146,17 @@ as `verified` indicating that they have been processed successfully by the execu INFO Synced, slot: 3690668, block: 0x1244…cb92, epoch: 115333, finalized_epoch: 115331, finalized_root: 0x0764…2a3d, exec_hash: 0x929c…1ff6 (verified), peers: 78 ``` +Once you see the above message - congratulations! This means that your node is synced and you have contributed to the decentralization and security of the Ethereum network. -## Step 5: Further readings +## Further readings Several other resources are the next logical step to explore after running your beacon node: -- Learn how to [become a validator](./mainnet-validator.md); +- If you intend to run a validator, proceed to [become a validator](./mainnet-validator.md); - Explore how to [manage your keys](./key-management.md); - Research on [validator management](./validator-management.md); - Dig into the [APIs](./api.md) that the beacon node and validator client provide; - Study even more about [checkpoint sync](./checkpoint-sync.md); or - Investigate what steps had to be taken in the past to execute a smooth [merge migration](./merge-migration.md). -Finally, if you a struggling with anything, join our [Discord](https://discord.gg/cyAszAh). We are happy to help! \ No newline at end of file +Finally, if you are struggling with anything, join our [Discord](https://discord.gg/cyAszAh). We are happy to help! 
diff --git a/book/src/setup.md b/book/src/setup.md index a1febe4a0..62580ac1b 100644 --- a/book/src/setup.md +++ b/book/src/setup.md @@ -9,9 +9,9 @@ particularly useful for development but still a good way to ensure you have the base dependencies. The additional requirements for developers are: -- [`ganache v7`](https://github.com/trufflesuite/ganache). This is used to +- [`anvil`](https://github.com/foundry-rs/foundry/tree/master/anvil). This is used to simulate the execution chain during tests. You'll get failures during tests if you - don't have `ganache` available on your `PATH` or if ganache is older than v7. + don't have `anvil` available on your `PATH`. - [`cmake`](https://cmake.org/cmake/help/latest/command/install.html). Used by some dependencies. See [`Installation Guide`](./installation.md) for more info. - [`protoc`](https://github.com/protocolbuffers/protobuf/releases) required for diff --git a/book/src/slashing-protection.md b/book/src/slashing-protection.md index a60c8e36d..6e2ca65b4 100644 --- a/book/src/slashing-protection.md +++ b/book/src/slashing-protection.md @@ -21,8 +21,8 @@ and carefully to keep your validators safe. See the [Troubleshooting](#troublesh The database will be automatically created, and your validators registered with it when: -* Importing keys from another source (e.g. Launchpad, Teku, Prysm, `ethdo`). - See [the docs on importing keys](./validator-import-launchpad.md). +* Importing keys from another source (e.g. [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli/releases), Lodestar, Nimbus, Prysm, Teku, [ethdo](https://github.com/wealdtech/ethdo)). + See [import validator keys](./mainnet-validator.md#step-3-import-validator-keys-to-lighthouse). * Creating keys using Lighthouse itself (`lighthouse account validator create`) * Creating keys via the [validator client API](./api-vc.md). @@ -45,7 +45,7 @@ Examples of circumstances where the slashing protection database is effective ar your client to be imported into Lighthouse's slashing protection database. See [Import and Export](#import-and-export). * Misplacing `slashing_protection.sqlite` during a datadir change or migration between machines. - By default Lighthouse will refuse to start if it finds validator keys that are not registered + By default, Lighthouse will refuse to start if it finds validator keys that are not registered in the slashing protection database. Examples where it is **ineffective** are: @@ -54,7 +54,7 @@ Examples where it is **ineffective** are: clients (e.g. Lighthouse and Prysm) running on the same machine, two Lighthouse instances using different datadirs, or two clients on completely different machines (e.g. one on a cloud server and one running locally). You are responsible for ensuring that your validator keys are never - running simultaneously – the slashing protection DB **cannot protect you in this case**. + running simultaneously – the slashing protection database **cannot protect you in this case**. * Importing keys from another client without also importing voting history. * If you use `--init-slashing-protection` to recreate a missing slashing protection database. @@ -64,19 +64,22 @@ Lighthouse supports the slashing protection interchange format described in [EIP interchange file is a record of blocks and attestations signed by a set of validator keys – basically a portable slashing protection database! 
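For illustration, an EIP-3076 interchange file is a small JSON document that looks roughly like this (values abridged and illustrative):

```json
{
  "metadata": {
    "interchange_format_version": "5",
    "genesis_validators_root": "0x4b363db9..."
  },
  "data": [
    {
      "pubkey": "0xb845089a...",
      "signed_blocks": [
        { "slot": "81952", "signing_root": "0x4ff6f743..." }
      ],
      "signed_attestations": [
        { "source_epoch": "2290", "target_epoch": "3007", "signing_root": "0x587d6a4f..." }
      ]
    }
  ]
}
```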
-With your validator client stopped, you can import a `.json` interchange file from another client +To import a slashing protection database to Lighthouse, you first need to export your existing client's database. Instructions to export the slashing protection database for other clients are listed below: +- [Lodestar](https://chainsafe.github.io/lodestar/reference/cli/#validator-slashing-protection-export) +- [Nimbus](https://nimbus.guide/migration.html#2-export-slashing-protection-history) +- [Prysm](https://docs.prylabs.network/docs/wallet/slashing-protection#exporting-your-validators-slashing-protection-history) +- [Teku](https://docs.teku.consensys.net/HowTo/Prevent-Slashing#export-a-slashing-protection-file) + + +Once you have the slashing protection database from your existing client, you can now import the database to Lighthouse. With your validator client stopped, you can import a `.json` interchange file from another client using this command: ```bash lighthouse account validator slashing-protection import ``` -Instructions for exporting your existing client's database are out of scope for this document, -please check the other client's documentation for instructions. - When importing an interchange file, you still need to import the validator keystores themselves -separately, using the instructions for [importing keystores into -Lighthouse](./validator-import-launchpad.md). +separately, using the instructions for [import validator keys](./mainnet-validator.md#step-3-import-validator-keys-to-lighthouse). --- @@ -93,7 +96,7 @@ up to date. ### How Import Works -Since version 1.6.0 Lighthouse will ignore any slashable data in the import data and will safely +Since version 1.6.0, Lighthouse will ignore any slashable data in the import data and will safely update the low watermarks for blocks and attestations. It will store only the maximum-slot block for each validator, and the maximum source/target attestation. This is faster than importing all data while also being more resilient to repeated imports & stale data. @@ -121,7 +124,7 @@ Oct 12 14:41:26.415 CRIT Failed to start validator client reason: Failed Ensure that `slashing_protection.sqlite` is in "/home/karlm/.lighthouse/mainnet/validators" folder ``` -Usually this indicates that during some manual intervention the slashing database has been +Usually this indicates that during some manual intervention, the slashing database has been misplaced. This error can also occur if you have upgraded from Lighthouse v0.2.x to v0.3.x without moving the slashing protection database. If you have imported your keys into a new node, you should never see this error (see [Initialization](#initialization)). @@ -137,7 +140,7 @@ the Lighthouse validator client with the `--init-slashing-protection` flag. This dangerous and should not be used lightly, and we **strongly recommend** you try finding your old slashing protection database before using it. If you do decide to use it, you should wait at least 1 epoch (~7 minutes) from when your validator client was last actively signing -messages. If you suspect your node experienced a clock drift issue you should wait +messages. If you suspect your node experienced a clock drift issue, you should wait longer. Remember that the inactivity penalty for being offline for even a day or so is approximately equal to the rewards earned in a day. You will get slashed if you use `--init-slashing-protection` incorrectly. 
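As a precaution before any manual datadir intervention (e.g., migrating to a new machine), you can export Lighthouse's own slashing protection history to an interchange file using the corresponding export subcommand (the filename here is illustrative):

```bash
lighthouse account validator slashing-protection export slashing-protection-backup.json
```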
diff --git a/book/src/suggested-fee-recipient.md b/book/src/suggested-fee-recipient.md
index c1739aa93..f3ece8506 100644
--- a/book/src/suggested-fee-recipient.md
+++ b/book/src/suggested-fee-recipient.md
@@ -1,14 +1,12 @@
# Suggested Fee Recipient
The _fee recipient_ is an Ethereum address nominated by a beacon chain validator to receive
-tips from user transactions. If you run validators on a network that has already merged
-or is due to merge soon then you should nominate a fee recipient for your validators.
+tips from user transactions. Given that mainnet and all testnets have gone through [The Merge](https://ethereum.org/en/roadmap/merge/), if you run validators on any network, we strongly recommend that you nominate a fee recipient for your validators. Failing to nominate a fee recipient will result in losing the tips from transactions.
## Background
During post-merge block production, the Beacon Node (BN) will provide a `suggested_fee_recipient` to
-the execution node. This is a 20-byte Ethereum address which the EL might choose to set as the
-coinbase and the recipient of other fees or rewards.
+the execution node. This is a 20-byte Ethereum address which the execution node might choose to set as the recipient of other fees or rewards.
There is no guarantee that an execution node will use the `suggested_fee_recipient` to collect fees,
it may use any address it chooses. It is assumed that an honest execution node *will* use the
@@ -189,4 +187,4 @@ accumulates other staking rewards. The reason for this is that transaction fees
validated by the execution node, and therefore need to be paid to an address that exists on the
execution chain. Validators use BLS keys which do not correspond to Ethereum addresses, so they have
no "presence" on the execution chain. Therefore, it's necessary for each validator to nominate
-a separate fee recipient address.
+a fee recipient address.
diff --git a/book/src/testnet-validator.md b/book/src/testnet-validator.md
deleted file mode 100644
index 98ba66c24..000000000
--- a/book/src/testnet-validator.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Become a Testnet Validator
-
-[mainnet-validator]: ./mainnet-validator.md
-[prater-launchpad]: https://prater.launchpad.ethereum.org/
-
-Joining an Ethereum consensus testnet is a great way to get familiar with staking in Phase 0. All users should
-experiment with a testnet prior to staking mainnet ETH.
-
-To join a testnet, you can follow the [Become an Ethereum consensus Mainnet Validator][mainnet-validator]
-instructions but with a few differences:
-
-1. Use the appropriate Staking launchpad website:
-  - [Prater][prater-launchpad]
-1. Instead of `--network mainnet`, use the appropriate network flag:
-  - `--network prater`: Prater.
-1. Use a Goerli execution node instead of a mainnet one:
-  - For Geth, this means using `geth --goerli --http`.
-1. Notice that Lighthouse will store its files in a different directory by default:
-  - `~/.lighthouse/prater`: Prater.
-
->
-> **Never use real ETH to join a testnet!** All of the testnets listed here use Goerli ETH which is
-> basically worthless. This allows experimentation without real-world costs.
diff --git a/book/src/ui-authentication.md b/book/src/ui-authentication.md
new file mode 100644
index 000000000..0572824d5
--- /dev/null
+++ b/book/src/ui-authentication.md
@@ -0,0 +1,33 @@
+# Authentication
+
+To enhance the security of your account, we offer the option to set a session password.
This allows the user to avoid re-entering the api-token when performing critical mutating operations on the validator. Instead a user can simply enter their session password. In the absence of a session password, Siren will revert to the api-token specified in your configuration settings as the default security measure. + +> This does not protect your validators from unauthorized device access. + +![](imgs/ui-session-auth.png) + +Session passwords must contain at least: + +- 12 characters +- 1 lowercase letter +- 1 uppercase letter +- 1 number +- 1 special character + + +## Protected Actions + +Prior to executing any sensitive validator action, Siren will request authentication of the session password or api-token. + +![](imgs/ui-exit.png) + + +In the event of three consecutive failed attempts, Siren will initiate a security measure by locking all actions and prompting for configuration settings to be renewed to regain access to these features. + +![](imgs/ui-fail-auth.png) + +## Auto Connect + +In the event that auto-connect is enabled, refreshing the Siren application will result in a prompt to authenticate the session password or api-token. If three consecutive authentication attempts fail, Siren will activate a security measure by locking the session and prompting for configuration settings to be reset to regain access. + +![](imgs/ui-autoconnect-auth.png) \ No newline at end of file diff --git a/book/src/validator-create.md b/book/src/validator-create.md deleted file mode 100644 index f13c449b9..000000000 --- a/book/src/validator-create.md +++ /dev/null @@ -1,90 +0,0 @@ -# Create a validator - -[launchpad]: https://launchpad.ethereum.org/ - -> -> **Note: we recommend using the [Staking launchpad][launchpad] to create validators.** - -Validators are fundamentally represented by a BLS keypair. In Lighthouse, we -use a [wallet](./wallet-create.md) to generate these keypairs. Once a wallet -exists, the `lighthouse account validator create` command is used to generate -the BLS keypair and all necessary information to submit a validator deposit and -have that validator operate in the `lighthouse validator_client`. - -## Usage - -To create a validator from a [wallet](./wallet-create.md), use the `lighthouse -account validator create` command: - -```bash -lighthouse account validator create --help - -Creates new validators from an existing EIP-2386 wallet using the EIP-2333 HD key derivation scheme. - -USAGE: - lighthouse account_manager validator create [FLAGS] [OPTIONS] - -FLAGS: - -h, --help Prints help information - --stdin-inputs If present, read all user inputs from stdin instead of tty. - --store-withdrawal-keystore If present, the withdrawal keystore will be stored alongside the voting keypair. - It is generally recommended to *not* store the withdrawal key and instead - generate them from the wallet seed when required. - -V, --version Prints version information - -OPTIONS: - --at-most - Observe the number of validators in --validator-dir, only creating enough to reach the given count. Never - deletes an existing validator. - --count - The number of validators to create, regardless of how many already exist - - -d, --datadir - Used to specify a custom root data directory for lighthouse keys and databases. Defaults to - $HOME/.lighthouse/{network} where network is the value of the `network` flag Note: Users should specify - separate custom datadirs for different networks. - --debug-level - The verbosity level for emitting logs. 
[default: info] [possible values: info, debug, trace, warn, error, - crit] - --deposit-gwei - The GWEI value of the deposit amount. Defaults to the minimum amount required for an active validator - (MAX_EFFECTIVE_BALANCE) - --network - Name of the Eth2 chain Lighthouse will sync and follow. [default: mainnet] [possible values: prater, mainnet] - --secrets-dir - The path where the validator keystore passwords will be stored. Defaults to ~/.lighthouse/{network}/secrets - - -s, --spec - This flag is deprecated, it will be disallowed in a future release. This value is now derived from the - --network or --testnet-dir flags. - -t, --testnet-dir - Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective - if there is no existing database. - --wallet-name Use the wallet identified by this name - --wallet-password - A path to a file containing the password which will unlock the wallet. - - --wallets-dir - A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/{network}/wallets -``` - -## Example - -The example assumes that the `wally` wallet was generated from the -[wallet](./wallet-create.md) example. - -```bash -lighthouse --network prater account validator create --wallet-name wally --wallet-password wally.pass --count 1 -``` - -This command will: - -- Derive a single new BLS keypair from wallet `wally` in `~/.lighthouse/{network}/wallets`, updating it so that it generates a - new key next time. -- Create a new directory in `~/.lighthouse/{network}/validators` containing: - - An encrypted keystore containing the validators voting keypair. - - An `eth1_deposit_data.rlp` assuming the default deposit amount (`32 ETH` - for most testnets and mainnet) which can be submitted to the deposit - contract for the Prater testnet. Other testnets can be set via the - `--network` CLI param. -- Store a password to the validators voting keypair in `~/.lighthouse/{network}/secrets`. diff --git a/book/src/validator-doppelganger.md b/book/src/validator-doppelganger.md index d880cce0a..6eaddcc7b 100644 --- a/book/src/validator-doppelganger.md +++ b/book/src/validator-doppelganger.md @@ -16,8 +16,7 @@ achieves this by staying silent for 2-3 epochs after a validator is started so i other instances of that validator before starting to sign potentially slashable messages. > Note: Doppelganger Protection is not yet interoperable, so if it is configured on a Lighthouse -> validator client, the client must be connected to a Lighthouse beacon node. Because Infura -> uses Teku, Lighthouse's Doppelganger Protection cannot yet be used with Infura's Eth2 service. +> validator client, the client must be connected to a Lighthouse beacon node. ## Initial Considerations @@ -30,9 +29,9 @@ is no guarantee that your Beacon Node (BN) will see messages from it. **It is fe doppelganger protection to fail to detect another validator due to network faults or other common circumstances.** -DP should be considered a last-line-of-defence that *might* save a validator from being slashed due +DP should be considered as a last-line-of-defence that *might* save a validator from being slashed due to operator error (i.e. running two instances of the same validator). Users should -*never* rely upon DP and should practice the same caution with regards to duplicating validators as +*never* rely upon DP and should practice the same caution with regard to duplicating validators as if it did not exist. 
**Remember: even with doppelganger protection enabled, it is not safe to run two instances of the @@ -44,7 +43,7 @@ DP works by staying silent on the network for 2-3 epochs before starting to sign Staying silent and refusing to sign messages will cause the following: - 2-3 missed attestations, incurring penalties and missed rewards. -- 2-3 epochs of missed sync committee contributions (if the validator is in a sync committee, which is unlikely), incurring penalties and missed rewards (post-Altair upgrade only). +- 2-3 epochs of missed sync committee contributions (if the validator is in a sync committee, which is unlikely), incurring penalties and missed rewards. - Potentially missed rewards by missing a block proposal (if the validator is an elected block proposer, which is unlikely). @@ -105,7 +104,7 @@ there is no other instance of that validator running elsewhere!** The steps to solving a doppelganger vary depending on the case, but some places to check are: 1. Is there another validator process running on this host? - - Unix users can check `ps aux | grep lighthouse` + - Unix users can check by running the command `ps aux | grep lighthouse` - Windows users can check the Task Manager. 1. Has this validator recently been moved from another host? Check to ensure it's not running. 1. Has this validator been delegated to a staking service? diff --git a/book/src/validator-import-launchpad.md b/book/src/validator-import-launchpad.md deleted file mode 100644 index 9849b91b7..000000000 --- a/book/src/validator-import-launchpad.md +++ /dev/null @@ -1,111 +0,0 @@ -# Importing from the Ethereum Staking Launch pad - -The [Staking Launchpad](https://github.com/ethereum/eth2.0-deposit) is a website -from the Ethereum Foundation which guides users how to use the -[`eth2.0-deposit-cli`](https://github.com/ethereum/eth2.0-deposit-cli) -command-line program to generate consensus validator keys. - -The keys that are generated from `eth2.0-deposit-cli` can be easily loaded into -a Lighthouse validator client (`lighthouse vc`). In fact, both of these -programs are designed to work with each other. - -This guide will show the user how to import their keys into Lighthouse so they -can perform their duties as a validator. The guide assumes the user has already -[installed Lighthouse](./installation.md). - -## Instructions - -Whilst following the steps on the website, users are instructed to download the -[`eth2.0-deposit-cli`](https://github.com/ethereum/eth2.0-deposit-cli) -repository. This `eth2-deposit-cli` script will generate the validator BLS keys -into a `validator_keys` directory. We assume that the user's -present-working-directory is the `eth2-deposit-cli` repository (this is where -you will be if you just ran the `./deposit.sh` script from the Staking Launch pad -website). If this is not the case, simply change the `--directory` to point to -the `validator_keys` directory. - -Now, assuming that the user is in the `eth2-deposit-cli` directory and they're -using the default (`~/.lighthouse/{network}/validators`) `validators` directory (specify a different one using -`--validators-dir` flag), they can follow these steps: - -### 1. Run the `lighthouse account validator import` command. 
- -Docker users should use the command from the [Docker](#docker) -section, all other users can use: - - -```bash -lighthouse --network mainnet account validator import --directory validator_keys -``` - -Note: The user must specify the consensus client network that they are importing the keys for using the `--network` flag. - - -After which they will be prompted for a password for each keystore discovered: - -``` -Keystore found at "validator_keys/keystore-m_12381_3600_0_0_0-1595406747.json": - - - Public key: 0xa5e8702533f6d66422e042a0bf3471ab9b302ce115633fa6fdc5643f804b6b4f1c33baf95f125ec21969a3b1e0dd9e56 - - UUID: 8ea4cf99-8719-43c5-9eda-e97b8a4e074f - -If you enter a password it will be stored in validator_definitions.yml so that it is not required each time the validator client starts. - -Enter a password, or press enter to omit a password: -``` - -The user can choose whether or not they'd like to store the validator password -in the [`validator_definitions.yml`](./validator-management.md) file. If the -password is *not* stored here, the validator client (`lighthouse vc`) -application will ask for the password each time it starts. This might be nice -for some users from a security perspective (i.e., if it is a shared computer), -however it means that if the validator client restarts, the user will be liable -to off-line penalties until they can enter the password. If the user trusts the -computer that is running the validator client and they are seeking maximum -validator rewards, we recommend entering a password at this point. - -Once the process is done the user will see: - -``` -Successfully imported keystore. -Successfully updated validator_definitions.yml. - -Successfully imported 1 validators (0 skipped). - -WARNING: DO NOT USE THE ORIGINAL KEYSTORES TO VALIDATE WITH ANOTHER CLIENT, OR YOU WILL GET SLASHED.. -``` - -The import process is complete! - -### 2. Run the `lighthouse vc` command. - -Now the keys are imported the user can start performing their validator duties -by running `lighthouse vc` and checking that their validator public key appears -as a `voting_pubkey` in one of the following logs: - -``` -INFO Enabled validator voting_pubkey: 0xa5e8702533f6d66422e042a0bf3471ab9b302ce115633fa6fdc5643f804b6b4f1c33baf95f125ec21969a3b1e0dd9e56 -``` - -Once this log appears (and there are no errors) the `lighthouse vc` application -will ensure that the validator starts performing its duties and being rewarded -by the protocol. There is no more input required from the user. - -## Docker - -The `import` command is a little more complex for Docker users, but the example -in this document can be substituted with: - -```bash -docker run -it \ - -v $HOME/.lighthouse:/root/.lighthouse \ - -v $(pwd)/validator_keys:/root/validator_keys \ - sigp/lighthouse \ - lighthouse --network MY_NETWORK account validator import --directory /root/validator_keys -``` - -Here we use two `-v` volumes to attach: - -- `~/.lighthouse` on the host to `/root/.lighthouse` in the Docker container. -- The `validator_keys` directory in the present working directory of the host - to the `/root/validator_keys` directory of the Docker container. diff --git a/book/src/validator-management.md b/book/src/validator-management.md index b7d4442de..be34fef2c 100644 --- a/book/src/validator-management.md +++ b/book/src/validator-management.md @@ -1,10 +1,10 @@ # Validator Management The `lighthouse vc` command starts a *validator client* instance which connects -to a beacon node performs the duties of a staked validator. 
+to a beacon node to perform the duties of a staked validator. This document provides information on how the validator client discovers the -validators it will act for and how it should obtain their cryptographic +validators it will act for and how it obtains their cryptographic signatures. Users that create validators using the `lighthouse account` tool in the @@ -49,7 +49,7 @@ Each permitted field of the file is listed below for reference: - `enabled`: A `true`/`false` indicating if the validator client should consider this validator "enabled". - `voting_public_key`: A validator public key. -- `type`: How the validator signs messages (currently restricted to `local_keystore`). +- `type`: How the validator signs messages (this can be `local_keystore` or `web3signer` (see [Web3Signer](./validator-web3signer.md))). - `voting_keystore_path`: The path to a EIP-2335 keystore. - `voting_keystore_password_path`: The path to the password for the EIP-2335 keystore. - `voting_keystore_password`: The password to the EIP-2335 keystore. @@ -59,7 +59,7 @@ Each permitted field of the file is listed below for reference: ## Populating the `validator_definitions.yml` file -When validator client starts and the `validator_definitions.yml` file doesn't +When a validator client starts and the `validator_definitions.yml` file doesn't exist, a new file will be created. If the `--disable-auto-discover` flag is provided, the new file will be empty and the validator client will not start any validators. If the `--disable-auto-discover` flag is **not** provided, an @@ -71,7 +71,7 @@ recap: ### Automatic validator discovery -When the `--disable-auto-discover` flag is **not** provided, the validator will search the +When the `--disable-auto-discover` flag is **not** provided, the validator client will search the `validator-dir` for validators and add any *new* validators to the `validator_definitions.yml` with `enabled: true`. @@ -89,7 +89,7 @@ name identical to the `voting_public_key` value. #### Discovery Example -Lets assume the following directory structure: +Let's assume the following directory structure: ``` ~/.lighthouse/{network}/validators @@ -158,7 +158,7 @@ start. If a validator client were to start using the [first example `validator_definitions.yml` file](#example) it would print the following log, -acknowledging there there are two validators and one is disabled: +acknowledging there are two validators and one is disabled: ``` INFO Initialized validators enabled: 1, disabled: 1 @@ -180,8 +180,8 @@ should not be opened by another process. 1. Proceed to act for that validator, creating blocks and attestations if/when required. If there is an error during any of these steps (e.g., a file is missing or -corrupt) the validator client will log an error and continue to attempt to +corrupt), the validator client will log an error and continue to attempt to process other validators. -When the validator client exits (or the validator is deactivated) it will +When the validator client exits (or the validator is deactivated), it will remove the `voting-keystore.json.lock` to indicate that the keystore is free for use again. diff --git a/book/src/validator-monitoring.md b/book/src/validator-monitoring.md index 9074bc027..893ec90bd 100644 --- a/book/src/validator-monitoring.md +++ b/book/src/validator-monitoring.md @@ -38,7 +38,7 @@ minutes after start up. 
#### Example ``` -lighthouse bn --staking --validator-monitor-auto +lighthouse bn --http --validator-monitor-auto ``` ### Manual diff --git a/book/src/voluntary-exit.md b/book/src/voluntary-exit.md index 5056040e4..d90395c07 100644 --- a/book/src/voluntary-exit.md +++ b/book/src/voluntary-exit.md @@ -1,7 +1,7 @@ -# Voluntary exits +# Voluntary Exits (Full Withdrawals) A validator may choose to voluntarily stop performing duties (proposing blocks and attesting to blocks) by submitting -a voluntary exit transaction to the beacon chain. +a voluntary exit message to the beacon chain. A validator can initiate a voluntary exit provided that the validator is currently active, has not been slashed and has been active for at least 256 epochs (~27 hours) since it has been activated. @@ -10,24 +10,15 @@ A validator can initiate a voluntary exit provided that the validator is current It takes at a minimum 5 epochs (32 minutes) for a validator to exit after initiating a voluntary exit. This number can be much higher depending on how many other validators are queued to exit. -## Withdrawal of exited funds - -Even though users can currently perform a voluntary exit, they **cannot withdraw their exited funds at this point in time**. -This implies that the staked funds are effectively **frozen** until withdrawals are enabled in a future hard fork (Capella). - -To understand the rollout strategy for Ethereum upgrades, please visit . - - - ## Initiating a voluntary exit In order to initiate an exit, users can use the `lighthouse account validator exit` command. -- The `--keystore` flag is used to specify the path to the EIP-2335 voting keystore for the validator. +- The `--keystore` flag is used to specify the path to the EIP-2335 voting keystore for the validator. The path should point directly to the validator key `.json` file, _not_ the folder containing the `.json` file. - The `--beacon-node` flag is used to specify a beacon chain HTTP endpoint that conforms to the [Beacon Node API](https://ethereum.github.io/beacon-APIs/) specifications. That beacon node will be used to validate and propagate the voluntary exit. The default value for this flag is `http://localhost:5052`. -- The `--network` flag is used to specify a particular Eth2 network (default is `mainnet`). +- The `--network` flag is used to specify the network (default is `mainnet`). - The `--password-file` flag is used to specify the path to the file containing the password for the voting keystore. If this flag is not provided, the user will be prompted to enter the password. @@ -39,13 +30,13 @@ The exit phrase is the following: -Below is an example for initiating a voluntary exit on the Prater testnet. +Below is an example for initiating a voluntary exit on the Goerli testnet. ``` -$ lighthouse --network prater account validator exit --keystore /path/to/keystore --beacon-node http://localhost:5052 +$ lighthouse --network goerli account validator exit --keystore /path/to/keystore --beacon-node http://localhost:5052 Running account manager for Prater network -validator-dir path: ~/.lighthouse/prater/validators +validator-dir path: ~/.lighthouse/goerli/validators Enter the keystore password for validator in 0xabcd Publishing a voluntary exit for validator 0xabcd WARNING: WARNING: THIS IS AN IRREVERSIBLE OPERATION -WARNING: WITHDRAWING STAKED ETH WILL NOT BE POSSIBLE UNTIL ETH1/ETH2 MERGE. + PLEASE VISIT https://lighthouse-book.sigmaprime.io/voluntary-exit.html TO MAKE SURE YOU UNDERSTAND THE IMPLICATIONS OF A VOLUNTARY EXIT.
@@ -70,3 +61,47 @@ Please keep your validator running till exit epoch Exit epoch in approximately 1920 secs ``` +## Full withdrawal of staked funds + +After the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12th April 2023, if a user initiates a voluntary exit, the full staked funds will be sent to the withdrawal address, provided that the validator has withdrawal credentials of type `0x01`. For more information on how fund withdrawal works, please visit the [Ethereum.org](https://ethereum.org/en/staking/withdrawals/#how-do-withdrawals-work) website. + +## FAQ + +### 1. How do I know if my validator has withdrawal credentials of type `0x01`? + +There are two types of withdrawal credentials, `0x00` and `0x01`. To check which type your validator has, go to the [Staking launchpad](https://launchpad.ethereum.org/en/withdrawals), enter your validator index and click `verify on mainnet`: + + - `withdrawals enabled` means your validator is of type `0x01`, and you will automatically receive the full withdrawal to the withdrawal address that you set. +- `withdrawals not enabled` means your validator is of type `0x00`, and you will need to update your withdrawal credentials from type `0x00` to type `0x01` (also known as a BLS-to-execution-change, or BTEC) to receive the staked funds. The common way to do this is using the `Staking deposit CLI` or `ethdo`, with the instructions available [here](https://launchpad.ethereum.org/en/withdrawals#update-your-keys). + + +### 2. What if my validator is of type `0x00` and I do not update my withdrawal credentials after I initiated a voluntary exit? + + Your staked funds will continue to be locked on the beacon chain. You can update your withdrawal credentials **anytime**, and there is no deadline for that. However, as long as you do not update your withdrawal credentials, your staked funds will remain locked on the beacon chain. Only after you update the withdrawal credentials will the staked funds be withdrawn to the withdrawal address. + +### 3. How many times can I update my withdrawal credentials? + + If your withdrawal credentials are of type `0x00`, you can only update them once to type `0x01`. It is therefore very important to ensure that the withdrawal address you set is an address under your control, preferably an address controlled by a hardware wallet. + + If your withdrawal credentials are of type `0x01`, it means you have already set your withdrawal address, and you will not be able to change the withdrawal address. + +### 4. When will my BTEC request (update withdrawal credentials to type `0x01`) be processed? + + Your BTEC request will be included very quickly as soon as a new block is proposed. This should be the case most (if not all) of the time, given that the peak BTEC request period has now passed (it occurred right after the [Capella](https://ethereum.org/en/history/#capella) upgrade on 12th April 2023 and lasted for ~2 days). + +### 5. When will I get my staked funds after a voluntary exit if my validator is of type `0x01`? + + There are 3 waiting periods until you get the staked funds in your withdrawal address: + + - An exit queue: a varying time that takes at a minimum 5 epochs (32 minutes) if there is no queue; or if there are many validators exiting at the same time, it has to go through the exit queue. The exit queue can be from hours to weeks, depending on the number of validators in the exit queue. During this time your validator has to stay online to perform its duties to avoid penalties.
+ + - A fixed waiting period of 256 epochs (27.3 hours) for the validator's status to become withdrawable. + + - A varying time of "validator sweep" that can take up to 5 days (at the time of writing with ~560,000 validators on the mainnet). The "validator sweep" is the process of skimming through all validators by index number for eligible withdrawals (those with type `0x01` and balance above 32ETH). Once the "validator sweep" reaches your validator's index, your staked fund will be fully withdrawn to the withdrawal address set. + + The total time taken is the summation of the above 3 waiting periods. After these waiting periods, you will receive the staked funds in your withdrawal address. + +The voluntary exit and full withdrawal process is summarized in the Figure below. + +![full](./imgs/full-withdrawal.png) + diff --git a/book/src/wallet-create.md b/book/src/wallet-create.md deleted file mode 100644 index 25cac8d34..000000000 --- a/book/src/wallet-create.md +++ /dev/null @@ -1,74 +0,0 @@ -# Create a wallet - -[launchpad]: https://launchpad.ethereum.org/ - -> -> **Note: we recommend using the [Staking launchpad][launchpad] to create validators.** - -A wallet allows for generating practically unlimited validators from an -easy-to-remember 24-word string (a mnemonic). As long as that mnemonic is -backed up, all validator keys can be trivially re-generated. - -The 24-word string is randomly generated during wallet creation and printed out -to the terminal. It's important to **make one or more backups of the mnemonic** -to ensure your ETH is not lost in the case of data loss. It is very important to -**keep your mnemonic private** as it represents the ultimate control of your -ETH. - -Whilst the wallet stores the mnemonic, it does not store it in plain-text: the -mnemonic is encrypted with a password. It is the responsibility of the user to -define a strong password. The password is only required for interacting with -the wallet, it is not required for recovering keys from a mnemonic. - -## Usage - -To create a wallet, use the `lighthouse account wallet` command: - -```bash -lighthouse account wallet create --help - -Creates a new HD (hierarchical-deterministic) EIP-2386 wallet. - -USAGE: - lighthouse account_manager wallet create [OPTIONS] --name --password-file - -FLAGS: - -h, --help Prints help information - -V, --version Prints version information - -OPTIONS: - -d, --datadir Data directory for lighthouse keys and databases. - --mnemonic-output-path - If present, the mnemonic will be saved to this file. DO NOT SHARE THE MNEMONIC. - - --name - The wallet will be created with this name. It is not allowed to create two wallets with the same name for - the same --base-dir. - --password-file - A path to a file containing the password which will unlock the wallet. If the file does not exist, a random - password will be generated and saved at that path. To avoid confusion, if the file does not already exist it - must include a '.pass' suffix. - -t, --testnet-dir - Path to directory containing eth2_testnet specs. Defaults to a hard-coded Lighthouse testnet. Only effective - if there is no existing database. - --type - The type of wallet to create. Only HD (hierarchical-deterministic) wallets are supported presently.. 
- [default: hd] [possible values: hd] -``` - - -## Example - -Creates a new wallet named `wally` and saves it in `~/.lighthouse/prater/wallets` with a randomly generated password saved -to `./wallet.pass`: - -```bash -lighthouse --network prater account wallet create --name wally --password-file wally.pass -``` - -> Notes: -> -> - The password is not `wally.pass`, it is the _contents_ of the -> `wally.pass` file. -> - If `wally.pass` already exists the wallet password will be set to contents -> of that file. diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 081ab285e..5c2dc9b4f 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "4.1.0" +version = "4.2.0" authors = ["Sigma Prime "] edition = "2021" @@ -10,7 +10,7 @@ clap = "2.33.3" clap_utils = { path = "../common/clap_utils" } lighthouse_network = { path = "../beacon_node/lighthouse_network" } types = { path = "../consensus/types" } -eth2_ssz = "0.4.1" +ethereum_ssz = "0.5.0" slog = "2.5.2" tokio = "1.14.0" log = "0.4.11" diff --git a/common/clap_utils/Cargo.toml b/common/clap_utils/Cargo.toml index 62eb8aa3d..a882b7ce6 100644 --- a/common/clap_utils/Cargo.toml +++ b/common/clap_utils/Cargo.toml @@ -11,7 +11,7 @@ clap = "2.33.3" hex = "0.4.2" dirs = "3.0.1" eth2_network_config = { path = "../eth2_network_config" } -eth2_ssz = "0.4.1" +ethereum_ssz = "0.5.0" ethereum-types = "0.14.1" serde = "1.0.116" serde_json = "1.0.59" diff --git a/common/deposit_contract/Cargo.toml b/common/deposit_contract/Cargo.toml index 7be0e8f3d..aabc07fc5 100644 --- a/common/deposit_contract/Cargo.toml +++ b/common/deposit_contract/Cargo.toml @@ -14,6 +14,6 @@ hex = "0.4.2" [dependencies] types = { path = "../../consensus/types"} -eth2_ssz = "0.4.1" -tree_hash = "0.4.1" +ethereum_ssz = "0.5.0" +tree_hash = "0.5.0" ethabi = "16.0.0" diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index eca086d83..2c5e7060b 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -13,15 +13,15 @@ types = { path = "../../consensus/types" } reqwest = { version = "0.11.0", features = ["json","stream"] } lighthouse_network = { path = "../../beacon_node/lighthouse_network" } proto_array = { path = "../../consensus/proto_array", optional = true } -eth2_serde_utils = "0.1.1" +ethereum_serde_utils = "0.5.0" eth2_keystore = { path = "../../crypto/eth2_keystore" } libsecp256k1 = "0.7.0" ring = "0.16.19" bytes = "1.0.1" account_utils = { path = "../../common/account_utils" } sensitive_url = { path = "../../common/sensitive_url" } -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" futures-util = "0.3.8" futures = "0.3.8" store = { path = "../../beacon_node/store", optional = true } diff --git a/common/eth2/src/lighthouse/attestation_rewards.rs b/common/eth2/src/lighthouse/attestation_rewards.rs index 314ffb851..bebd1c661 100644 --- a/common/eth2/src/lighthouse/attestation_rewards.rs +++ b/common/eth2/src/lighthouse/attestation_rewards.rs @@ -6,32 +6,32 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] pub struct IdealAttestationRewards { // Validator's effective balance in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub effective_balance: u64, // Ideal attester's reward for head vote in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub head: u64, // Ideal attester's reward for 
target vote in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub target: u64, // Ideal attester's reward for source vote in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub source: u64, } #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] pub struct TotalAttestationRewards { // one entry for every validator based on their attestations in the epoch - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, // attester's reward for head vote in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub head: u64, // attester's reward for target vote in gwei - #[serde(with = "eth2_serde_utils::quoted_i64")] + #[serde(with = "serde_utils::quoted_i64")] pub target: i64, // attester's reward for source vote in gwei - #[serde(with = "eth2_serde_utils::quoted_i64")] + #[serde(with = "serde_utils::quoted_i64")] pub source: i64, // TBD attester's inclusion_delay reward in gwei (phase0 only) // pub inclusion_delay: u64, diff --git a/common/eth2/src/lighthouse/standard_block_rewards.rs b/common/eth2/src/lighthouse/standard_block_rewards.rs index 502577500..15fcdc606 100644 --- a/common/eth2/src/lighthouse/standard_block_rewards.rs +++ b/common/eth2/src/lighthouse/standard_block_rewards.rs @@ -5,22 +5,22 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct StandardBlockReward { // proposer of the block, the proposer index who receives these rewards - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, // total block reward in gwei, // equal to attestations + sync_aggregate + proposer_slashings + attester_slashings - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub total: u64, // block reward component due to included attestations in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub attestations: u64, // block reward component due to included sync_aggregate in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub sync_aggregate: u64, // block reward component due to included proposer_slashings in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_slashings: u64, // block reward component due to included attester_slashings in gwei - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub attester_slashings: u64, } diff --git a/common/eth2/src/lighthouse/sync_committee_rewards.rs b/common/eth2/src/lighthouse/sync_committee_rewards.rs index e215d8e3e..66a721dc2 100644 --- a/common/eth2/src/lighthouse/sync_committee_rewards.rs +++ b/common/eth2/src/lighthouse/sync_committee_rewards.rs @@ -5,9 +5,9 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct SyncCommitteeReward { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, // sync committee reward in gwei for the validator - #[serde(with = "eth2_serde_utils::quoted_i64")] + #[serde(with = "serde_utils::quoted_i64")] pub reward: i64, } diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs 
index 90c128751..e576cfcb3 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -57,7 +57,7 @@ pub fn parse_pubkey(secret: &str) -> Result, Error> { &secret[SECRET_PREFIX.len()..] }; - eth2_serde_utils::hex::decode(secret) + serde_utils::hex::decode(secret) .map_err(|e| Error::InvalidSecret(format!("invalid hex: {:?}", e))) .and_then(|bytes| { if bytes.len() != PK_LEN { @@ -174,7 +174,7 @@ impl ValidatorClientHttpClient { let message = Message::parse_slice(digest(&SHA256, &body).as_ref()).expect("sha256 is 32 bytes"); - eth2_serde_utils::hex::decode(&sig) + serde_utils::hex::decode(&sig) .ok() .and_then(|bytes| { let sig = Signature::parse_der(&bytes).ok()?; diff --git a/common/eth2/src/lighthouse_vc/std_types.rs b/common/eth2/src/lighthouse_vc/std_types.rs index 887bcb99e..0d67df47a 100644 --- a/common/eth2/src/lighthouse_vc/std_types.rs +++ b/common/eth2/src/lighthouse_vc/std_types.rs @@ -13,7 +13,7 @@ pub struct GetFeeRecipientResponse { #[derive(Debug, Deserialize, Serialize, PartialEq)] pub struct GetGasLimitResponse { pub pubkey: PublicKeyBytes, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub gas_limit: u64, } @@ -45,7 +45,7 @@ pub struct ImportKeystoresRequest { #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] #[serde(transparent)] -pub struct KeystoreJsonStr(#[serde(with = "eth2_serde_utils::json_str")] pub Keystore); +pub struct KeystoreJsonStr(#[serde(with = "serde_utils::json_str")] pub Keystore); impl std::ops::Deref for KeystoreJsonStr { type Target = Keystore; @@ -56,7 +56,7 @@ impl std::ops::Deref for KeystoreJsonStr { #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] #[serde(transparent)] -pub struct InterchangeJsonStr(#[serde(with = "eth2_serde_utils::json_str")] pub Interchange); +pub struct InterchangeJsonStr(#[serde(with = "serde_utils::json_str")] pub Interchange); #[derive(Debug, Deserialize, Serialize)] pub struct ImportKeystoresResponse { @@ -103,7 +103,7 @@ pub struct DeleteKeystoresRequest { #[derive(Debug, Deserialize, Serialize)] pub struct DeleteKeystoresResponse { pub data: Vec>, - #[serde(with = "eth2_serde_utils::json_str")] + #[serde(with = "serde_utils::json_str")] pub slashing_protection: Interchange, } diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index fa5d4ae11..dd2ed0322 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -32,14 +32,14 @@ pub struct ValidatorRequest { #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] pub builder_proposals: Option, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub deposit_gwei: u64, } #[derive(Clone, PartialEq, Serialize, Deserialize)] pub struct CreateValidatorsMnemonicRequest { pub mnemonic: ZeroizeString, - #[serde(with = "eth2_serde_utils::quoted_u32")] + #[serde(with = "serde_utils::quoted_u32")] pub key_derivation_path_offset: u32, pub validators: Vec, } @@ -62,7 +62,7 @@ pub struct CreatedValidator { #[serde(skip_serializing_if = "Option::is_none")] pub builder_proposals: Option, pub eth1_deposit_tx_data: String, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub deposit_gwei: u64, } @@ -141,7 +141,7 @@ pub struct UpdateFeeRecipientRequest { #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] pub struct UpdateGasLimitRequest { - #[serde(with = 
"eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub gas_limit: u64, } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 36f80afd1..bee9b6f13 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -83,10 +83,10 @@ impl std::fmt::Display for EndpointVersion { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct GenesisData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub genesis_time: u64, pub genesis_validators_root: Hash256, - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] pub genesis_fork_version: [u8; 4], } @@ -317,9 +317,9 @@ impl fmt::Display for ValidatorId { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ValidatorData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub balance: u64, pub status: ValidatorStatus, pub validator: Validator, @@ -327,9 +327,9 @@ pub struct ValidatorData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ValidatorBalanceData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub balance: u64, } @@ -492,16 +492,16 @@ pub struct ValidatorsQuery { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct CommitteeData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, pub slot: Slot, - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] pub validators: Vec, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct SyncCommitteeByValidatorIndices { - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] pub validators: Vec, pub validator_aggregates: Vec, } @@ -514,7 +514,7 @@ pub struct RandaoMix { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(transparent)] pub struct SyncSubcommittee { - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] pub indices: Vec, } @@ -539,7 +539,7 @@ pub struct BlockHeaderData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct DepositContractData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub chain_id: u64, pub address: Address, } @@ -563,7 +563,7 @@ pub struct IdentityData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct MetaData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub seq_number: u64, pub attnets: String, pub syncnets: String, @@ -578,6 +578,7 @@ pub struct VersionData { pub struct SyncingData { pub is_syncing: bool, pub is_optimistic: Option, + pub el_offline: Option, pub head_slot: Slot, pub sync_distance: Slot, } @@ -650,27 +651,27 @@ pub struct ValidatorBalancesQuery { #[derive(Clone, Serialize, Deserialize)] #[serde(transparent)] -pub struct ValidatorIndexData(#[serde(with = "eth2_serde_utils::quoted_u64_vec")] pub Vec); +pub struct ValidatorIndexData(#[serde(with = "serde_utils::quoted_u64_vec")] pub Vec); /// Borrowed variant of `ValidatorIndexData`, for serializing/sending. 
#[derive(Clone, Copy, Serialize)] #[serde(transparent)] pub struct ValidatorIndexDataRef<'a>( - #[serde(serialize_with = "eth2_serde_utils::quoted_u64_vec::serialize")] pub &'a [u64], + #[serde(serialize_with = "serde_utils::quoted_u64_vec::serialize")] pub &'a [u64], ); #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct AttesterData { pub pubkey: PublicKeyBytes, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub committees_at_slot: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub committee_index: CommitteeIndex, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub committee_length: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_committee_index: u64, pub slot: Slot, } @@ -678,7 +679,7 @@ pub struct AttesterData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct ProposerData { pub pubkey: PublicKeyBytes, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, pub slot: Slot, } @@ -727,11 +728,11 @@ pub struct ValidatorAggregateAttestationQuery { #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] pub struct BeaconCommitteeSubscription { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub committee_index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub committees_at_slot: u64, pub slot: Slot, pub is_aggregator: bool, @@ -852,13 +853,13 @@ impl fmt::Display for PeerDirection { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct PeerCount { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub connected: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub connecting: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub disconnected: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub disconnecting: u64, } @@ -893,7 +894,7 @@ pub struct SseHead { #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct SseChainReorg { pub slot: Slot, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub depth: u64, pub old_head_block: Hash256, pub old_head_state: Hash256, @@ -926,7 +927,7 @@ pub struct SseLateHead { #[serde(untagged)] pub struct SsePayloadAttributes { #[superstruct(getter(copy))] - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub timestamp: u64, #[superstruct(getter(copy))] pub prev_randao: Hash256, @@ -939,10 +940,10 @@ pub struct SsePayloadAttributes { #[derive(PartialEq, Debug, Deserialize, Serialize, Clone)] pub struct SseExtendedPayloadAttributesGeneric { pub proposal_slot: Slot, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, pub parent_block_root: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = 
"serde_utils::quoted_u64")] pub parent_block_number: u64, pub parent_block_hash: ExecutionBlockHash, pub payload_attributes: T, @@ -1206,13 +1207,13 @@ fn parse_accept(accept: &str) -> Result, String> { #[derive(Debug, Serialize, Deserialize)] pub struct LivenessRequestData { pub epoch: Epoch, - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] pub indices: Vec, } #[derive(PartialEq, Debug, Serialize, Deserialize)] pub struct LivenessResponseData { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, pub epoch: Epoch, pub is_live: bool, @@ -1230,9 +1231,9 @@ pub struct ForkChoiceNode { pub slot: Slot, pub block_root: Hash256, pub parent_root: Option, - pub justified_epoch: Option, - pub finalized_epoch: Option, - #[serde(with = "eth2_serde_utils::quoted_u64")] + pub justified_epoch: Epoch, + pub finalized_epoch: Epoch, + #[serde(with = "serde_utils::quoted_u64")] pub weight: u64, pub validity: Option, pub execution_block_hash: Option, diff --git a/common/eth2_interop_keypairs/Cargo.toml b/common/eth2_interop_keypairs/Cargo.toml index 5f577bedc..7a376568e 100644 --- a/common/eth2_interop_keypairs/Cargo.toml +++ b/common/eth2_interop_keypairs/Cargo.toml @@ -9,7 +9,7 @@ edition = "2021" [dependencies] lazy_static = "1.4.0" num-bigint = "0.4.2" -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" hex = "0.4.2" serde_yaml = "0.8.13" serde = "1.0.116" diff --git a/common/eth2_interop_keypairs/src/lib.rs b/common/eth2_interop_keypairs/src/lib.rs index eb26f563e..7b5fa7a8e 100644 --- a/common/eth2_interop_keypairs/src/lib.rs +++ b/common/eth2_interop_keypairs/src/lib.rs @@ -20,7 +20,7 @@ extern crate lazy_static; use bls::{Keypair, PublicKey, SecretKey}; -use eth2_hashing::hash; +use ethereum_hashing::hash; use num_bigint::BigUint; use serde_derive::{Deserialize, Serialize}; use std::convert::TryInto; diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index 042bf9b1c..33f8ba035 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -18,6 +18,6 @@ serde_yaml = "0.8.13" serde_json = "1.0.58" types = { path = "../../consensus/types"} kzg = { path = "../../crypto/kzg" } -eth2_ssz = "0.4.1" +ethereum_ssz = "0.5.0" eth2_config = { path = "../eth2_config"} discv5 = "0.2.2" diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml index abb3b1250..f88fbc765 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/boot_enr.yaml @@ -1 +1,5 @@ +# EF Team - enr:-Iq4QMCTfIMXnow27baRUb35Q8iiFHSIDBJh6hQM5Axohhf4b6Kr_cOCu0htQ5WvVqKvFgY28893DHAg8gnBAXsAVqmGAX53x8JggmlkgnY0gmlwhLKAlv6Jc2VjcDI1NmsxoQK6S-Cii_KmfFdUJL2TANL3ksaKUnNXvTCv1tLwXs0QgIN1ZHCCIyk +- enr:-KG4QE5OIg5ThTjkzrlVF32WT_-XT14WeJtIz2zoTqLLjQhYAmJlnk4ItSoH41_2x0RX0wTFIe5GgjRzU2u7Q1fN4vADhGV0aDKQqP7o7pAAAHAyAAAAAAAAAIJpZIJ2NIJpcISlFsStiXNlY3AyNTZrMaEC-Rrd_bBZwhKpXzFCrStKp1q_HmGOewxY3KwM8ofAj_ODdGNwgiMog3VkcIIjKA +# Teku team (Consensys) +- enr:-Ly4QFoZTWR8ulxGVsWydTNGdwEESueIdj-wB6UmmjUcm-AOPxnQi7wprzwcdo7-1jBW_JxELlUKJdJES8TDsbl1EdNlh2F0dG5ldHOI__78_v2bsV-EZXRoMpA2-lATkAAAcf__________gmlkgnY0gmlwhBLYJjGJc2VjcDI1NmsxoQI0gujXac9rMAb48NtMqtSTyHIeNYlpjkbYpWJw46PmYYhzeW5jbmV0cw-DdGNwgiMog3VkcIIjKA diff --git a/common/lighthouse_version/src/lib.rs 
b/common/lighthouse_version/src/lib.rs index d30f45ca2..3f2745bf9 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v4.1.0-", - fallback = "Lighthouse/v4.1.0" + prefix = "Lighthouse/v4.2.0-", + fallback = "Lighthouse/v4.2.0" ); /// Returns `VERSION`, but with platform information appended to the end. diff --git a/common/logging/Cargo.toml b/common/logging/Cargo.toml index e56a1a235..b6179d9e7 100644 --- a/common/logging/Cargo.toml +++ b/common/logging/Cargo.toml @@ -10,6 +10,13 @@ test_logger = [] # Print log output to stderr when running tests instead of drop [dependencies] slog = "2.5.2" slog-term = "2.6.0" +tokio = { version = "1.26.0", features = ["sync"] } lighthouse_metrics = { path = "../lighthouse_metrics" } lazy_static = "1.4.0" sloggers = { version = "2.1.1", features = ["json"] } +slog-async = "2.7.0" +take_mut = "0.2.2" +parking_lot = "0.12.1" +serde = "1.0.153" +serde_json = "1.0.94" +chrono = "0.4.23" diff --git a/common/logging/src/async_record.rs b/common/logging/src/async_record.rs new file mode 100644 index 000000000..6f998c619 --- /dev/null +++ b/common/logging/src/async_record.rs @@ -0,0 +1,309 @@ +//! An object that can be used to pass through a channel and be cloned. It can therefore be used +//! via the broadcast channel. + +use parking_lot::Mutex; +use serde::ser::SerializeMap; +use serde::serde_if_integer128; +use serde::Serialize; +use slog::{BorrowedKV, Key, Level, OwnedKVList, Record, RecordStatic, Serializer, SingleKV, KV}; +use std::cell::RefCell; +use std::fmt; +use std::fmt::Write; +use std::sync::Arc; +use take_mut::take; + +thread_local! { + static TL_BUF: RefCell = RefCell::new(String::with_capacity(128)) +} + +/// Serialized record. +#[derive(Clone)] +pub struct AsyncRecord { + msg: String, + level: Level, + location: Box, + tag: String, + logger_values: OwnedKVList, + kv: Arc>, +} + +impl AsyncRecord { + /// Serializes a `Record` and an `OwnedKVList`. 
+ pub fn from(record: &Record, logger_values: &OwnedKVList) -> Self { + let mut ser = ToSendSerializer::new(); + record + .kv() + .serialize(record, &mut ser) + .expect("`ToSendSerializer` can't fail"); + + AsyncRecord { + msg: fmt::format(*record.msg()), + level: record.level(), + location: Box::new(*record.location()), + tag: String::from(record.tag()), + logger_values: logger_values.clone(), + kv: Arc::new(Mutex::new(ser.finish())), + } + } + + pub fn to_json_string(&self) -> Result { + serde_json::to_string(&self).map_err(|e| format!("{:?}", e)) + } +} + +pub struct ToSendSerializer { + kv: Box, +} + +impl ToSendSerializer { + fn new() -> Self { + ToSendSerializer { kv: Box::new(()) } + } + + fn finish(self) -> Box { + self.kv + } +} + +impl Serializer for ToSendSerializer { + fn emit_bool(&mut self, key: Key, val: bool) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_unit(&mut self, key: Key) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, ())))); + Ok(()) + } + fn emit_none(&mut self, key: Key) -> slog::Result { + let val: Option<()> = None; + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_char(&mut self, key: Key, val: char) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_u8(&mut self, key: Key, val: u8) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_i8(&mut self, key: Key, val: i8) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_u16(&mut self, key: Key, val: u16) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_i16(&mut self, key: Key, val: i16) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_u32(&mut self, key: Key, val: u32) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_i32(&mut self, key: Key, val: i32) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_f32(&mut self, key: Key, val: f32) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_u64(&mut self, key: Key, val: u64) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_i64(&mut self, key: Key, val: i64) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_f64(&mut self, key: Key, val: f64) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + #[cfg(integer128)] + fn emit_u128(&mut self, key: Key, val: u128) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + #[cfg(integer128)] + fn emit_i128(&mut self, key: Key, val: i128) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_usize(&mut self, key: Key, val: usize) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_isize(&mut self, key: Key, val: isize) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_str(&mut self, key: Key, val: &str) -> slog::Result { + let val = val.to_owned(); + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_arguments(&mut 
self, key: Key, val: &fmt::Arguments) -> slog::Result { + let val = fmt::format(*val); + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } +} + +impl Serialize for AsyncRecord { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + // Get the current time + let dt = chrono::Local::now().format("%b %e %T").to_string(); + + let rs = RecordStatic { + location: &self.location, + level: self.level, + tag: &self.tag, + }; + let mut map_serializer = SerdeSerializer::new(serializer)?; + + // Serialize the time and log level first + map_serializer.serialize_entry("time", &dt)?; + map_serializer.serialize_entry("level", self.level.as_short_str())?; + + let kv = self.kv.lock(); + + // Convoluted pattern to avoid binding `format_args!` to a temporary. + // See: https://stackoverflow.com/questions/56304313/cannot-use-format-args-due-to-temporary-value-is-freed-at-the-end-of-this-state + let mut f = |msg: std::fmt::Arguments| { + map_serializer.serialize_entry("msg", &msg.to_string())?; + + let record = Record::new(&rs, &msg, BorrowedKV(&(*kv))); + self.logger_values + .serialize(&record, &mut map_serializer) + .map_err(serde::ser::Error::custom)?; + record + .kv() + .serialize(&record, &mut map_serializer) + .map_err(serde::ser::Error::custom) + }; + f(format_args!("{}", self.msg))?; + map_serializer.end() + } +} + +struct SerdeSerializer { + /// Current state of map serializing: `serde::Serializer::MapState` + ser_map: S::SerializeMap, +} + +impl SerdeSerializer { + fn new(ser: S) -> Result { + let ser_map = ser.serialize_map(None)?; + Ok(SerdeSerializer { ser_map }) + } + + fn serialize_entry(&mut self, key: K, value: V) -> Result<(), S::Error> + where + K: serde::Serialize, + V: serde::Serialize, + { + self.ser_map.serialize_entry(&key, &value) + } + + /// Finish serialization, and return the serializer + fn end(self) -> Result { + self.ser_map.end() + } +} + +// NOTE: This is borrowed from slog_json +macro_rules! 
impl_m( + ($s:expr, $key:expr, $val:expr) => ({ + let k_s: &str = $key.as_ref(); + $s.ser_map.serialize_entry(k_s, $val) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, format!("serde serialization error: {}", e)))?; + Ok(()) + }); +); + +impl slog::Serializer for SerdeSerializer +where + S: serde::Serializer, +{ + fn emit_bool(&mut self, key: Key, val: bool) -> slog::Result { + impl_m!(self, key, &val) + } + + fn emit_unit(&mut self, key: Key) -> slog::Result { + impl_m!(self, key, &()) + } + + fn emit_char(&mut self, key: Key, val: char) -> slog::Result { + impl_m!(self, key, &val) + } + + fn emit_none(&mut self, key: Key) -> slog::Result { + let val: Option<()> = None; + impl_m!(self, key, &val) + } + fn emit_u8(&mut self, key: Key, val: u8) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_i8(&mut self, key: Key, val: i8) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_u16(&mut self, key: Key, val: u16) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_i16(&mut self, key: Key, val: i16) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_usize(&mut self, key: Key, val: usize) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_isize(&mut self, key: Key, val: isize) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_u32(&mut self, key: Key, val: u32) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_i32(&mut self, key: Key, val: i32) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_f32(&mut self, key: Key, val: f32) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_u64(&mut self, key: Key, val: u64) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_i64(&mut self, key: Key, val: i64) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_f64(&mut self, key: Key, val: f64) -> slog::Result { + impl_m!(self, key, &val) + } + serde_if_integer128! { + fn emit_u128(&mut self, key: Key, val: u128) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_i128(&mut self, key: Key, val: i128) -> slog::Result { + impl_m!(self, key, &val) + } + } + fn emit_str(&mut self, key: Key, val: &str) -> slog::Result { + impl_m!(self, key, &val) + } + fn emit_arguments(&mut self, key: Key, val: &fmt::Arguments) -> slog::Result { + TL_BUF.with(|buf| { + let mut buf = buf.borrow_mut(); + + buf.write_fmt(*val).unwrap(); + + let res = { || impl_m!(self, key, &*buf) }(); + buf.clear(); + res + }) + } +} diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index 85c425574..a9ad25f3f 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -11,6 +11,10 @@ use std::time::{Duration, Instant}; pub const MAX_MESSAGE_WIDTH: usize = 40; +pub mod async_record; +mod sse_logging_components; +pub use sse_logging_components::SSELoggingComponents; + /// The minimum interval between log messages indicating that a queue is full. const LOG_DEBOUNCE_INTERVAL: Duration = Duration::from_secs(30); diff --git a/common/logging/src/sse_logging_components.rs b/common/logging/src/sse_logging_components.rs new file mode 100644 index 000000000..244d09fbd --- /dev/null +++ b/common/logging/src/sse_logging_components.rs @@ -0,0 +1,46 @@ +//! This module provides an implementation of `slog::Drain` that optionally writes to a channel if +//! there are subscribers to a HTTP SSE stream. + +use crate::async_record::AsyncRecord; +use slog::{Drain, OwnedKVList, Record}; +use std::panic::AssertUnwindSafe; +use std::sync::Arc; +use tokio::sync::broadcast::Sender; + +/// Default log level for SSE Events. 
+// NOTE: Made this a constant. Debug level seems to be pretty intense. Can make this +// configurable later if needed. +const LOG_LEVEL: slog::Level = slog::Level::Info; + +/// The components required in the HTTP API task to receive logged events. +#[derive(Clone)] +pub struct SSELoggingComponents { + /// The channel to receive events from. + pub sender: Arc>>, +} + +impl SSELoggingComponents { + /// Create a new SSE drain. + pub fn new(channel_size: usize) -> Self { + let (sender, _receiver) = tokio::sync::broadcast::channel(channel_size); + + let sender = Arc::new(AssertUnwindSafe(sender)); + SSELoggingComponents { sender } + } +} + +impl Drain for SSELoggingComponents { + type Ok = (); + type Err = &'static str; + + fn log(&self, record: &Record, logger_values: &OwnedKVList) -> Result { + if record.level().is_at_least(LOG_LEVEL) { + // Attempt to send the logs + match self.sender.send(AsyncRecord::from(record, logger_values)) { + Ok(_num_sent) => {} // Everything got sent + Err(_err) => {} // There are no subscribers, do nothing + } + } + Ok(()) + } +} diff --git a/common/validator_dir/Cargo.toml b/common/validator_dir/Cargo.toml index 0eba4cf23..39a14e283 100644 --- a/common/validator_dir/Cargo.toml +++ b/common/validator_dir/Cargo.toml @@ -16,7 +16,7 @@ filesystem = { path = "../filesystem" } types = { path = "../../consensus/types" } rand = "0.8.5" deposit_contract = { path = "../deposit_contract" } -tree_hash = "0.4.1" +tree_hash = "0.5.0" hex = "0.4.2" derivative = "2.1.1" lockfile = { path = "../lockfile" } diff --git a/consensus/cached_tree_hash/Cargo.toml b/consensus/cached_tree_hash/Cargo.toml index 0e0ef0707..c2856003b 100644 --- a/consensus/cached_tree_hash/Cargo.toml +++ b/consensus/cached_tree_hash/Cargo.toml @@ -6,11 +6,11 @@ edition = "2021" [dependencies] ethereum-types = "0.14.1" -eth2_ssz_types = "0.2.2" -eth2_hashing = "0.3.0" -eth2_ssz_derive = "0.3.1" -eth2_ssz = "0.4.1" -tree_hash = "0.4.1" +ssz_types = "0.5.0" +ethereum_hashing = "1.0.0-beta.2" +ethereum_ssz_derive = "0.5.0" +ethereum_ssz = "0.5.0" +tree_hash = "0.5.0" smallvec = "1.6.1" [dev-dependencies] diff --git a/consensus/cached_tree_hash/src/cache.rs b/consensus/cached_tree_hash/src/cache.rs index edb60f306..3b4878503 100644 --- a/consensus/cached_tree_hash/src/cache.rs +++ b/consensus/cached_tree_hash/src/cache.rs @@ -1,7 +1,7 @@ use crate::cache_arena; use crate::SmallVec8; use crate::{Error, Hash256}; -use eth2_hashing::{hash32_concat, ZERO_HASHES}; +use ethereum_hashing::{hash32_concat, ZERO_HASHES}; use smallvec::smallvec; use ssz_derive::{Decode, Encode}; use tree_hash::BYTES_PER_CHUNK; diff --git a/consensus/cached_tree_hash/src/test.rs b/consensus/cached_tree_hash/src/test.rs index 244439ab3..69b49826b 100644 --- a/consensus/cached_tree_hash/src/test.rs +++ b/consensus/cached_tree_hash/src/test.rs @@ -1,6 +1,6 @@ use crate::impls::hash256_iter; use crate::{CacheArena, CachedTreeHash, Error, Hash256, TreeHashCache}; -use eth2_hashing::ZERO_HASHES; +use ethereum_hashing::ZERO_HASHES; use quickcheck_macros::quickcheck; use ssz_types::{ typenum::{Unsigned, U16, U255, U256, U257}, diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index f0381e5ad..3864d52d4 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -10,8 +10,8 @@ edition = "2021" types = { path = "../types" } state_processing = { path = "../state_processing" } proto_array = { path = "../proto_array" } -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" +ethereum_ssz = "0.5.0" 
+ethereum_ssz_derive = "0.5.0" slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } [dev-dependencies] diff --git a/consensus/merkle_proof/Cargo.toml b/consensus/merkle_proof/Cargo.toml index 2c0dbf1a7..2b883f864 100644 --- a/consensus/merkle_proof/Cargo.toml +++ b/consensus/merkle_proof/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" [dependencies] ethereum-types = "0.14.1" -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" lazy_static = "1.4.0" safe_arith = { path = "../safe_arith" } diff --git a/consensus/merkle_proof/src/lib.rs b/consensus/merkle_proof/src/lib.rs index 887deb1ef..dc3de71ce 100644 --- a/consensus/merkle_proof/src/lib.rs +++ b/consensus/merkle_proof/src/lib.rs @@ -1,4 +1,4 @@ -use eth2_hashing::{hash, hash32_concat, ZERO_HASHES}; +use ethereum_hashing::{hash, hash32_concat, ZERO_HASHES}; use ethereum_types::H256; use lazy_static::lazy_static; use safe_arith::ArithError; diff --git a/consensus/proto_array/Cargo.toml b/consensus/proto_array/Cargo.toml index 205ef8f52..81a535e34 100644 --- a/consensus/proto_array/Cargo.toml +++ b/consensus/proto_array/Cargo.toml @@ -10,9 +10,10 @@ path = "src/bin.rs" [dependencies] types = { path = "../types" } -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" serde = "1.0.116" serde_derive = "1.0.116" serde_yaml = "0.8.13" safe_arith = { path = "../safe_arith" } +superstruct = "0.5.0" \ No newline at end of file diff --git a/consensus/proto_array/src/error.rs b/consensus/proto_array/src/error.rs index 1fe45fd0f..35cb4007b 100644 --- a/consensus/proto_array/src/error.rs +++ b/consensus/proto_array/src/error.rs @@ -14,6 +14,8 @@ pub enum Error { InvalidBestDescendant(usize), InvalidParentDelta(usize), InvalidNodeDelta(usize), + MissingJustifiedCheckpoint, + MissingFinalizedCheckpoint, DeltaOverflow(usize), ProposerBoostOverflow(usize), ReOrgThresholdOverflow, @@ -67,6 +69,6 @@ pub struct InvalidBestNodeInfo { pub justified_checkpoint: Checkpoint, pub finalized_checkpoint: Checkpoint, pub head_root: Hash256, - pub head_justified_checkpoint: Option, - pub head_finalized_checkpoint: Option, + pub head_justified_checkpoint: Checkpoint, + pub head_finalized_checkpoint: Checkpoint, } diff --git a/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs index ede5bb394..aa26a8430 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/execution_status.rs @@ -987,11 +987,11 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { ops.push(Operation::AssertWeight { block_root: get_root(0), - weight: 33_000, + weight: 33_250, }); ops.push(Operation::AssertWeight { block_root: get_root(1), - weight: 33_000, + weight: 33_250, }); ops.push(Operation::AssertWeight { block_root: get_root(2), @@ -1000,7 +1000,7 @@ pub fn get_execution_status_test_definition_03() -> ForkChoiceTestDefinition { ops.push(Operation::AssertWeight { block_root: get_root(3), // This is a "magic number" generated from `calculate_committee_fraction`. - weight: 31_000, + weight: 31_250, }); // Invalidate the payload of 3. 
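These revised weights follow from the reworked `calculate_committee_fraction` further down in this diff: instead of deriving an average balance and a committee size separately (two truncating integer divisions) and multiplying them back together, the committee weight is now computed directly as `total_effective_balance / slots_per_epoch` before the proposer boost percentage is applied, so less precision is lost to truncation. A rough sketch of the difference with illustrative numbers (not the balances used by these test vectors):

    // Illustrative only: dividing once truncates less than dividing twice.
    fn old_weight(total: u64, num_validators: u64, slots_per_epoch: u64, boost: u64) -> Option<u64> {
        let average_balance = total.checked_div(num_validators)?;
        let committee_size = num_validators.checked_div(slots_per_epoch)?;
        let committee_weight = committee_size.checked_mul(average_balance)?;
        committee_weight.checked_mul(boost)?.checked_div(100)
    }

    fn new_weight(total: u64, slots_per_epoch: u64, boost: u64) -> Option<u64> {
        let committee_weight = total.checked_div(slots_per_epoch)?;
        committee_weight.checked_mul(boost)?.checked_div(100)
    }

    fn main() {
        // 100 validators holding 3_201 units in total, 32 slots per epoch, 40% proposer boost.
        let (total, n, slots, boost) = (3_201u64, 100, 32, 40);
        // Old: (3_201 / 100) * (100 / 32) = 32 * 3 = 96; 96 * 40 / 100 = 38.
        assert_eq!(old_weight(total, n, slots, boost), Some(38));
        // New: 3_201 / 32 = 100; 100 * 40 / 100 = 40.
        assert_eq!(new_weight(total, slots, boost), Some(40));
    }

The new form also matches how the fork choice specification defines the committee weight used for the proposer boost.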
diff --git a/consensus/proto_array/src/lib.rs b/consensus/proto_array/src/lib.rs index 481daba47..780563954 100644 --- a/consensus/proto_array/src/lib.rs +++ b/consensus/proto_array/src/lib.rs @@ -16,5 +16,5 @@ pub use error::Error; pub mod core { pub use super::proto_array::{ProposerBoost, ProtoArray, ProtoNode}; pub use super::proto_array_fork_choice::VoteTracker; - pub use super::ssz_container::SszContainer; + pub use super::ssz_container::{SszContainer, SszContainerV16, SszContainerV17}; } diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 2c19206cb..88111b461 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -5,6 +5,7 @@ use ssz::four_byte_option_impl; use ssz::Encode; use ssz_derive::{Decode, Encode}; use std::collections::{HashMap, HashSet}; +use superstruct::superstruct; use types::{ AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, Slot, @@ -66,7 +67,13 @@ impl InvalidationOperation { } } -#[derive(Clone, PartialEq, Debug, Encode, Decode, Serialize, Deserialize)] +pub type ProtoNode = ProtoNodeV17; + +#[superstruct( + variants(V16, V17), + variant_attributes(derive(Clone, PartialEq, Debug, Encode, Decode, Serialize, Deserialize)), + no_enum +)] pub struct ProtoNode { /// The `slot` is not necessary for `ProtoArray`, it just exists so external components can /// easily query the block slot. This is useful for upstream fork choice logic. @@ -85,10 +92,16 @@ pub struct ProtoNode { pub root: Hash256, #[ssz(with = "four_byte_option_usize")] pub parent: Option, + #[superstruct(only(V16))] #[ssz(with = "four_byte_option_checkpoint")] pub justified_checkpoint: Option, + #[superstruct(only(V16))] #[ssz(with = "four_byte_option_checkpoint")] pub finalized_checkpoint: Option, + #[superstruct(only(V17))] + pub justified_checkpoint: Checkpoint, + #[superstruct(only(V17))] + pub finalized_checkpoint: Checkpoint, pub weight: u64, #[ssz(with = "four_byte_option_usize")] pub best_child: Option, @@ -103,6 +116,57 @@ pub struct ProtoNode { pub unrealized_finalized_checkpoint: Option, } +impl TryInto for ProtoNodeV16 { + type Error = Error; + + fn try_into(self) -> Result { + let result = ProtoNode { + slot: self.slot, + state_root: self.state_root, + target_root: self.target_root, + current_epoch_shuffling_id: self.current_epoch_shuffling_id, + next_epoch_shuffling_id: self.next_epoch_shuffling_id, + root: self.root, + parent: self.parent, + justified_checkpoint: self + .justified_checkpoint + .ok_or(Error::MissingJustifiedCheckpoint)?, + finalized_checkpoint: self + .finalized_checkpoint + .ok_or(Error::MissingFinalizedCheckpoint)?, + weight: self.weight, + best_child: self.best_child, + best_descendant: self.best_descendant, + execution_status: self.execution_status, + unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, + }; + Ok(result) + } +} + +impl Into for ProtoNode { + fn into(self) -> ProtoNodeV16 { + ProtoNodeV16 { + slot: self.slot, + state_root: self.state_root, + target_root: self.target_root, + current_epoch_shuffling_id: self.current_epoch_shuffling_id, + next_epoch_shuffling_id: self.next_epoch_shuffling_id, + root: self.root, + parent: self.parent, + justified_checkpoint: Some(self.justified_checkpoint), + finalized_checkpoint: Some(self.finalized_checkpoint), + weight: self.weight, + best_child: self.best_child, + best_descendant: 
self.best_descendant, + execution_status: self.execution_status, + unrealized_justified_checkpoint: self.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint, + } + } +} + #[derive(PartialEq, Debug, Encode, Decode, Serialize, Deserialize, Copy, Clone)] pub struct ProposerBoost { pub root: Hash256, @@ -320,8 +384,8 @@ impl ProtoArray { parent: block .parent_root .and_then(|parent| self.indices.get(&parent).copied()), - justified_checkpoint: Some(block.justified_checkpoint), - finalized_checkpoint: Some(block.finalized_checkpoint), + justified_checkpoint: block.justified_checkpoint, + finalized_checkpoint: block.finalized_checkpoint, weight: 0, best_child: None, best_descendant: None, @@ -883,14 +947,7 @@ impl ProtoArray { let genesis_epoch = Epoch::new(0); let current_epoch = current_slot.epoch(E::slots_per_epoch()); let node_epoch = node.slot.epoch(E::slots_per_epoch()); - let node_justified_checkpoint = - if let Some(justified_checkpoint) = node.justified_checkpoint { - justified_checkpoint - } else { - // The node does not have any information about the justified - // checkpoint. This indicates an inconsistent proto-array. - return false; - }; + let node_justified_checkpoint = node.justified_checkpoint; let voting_source = if current_epoch > node_epoch { // The block is from a prior epoch, the voting source will be pulled-up. @@ -998,9 +1055,13 @@ impl ProtoArray { // Run this check once, outside of the loop rather than inside the loop. // If the conditions don't match for this node then they're unlikely to // start matching for its ancestors. + for checkpoint in &[node.finalized_checkpoint, node.justified_checkpoint] { + if checkpoint == &self.finalized_checkpoint { + return true; + } + } + for checkpoint in &[ - node.finalized_checkpoint, - node.justified_checkpoint, node.unrealized_finalized_checkpoint, node.unrealized_justified_checkpoint, ] { @@ -1055,13 +1116,9 @@ pub fn calculate_committee_fraction( justified_balances: &JustifiedBalances, proposer_score_boost: u64, ) -> Option { - let average_balance = justified_balances + let committee_weight = justified_balances .total_effective_balance - .checked_div(justified_balances.num_active_validators)?; - let committee_size = justified_balances - .num_active_validators .checked_div(E::slots_per_epoch())?; - let committee_weight = committee_size.checked_mul(average_balance)?; committee_weight .checked_mul(proposer_score_boost)? .checked_div(100) diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index d376e62e8..fe831b3c3 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -754,29 +754,20 @@ impl ProtoArrayForkChoice { .and_then(|i| self.proto_array.nodes.get(i)) .map(|parent| parent.root); - // If a node does not have a `finalized_checkpoint` or `justified_checkpoint` populated, - // it means it is not a descendant of the finalized checkpoint, so it is valid to return - // `None` here. 
- if let (Some(justified_checkpoint), Some(finalized_checkpoint)) = - (block.justified_checkpoint, block.finalized_checkpoint) - { - Some(Block { - slot: block.slot, - root: block.root, - parent_root, - state_root: block.state_root, - target_root: block.target_root, - current_epoch_shuffling_id: block.current_epoch_shuffling_id.clone(), - next_epoch_shuffling_id: block.next_epoch_shuffling_id.clone(), - justified_checkpoint, - finalized_checkpoint, - execution_status: block.execution_status, - unrealized_justified_checkpoint: block.unrealized_justified_checkpoint, - unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint, - }) - } else { - None - } + Some(Block { + slot: block.slot, + root: block.root, + parent_root, + state_root: block.state_root, + target_root: block.target_root, + current_epoch_shuffling_id: block.current_epoch_shuffling_id.clone(), + next_epoch_shuffling_id: block.next_epoch_shuffling_id.clone(), + justified_checkpoint: block.justified_checkpoint, + finalized_checkpoint: block.finalized_checkpoint, + execution_status: block.execution_status, + unrealized_justified_checkpoint: block.unrealized_justified_checkpoint, + unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint, + }) } /// Returns the `block.execution_status` field, if the block is present. diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index ed1efaae1..de7fa70d6 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -1,6 +1,6 @@ use crate::proto_array::ProposerBoost; use crate::{ - proto_array::{ProtoArray, ProtoNode}, + proto_array::{ProtoArray, ProtoNodeV16, ProtoNodeV17}, proto_array_fork_choice::{ElasticList, ProtoArrayForkChoice, VoteTracker}, Error, JustifiedBalances, }; @@ -8,24 +8,71 @@ use ssz::{four_byte_option_impl, Encode}; use ssz_derive::{Decode, Encode}; use std::collections::HashMap; use std::convert::TryFrom; +use superstruct::superstruct; use types::{Checkpoint, Hash256}; // Define a "legacy" implementation of `Option` which uses four bytes for encoding the union // selector. 
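The `SszContainerV16`/`SszContainerV17` split that follows mirrors the `ProtoNodeV16`/`ProtoNodeV17` split above: V16 preserves the old on-disk encoding with optional checkpoints so previously persisted fork-choice data can still be decoded, while V17 (aliased as `SszContainer`) makes both checkpoints mandatory. The actual database migration lives outside this hunk; a hedged sketch of the decode-then-upgrade pattern these conversions enable, with a hypothetical helper and error strings:

    use proto_array::core::{SszContainer, SszContainerV16};
    use ssz::Decode;

    // Hypothetical helper, for illustration only.
    fn load_fork_choice(bytes: &[u8]) -> Result<SszContainer, String> {
        // Try the current (V17) layout first.
        if let Ok(container) = SszContainer::from_ssz_bytes(bytes) {
            return Ok(container);
        }
        // Fall back to the legacy (V16) layout and upgrade it. The upgrade fails with
        // `MissingJustifiedCheckpoint`/`MissingFinalizedCheckpoint` if any node lacks them.
        let legacy = SszContainerV16::from_ssz_bytes(bytes)
            .map_err(|_| "bytes match neither the V17 nor the V16 layout".to_string())?;
        legacy
            .try_into()
            .map_err(|_| "legacy container held nodes without checkpoints".to_string())
    }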
four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); -#[derive(Encode, Decode)] +pub type SszContainer = SszContainerV17; + +#[superstruct( + variants(V16, V17), + variant_attributes(derive(Encode, Decode)), + no_enum +)] pub struct SszContainer { pub votes: Vec, pub balances: Vec, pub prune_threshold: usize, pub justified_checkpoint: Checkpoint, pub finalized_checkpoint: Checkpoint, - pub nodes: Vec, + #[superstruct(only(V16))] + pub nodes: Vec, + #[superstruct(only(V17))] + pub nodes: Vec, pub indices: Vec<(Hash256, usize)>, pub previous_proposer_boost: ProposerBoost, } +impl TryInto for SszContainerV16 { + type Error = Error; + + fn try_into(self) -> Result { + let nodes: Result, Error> = + self.nodes.into_iter().map(TryInto::try_into).collect(); + + Ok(SszContainer { + votes: self.votes, + balances: self.balances, + prune_threshold: self.prune_threshold, + justified_checkpoint: self.justified_checkpoint, + finalized_checkpoint: self.finalized_checkpoint, + nodes: nodes?, + indices: self.indices, + previous_proposer_boost: self.previous_proposer_boost, + }) + } +} + +impl Into for SszContainer { + fn into(self) -> SszContainerV16 { + let nodes = self.nodes.into_iter().map(Into::into).collect(); + + SszContainerV16 { + votes: self.votes, + balances: self.balances, + prune_threshold: self.prune_threshold, + justified_checkpoint: self.justified_checkpoint, + finalized_checkpoint: self.finalized_checkpoint, + nodes, + indices: self.indices, + previous_proposer_boost: self.previous_proposer_boost, + } + } +} + impl From<&ProtoArrayForkChoice> for SszContainer { fn from(from: &ProtoArrayForkChoice) -> Self { let proto_array = &from.proto_array; diff --git a/consensus/serde_utils/Cargo.toml b/consensus/serde_utils/Cargo.toml deleted file mode 100644 index d4ba02765..000000000 --- a/consensus/serde_utils/Cargo.toml +++ /dev/null @@ -1,14 +0,0 @@ -[package] -name = "eth2_serde_utils" -version = "0.1.1" -authors = ["Paul Hauner "] -edition = "2021" -description = "Serialization and deserialization utilities useful for JSON representations of Ethereum 2.0 types." -license = "Apache-2.0" - -[dependencies] -serde = { version = "1.0.116", features = ["derive"] } -serde_derive = "1.0.116" -serde_json = "1.0.58" -hex = "0.4.2" -ethereum-types = "0.14.1" diff --git a/consensus/serde_utils/src/fixed_bytes_hex.rs b/consensus/serde_utils/src/fixed_bytes_hex.rs deleted file mode 100644 index 4e9dc98ac..000000000 --- a/consensus/serde_utils/src/fixed_bytes_hex.rs +++ /dev/null @@ -1,52 +0,0 @@ -//! Formats `[u8; n]` as a 0x-prefixed hex string. -//! -//! E.g., `[0, 1, 2, 3]` serializes as `"0x00010203"`. - -use crate::hex::PrefixedHexVisitor; -use serde::de::Error; -use serde::{Deserializer, Serializer}; - -macro_rules! 
bytes_hex { - ($num_bytes: tt) => { - use super::*; - - const BYTES_LEN: usize = $num_bytes; - - pub fn serialize(bytes: &[u8; BYTES_LEN], serializer: S) -> Result - where - S: Serializer, - { - let mut hex_string: String = "0x".to_string(); - hex_string.push_str(&hex::encode(&bytes)); - - serializer.serialize_str(&hex_string) - } - - pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; BYTES_LEN], D::Error> - where - D: Deserializer<'de>, - { - let decoded = deserializer.deserialize_str(PrefixedHexVisitor)?; - - if decoded.len() != BYTES_LEN { - return Err(D::Error::custom(format!( - "expected {} bytes for array, got {}", - BYTES_LEN, - decoded.len() - ))); - } - - let mut array = [0; BYTES_LEN]; - array.copy_from_slice(&decoded); - Ok(array) - } - }; -} - -pub mod bytes_4_hex { - bytes_hex!(4); -} - -pub mod bytes_8_hex { - bytes_hex!(8); -} diff --git a/consensus/serde_utils/src/hex.rs b/consensus/serde_utils/src/hex.rs deleted file mode 100644 index 9a2cd65c7..000000000 --- a/consensus/serde_utils/src/hex.rs +++ /dev/null @@ -1,77 +0,0 @@ -//! Provides utilities for parsing 0x-prefixed hex strings. - -use serde::de::{self, Visitor}; -use std::fmt; - -/// Encode `data` as a 0x-prefixed hex string. -pub fn encode>(data: T) -> String { - let hex = hex::encode(data); - - let mut s = "0x".to_string(); - s.push_str(hex.as_str()); - s -} - -/// Decode `data` from a 0x-prefixed hex string. -pub fn decode(s: &str) -> Result, String> { - if let Some(stripped) = s.strip_prefix("0x") { - hex::decode(stripped).map_err(|e| format!("invalid hex: {:?}", e)) - } else { - Err("hex must have 0x prefix".to_string()) - } -} - -pub struct PrefixedHexVisitor; - -impl<'de> Visitor<'de> for PrefixedHexVisitor { - type Value = Vec; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a hex string with 0x prefix") - } - - fn visit_str(self, value: &str) -> Result - where - E: de::Error, - { - decode(value).map_err(de::Error::custom) - } -} - -pub struct HexVisitor; - -impl<'de> Visitor<'de> for HexVisitor { - type Value = Vec; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a hex string (irrelevant of prefix)") - } - - fn visit_str(self, value: &str) -> Result - where - E: de::Error, - { - hex::decode(value.trim_start_matches("0x")) - .map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e))) - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn encoding() { - let bytes = vec![0, 255]; - let hex = encode(bytes); - assert_eq!(hex.as_str(), "0x00ff"); - - let bytes = vec![]; - let hex = encode(bytes); - assert_eq!(hex.as_str(), "0x"); - - let bytes = vec![1, 2, 3]; - let hex = encode(bytes); - assert_eq!(hex.as_str(), "0x010203"); - } -} diff --git a/consensus/serde_utils/src/hex_vec.rs b/consensus/serde_utils/src/hex_vec.rs deleted file mode 100644 index f7f483362..000000000 --- a/consensus/serde_utils/src/hex_vec.rs +++ /dev/null @@ -1,23 +0,0 @@ -//! Formats `Vec` as a 0x-prefixed hex string. -//! -//! E.g., `vec![0, 1, 2, 3]` serializes as `"0x00010203"`. 
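These removed helpers are designed to be wired into types through serde's `with` attribute. A small sketch of that wiring for `hex_vec`; the `serde_utils` module path and the `serde_json` dependency are assumptions for illustration, since the in-tree `eth2_serde_utils` crate deleted here appears to be replaced by an externally published equivalent, matching the `ethereum_ssz`/`ethereum_hashing` switch elsewhere in this diff:

    use serde::{Deserialize, Serialize};

    #[derive(Debug, PartialEq, Serialize, Deserialize)]
    struct ExtraData {
        // Rendered as a 0x-prefixed hex string, e.g. "0x00010203".
        #[serde(with = "serde_utils::hex_vec")]
        bytes: Vec<u8>,
    }

    fn main() {
        let data = ExtraData { bytes: vec![0, 1, 2, 3] };
        assert_eq!(serde_json::to_string(&data).unwrap(), r#"{"bytes":"0x00010203"}"#);
        assert_eq!(
            serde_json::from_str::<ExtraData>(r#"{"bytes":"0x00010203"}"#).unwrap(),
            data
        );
    }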
- -use crate::hex::PrefixedHexVisitor; -use serde::{Deserializer, Serializer}; - -pub fn serialize(bytes: &[u8], serializer: S) -> Result -where - S: Serializer, -{ - let mut hex_string: String = "0x".to_string(); - hex_string.push_str(&hex::encode(bytes)); - - serializer.serialize_str(&hex_string) -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, -{ - deserializer.deserialize_str(PrefixedHexVisitor) -} diff --git a/consensus/serde_utils/src/json_str.rs b/consensus/serde_utils/src/json_str.rs deleted file mode 100644 index b9a181391..000000000 --- a/consensus/serde_utils/src/json_str.rs +++ /dev/null @@ -1,25 +0,0 @@ -//! Serialize a datatype as a JSON-blob within a single string. -use serde::{ - de::{DeserializeOwned, Error as _}, - ser::Error as _, - Deserialize, Deserializer, Serialize, Serializer, -}; - -/// Serialize as a JSON object within a string. -pub fn serialize(value: &T, serializer: S) -> Result -where - S: Serializer, - T: Serialize, -{ - serializer.serialize_str(&serde_json::to_string(value).map_err(S::Error::custom)?) -} - -/// Deserialize a JSON object embedded in a string. -pub fn deserialize<'de, T, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, - T: DeserializeOwned, -{ - let json_str = String::deserialize(deserializer)?; - serde_json::from_str(&json_str).map_err(D::Error::custom) -} diff --git a/consensus/serde_utils/src/lib.rs b/consensus/serde_utils/src/lib.rs deleted file mode 100644 index 5c5dafc66..000000000 --- a/consensus/serde_utils/src/lib.rs +++ /dev/null @@ -1,15 +0,0 @@ -mod quoted_int; - -pub mod fixed_bytes_hex; -pub mod hex; -pub mod hex_vec; -pub mod json_str; -pub mod list_of_bytes_lists; -pub mod quoted_u64_vec; -pub mod u256_hex_be; -pub mod u32_hex; -pub mod u64_hex_be; -pub mod u8_hex; - -pub use fixed_bytes_hex::{bytes_4_hex, bytes_8_hex}; -pub use quoted_int::{quoted_i64, quoted_u256, quoted_u32, quoted_u64, quoted_u8}; diff --git a/consensus/serde_utils/src/list_of_bytes_lists.rs b/consensus/serde_utils/src/list_of_bytes_lists.rs deleted file mode 100644 index b93321aa0..000000000 --- a/consensus/serde_utils/src/list_of_bytes_lists.rs +++ /dev/null @@ -1,49 +0,0 @@ -//! Formats `Vec` using quotes. -//! -//! E.g., `vec![0, 1, 2]` serializes as `["0", "1", "2"]`. -//! -//! Quotes can be optional during decoding. - -use crate::hex; -use serde::ser::SerializeSeq; -use serde::{de, Deserializer, Serializer}; - -pub struct ListOfBytesListVisitor; -impl<'a> serde::de::Visitor<'a> for ListOfBytesListVisitor { - type Value = Vec>; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a list of 0x-prefixed byte lists") - } - - fn visit_seq(self, mut seq: A) -> Result - where - A: serde::de::SeqAccess<'a>, - { - let mut vec = vec![]; - - while let Some(val) = seq.next_element::()? 
{ - vec.push(hex::decode(&val).map_err(de::Error::custom)?); - } - - Ok(vec) - } -} - -pub fn serialize(value: &[Vec], serializer: S) -> Result -where - S: Serializer, -{ - let mut seq = serializer.serialize_seq(Some(value.len()))?; - for val in value { - seq.serialize_element(&hex::encode(val))?; - } - seq.end() -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result>, D::Error> -where - D: Deserializer<'de>, -{ - deserializer.deserialize_any(ListOfBytesListVisitor) -} diff --git a/consensus/serde_utils/src/quoted_int.rs b/consensus/serde_utils/src/quoted_int.rs deleted file mode 100644 index 0cc35aa31..000000000 --- a/consensus/serde_utils/src/quoted_int.rs +++ /dev/null @@ -1,247 +0,0 @@ -//! Formats some integer types using quotes. -//! -//! E.g., `1` serializes as `"1"`. -//! -//! Quotes can be optional during decoding. - -use ethereum_types::U256; -use serde::{Deserializer, Serializer}; -use serde_derive::{Deserialize, Serialize}; -use std::convert::TryFrom; -use std::marker::PhantomData; - -macro_rules! define_mod { - ($int: ty) => { - /// Serde support for deserializing quoted integers. - /// - /// Configurable so that quotes are either required or optional. - pub struct QuotedIntVisitor { - require_quotes: bool, - _phantom: PhantomData, - } - - impl<'a, T> serde::de::Visitor<'a> for QuotedIntVisitor - where - T: From<$int> + Into<$int> + Copy + TryFrom, - { - type Value = T; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - if self.require_quotes { - write!(formatter, "a quoted integer") - } else { - write!(formatter, "a quoted or unquoted integer") - } - } - - fn visit_str(self, s: &str) -> Result - where - E: serde::de::Error, - { - s.parse::<$int>() - .map(T::from) - .map_err(serde::de::Error::custom) - } - - fn visit_u64(self, v: u64) -> Result - where - E: serde::de::Error, - { - if self.require_quotes { - Err(serde::de::Error::custom( - "received unquoted integer when quotes are required", - )) - } else { - T::try_from(v).map_err(|_| serde::de::Error::custom("invalid integer")) - } - } - } - - /// Compositional wrapper type that allows quotes or no quotes. - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize)] - #[serde(transparent)] - pub struct MaybeQuoted - where - T: From<$int> + Into<$int> + Copy + TryFrom, - { - #[serde(with = "self")] - pub value: T, - } - - /// Wrapper type for requiring quotes on a `$int`-like type. - /// - /// Unlike using `serde(with = "quoted_$int::require_quotes")` this is composable, and can be nested - /// inside types like `Option`, `Result` and `Vec`. - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, Serialize)] - #[serde(transparent)] - pub struct Quoted - where - T: From<$int> + Into<$int> + Copy + TryFrom, - { - #[serde(with = "require_quotes")] - pub value: T, - } - - /// Serialize with quotes. - pub fn serialize(value: &T, serializer: S) -> Result - where - S: Serializer, - T: From<$int> + Into<$int> + Copy, - { - let v: $int = (*value).into(); - serializer.serialize_str(&format!("{}", v)) - } - - /// Deserialize with or without quotes. - pub fn deserialize<'de, D, T>(deserializer: D) -> Result - where - D: Deserializer<'de>, - T: From<$int> + Into<$int> + Copy + TryFrom, - { - deserializer.deserialize_any(QuotedIntVisitor { - require_quotes: false, - _phantom: PhantomData, - }) - } - - /// Requires quotes when deserializing. - /// - /// Usage: `#[serde(with = "quoted_u64::require_quotes")]`. 
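The composability point made above is the main reason `Quoted` exists: `#[serde(with = ...)]` cannot reach inside an `Option` or `Vec`, but a transparent wrapper type can. A minimal sketch, with the module path assumed as in the previous example:

    use serde::{Deserialize, Serialize};

    #[derive(Debug, Serialize, Deserialize)]
    struct Sketch {
        // Serializes as {"maybe_value":"7"} when present and {"maybe_value":null} when absent.
        maybe_value: Option<serde_utils::quoted_u64::Quoted<u64>>,
    }

    fn main() {
        let s = Sketch {
            maybe_value: Some(serde_utils::quoted_u64::Quoted { value: 7 }),
        };
        assert_eq!(serde_json::to_string(&s).unwrap(), r#"{"maybe_value":"7"}"#);
    }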
- pub mod require_quotes { - pub use super::serialize; - use super::*; - - pub fn deserialize<'de, D, T>(deserializer: D) -> Result - where - D: Deserializer<'de>, - T: From<$int> + Into<$int> + Copy + TryFrom, - { - deserializer.deserialize_any(QuotedIntVisitor { - require_quotes: true, - _phantom: PhantomData, - }) - } - } - - #[cfg(test)] - mod test { - use super::*; - - #[test] - fn require_quotes() { - let x = serde_json::from_str::>("\"8\"").unwrap(); - assert_eq!(x.value, 8); - serde_json::from_str::>("8").unwrap_err(); - } - } - }; -} - -pub mod quoted_u8 { - use super::*; - - define_mod!(u8); -} - -pub mod quoted_u32 { - use super::*; - - define_mod!(u32); -} - -pub mod quoted_u64 { - use super::*; - - define_mod!(u64); -} - -pub mod quoted_i64 { - use super::*; - - define_mod!(i64); -} - -pub mod quoted_u256 { - use super::*; - - struct U256Visitor; - - impl<'de> serde::de::Visitor<'de> for U256Visitor { - type Value = U256; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - formatter.write_str("a quoted U256 integer") - } - - fn visit_str(self, v: &str) -> Result - where - E: serde::de::Error, - { - U256::from_dec_str(v).map_err(serde::de::Error::custom) - } - } - - /// Serialize with quotes. - pub fn serialize(value: &U256, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&format!("{}", value)) - } - - /// Deserialize with quotes. - pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - deserializer.deserialize_str(U256Visitor) - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[derive(Debug, PartialEq, Serialize, Deserialize)] - #[serde(transparent)] - struct WrappedU256(#[serde(with = "quoted_u256")] U256); - - #[test] - fn u256_with_quotes() { - assert_eq!( - &serde_json::to_string(&WrappedU256(U256::one())).unwrap(), - "\"1\"" - ); - assert_eq!( - serde_json::from_str::("\"1\"").unwrap(), - WrappedU256(U256::one()) - ); - } - - #[test] - fn u256_without_quotes() { - serde_json::from_str::("1").unwrap_err(); - } - - #[derive(Debug, PartialEq, Serialize, Deserialize)] - #[serde(transparent)] - struct WrappedI64(#[serde(with = "quoted_i64")] i64); - - #[test] - fn negative_i64_with_quotes() { - assert_eq!( - serde_json::from_str::("\"-200\"").unwrap().0, - -200 - ); - assert_eq!( - serde_json::to_string(&WrappedI64(-12_500)).unwrap(), - "\"-12500\"" - ); - } - - // It would be OK if this worked, but we don't need it to (i64s should always be quoted). - #[test] - fn negative_i64_without_quotes() { - serde_json::from_str::("-200").unwrap_err(); - } -} diff --git a/consensus/serde_utils/src/quoted_u64_vec.rs b/consensus/serde_utils/src/quoted_u64_vec.rs deleted file mode 100644 index f124c9890..000000000 --- a/consensus/serde_utils/src/quoted_u64_vec.rs +++ /dev/null @@ -1,97 +0,0 @@ -//! Formats `Vec` using quotes. -//! -//! E.g., `vec![0, 1, 2]` serializes as `["0", "1", "2"]`. -//! -//! Quotes can be optional during decoding. 
- -use serde::ser::SerializeSeq; -use serde::{Deserializer, Serializer}; -use serde_derive::{Deserialize, Serialize}; - -#[derive(Serialize, Deserialize)] -#[serde(transparent)] -pub struct QuotedIntWrapper { - #[serde(with = "crate::quoted_u64")] - pub int: u64, -} - -pub struct QuotedIntVecVisitor; -impl<'a> serde::de::Visitor<'a> for QuotedIntVecVisitor { - type Value = Vec; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a list of quoted or unquoted integers") - } - - fn visit_seq(self, mut seq: A) -> Result - where - A: serde::de::SeqAccess<'a>, - { - let mut vec = vec![]; - - while let Some(val) = seq.next_element()? { - let val: QuotedIntWrapper = val; - vec.push(val.int); - } - - Ok(vec) - } -} - -pub fn serialize(value: &[u64], serializer: S) -> Result -where - S: Serializer, -{ - let mut seq = serializer.serialize_seq(Some(value.len()))?; - for &int in value { - seq.serialize_element(&QuotedIntWrapper { int })?; - } - seq.end() -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, -{ - deserializer.deserialize_any(QuotedIntVecVisitor) -} - -#[cfg(test)] -mod test { - use super::*; - - #[derive(Debug, Serialize, Deserialize)] - struct Obj { - #[serde(with = "crate::quoted_u64_vec")] - values: Vec, - } - - #[test] - fn quoted_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": ["1", "2", "3", "4"] }"#).unwrap(); - assert_eq!(obj.values, vec![1, 2, 3, 4]); - } - - #[test] - fn unquoted_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": [1, 2, 3, 4] }"#).unwrap(); - assert_eq!(obj.values, vec![1, 2, 3, 4]); - } - - #[test] - fn mixed_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": ["1", 2, "3", "4"] }"#).unwrap(); - assert_eq!(obj.values, vec![1, 2, 3, 4]); - } - - #[test] - fn empty_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": [] }"#).unwrap(); - assert!(obj.values.is_empty()); - } - - #[test] - fn whole_list_quoted_err() { - serde_json::from_str::(r#"{ "values": "[1, 2, 3, 4]" }"#).unwrap_err(); - } -} diff --git a/consensus/serde_utils/src/u256_hex_be.rs b/consensus/serde_utils/src/u256_hex_be.rs deleted file mode 100644 index 8007e5792..000000000 --- a/consensus/serde_utils/src/u256_hex_be.rs +++ /dev/null @@ -1,144 +0,0 @@ -use ethereum_types::U256; - -use serde::de::Visitor; -use serde::{de, Deserializer, Serialize, Serializer}; -use std::fmt; -use std::str::FromStr; - -pub fn serialize(num: &U256, serializer: S) -> Result -where - S: Serializer, -{ - num.serialize(serializer) -} - -pub struct U256Visitor; - -impl<'de> Visitor<'de> for U256Visitor { - type Value = String; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a well formatted hex string") - } - - fn visit_str(self, value: &str) -> Result - where - E: de::Error, - { - if !value.starts_with("0x") { - return Err(de::Error::custom("must start with 0x")); - } - let stripped = &value[2..]; - if stripped.is_empty() { - Err(de::Error::custom(format!( - "quantity cannot be {:?}", - stripped - ))) - } else if stripped == "0" { - Ok(value.to_string()) - } else if stripped.starts_with('0') { - Err(de::Error::custom("cannot have leading zero")) - } else { - Ok(value.to_string()) - } - } -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - let decoded = deserializer.deserialize_string(U256Visitor)?; - - U256::from_str(&decoded).map_err(|e| 
de::Error::custom(format!("Invalid U256 string: {}", e))) -} - -#[cfg(test)] -mod test { - use ethereum_types::U256; - use serde::{Deserialize, Serialize}; - use serde_json; - - #[derive(Debug, PartialEq, Serialize, Deserialize)] - #[serde(transparent)] - struct Wrapper { - #[serde(with = "super")] - val: U256, - } - - #[test] - fn encoding() { - assert_eq!( - &serde_json::to_string(&Wrapper { val: 0.into() }).unwrap(), - "\"0x0\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 1.into() }).unwrap(), - "\"0x1\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 256.into() }).unwrap(), - "\"0x100\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 65.into() }).unwrap(), - "\"0x41\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 1024.into() }).unwrap(), - "\"0x400\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { - val: U256::max_value() - 1 - }) - .unwrap(), - "\"0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { - val: U256::max_value() - }) - .unwrap(), - "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" - ); - } - - #[test] - fn decoding() { - assert_eq!( - serde_json::from_str::("\"0x0\"").unwrap(), - Wrapper { val: 0.into() }, - ); - assert_eq!( - serde_json::from_str::("\"0x41\"").unwrap(), - Wrapper { val: 65.into() }, - ); - assert_eq!( - serde_json::from_str::("\"0x400\"").unwrap(), - Wrapper { val: 1024.into() }, - ); - assert_eq!( - serde_json::from_str::( - "\"0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe\"" - ) - .unwrap(), - Wrapper { - val: U256::max_value() - 1 - }, - ); - assert_eq!( - serde_json::from_str::( - "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" - ) - .unwrap(), - Wrapper { - val: U256::max_value() - }, - ); - serde_json::from_str::("\"0x\"").unwrap_err(); - serde_json::from_str::("\"0x0400\"").unwrap_err(); - serde_json::from_str::("\"400\"").unwrap_err(); - serde_json::from_str::("\"ff\"").unwrap_err(); - } -} diff --git a/consensus/serde_utils/src/u32_hex.rs b/consensus/serde_utils/src/u32_hex.rs deleted file mode 100644 index c1ab3537b..000000000 --- a/consensus/serde_utils/src/u32_hex.rs +++ /dev/null @@ -1,21 +0,0 @@ -//! Formats `u32` as a 0x-prefixed, little-endian hex string. -//! -//! E.g., `0` serializes as `"0x00000000"`. - -use crate::bytes_4_hex; -use serde::{Deserializer, Serializer}; - -pub fn serialize(num: &u32, serializer: S) -> Result -where - S: Serializer, -{ - let hex = format!("0x{}", hex::encode(num.to_le_bytes())); - serializer.serialize_str(&hex) -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - bytes_4_hex::deserialize(deserializer).map(u32::from_le_bytes) -} diff --git a/consensus/serde_utils/src/u64_hex_be.rs b/consensus/serde_utils/src/u64_hex_be.rs deleted file mode 100644 index e3364a2d2..000000000 --- a/consensus/serde_utils/src/u64_hex_be.rs +++ /dev/null @@ -1,134 +0,0 @@ -//! Formats `u64` as a 0x-prefixed, big-endian hex string. -//! -//! E.g., `0` serializes as `"0x0000000000000000"`. 
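This module implements the strict QUANTITY encoding used by the execution-layer JSON-RPC: minimal big-endian hex with no leading zeros (despite the zero-padded example in the doc comment above, the serializer trims to `"0x0"` for zero, as the module's own tests show). A sketch of consuming it, with the module path assumed as in the earlier examples:

    use serde::{Deserialize, Serialize};

    #[derive(Debug, PartialEq, Serialize, Deserialize)]
    #[serde(transparent)]
    struct BlockNumber(#[serde(with = "serde_utils::u64_hex_be")] u64);

    fn main() {
        assert_eq!(serde_json::to_string(&BlockNumber(1024)).unwrap(), r#""0x400""#);
        assert_eq!(
            serde_json::from_str::<BlockNumber>(r#""0x400""#).unwrap(),
            BlockNumber(1024)
        );
        // Strict QUANTITY rules: leading zeros and a bare "0x" are rejected.
        assert!(serde_json::from_str::<BlockNumber>(r#""0x0400""#).is_err());
        assert!(serde_json::from_str::<BlockNumber>(r#""0x""#).is_err());
    }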
- -use serde::de::{self, Error, Visitor}; -use serde::{Deserializer, Serializer}; -use std::fmt; - -const BYTES_LEN: usize = 8; - -pub struct QuantityVisitor; -impl<'de> Visitor<'de> for QuantityVisitor { - type Value = Vec; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a hex string") - } - - fn visit_str(self, value: &str) -> Result - where - E: de::Error, - { - if !value.starts_with("0x") { - return Err(de::Error::custom("must start with 0x")); - } - - let stripped = value.trim_start_matches("0x"); - - if stripped.is_empty() { - Err(de::Error::custom(format!( - "quantity cannot be {}", - stripped - ))) - } else if stripped == "0" { - Ok(vec![0]) - } else if stripped.starts_with('0') { - Err(de::Error::custom("cannot have leading zero")) - } else if stripped.len() % 2 != 0 { - hex::decode(format!("0{}", stripped)) - .map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e))) - } else { - hex::decode(stripped).map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e))) - } - } -} - -pub fn serialize(num: &u64, serializer: S) -> Result -where - S: Serializer, -{ - let raw = hex::encode(num.to_be_bytes()); - let trimmed = raw.trim_start_matches('0'); - - let hex = if trimmed.is_empty() { "0" } else { trimmed }; - - serializer.serialize_str(&format!("0x{}", &hex)) -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - let decoded = deserializer.deserialize_str(QuantityVisitor)?; - - // TODO: this is not strict about byte length like other methods. - if decoded.len() > BYTES_LEN { - return Err(D::Error::custom(format!( - "expected max {} bytes for array, got {}", - BYTES_LEN, - decoded.len() - ))); - } - - let mut array = [0; BYTES_LEN]; - array[BYTES_LEN - decoded.len()..].copy_from_slice(&decoded); - Ok(u64::from_be_bytes(array)) -} - -#[cfg(test)] -mod test { - use serde::{Deserialize, Serialize}; - use serde_json; - - #[derive(Debug, PartialEq, Serialize, Deserialize)] - #[serde(transparent)] - struct Wrapper { - #[serde(with = "super")] - val: u64, - } - - #[test] - fn encoding() { - assert_eq!( - &serde_json::to_string(&Wrapper { val: 0 }).unwrap(), - "\"0x0\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 1 }).unwrap(), - "\"0x1\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 256 }).unwrap(), - "\"0x100\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 65 }).unwrap(), - "\"0x41\"" - ); - assert_eq!( - &serde_json::to_string(&Wrapper { val: 1024 }).unwrap(), - "\"0x400\"" - ); - } - - #[test] - fn decoding() { - assert_eq!( - serde_json::from_str::("\"0x0\"").unwrap(), - Wrapper { val: 0 }, - ); - assert_eq!( - serde_json::from_str::("\"0x41\"").unwrap(), - Wrapper { val: 65 }, - ); - assert_eq!( - serde_json::from_str::("\"0x400\"").unwrap(), - Wrapper { val: 1024 }, - ); - serde_json::from_str::("\"0x\"").unwrap_err(); - serde_json::from_str::("\"0x0400\"").unwrap_err(); - serde_json::from_str::("\"400\"").unwrap_err(); - serde_json::from_str::("\"ff\"").unwrap_err(); - } -} diff --git a/consensus/serde_utils/src/u8_hex.rs b/consensus/serde_utils/src/u8_hex.rs deleted file mode 100644 index 8083e1d12..000000000 --- a/consensus/serde_utils/src/u8_hex.rs +++ /dev/null @@ -1,29 +0,0 @@ -//! Formats `u8` as a 0x-prefixed hex string. -//! -//! E.g., `0` serializes as `"0x00"`. 
- -use crate::hex::PrefixedHexVisitor; -use serde::de::Error; -use serde::{Deserializer, Serializer}; - -pub fn serialize(byte: &u8, serializer: S) -> Result -where - S: Serializer, -{ - let hex = format!("0x{}", hex::encode([*byte])); - serializer.serialize_str(&hex) -} - -pub fn deserialize<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; - if bytes.len() != 1 { - return Err(D::Error::custom(format!( - "expected 1 byte for u8, got {}", - bytes.len() - ))); - } - Ok(bytes[0]) -} diff --git a/consensus/ssz/Cargo.toml b/consensus/ssz/Cargo.toml deleted file mode 100644 index d39ad1087..000000000 --- a/consensus/ssz/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "eth2_ssz" -version = "0.4.1" -authors = ["Paul Hauner "] -edition = "2021" -description = "SimpleSerialize (SSZ) as used in Ethereum 2.0" -license = "Apache-2.0" - -[lib] -name = "ssz" - -[dev-dependencies] -eth2_ssz_derive = "0.3.1" - -[dependencies] -ethereum-types = "0.14.1" -smallvec = { version = "1.6.1", features = ["const_generics"] } -itertools = "0.10.3" - -[features] -arbitrary = ["ethereum-types/arbitrary"] diff --git a/consensus/ssz/README.md b/consensus/ssz/README.md deleted file mode 100644 index 04603cda3..000000000 --- a/consensus/ssz/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# simpleserialize (ssz) - -[](https://crates.io/crates/eth2_ssz) diff --git a/consensus/ssz/examples/large_list.rs b/consensus/ssz/examples/large_list.rs deleted file mode 100644 index a1b10ab7a..000000000 --- a/consensus/ssz/examples/large_list.rs +++ /dev/null @@ -1,15 +0,0 @@ -//! Encode and decode a list many times. -//! -//! Useful for `cargo flamegraph`. - -use ssz::{Decode, Encode}; - -fn main() { - let vec: Vec = vec![4242; 8196]; - - let output: Vec> = (0..40_000) - .map(|_| Vec::from_ssz_bytes(&vec.as_ssz_bytes()).unwrap()) - .collect(); - - println!("{}", output.len()); -} diff --git a/consensus/ssz/examples/large_list_of_structs.rs b/consensus/ssz/examples/large_list_of_structs.rs deleted file mode 100644 index 2aaaf9b8a..000000000 --- a/consensus/ssz/examples/large_list_of_structs.rs +++ /dev/null @@ -1,31 +0,0 @@ -//! Encode and decode a list many times. -//! -//! Useful for `cargo flamegraph`. 
- -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; - -#[derive(Clone, Copy, Encode, Decode)] -pub struct FixedLen { - a: u64, - b: u64, - c: u64, - d: u64, -} - -fn main() { - let fixed_len = FixedLen { - a: 42, - b: 42, - c: 42, - d: 42, - }; - - let vec: Vec = vec![fixed_len; 8196]; - - let output: Vec> = (0..40_000) - .map(|_| Vec::from_ssz_bytes(&vec.as_ssz_bytes()).unwrap()) - .collect(); - - println!("{}", output.len()); -} diff --git a/consensus/ssz/examples/struct_definition.rs b/consensus/ssz/examples/struct_definition.rs deleted file mode 100644 index 123da12c5..000000000 --- a/consensus/ssz/examples/struct_definition.rs +++ /dev/null @@ -1,73 +0,0 @@ -use ssz::{Decode, DecodeError, Encode, SszDecoderBuilder, SszEncoder}; - -#[derive(Debug, PartialEq)] -pub struct Foo { - a: u16, - b: Vec, - c: u16, -} - -impl Encode for Foo { - fn is_ssz_fixed_len() -> bool { - ::is_ssz_fixed_len() && as Encode>::is_ssz_fixed_len() - } - - fn ssz_bytes_len(&self) -> usize { - ::ssz_fixed_len() - + ssz::BYTES_PER_LENGTH_OFFSET - + ::ssz_fixed_len() - + self.b.ssz_bytes_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - let offset = ::ssz_fixed_len() - + as Encode>::ssz_fixed_len() - + ::ssz_fixed_len(); - - let mut encoder = SszEncoder::container(buf, offset); - - encoder.append(&self.a); - encoder.append(&self.b); - encoder.append(&self.c); - - encoder.finalize(); - } -} - -impl Decode for Foo { - fn is_ssz_fixed_len() -> bool { - ::is_ssz_fixed_len() && as Decode>::is_ssz_fixed_len() - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let mut builder = SszDecoderBuilder::new(bytes); - - builder.register_type::()?; - builder.register_type::>()?; - builder.register_type::()?; - - let mut decoder = builder.build()?; - - Ok(Self { - a: decoder.decode_next()?, - b: decoder.decode_next()?, - c: decoder.decode_next()?, - }) - } -} - -fn main() { - let my_foo = Foo { - a: 42, - b: vec![0, 1, 2, 3], - c: 11, - }; - - let bytes = vec![42, 0, 8, 0, 0, 0, 11, 0, 0, 1, 2, 3]; - - assert_eq!(my_foo.as_ssz_bytes(), bytes); - - let decoded_foo = Foo::from_ssz_bytes(&bytes).unwrap(); - - assert_eq!(my_foo, decoded_foo); -} diff --git a/consensus/ssz/src/decode.rs b/consensus/ssz/src/decode.rs deleted file mode 100644 index 10b3573b1..000000000 --- a/consensus/ssz/src/decode.rs +++ /dev/null @@ -1,374 +0,0 @@ -use super::*; -use smallvec::{smallvec, SmallVec}; -use std::cmp::Ordering; - -type SmallVec8 = SmallVec<[T; 8]>; - -pub mod impls; -pub mod try_from_iter; - -/// Returned when SSZ decoding fails. -#[derive(Debug, PartialEq, Clone)] -pub enum DecodeError { - /// The bytes supplied were too short to be decoded into the specified type. - InvalidByteLength { len: usize, expected: usize }, - /// The given bytes were too short to be read as a length prefix. - InvalidLengthPrefix { len: usize, expected: usize }, - /// A length offset pointed to a byte that was out-of-bounds (OOB). - /// - /// A bytes may be OOB for the following reasons: - /// - /// - It is `>= bytes.len()`. - /// - When decoding variable length items, the 1st offset points "backwards" into the fixed - /// length items (i.e., `length[0] < BYTES_PER_LENGTH_OFFSET`). - /// - When decoding variable-length items, the `n`'th offset was less than the `n-1`'th offset. - OutOfBoundsByte { i: usize }, - /// An offset points “backwards” into the fixed-bytes portion of the message, essentially - /// double-decoding bytes that will also be decoded as fixed-length. 
- /// - /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#1-Offset-into-fixed-portion - OffsetIntoFixedPortion(usize), - /// The first offset does not point to the byte that follows the fixed byte portion, - /// essentially skipping a variable-length byte. - /// - /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#2-Skip-first-variable-byte - OffsetSkipsVariableBytes(usize), - /// An offset points to bytes prior to the previous offset. Depending on how you look at it, - /// this either double-decodes bytes or makes the first offset a negative-length. - /// - /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#3-Offsets-are-decreasing - OffsetsAreDecreasing(usize), - /// An offset references byte indices that do not exist in the source bytes. - /// - /// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view#4-Offsets-are-out-of-bounds - OffsetOutOfBounds(usize), - /// A variable-length list does not have a fixed portion that is cleanly divisible by - /// `BYTES_PER_LENGTH_OFFSET`. - InvalidListFixedBytesLen(usize), - /// Some item has a `ssz_fixed_len` of zero. This is illegal. - ZeroLengthItem, - /// The given bytes were invalid for some application-level reason. - BytesInvalid(String), - /// The given union selector is out of bounds. - UnionSelectorInvalid(u8), -} - -/// Performs checks on the `offset` based upon the other parameters provided. -/// -/// ## Detail -/// -/// - `offset`: the offset bytes (e.g., result of `read_offset(..)`). -/// - `previous_offset`: unless this is the first offset in the SSZ object, the value of the -/// previously-read offset. Used to ensure offsets are not decreasing. -/// - `num_bytes`: the total number of bytes in the SSZ object. Used to ensure the offset is not -/// out of bounds. -/// - `num_fixed_bytes`: the number of fixed-bytes in the struct, if it is known. Used to ensure -/// that the first offset doesn't skip any variable bytes. -/// -/// ## References -/// -/// The checks here are derived from this document: -/// -/// https://notes.ethereum.org/ruKvDXl6QOW3gnqVYb8ezA?view -pub fn sanitize_offset( - offset: usize, - previous_offset: Option, - num_bytes: usize, - num_fixed_bytes: Option, -) -> Result { - if num_fixed_bytes.map_or(false, |fixed_bytes| offset < fixed_bytes) { - Err(DecodeError::OffsetIntoFixedPortion(offset)) - } else if previous_offset.is_none() - && num_fixed_bytes.map_or(false, |fixed_bytes| offset != fixed_bytes) - { - Err(DecodeError::OffsetSkipsVariableBytes(offset)) - } else if offset > num_bytes { - Err(DecodeError::OffsetOutOfBounds(offset)) - } else if previous_offset.map_or(false, |prev| prev > offset) { - Err(DecodeError::OffsetsAreDecreasing(offset)) - } else { - Ok(offset) - } -} - -/// Provides SSZ decoding (de-serialization) via the `from_ssz_bytes(&bytes)` method. -/// -/// See `examples/` for manual implementations or the crate root for implementations using -/// `#[derive(Decode)]`. -pub trait Decode: Sized { - /// Returns `true` if this object has a fixed-length. - /// - /// I.e., there are no variable length items in this object or any of it's contained objects. - fn is_ssz_fixed_len() -> bool; - - /// The number of bytes this object occupies in the fixed-length portion of the SSZ bytes. - /// - /// By default, this is set to `BYTES_PER_LENGTH_OFFSET` which is suitable for variable length - /// objects, but not fixed-length objects. Fixed-length objects _must_ return a value which - /// represents their length. 
- fn ssz_fixed_len() -> usize { - BYTES_PER_LENGTH_OFFSET - } - - /// Attempts to decode `Self` from `bytes`, returning a `DecodeError` on failure. - /// - /// The supplied bytes must be the exact length required to decode `Self`, excess bytes will - /// result in an error. - fn from_ssz_bytes(bytes: &[u8]) -> Result; -} - -#[derive(Copy, Clone, Debug)] -pub struct Offset { - position: usize, - offset: usize, -} - -/// Builds an `SszDecoder`. -/// -/// The purpose of this struct is to split some SSZ bytes into individual slices. The builder is -/// then converted into a `SszDecoder` which decodes those values into object instances. -/// -/// See [`SszDecoder`](struct.SszDecoder.html) for usage examples. -pub struct SszDecoderBuilder<'a> { - bytes: &'a [u8], - items: SmallVec8<&'a [u8]>, - offsets: SmallVec8, - items_index: usize, -} - -impl<'a> SszDecoderBuilder<'a> { - /// Instantiate a new builder that should build a `SszDecoder` over the given `bytes` which - /// are assumed to be the SSZ encoding of some object. - pub fn new(bytes: &'a [u8]) -> Self { - Self { - bytes, - items: smallvec![], - offsets: smallvec![], - items_index: 0, - } - } - - /// Registers a variable-length object as the next item in `bytes`, without specifying the - /// actual type. - /// - /// ## Notes - /// - /// Use of this function is generally discouraged since it cannot detect if some type changes - /// from variable to fixed length. - /// - /// Use `Self::register_type` wherever possible. - pub fn register_anonymous_variable_length_item(&mut self) -> Result<(), DecodeError> { - struct Anonymous; - - impl Decode for Anonymous { - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(_bytes: &[u8]) -> Result { - unreachable!("Anonymous should never be decoded") - } - } - - self.register_type::() - } - - /// Declares that some type `T` is the next item in `bytes`. - pub fn register_type(&mut self) -> Result<(), DecodeError> { - self.register_type_parameterized(T::is_ssz_fixed_len(), T::ssz_fixed_len()) - } - - /// Declares that a type with the given parameters is the next item in `bytes`. - pub fn register_type_parameterized( - &mut self, - is_ssz_fixed_len: bool, - ssz_fixed_len: usize, - ) -> Result<(), DecodeError> { - if is_ssz_fixed_len { - let start = self.items_index; - self.items_index += ssz_fixed_len; - - let slice = - self.bytes - .get(start..self.items_index) - .ok_or(DecodeError::InvalidByteLength { - len: self.bytes.len(), - expected: self.items_index, - })?; - - self.items.push(slice); - } else { - self.offsets.push(Offset { - position: self.items.len(), - offset: sanitize_offset( - read_offset(&self.bytes[self.items_index..])?, - self.offsets.last().map(|o| o.offset), - self.bytes.len(), - None, - )?, - }); - - // Push an empty slice into items; it will be replaced later. - self.items.push(&[]); - - self.items_index += BYTES_PER_LENGTH_OFFSET; - } - - Ok(()) - } - - fn finalize(&mut self) -> Result<(), DecodeError> { - if let Some(first_offset) = self.offsets.first().map(|o| o.offset) { - // Check to ensure the first offset points to the byte immediately following the - // fixed-length bytes. - match first_offset.cmp(&self.items_index) { - Ordering::Less => return Err(DecodeError::OffsetIntoFixedPortion(first_offset)), - Ordering::Greater => { - return Err(DecodeError::OffsetSkipsVariableBytes(first_offset)) - } - Ordering::Equal => (), - } - - // Iterate through each pair of offsets, grabbing the slice between each of the offsets. 
- for pair in self.offsets.windows(2) { - let a = pair[0]; - let b = pair[1]; - - self.items[a.position] = &self.bytes[a.offset..b.offset]; - } - - // Handle the last offset, pushing a slice from it's start through to the end of - // `self.bytes`. - if let Some(last) = self.offsets.last() { - self.items[last.position] = &self.bytes[last.offset..] - } - } else { - // If the container is fixed-length, ensure there are no excess bytes. - if self.items_index != self.bytes.len() { - return Err(DecodeError::InvalidByteLength { - len: self.bytes.len(), - expected: self.items_index, - }); - } - } - - Ok(()) - } - - /// Finalizes the builder, returning a `SszDecoder` that may be used to instantiate objects. - pub fn build(mut self) -> Result, DecodeError> { - self.finalize()?; - - Ok(SszDecoder { items: self.items }) - } -} - -/// Decodes some slices of SSZ into object instances. Should be instantiated using -/// [`SszDecoderBuilder`](struct.SszDecoderBuilder.html). -/// -/// ## Example -/// -/// ```rust -/// use ssz_derive::{Encode, Decode}; -/// use ssz::{Decode, Encode, SszDecoder, SszDecoderBuilder}; -/// -/// #[derive(PartialEq, Debug, Encode, Decode)] -/// struct Foo { -/// a: u64, -/// b: Vec, -/// } -/// -/// fn ssz_decoding_example() { -/// let foo = Foo { -/// a: 42, -/// b: vec![1, 3, 3, 7] -/// }; -/// -/// let bytes = foo.as_ssz_bytes(); -/// -/// let mut builder = SszDecoderBuilder::new(&bytes); -/// -/// builder.register_type::().unwrap(); -/// builder.register_type::>().unwrap(); -/// -/// let mut decoder = builder.build().unwrap(); -/// -/// let decoded_foo = Foo { -/// a: decoder.decode_next().unwrap(), -/// b: decoder.decode_next().unwrap(), -/// }; -/// -/// assert_eq!(foo, decoded_foo); -/// } -/// -/// ``` -pub struct SszDecoder<'a> { - items: SmallVec8<&'a [u8]>, -} - -impl<'a> SszDecoder<'a> { - /// Decodes the next item. - /// - /// # Panics - /// - /// Panics when attempting to decode more items than actually exist. - pub fn decode_next(&mut self) -> Result { - self.decode_next_with(|slice| T::from_ssz_bytes(slice)) - } - - /// Decodes the next item using the provided function. - pub fn decode_next_with(&mut self, f: F) -> Result - where - F: FnOnce(&'a [u8]) -> Result, - { - f(self.items.remove(0)) - } -} - -/// Takes `bytes`, assuming it is the encoding for a SSZ union, and returns the union-selector and -/// the body (trailing bytes). -/// -/// ## Errors -/// -/// Returns an error if: -/// -/// - `bytes` is empty. -/// - the union selector is not a valid value (i.e., larger than the maximum number of variants. -pub fn split_union_bytes(bytes: &[u8]) -> Result<(UnionSelector, &[u8]), DecodeError> { - let selector = bytes - .first() - .copied() - .ok_or(DecodeError::OutOfBoundsByte { i: 0 }) - .and_then(UnionSelector::new)?; - let body = bytes - .get(1..) - .ok_or(DecodeError::OutOfBoundsByte { i: 1 })?; - Ok((selector, body)) -} - -/// Reads a `BYTES_PER_LENGTH_OFFSET`-byte length from `bytes`, where `bytes.len() >= -/// BYTES_PER_LENGTH_OFFSET`. -pub fn read_offset(bytes: &[u8]) -> Result { - decode_offset(bytes.get(0..BYTES_PER_LENGTH_OFFSET).ok_or( - DecodeError::InvalidLengthPrefix { - len: bytes.len(), - expected: BYTES_PER_LENGTH_OFFSET, - }, - )?) -} - -/// Decode bytes as a little-endian usize, returning an `Err` if `bytes.len() != -/// BYTES_PER_LENGTH_OFFSET`. 
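To make the offset rules above concrete, this is the byte layout produced by the `Foo` container from the deleted `struct_definition.rs` example: the single offset stored in the fixed portion must point at the first byte after that fixed portion, which is exactly what `finalize` checks before any variable-length slices are taken. A sketch using the derive macros, with crate names as used elsewhere in this workspace:

    use ssz::Encode;
    use ssz_derive::Encode;

    #[derive(Encode)]
    struct Foo {
        a: u16,
        b: Vec<u8>,
        c: u16,
    }

    fn main() {
        let foo = Foo { a: 42, b: vec![0, 1, 2, 3], c: 11 };
        // Fixed portion: a (2 bytes) + offset to b (4 bytes) + c (2 bytes) = 8 bytes,
        // so the offset stored for `b` is 8 and `b`'s bytes follow immediately after it.
        assert_eq!(foo.as_ssz_bytes(), vec![42, 0, 8, 0, 0, 0, 11, 0, 0, 1, 2, 3]);
    }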
-fn decode_offset(bytes: &[u8]) -> Result { - let len = bytes.len(); - let expected = BYTES_PER_LENGTH_OFFSET; - - if len != expected { - Err(DecodeError::InvalidLengthPrefix { len, expected }) - } else { - let mut array: [u8; BYTES_PER_LENGTH_OFFSET] = std::default::Default::default(); - array.clone_from_slice(bytes); - - Ok(u32::from_le_bytes(array) as usize) - } -} diff --git a/consensus/ssz/src/decode/impls.rs b/consensus/ssz/src/decode/impls.rs deleted file mode 100644 index f58ecd3b8..000000000 --- a/consensus/ssz/src/decode/impls.rs +++ /dev/null @@ -1,775 +0,0 @@ -use super::*; -use crate::decode::try_from_iter::{TryCollect, TryFromIter}; -use core::num::NonZeroUsize; -use ethereum_types::{H160, H256, U128, U256}; -use itertools::process_results; -use smallvec::SmallVec; -use std::collections::{BTreeMap, BTreeSet}; -use std::iter::{self, FromIterator}; -use std::sync::Arc; - -macro_rules! impl_decodable_for_uint { - ($type: ident, $bit_size: expr) => { - impl Decode for $type { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - $bit_size / 8 - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let len = bytes.len(); - let expected = ::ssz_fixed_len(); - - if len != expected { - Err(DecodeError::InvalidByteLength { len, expected }) - } else { - let mut array: [u8; $bit_size / 8] = std::default::Default::default(); - array.clone_from_slice(bytes); - - Ok(Self::from_le_bytes(array)) - } - } - } - }; -} - -impl_decodable_for_uint!(u8, 8); -impl_decodable_for_uint!(u16, 16); -impl_decodable_for_uint!(u32, 32); -impl_decodable_for_uint!(u64, 64); - -#[cfg(target_pointer_width = "32")] -impl_decodable_for_uint!(usize, 32); - -#[cfg(target_pointer_width = "64")] -impl_decodable_for_uint!(usize, 64); - -macro_rules! impl_decode_for_tuples { - ($( - $Tuple:ident { - $(($idx:tt) -> $T:ident)+ - } - )+) => { - $( - impl<$($T: Decode),+> Decode for ($($T,)+) { - fn is_ssz_fixed_len() -> bool { - $( - <$T as Decode>::is_ssz_fixed_len() && - )* - true - } - - fn ssz_fixed_len() -> usize { - if ::is_ssz_fixed_len() { - $( - <$T as Decode>::ssz_fixed_len() + - )* - 0 - } else { - BYTES_PER_LENGTH_OFFSET - } - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let mut builder = SszDecoderBuilder::new(bytes); - - $( - builder.register_type::<$T>()?; - )* - - let mut decoder = builder.build()?; - - Ok(($( - decoder.decode_next::<$T>()?, - )* - )) - } - } - )+ - } -} - -impl_decode_for_tuples! 
{ - Tuple2 { - (0) -> A - (1) -> B - } - Tuple3 { - (0) -> A - (1) -> B - (2) -> C - } - Tuple4 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - } - Tuple5 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - } - Tuple6 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - } - Tuple7 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - } - Tuple8 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - } - Tuple9 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - } - Tuple10 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - (9) -> J - } - Tuple11 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - (9) -> J - (10) -> K - } - Tuple12 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - (9) -> J - (10) -> K - (11) -> L - } -} - -impl Decode for bool { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 1 - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let len = bytes.len(); - let expected = ::ssz_fixed_len(); - - if len != expected { - Err(DecodeError::InvalidByteLength { len, expected }) - } else { - match bytes[0] { - 0b0000_0000 => Ok(false), - 0b0000_0001 => Ok(true), - _ => Err(DecodeError::BytesInvalid(format!( - "Out-of-range for boolean: {}", - bytes[0] - ))), - } - } - } -} - -impl Decode for NonZeroUsize { - fn is_ssz_fixed_len() -> bool { - ::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - ::ssz_fixed_len() - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let x = usize::from_ssz_bytes(bytes)?; - - if x == 0 { - Err(DecodeError::BytesInvalid( - "NonZeroUsize cannot be zero.".to_string(), - )) - } else { - // `unwrap` is safe here as `NonZeroUsize::new()` succeeds if `x > 0` and this path - // never executes when `x == 0`. 
- Ok(NonZeroUsize::new(x).unwrap()) - } - } -} - -impl Decode for Option { - fn is_ssz_fixed_len() -> bool { - false - } - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let (selector, body) = split_union_bytes(bytes)?; - match selector.into() { - 0u8 => Ok(None), - 1u8 => ::from_ssz_bytes(body).map(Option::Some), - other => Err(DecodeError::UnionSelectorInvalid(other)), - } - } -} - -impl Decode for Arc { - fn is_ssz_fixed_len() -> bool { - T::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - T::ssz_fixed_len() - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - T::from_ssz_bytes(bytes).map(Arc::new) - } -} - -impl Decode for H160 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 20 - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let len = bytes.len(); - let expected = ::ssz_fixed_len(); - - if len != expected { - Err(DecodeError::InvalidByteLength { len, expected }) - } else { - Ok(Self::from_slice(bytes)) - } - } -} - -impl Decode for H256 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 32 - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let len = bytes.len(); - let expected = ::ssz_fixed_len(); - - if len != expected { - Err(DecodeError::InvalidByteLength { len, expected }) - } else { - Ok(H256::from_slice(bytes)) - } - } -} - -impl Decode for U256 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 32 - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let len = bytes.len(); - let expected = ::ssz_fixed_len(); - - if len != expected { - Err(DecodeError::InvalidByteLength { len, expected }) - } else { - Ok(U256::from_little_endian(bytes)) - } - } -} - -impl Decode for U128 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 16 - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let len = bytes.len(); - let expected = ::ssz_fixed_len(); - - if len != expected { - Err(DecodeError::InvalidByteLength { len, expected }) - } else { - Ok(U128::from_little_endian(bytes)) - } - } -} - -macro_rules! impl_decodable_for_u8_array { - ($len: expr) => { - impl Decode for [u8; $len] { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - $len - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let len = bytes.len(); - let expected = ::ssz_fixed_len(); - - if len != expected { - Err(DecodeError::InvalidByteLength { len, expected }) - } else { - let mut array: [u8; $len] = [0; $len]; - array.copy_from_slice(bytes); - - Ok(array) - } - } - } - }; -} - -impl_decodable_for_u8_array!(4); -impl_decodable_for_u8_array!(32); -impl_decodable_for_u8_array!(48); - -macro_rules! 
impl_for_vec { - ($type: ty, $max_len: expr) => { - impl Decode for $type { - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - if bytes.is_empty() { - Ok(Self::from_iter(iter::empty())) - } else if T::is_ssz_fixed_len() { - bytes - .chunks(T::ssz_fixed_len()) - .map(T::from_ssz_bytes) - .collect() - } else { - decode_list_of_variable_length_items(bytes, $max_len) - } - } - } - }; -} - -impl_for_vec!(Vec, None); -impl_for_vec!(SmallVec<[T; 1]>, None); -impl_for_vec!(SmallVec<[T; 2]>, None); -impl_for_vec!(SmallVec<[T; 3]>, None); -impl_for_vec!(SmallVec<[T; 4]>, None); -impl_for_vec!(SmallVec<[T; 5]>, None); -impl_for_vec!(SmallVec<[T; 6]>, None); -impl_for_vec!(SmallVec<[T; 7]>, None); -impl_for_vec!(SmallVec<[T; 8]>, None); - -impl Decode for BTreeMap -where - K: Decode + Ord, - V: Decode, -{ - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - if bytes.is_empty() { - Ok(Self::from_iter(iter::empty())) - } else if <(K, V)>::is_ssz_fixed_len() { - bytes - .chunks(<(K, V)>::ssz_fixed_len()) - .map(<(K, V)>::from_ssz_bytes) - .collect() - } else { - decode_list_of_variable_length_items(bytes, None) - } - } -} - -impl Decode for BTreeSet -where - T: Decode + Ord, -{ - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - if bytes.is_empty() { - Ok(Self::from_iter(iter::empty())) - } else if T::is_ssz_fixed_len() { - bytes - .chunks(T::ssz_fixed_len()) - .map(T::from_ssz_bytes) - .collect() - } else { - decode_list_of_variable_length_items(bytes, None) - } - } -} - -/// Decodes `bytes` as if it were a list of variable-length items. -/// -/// The `ssz::SszDecoder` can also perform this functionality, however this function is -/// significantly faster as it is optimized to read same-typed items whilst `ssz::SszDecoder` -/// supports reading items of differing types. -pub fn decode_list_of_variable_length_items>( - bytes: &[u8], - max_len: Option, -) -> Result { - if bytes.is_empty() { - return Container::try_from_iter(iter::empty()).map_err(|e| { - DecodeError::BytesInvalid(format!("Error trying to collect empty list: {e:?}")) - }); - } - - let first_offset = read_offset(bytes)?; - sanitize_offset(first_offset, None, bytes.len(), Some(first_offset))?; - - if first_offset % BYTES_PER_LENGTH_OFFSET != 0 || first_offset < BYTES_PER_LENGTH_OFFSET { - return Err(DecodeError::InvalidListFixedBytesLen(first_offset)); - } - - let num_items = first_offset / BYTES_PER_LENGTH_OFFSET; - - if max_len.map_or(false, |max| num_items > max) { - return Err(DecodeError::BytesInvalid(format!( - "Variable length list of {num_items} items exceeds maximum of {max_len:?}", - ))); - } - - let mut offset = first_offset; - process_results( - (1..=num_items).map(|i| { - let slice_option = if i == num_items { - bytes.get(offset..) - } else { - let start = offset; - - let next_offset = read_offset(&bytes[(i * BYTES_PER_LENGTH_OFFSET)..])?; - offset = - sanitize_offset(next_offset, Some(offset), bytes.len(), Some(first_offset))?; - - bytes.get(start..offset) - }; - - let slice = slice_option.ok_or(DecodeError::OutOfBoundsByte { i: offset })?; - T::from_ssz_bytes(slice) - }), - |iter| iter.try_collect(), - )? - .map_err(|e| DecodeError::BytesInvalid(format!("Error collecting into container: {e:?}"))) -} - -#[cfg(test)] -mod tests { - use super::*; - - // Note: decoding of valid bytes is generally tested "indirectly" in the `/tests` dir, by - // encoding then decoding the element. 
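Aside: the item-count trick used by `decode_list_of_variable_length_items` is worth spelling out. For a list of variable-length items, the encoding begins with one 4-byte offset per item, so the first offset divided by `BYTES_PER_LENGTH_OFFSET` gives the number of items. A dependency-free sketch over the bytes of `vec![vec![0u16, 1], vec![2u16]]`:

```rust
fn main() {
    const BYTES_PER_LENGTH_OFFSET: usize = 4;

    // SSZ encoding of vec![vec![0u16, 1], vec![2u16]]: one 4-byte offset per
    // item, followed by the items back to back.
    let bytes: Vec<u8> = vec![
        8, 0, 0, 0, // offset of item 0
        12, 0, 0, 0, // offset of item 1
        0, 0, 1, 0, // item 0: [0u16, 1u16]
        2, 0, // item 1: [2u16]
    ];

    let read_offset = |i: usize| -> usize {
        let s = i * BYTES_PER_LENGTH_OFFSET;
        u32::from_le_bytes([bytes[s], bytes[s + 1], bytes[s + 2], bytes[s + 3]]) as usize
    };

    // The first offset doubles as the length of the fixed (offset) portion,
    // so it also tells us how many items the list holds.
    let first_offset = read_offset(0);
    assert_eq!(first_offset % BYTES_PER_LENGTH_OFFSET, 0);
    let num_items = first_offset / BYTES_PER_LENGTH_OFFSET;
    assert_eq!(num_items, 2);

    // Each item spans from its offset to the next offset (or to the end).
    assert_eq!(&bytes[read_offset(0)..read_offset(1)], &[0, 0, 1, 0]);
    assert_eq!(&bytes[read_offset(1)..], &[2, 0]);
}
```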
- - #[test] - fn invalid_u8_array_4() { - assert_eq!( - <[u8; 4]>::from_ssz_bytes(&[0; 3]), - Err(DecodeError::InvalidByteLength { - len: 3, - expected: 4 - }) - ); - - assert_eq!( - <[u8; 4]>::from_ssz_bytes(&[0; 5]), - Err(DecodeError::InvalidByteLength { - len: 5, - expected: 4 - }) - ); - } - - #[test] - fn invalid_bool() { - assert_eq!( - bool::from_ssz_bytes(&[0; 2]), - Err(DecodeError::InvalidByteLength { - len: 2, - expected: 1 - }) - ); - - assert_eq!( - bool::from_ssz_bytes(&[]), - Err(DecodeError::InvalidByteLength { - len: 0, - expected: 1 - }) - ); - - if let Err(DecodeError::BytesInvalid(_)) = bool::from_ssz_bytes(&[2]) { - // Success. - } else { - panic!("Did not return error on invalid bool val") - } - } - - #[test] - fn invalid_h256() { - assert_eq!( - H256::from_ssz_bytes(&[0; 33]), - Err(DecodeError::InvalidByteLength { - len: 33, - expected: 32 - }) - ); - - assert_eq!( - H256::from_ssz_bytes(&[0; 31]), - Err(DecodeError::InvalidByteLength { - len: 31, - expected: 32 - }) - ); - } - - #[test] - fn empty_list() { - let vec: Vec> = vec![]; - let bytes = vec.as_ssz_bytes(); - assert!(bytes.is_empty()); - assert_eq!(Vec::from_ssz_bytes(&bytes), Ok(vec),); - } - - #[test] - fn first_length_points_backwards() { - assert_eq!( - >>::from_ssz_bytes(&[0, 0, 0, 0]), - Err(DecodeError::InvalidListFixedBytesLen(0)) - ); - - assert_eq!( - >>::from_ssz_bytes(&[1, 0, 0, 0]), - Err(DecodeError::InvalidListFixedBytesLen(1)) - ); - - assert_eq!( - >>::from_ssz_bytes(&[2, 0, 0, 0]), - Err(DecodeError::InvalidListFixedBytesLen(2)) - ); - - assert_eq!( - >>::from_ssz_bytes(&[3, 0, 0, 0]), - Err(DecodeError::InvalidListFixedBytesLen(3)) - ); - } - - #[test] - fn lengths_are_decreasing() { - assert_eq!( - >>::from_ssz_bytes(&[12, 0, 0, 0, 14, 0, 0, 0, 12, 0, 0, 0, 1, 0, 1, 0]), - Err(DecodeError::OffsetsAreDecreasing(12)) - ); - } - - #[test] - fn awkward_fixed_length_portion() { - assert_eq!( - >>::from_ssz_bytes(&[10, 0, 0, 0, 10, 0, 0, 0, 0, 0]), - Err(DecodeError::InvalidListFixedBytesLen(10)) - ); - } - - #[test] - fn length_out_of_bounds() { - assert_eq!( - >>::from_ssz_bytes(&[5, 0, 0, 0]), - Err(DecodeError::OffsetOutOfBounds(5)) - ); - assert_eq!( - >>::from_ssz_bytes(&[8, 0, 0, 0, 9, 0, 0, 0]), - Err(DecodeError::OffsetOutOfBounds(9)) - ); - assert_eq!( - >>::from_ssz_bytes(&[8, 0, 0, 0, 16, 0, 0, 0]), - Err(DecodeError::OffsetOutOfBounds(16)) - ); - } - - #[test] - fn vec_of_vec_of_u16() { - assert_eq!( - >>::from_ssz_bytes(&[4, 0, 0, 0]), - Ok(vec![vec![]]) - ); - - assert_eq!( - >::from_ssz_bytes(&[0, 0, 1, 0, 2, 0, 3, 0]), - Ok(vec![0, 1, 2, 3]) - ); - assert_eq!(::from_ssz_bytes(&[16, 0]), Ok(16)); - assert_eq!(::from_ssz_bytes(&[0, 1]), Ok(256)); - assert_eq!(::from_ssz_bytes(&[255, 255]), Ok(65535)); - - assert_eq!( - ::from_ssz_bytes(&[255]), - Err(DecodeError::InvalidByteLength { - len: 1, - expected: 2 - }) - ); - - assert_eq!( - ::from_ssz_bytes(&[]), - Err(DecodeError::InvalidByteLength { - len: 0, - expected: 2 - }) - ); - - assert_eq!( - ::from_ssz_bytes(&[0, 1, 2]), - Err(DecodeError::InvalidByteLength { - len: 3, - expected: 2 - }) - ); - } - - #[test] - fn vec_of_u16() { - assert_eq!(>::from_ssz_bytes(&[0, 0, 0, 0]), Ok(vec![0, 0])); - assert_eq!( - >::from_ssz_bytes(&[0, 0, 1, 0, 2, 0, 3, 0]), - Ok(vec![0, 1, 2, 3]) - ); - assert_eq!(::from_ssz_bytes(&[16, 0]), Ok(16)); - assert_eq!(::from_ssz_bytes(&[0, 1]), Ok(256)); - assert_eq!(::from_ssz_bytes(&[255, 255]), Ok(65535)); - - assert_eq!( - ::from_ssz_bytes(&[255]), - Err(DecodeError::InvalidByteLength { - len: 1, 
- expected: 2 - }) - ); - - assert_eq!( - ::from_ssz_bytes(&[]), - Err(DecodeError::InvalidByteLength { - len: 0, - expected: 2 - }) - ); - - assert_eq!( - ::from_ssz_bytes(&[0, 1, 2]), - Err(DecodeError::InvalidByteLength { - len: 3, - expected: 2 - }) - ); - } - - #[test] - fn u16() { - assert_eq!(::from_ssz_bytes(&[0, 0]), Ok(0)); - assert_eq!(::from_ssz_bytes(&[16, 0]), Ok(16)); - assert_eq!(::from_ssz_bytes(&[0, 1]), Ok(256)); - assert_eq!(::from_ssz_bytes(&[255, 255]), Ok(65535)); - - assert_eq!( - ::from_ssz_bytes(&[255]), - Err(DecodeError::InvalidByteLength { - len: 1, - expected: 2 - }) - ); - - assert_eq!( - ::from_ssz_bytes(&[]), - Err(DecodeError::InvalidByteLength { - len: 0, - expected: 2 - }) - ); - - assert_eq!( - ::from_ssz_bytes(&[0, 1, 2]), - Err(DecodeError::InvalidByteLength { - len: 3, - expected: 2 - }) - ); - } - - #[test] - fn tuple() { - assert_eq!(<(u16, u16)>::from_ssz_bytes(&[0, 0, 0, 0]), Ok((0, 0))); - assert_eq!(<(u16, u16)>::from_ssz_bytes(&[16, 0, 17, 0]), Ok((16, 17))); - assert_eq!(<(u16, u16)>::from_ssz_bytes(&[0, 1, 2, 0]), Ok((256, 2))); - assert_eq!( - <(u16, u16)>::from_ssz_bytes(&[255, 255, 0, 0]), - Ok((65535, 0)) - ); - } -} diff --git a/consensus/ssz/src/decode/try_from_iter.rs b/consensus/ssz/src/decode/try_from_iter.rs deleted file mode 100644 index 1ff89a107..000000000 --- a/consensus/ssz/src/decode/try_from_iter.rs +++ /dev/null @@ -1,103 +0,0 @@ -use smallvec::SmallVec; -use std::collections::{BTreeMap, BTreeSet}; -use std::convert::Infallible; -use std::fmt::Debug; - -/// Partial variant of `std::iter::FromIterator`. -/// -/// This trait is implemented for types which can be constructed from an iterator of decoded SSZ -/// values, but which may refuse values once a length limit is reached. -pub trait TryFromIter: Sized { - type Error: Debug; - - fn try_from_iter(iter: I) -> Result - where - I: IntoIterator; -} - -// It would be nice to be able to do a blanket impl, e.g. -// -// `impl TryFromIter for C where C: FromIterator` -// -// However this runs into trait coherence issues due to the type parameter `T` on `TryFromIter`. -// -// E.g. If we added an impl downstream for `List` then another crate downstream of that -// could legally add an impl of `FromIterator for List` which would create -// two conflicting implementations for `List`. Hence the `List` impl is disallowed -// by the compiler in the presence of the blanket impl. That's obviously annoying, so we opt to -// abandon the blanket impl in favour of impls for selected types. -impl TryFromIter for Vec { - type Error = Infallible; - - fn try_from_iter(values: I) -> Result - where - I: IntoIterator, - { - // Pre-allocate the expected size of the Vec, which is parsed from the SSZ input bytes as - // `num_items`. This length has already been checked to be less than or equal to the type's - // maximum length in `decode_list_of_variable_length_items`. 
- let iter = values.into_iter(); - let (_, opt_max_len) = iter.size_hint(); - let mut vec = Vec::with_capacity(opt_max_len.unwrap_or(0)); - vec.extend(iter); - Ok(vec) - } -} - -impl TryFromIter for SmallVec<[T; N]> { - type Error = Infallible; - - fn try_from_iter(iter: I) -> Result - where - I: IntoIterator, - { - Ok(Self::from_iter(iter)) - } -} - -impl TryFromIter<(K, V)> for BTreeMap -where - K: Ord, -{ - type Error = Infallible; - - fn try_from_iter(iter: I) -> Result - where - I: IntoIterator, - { - Ok(Self::from_iter(iter)) - } -} - -impl TryFromIter for BTreeSet -where - T: Ord, -{ - type Error = Infallible; - - fn try_from_iter(iter: I) -> Result - where - I: IntoIterator, - { - Ok(Self::from_iter(iter)) - } -} - -/// Partial variant of `collect`. -pub trait TryCollect: Iterator { - fn try_collect(self) -> Result - where - C: TryFromIter; -} - -impl TryCollect for I -where - I: Iterator, -{ - fn try_collect(self) -> Result - where - C: TryFromIter, - { - C::try_from_iter(self) - } -} diff --git a/consensus/ssz/src/encode.rs b/consensus/ssz/src/encode.rs deleted file mode 100644 index a46ef80e0..000000000 --- a/consensus/ssz/src/encode.rs +++ /dev/null @@ -1,196 +0,0 @@ -use super::*; - -mod impls; - -/// Provides SSZ encoding (serialization) via the `as_ssz_bytes(&self)` method. -/// -/// See `examples/` for manual implementations or the crate root for implementations using -/// `#[derive(Encode)]`. -pub trait Encode { - /// Returns `true` if this object has a fixed-length. - /// - /// I.e., there are no variable length items in this object or any of it's contained objects. - fn is_ssz_fixed_len() -> bool; - - /// Append the encoding `self` to `buf`. - /// - /// Note, variable length objects need only to append their "variable length" portion, they do - /// not need to provide their offset. - fn ssz_append(&self, buf: &mut Vec); - - /// The number of bytes this object occupies in the fixed-length portion of the SSZ bytes. - /// - /// By default, this is set to `BYTES_PER_LENGTH_OFFSET` which is suitable for variable length - /// objects, but not fixed-length objects. Fixed-length objects _must_ return a value which - /// represents their length. - fn ssz_fixed_len() -> usize { - BYTES_PER_LENGTH_OFFSET - } - - /// Returns the size (in bytes) when `self` is serialized. - /// - /// Returns the same value as `self.as_ssz_bytes().len()` but this method is significantly more - /// efficient. - fn ssz_bytes_len(&self) -> usize; - - /// Returns the full-form encoding of this object. - /// - /// The default implementation of this method should suffice for most cases. - fn as_ssz_bytes(&self) -> Vec { - let mut buf = vec![]; - - self.ssz_append(&mut buf); - - buf - } -} - -/// Allow for encoding an ordered series of distinct or indistinct objects as SSZ bytes. -/// -/// **You must call `finalize(..)` after the final `append(..)` call** to ensure the bytes are -/// written to `buf`. 
-/// -/// ## Example -/// -/// Use `SszEncoder` to produce identical output to `foo.as_ssz_bytes()`: -/// -/// ```rust -/// use ssz_derive::{Encode, Decode}; -/// use ssz::{Decode, Encode, SszEncoder}; -/// -/// #[derive(PartialEq, Debug, Encode, Decode)] -/// struct Foo { -/// a: u64, -/// b: Vec, -/// } -/// -/// fn ssz_encode_example() { -/// let foo = Foo { -/// a: 42, -/// b: vec![1, 3, 3, 7] -/// }; -/// -/// let mut buf: Vec = vec![]; -/// let offset = ::ssz_fixed_len() + as Encode>::ssz_fixed_len(); -/// -/// let mut encoder = SszEncoder::container(&mut buf, offset); -/// -/// encoder.append(&foo.a); -/// encoder.append(&foo.b); -/// -/// encoder.finalize(); -/// -/// assert_eq!(foo.as_ssz_bytes(), buf); -/// } -/// -/// ``` -pub struct SszEncoder<'a> { - offset: usize, - buf: &'a mut Vec, - variable_bytes: Vec, -} - -impl<'a> SszEncoder<'a> { - /// Instantiate a new encoder for encoding a SSZ container. - pub fn container(buf: &'a mut Vec, num_fixed_bytes: usize) -> Self { - buf.reserve(num_fixed_bytes); - - Self { - offset: num_fixed_bytes, - buf, - variable_bytes: vec![], - } - } - - /// Append some `item` to the SSZ bytes. - pub fn append(&mut self, item: &T) { - self.append_parameterized(T::is_ssz_fixed_len(), |buf| item.ssz_append(buf)) - } - - /// Uses `ssz_append` to append the encoding of some item to the SSZ bytes. - pub fn append_parameterized(&mut self, is_ssz_fixed_len: bool, ssz_append: F) - where - F: Fn(&mut Vec), - { - if is_ssz_fixed_len { - ssz_append(self.buf); - } else { - self.buf - .extend_from_slice(&encode_length(self.offset + self.variable_bytes.len())); - - ssz_append(&mut self.variable_bytes); - } - } - - /// Write the variable bytes to `self.bytes`. - /// - /// This method must be called after the final `append(..)` call when serializing - /// variable-length items. - pub fn finalize(&mut self) -> &mut Vec { - self.buf.append(&mut self.variable_bytes); - - self.buf - } -} - -/// Encode `len` as a little-endian byte array of `BYTES_PER_LENGTH_OFFSET` length. -/// -/// If `len` is larger than `2 ^ BYTES_PER_LENGTH_OFFSET`, a `debug_assert` is raised. -pub fn encode_length(len: usize) -> [u8; BYTES_PER_LENGTH_OFFSET] { - // Note: it is possible for `len` to be larger than what can be encoded in - // `BYTES_PER_LENGTH_OFFSET` bytes, triggering this debug assertion. - // - // These are the alternatives to using a `debug_assert` here: - // - // 1. Use `assert`. - // 2. Push an error to the caller (e.g., `Option` or `Result`). - // 3. Ignore it completely. - // - // I have avoided (1) because it's basically a choice between "produce invalid SSZ" or "kill - // the entire program". I figure it may be possible for an attacker to trigger this assert and - // take the program down -- I think producing invalid SSZ is a better option than this. - // - // I have avoided (2) because this error will need to be propagated upstream, making encoding a - // function which may fail. I don't think this is ergonomic and the upsides don't outweigh the - // downsides. - // - // I figure a `debug_assertion` is better than (3) as it will give us a change to detect the - // error during testing. - // - // If you have a different opinion, feel free to start an issue and tag @paulhauner. 
- debug_assert!(len <= MAX_LENGTH_VALUE); - - let mut bytes = [0; BYTES_PER_LENGTH_OFFSET]; - bytes.copy_from_slice(&len.to_le_bytes()[0..BYTES_PER_LENGTH_OFFSET]); - bytes -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_encode_length() { - assert_eq!(encode_length(0), [0; 4]); - - assert_eq!(encode_length(1), [1, 0, 0, 0]); - - assert_eq!( - encode_length(MAX_LENGTH_VALUE), - [255; BYTES_PER_LENGTH_OFFSET] - ); - } - - #[test] - #[should_panic] - #[cfg(debug_assertions)] - fn test_encode_length_above_max_debug_panics() { - encode_length(MAX_LENGTH_VALUE + 1); - } - - #[test] - #[cfg(not(debug_assertions))] - fn test_encode_length_above_max_not_debug_does_not_panic() { - assert_eq!(&encode_length(MAX_LENGTH_VALUE + 1)[..], &[0; 4]); - } -} diff --git a/consensus/ssz/src/encode/impls.rs b/consensus/ssz/src/encode/impls.rs deleted file mode 100644 index 8c609d939..000000000 --- a/consensus/ssz/src/encode/impls.rs +++ /dev/null @@ -1,633 +0,0 @@ -use super::*; -use core::num::NonZeroUsize; -use ethereum_types::{H160, H256, U128, U256}; -use smallvec::SmallVec; -use std::collections::{BTreeMap, BTreeSet}; -use std::sync::Arc; - -macro_rules! impl_encodable_for_uint { - ($type: ident, $bit_size: expr) => { - impl Encode for $type { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - $bit_size / 8 - } - - fn ssz_bytes_len(&self) -> usize { - $bit_size / 8 - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(&self.to_le_bytes()); - } - } - }; -} - -impl_encodable_for_uint!(u8, 8); -impl_encodable_for_uint!(u16, 16); -impl_encodable_for_uint!(u32, 32); -impl_encodable_for_uint!(u64, 64); - -#[cfg(target_pointer_width = "32")] -impl_encodable_for_uint!(usize, 32); - -#[cfg(target_pointer_width = "64")] -impl_encodable_for_uint!(usize, 64); - -// Based on the `tuple_impls` macro from the standard library. -macro_rules! impl_encode_for_tuples { - ($( - $Tuple:ident { - $(($idx:tt) -> $T:ident)+ - } - )+) => { - $( - impl<$($T: Encode),+> Encode for ($($T,)+) { - fn is_ssz_fixed_len() -> bool { - $( - <$T as Encode>::is_ssz_fixed_len() && - )* - true - } - - fn ssz_fixed_len() -> usize { - if ::is_ssz_fixed_len() { - $( - <$T as Encode>::ssz_fixed_len() + - )* - 0 - } else { - BYTES_PER_LENGTH_OFFSET - } - } - - fn ssz_bytes_len(&self) -> usize { - if ::is_ssz_fixed_len() { - ::ssz_fixed_len() - } else { - let mut len = 0; - $( - len += if <$T as Encode>::is_ssz_fixed_len() { - <$T as Encode>::ssz_fixed_len() - } else { - BYTES_PER_LENGTH_OFFSET + - self.$idx.ssz_bytes_len() - }; - )* - len - } - } - - fn ssz_append(&self, buf: &mut Vec) { - let offset = $( - <$T as Encode>::ssz_fixed_len() + - )* - 0; - - let mut encoder = SszEncoder::container(buf, offset); - - $( - encoder.append(&self.$idx); - )* - - encoder.finalize(); - } - } - )+ - } -} - -impl_encode_for_tuples! 
{ - Tuple2 { - (0) -> A - (1) -> B - } - Tuple3 { - (0) -> A - (1) -> B - (2) -> C - } - Tuple4 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - } - Tuple5 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - } - Tuple6 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - } - Tuple7 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - } - Tuple8 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - } - Tuple9 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - } - Tuple10 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - (9) -> J - } - Tuple11 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - (9) -> J - (10) -> K - } - Tuple12 { - (0) -> A - (1) -> B - (2) -> C - (3) -> D - (4) -> E - (5) -> F - (6) -> G - (7) -> H - (8) -> I - (9) -> J - (10) -> K - (11) -> L - } -} - -impl Encode for Option { - fn is_ssz_fixed_len() -> bool { - false - } - fn ssz_append(&self, buf: &mut Vec) { - match self { - Option::None => { - let union_selector: u8 = 0u8; - buf.push(union_selector); - } - Option::Some(ref inner) => { - let union_selector: u8 = 1u8; - buf.push(union_selector); - inner.ssz_append(buf); - } - } - } - fn ssz_bytes_len(&self) -> usize { - match self { - Option::None => 1usize, - Option::Some(ref inner) => inner - .ssz_bytes_len() - .checked_add(1) - .expect("encoded length must be less than usize::max_value"), - } - } -} - -impl Encode for Arc { - fn is_ssz_fixed_len() -> bool { - T::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - T::ssz_fixed_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.as_ref().ssz_append(buf) - } - - fn ssz_bytes_len(&self) -> usize { - self.as_ref().ssz_bytes_len() - } -} - -// Encode transparently through references. -impl<'a, T: Encode> Encode for &'a T { - fn is_ssz_fixed_len() -> bool { - T::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - T::ssz_fixed_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - T::ssz_append(self, buf) - } - - fn ssz_bytes_len(&self) -> usize { - T::ssz_bytes_len(self) - } -} - -/// Compute the encoded length of a vector-like sequence of `T`. -pub fn sequence_ssz_bytes_len(iter: I) -> usize -where - I: Iterator + ExactSizeIterator, - T: Encode, -{ - // Compute length before doing any iteration. - let length = iter.len(); - if ::is_ssz_fixed_len() { - ::ssz_fixed_len() * length - } else { - let mut len = iter.map(|item| item.ssz_bytes_len()).sum(); - len += BYTES_PER_LENGTH_OFFSET * length; - len - } -} - -/// Encode a vector-like sequence of `T`. -pub fn sequence_ssz_append(iter: I, buf: &mut Vec) -where - I: Iterator + ExactSizeIterator, - T: Encode, -{ - if T::is_ssz_fixed_len() { - buf.reserve(T::ssz_fixed_len() * iter.len()); - - for item in iter { - item.ssz_append(buf); - } - } else { - let mut encoder = SszEncoder::container(buf, iter.len() * BYTES_PER_LENGTH_OFFSET); - - for item in iter { - encoder.append(&item); - } - - encoder.finalize(); - } -} - -macro_rules! 
impl_for_vec { - ($type: ty) => { - impl Encode for $type { - fn is_ssz_fixed_len() -> bool { - false - } - - fn ssz_bytes_len(&self) -> usize { - sequence_ssz_bytes_len(self.iter()) - } - - fn ssz_append(&self, buf: &mut Vec) { - sequence_ssz_append(self.iter(), buf) - } - } - }; -} - -impl_for_vec!(Vec); -impl_for_vec!(SmallVec<[T; 1]>); -impl_for_vec!(SmallVec<[T; 2]>); -impl_for_vec!(SmallVec<[T; 3]>); -impl_for_vec!(SmallVec<[T; 4]>); -impl_for_vec!(SmallVec<[T; 5]>); -impl_for_vec!(SmallVec<[T; 6]>); -impl_for_vec!(SmallVec<[T; 7]>); -impl_for_vec!(SmallVec<[T; 8]>); - -impl Encode for BTreeMap -where - K: Encode + Ord, - V: Encode, -{ - fn is_ssz_fixed_len() -> bool { - false - } - - fn ssz_bytes_len(&self) -> usize { - sequence_ssz_bytes_len(self.iter()) - } - - fn ssz_append(&self, buf: &mut Vec) { - sequence_ssz_append(self.iter(), buf) - } -} - -impl Encode for BTreeSet -where - T: Encode + Ord, -{ - fn is_ssz_fixed_len() -> bool { - false - } - - fn ssz_bytes_len(&self) -> usize { - sequence_ssz_bytes_len(self.iter()) - } - - fn ssz_append(&self, buf: &mut Vec) { - sequence_ssz_append(self.iter(), buf) - } -} - -impl Encode for bool { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 1 - } - - fn ssz_bytes_len(&self) -> usize { - 1 - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(&(*self as u8).to_le_bytes()); - } -} - -impl Encode for NonZeroUsize { - fn is_ssz_fixed_len() -> bool { - ::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - ::ssz_fixed_len() - } - - fn ssz_bytes_len(&self) -> usize { - std::mem::size_of::() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.get().ssz_append(buf) - } -} - -impl Encode for H160 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 20 - } - - fn ssz_bytes_len(&self) -> usize { - 20 - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(self.as_bytes()); - } -} - -impl Encode for H256 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 32 - } - - fn ssz_bytes_len(&self) -> usize { - 32 - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(self.as_bytes()); - } -} - -impl Encode for U256 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 32 - } - - fn ssz_bytes_len(&self) -> usize { - 32 - } - - fn ssz_append(&self, buf: &mut Vec) { - let n = ::ssz_fixed_len(); - let s = buf.len(); - - buf.resize(s + n, 0); - self.to_little_endian(&mut buf[s..]); - } -} - -impl Encode for U128 { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 16 - } - - fn ssz_bytes_len(&self) -> usize { - 16 - } - - fn ssz_append(&self, buf: &mut Vec) { - let n = ::ssz_fixed_len(); - let s = buf.len(); - - buf.resize(s + n, 0); - self.to_little_endian(&mut buf[s..]); - } -} - -macro_rules! 
impl_encodable_for_u8_array { - ($len: expr) => { - impl Encode for [u8; $len] { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - $len - } - - fn ssz_bytes_len(&self) -> usize { - $len - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(&self[..]); - } - } - }; -} - -impl_encodable_for_u8_array!(4); -impl_encodable_for_u8_array!(32); -impl_encodable_for_u8_array!(48); - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn vec_of_u8() { - let vec: Vec = vec![]; - assert_eq!(vec.as_ssz_bytes(), vec![]); - - let vec: Vec = vec![1]; - assert_eq!(vec.as_ssz_bytes(), vec![1]); - - let vec: Vec = vec![0, 1, 2, 3]; - assert_eq!(vec.as_ssz_bytes(), vec![0, 1, 2, 3]); - } - - #[test] - fn vec_of_vec_of_u8() { - let vec: Vec> = vec![]; - assert_eq!(vec.as_ssz_bytes(), vec![]); - - let vec: Vec> = vec![vec![]]; - assert_eq!(vec.as_ssz_bytes(), vec![4, 0, 0, 0]); - - let vec: Vec> = vec![vec![], vec![]]; - assert_eq!(vec.as_ssz_bytes(), vec![8, 0, 0, 0, 8, 0, 0, 0]); - - let vec: Vec> = vec![vec![0, 1, 2], vec![11, 22, 33]]; - assert_eq!( - vec.as_ssz_bytes(), - vec![8, 0, 0, 0, 11, 0, 0, 0, 0, 1, 2, 11, 22, 33] - ); - } - - #[test] - fn ssz_encode_u8() { - assert_eq!(0_u8.as_ssz_bytes(), vec![0]); - assert_eq!(1_u8.as_ssz_bytes(), vec![1]); - assert_eq!(100_u8.as_ssz_bytes(), vec![100]); - assert_eq!(255_u8.as_ssz_bytes(), vec![255]); - } - - #[test] - fn ssz_encode_u16() { - assert_eq!(1_u16.as_ssz_bytes(), vec![1, 0]); - assert_eq!(100_u16.as_ssz_bytes(), vec![100, 0]); - assert_eq!((1_u16 << 8).as_ssz_bytes(), vec![0, 1]); - assert_eq!(65535_u16.as_ssz_bytes(), vec![255, 255]); - } - - #[test] - fn ssz_encode_u32() { - assert_eq!(1_u32.as_ssz_bytes(), vec![1, 0, 0, 0]); - assert_eq!(100_u32.as_ssz_bytes(), vec![100, 0, 0, 0]); - assert_eq!((1_u32 << 16).as_ssz_bytes(), vec![0, 0, 1, 0]); - assert_eq!((1_u32 << 24).as_ssz_bytes(), vec![0, 0, 0, 1]); - assert_eq!((!0_u32).as_ssz_bytes(), vec![255, 255, 255, 255]); - } - - #[test] - fn ssz_encode_u64() { - assert_eq!(1_u64.as_ssz_bytes(), vec![1, 0, 0, 0, 0, 0, 0, 0]); - assert_eq!( - (!0_u64).as_ssz_bytes(), - vec![255, 255, 255, 255, 255, 255, 255, 255] - ); - } - - #[test] - fn ssz_encode_usize() { - assert_eq!(1_usize.as_ssz_bytes(), vec![1, 0, 0, 0, 0, 0, 0, 0]); - assert_eq!( - (!0_usize).as_ssz_bytes(), - vec![255, 255, 255, 255, 255, 255, 255, 255] - ); - } - - #[test] - fn ssz_encode_option_u8() { - let opt: Option = None; - assert_eq!(opt.as_ssz_bytes(), vec![0]); - let opt: Option = Some(2); - assert_eq!(opt.as_ssz_bytes(), vec![1, 2]); - } - - #[test] - fn ssz_encode_bool() { - assert_eq!(true.as_ssz_bytes(), vec![1]); - assert_eq!(false.as_ssz_bytes(), vec![0]); - } - - #[test] - fn ssz_encode_h256() { - assert_eq!(H256::from(&[0; 32]).as_ssz_bytes(), vec![0; 32]); - assert_eq!(H256::from(&[1; 32]).as_ssz_bytes(), vec![1; 32]); - - let bytes = vec![ - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]; - - assert_eq!(H256::from_slice(&bytes).as_ssz_bytes(), bytes); - } - - #[test] - fn ssz_encode_u8_array_4() { - assert_eq!([0, 0, 0, 0].as_ssz_bytes(), vec![0; 4]); - assert_eq!([1, 0, 0, 0].as_ssz_bytes(), vec![1, 0, 0, 0]); - assert_eq!([1, 2, 3, 4].as_ssz_bytes(), vec![1, 2, 3, 4]); - } - - #[test] - fn tuple() { - assert_eq!((10u8, 11u8).as_ssz_bytes(), vec![10, 11]); - assert_eq!((10u32, 11u8).as_ssz_bytes(), vec![10, 0, 0, 0, 11]); - assert_eq!((10u8, 11u8, 12u8).as_ssz_bytes(), vec![10, 11, 12]); - } -} diff --git 
a/consensus/ssz/src/legacy.rs b/consensus/ssz/src/legacy.rs deleted file mode 100644 index 4953db057..000000000 --- a/consensus/ssz/src/legacy.rs +++ /dev/null @@ -1,265 +0,0 @@ -//! Provides a "legacy" version of SSZ encoding for `Option where T: Encode + Decode`. -//! -//! The SSZ specification changed in 2021 to use a 1-byte union selector, instead of a 4-byte one -//! which was used in the Lighthouse database. -//! -//! Users can use the `four_byte_option_impl` macro to define a module that can be used with the -//! `#[ssz(with = "module")]`. -//! -//! ## Example -//! -//! ```rust -//! use ssz_derive::{Encode, Decode}; -//! use ssz::four_byte_option_impl; -//! -//! four_byte_option_impl!(impl_for_u64, u64); -//! -//! #[derive(Encode, Decode)] -//! struct Foo { -//! #[ssz(with = "impl_for_u64")] -//! a: Option, -//! } -//! ``` - -use crate::*; - -#[macro_export] -macro_rules! four_byte_option_impl { - ($mod_name: ident, $type: ty) => { - #[allow(dead_code)] - mod $mod_name { - use super::*; - - pub mod encode { - use super::*; - #[allow(unused_imports)] - use ssz::*; - - pub fn is_ssz_fixed_len() -> bool { - false - } - - pub fn ssz_fixed_len() -> usize { - BYTES_PER_LENGTH_OFFSET - } - - pub fn ssz_bytes_len(opt: &Option<$type>) -> usize { - if let Some(some) = opt { - let len = if <$type as Encode>::is_ssz_fixed_len() { - <$type as Encode>::ssz_fixed_len() - } else { - <$type as Encode>::ssz_bytes_len(some) - }; - len + BYTES_PER_LENGTH_OFFSET - } else { - BYTES_PER_LENGTH_OFFSET - } - } - - pub fn ssz_append(opt: &Option<$type>, buf: &mut Vec) { - match opt { - None => buf.extend_from_slice(&legacy::encode_four_byte_union_selector(0)), - Some(t) => { - buf.extend_from_slice(&legacy::encode_four_byte_union_selector(1)); - t.ssz_append(buf); - } - } - } - - pub fn as_ssz_bytes(opt: &Option<$type>) -> Vec { - let mut buf = vec![]; - - ssz_append(opt, &mut buf); - - buf - } - } - - pub mod decode { - use super::*; - #[allow(unused_imports)] - use ssz::*; - - pub fn is_ssz_fixed_len() -> bool { - false - } - - pub fn ssz_fixed_len() -> usize { - BYTES_PER_LENGTH_OFFSET - } - - pub fn from_ssz_bytes(bytes: &[u8]) -> Result, DecodeError> { - if bytes.len() < BYTES_PER_LENGTH_OFFSET { - return Err(DecodeError::InvalidByteLength { - len: bytes.len(), - expected: BYTES_PER_LENGTH_OFFSET, - }); - } - - let (index_bytes, value_bytes) = bytes.split_at(BYTES_PER_LENGTH_OFFSET); - - let index = legacy::read_four_byte_union_selector(index_bytes)?; - if index == 0 { - Ok(None) - } else if index == 1 { - Ok(Some(<$type as ssz::Decode>::from_ssz_bytes(value_bytes)?)) - } else { - Err(DecodeError::BytesInvalid(format!( - "{} is not a valid union index for Option", - index - ))) - } - } - } - } - }; -} - -pub fn encode_four_byte_union_selector(selector: usize) -> [u8; BYTES_PER_LENGTH_OFFSET] { - encode_length(selector) -} - -pub fn read_four_byte_union_selector(bytes: &[u8]) -> Result { - read_offset(bytes) -} - -#[cfg(test)] -mod test { - use super::*; - use crate as ssz; - use ssz_derive::{Decode, Encode}; - - type VecU16 = Vec; - - four_byte_option_impl!(impl_u16, u16); - four_byte_option_impl!(impl_vec_u16, VecU16); - - #[test] - fn ssz_encode_option_u16() { - let item = Some(65535_u16); - let bytes = vec![1, 0, 0, 0, 255, 255]; - assert_eq!(impl_u16::encode::as_ssz_bytes(&item), bytes); - assert_eq!(impl_u16::decode::from_ssz_bytes(&bytes).unwrap(), item); - - let item = None; - let bytes = vec![0, 0, 0, 0]; - assert_eq!(impl_u16::encode::as_ssz_bytes(&item), bytes); - 
assert_eq!(impl_u16::decode::from_ssz_bytes(&bytes).unwrap(), None); - } - - #[test] - fn ssz_encode_option_vec_u16() { - let item = Some(vec![0_u16, 1]); - let bytes = vec![1, 0, 0, 0, 0, 0, 1, 0]; - assert_eq!(impl_vec_u16::encode::as_ssz_bytes(&item), bytes); - assert_eq!(impl_vec_u16::decode::from_ssz_bytes(&bytes).unwrap(), item); - - let item = None; - let bytes = vec![0, 0, 0, 0]; - assert_eq!(impl_vec_u16::encode::as_ssz_bytes(&item), bytes); - assert_eq!(impl_vec_u16::decode::from_ssz_bytes(&bytes).unwrap(), item); - } - - fn round_trip(items: Vec) { - for item in items { - let encoded = &item.as_ssz_bytes(); - assert_eq!(item.ssz_bytes_len(), encoded.len()); - assert_eq!(T::from_ssz_bytes(encoded), Ok(item)); - } - } - - #[derive(Debug, PartialEq, Encode, Decode)] - struct TwoVariableLenOptions { - a: u16, - #[ssz(with = "impl_u16")] - b: Option, - #[ssz(with = "impl_vec_u16")] - c: Option>, - #[ssz(with = "impl_vec_u16")] - d: Option>, - } - - #[test] - #[allow(clippy::zero_prefixed_literal)] - fn two_variable_len_options_encoding() { - let s = TwoVariableLenOptions { - a: 42, - b: None, - c: Some(vec![0]), - d: None, - }; - - let bytes = vec![ - // 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 - // | option | offset | offset | option = vec![ - TwoVariableLenOptions { - a: 42, - b: Some(12), - c: Some(vec![0]), - d: Some(vec![1]), - }, - TwoVariableLenOptions { - a: 42, - b: Some(12), - c: Some(vec![0]), - d: None, - }, - TwoVariableLenOptions { - a: 42, - b: None, - c: Some(vec![0]), - d: None, - }, - TwoVariableLenOptions { - a: 42, - b: None, - c: None, - d: None, - }, - ]; - - round_trip(vec); - } - - #[test] - fn tuple_u8_u16() { - let vec: Vec<(u8, u16)> = vec![ - (0, 0), - (0, 1), - (1, 0), - (u8::max_value(), u16::max_value()), - (0, u16::max_value()), - (u8::max_value(), 0), - (42, 12301), - ]; - - round_trip(vec); - } - - #[test] - fn tuple_vec_vec() { - let vec: Vec<(u64, Vec, Vec>)> = vec![ - (0, vec![], vec![vec![]]), - (99, vec![101], vec![vec![], vec![]]), - ( - 42, - vec![12, 13, 14], - vec![vec![99, 98, 97, 96], vec![42, 44, 46, 48, 50]], - ), - ]; - - round_trip(vec); - } -} diff --git a/consensus/ssz/src/lib.rs b/consensus/ssz/src/lib.rs deleted file mode 100644 index e71157a3e..000000000 --- a/consensus/ssz/src/lib.rs +++ /dev/null @@ -1,71 +0,0 @@ -//! Provides encoding (serialization) and decoding (deserialization) in the SimpleSerialize (SSZ) -//! format designed for use in Ethereum 2.0. -//! -//! Adheres to the Ethereum 2.0 [SSZ -//! specification](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/ssz/simple-serialize.md) -//! at v0.12.1. -//! -//! ## Example -//! -//! ```rust -//! use ssz_derive::{Encode, Decode}; -//! use ssz::{Decode, Encode}; -//! -//! #[derive(PartialEq, Debug, Encode, Decode)] -//! struct Foo { -//! a: u64, -//! b: Vec, -//! } -//! -//! fn ssz_encode_decode_example() { -//! let foo = Foo { -//! a: 42, -//! b: vec![1, 3, 3, 7] -//! }; -//! -//! let ssz_bytes: Vec = foo.as_ssz_bytes(); -//! -//! let decoded_foo = Foo::from_ssz_bytes(&ssz_bytes).unwrap(); -//! -//! assert_eq!(foo, decoded_foo); -//! } -//! -//! ``` -//! -//! See `examples/` for manual implementations of the `Encode` and `Decode` traits. 
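Aside: the practical difference between this legacy module and the current `Option` impls earlier in the diff is only the width of the union selector. A self-contained comparison of the raw bytes for `Some(65535u16)` and `None`, matching the expectations in the tests above:

```rust
fn main() {
    // Legacy (pre-2021 spec) encoding: 4-byte little-endian union selector.
    let legacy_some: Vec<u8> = vec![1, 0, 0, 0, 255, 255]; // Some(65535u16)
    let legacy_none: Vec<u8> = vec![0, 0, 0, 0]; // None

    // Current spec encoding: 1-byte union selector.
    let modern_some: Vec<u8> = vec![1, 255, 255]; // Some(65535u16)
    let modern_none: Vec<u8> = vec![0]; // None

    // Both carry the same payload after the selector; only its width differs.
    assert_eq!(&legacy_some[4..], &modern_some[1..]);
    assert!(legacy_none.len() == 4 && modern_none.len() == 1);
}
```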
- -mod decode; -mod encode; -pub mod legacy; -mod union_selector; - -pub use decode::{ - impls::decode_list_of_variable_length_items, read_offset, split_union_bytes, - try_from_iter::TryFromIter, Decode, DecodeError, SszDecoder, SszDecoderBuilder, -}; -pub use encode::{encode_length, Encode, SszEncoder}; -pub use union_selector::UnionSelector; - -/// The number of bytes used to represent an offset. -pub const BYTES_PER_LENGTH_OFFSET: usize = 4; -/// The maximum value that can be represented using `BYTES_PER_LENGTH_OFFSET`. -#[cfg(target_pointer_width = "32")] -pub const MAX_LENGTH_VALUE: usize = (std::u32::MAX >> (8 * (4 - BYTES_PER_LENGTH_OFFSET))) as usize; -#[cfg(target_pointer_width = "64")] -pub const MAX_LENGTH_VALUE: usize = (std::u64::MAX >> (8 * (8 - BYTES_PER_LENGTH_OFFSET))) as usize; - -/// The number of bytes used to indicate the variant of a union. -pub const BYTES_PER_UNION_SELECTOR: usize = 1; -/// The highest possible union selector value (higher values are reserved for backwards compatible -/// extensions). -pub const MAX_UNION_SELECTOR: u8 = 127; - -/// Convenience function to SSZ encode an object supporting ssz::Encode. -/// -/// Equivalent to `val.as_ssz_bytes()`. -pub fn ssz_encode(val: &T) -> Vec -where - T: Encode, -{ - val.as_ssz_bytes() -} diff --git a/consensus/ssz/src/union_selector.rs b/consensus/ssz/src/union_selector.rs deleted file mode 100644 index 18bab094a..000000000 --- a/consensus/ssz/src/union_selector.rs +++ /dev/null @@ -1,29 +0,0 @@ -use crate::*; - -/// Provides the one-byte "selector" from the SSZ union specification: -/// -/// https://github.com/ethereum/consensus-specs/blob/v1.1.0-beta.3/ssz/simple-serialize.md#union -#[derive(Copy, Clone)] -pub struct UnionSelector(u8); - -impl From for u8 { - fn from(union_selector: UnionSelector) -> u8 { - union_selector.0 - } -} - -impl PartialEq for UnionSelector { - fn eq(&self, other: &u8) -> bool { - self.0 == *other - } -} - -impl UnionSelector { - /// Instantiate `self`, returning an error if `selector > MAX_UNION_SELECTOR`. 
- pub fn new(selector: u8) -> Result { - Some(selector) - .filter(|_| selector <= MAX_UNION_SELECTOR) - .map(Self) - .ok_or(DecodeError::UnionSelectorInvalid(selector)) - } -} diff --git a/consensus/ssz/tests/tests.rs b/consensus/ssz/tests/tests.rs deleted file mode 100644 index f52d2c5cd..000000000 --- a/consensus/ssz/tests/tests.rs +++ /dev/null @@ -1,390 +0,0 @@ -use ethereum_types::H256; -use ssz::{Decode, DecodeError, Encode}; -use ssz_derive::{Decode, Encode}; - -mod round_trip { - use super::*; - use std::collections::BTreeMap; - use std::iter::FromIterator; - - fn round_trip(items: Vec) { - for item in items { - let encoded = &item.as_ssz_bytes(); - assert_eq!(item.ssz_bytes_len(), encoded.len()); - assert_eq!(T::from_ssz_bytes(encoded), Ok(item)); - } - } - - #[test] - fn bool() { - let items: Vec = vec![true, false]; - - round_trip(items); - } - - #[test] - fn option_u16() { - let items: Vec> = vec![None, Some(2u16)]; - - round_trip(items); - } - - #[test] - fn u8_array_4() { - let items: Vec<[u8; 4]> = vec![[0, 0, 0, 0], [1, 0, 0, 0], [1, 2, 3, 4], [1, 2, 0, 4]]; - - round_trip(items); - } - - #[test] - fn h256() { - let items: Vec = vec![H256::zero(), H256::from([1; 32]), H256::random()]; - - round_trip(items); - } - - #[test] - fn vec_of_h256() { - let items: Vec> = vec![ - vec![], - vec![H256::zero(), H256::from([1; 32]), H256::random()], - ]; - - round_trip(items); - } - - #[test] - fn option_vec_h256() { - let items: Vec>> = vec![ - None, - Some(vec![]), - Some(vec![H256::zero(), H256::from([1; 32]), H256::random()]), - ]; - - round_trip(items); - } - - #[test] - fn vec_u16() { - let items: Vec> = vec![ - vec![], - vec![255], - vec![0, 1, 2], - vec![100; 64], - vec![255, 0, 255], - ]; - - round_trip(items); - } - - #[test] - fn vec_of_vec_u16() { - let items: Vec>> = vec![ - vec![], - vec![vec![]], - vec![vec![1, 2, 3]], - vec![vec![], vec![]], - vec![vec![], vec![1, 2, 3]], - vec![vec![1, 2, 3], vec![1, 2, 3]], - vec![vec![1, 2, 3], vec![], vec![1, 2, 3]], - vec![vec![], vec![], vec![1, 2, 3]], - vec![vec![], vec![1], vec![1, 2, 3]], - vec![vec![], vec![1], vec![1, 2, 3]], - ]; - - round_trip(items); - } - - #[derive(Debug, PartialEq, Encode, Decode)] - struct FixedLen { - a: u16, - b: u64, - c: u32, - } - - #[test] - #[allow(clippy::zero_prefixed_literal)] - fn fixed_len_struct_encoding() { - let items: Vec = vec![ - FixedLen { a: 0, b: 0, c: 0 }, - FixedLen { a: 1, b: 1, c: 1 }, - FixedLen { a: 1, b: 0, c: 1 }, - ]; - - let expected_encodings = vec![ - // | u16--| u64----------------------------| u32----------| - vec![00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00], - vec![01, 00, 01, 00, 00, 00, 00, 00, 00, 00, 01, 00, 00, 00], - vec![01, 00, 00, 00, 00, 00, 00, 00, 00, 00, 01, 00, 00, 00], - ]; - - for i in 0..items.len() { - assert_eq!( - items[i].as_ssz_bytes(), - expected_encodings[i], - "Failed on {}", - i - ); - } - } - - #[test] - fn fixed_len_excess_bytes() { - let fixed = FixedLen { a: 1, b: 2, c: 3 }; - - let mut bytes = fixed.as_ssz_bytes(); - bytes.append(&mut vec![0]); - - assert_eq!( - FixedLen::from_ssz_bytes(&bytes), - Err(DecodeError::InvalidByteLength { - len: 15, - expected: 14, - }) - ); - } - - #[test] - fn vec_of_fixed_len_struct() { - let items: Vec = vec![ - FixedLen { a: 0, b: 0, c: 0 }, - FixedLen { a: 1, b: 1, c: 1 }, - FixedLen { a: 1, b: 0, c: 1 }, - ]; - - round_trip(items); - } - - #[derive(Debug, PartialEq, Encode, Decode)] - struct VariableLen { - a: u16, - b: Vec, - c: u32, - } - - #[test] - 
#[allow(clippy::zero_prefixed_literal)] - fn offset_into_fixed_bytes() { - let bytes = vec![ - // 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 - // | offset | u32 | variable - 01, 00, 09, 00, 00, 00, 01, 00, 00, 00, 00, 00, 01, 00, 02, 00, - ]; - - assert_eq!( - VariableLen::from_ssz_bytes(&bytes), - Err(DecodeError::OffsetIntoFixedPortion(9)) - ); - } - - #[test] - fn variable_len_excess_bytes() { - let variable = VariableLen { - a: 1, - b: vec![2], - c: 3, - }; - - let mut bytes = variable.as_ssz_bytes(); - bytes.append(&mut vec![0]); - - // The error message triggered is not so helpful, it's caught by a side-effect. Just - // checking there is _some_ error is fine. - assert!(VariableLen::from_ssz_bytes(&bytes).is_err()); - } - - #[test] - #[allow(clippy::zero_prefixed_literal)] - fn first_offset_skips_byte() { - let bytes = vec![ - // 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 - // | offset | u32 | variable - 01, 00, 11, 00, 00, 00, 01, 00, 00, 00, 00, 00, 01, 00, 02, 00, - ]; - - assert_eq!( - VariableLen::from_ssz_bytes(&bytes), - Err(DecodeError::OffsetSkipsVariableBytes(11)) - ); - } - - #[test] - #[allow(clippy::zero_prefixed_literal)] - fn variable_len_struct_encoding() { - let items: Vec = vec![ - VariableLen { - a: 0, - b: vec![], - c: 0, - }, - VariableLen { - a: 1, - b: vec![0], - c: 1, - }, - VariableLen { - a: 1, - b: vec![0, 1, 2], - c: 1, - }, - ]; - - let expected_encodings = vec![ - // 00..................................09 - // | u16--| vec offset-----| u32------------| vec payload --------| - vec![00, 00, 10, 00, 00, 00, 00, 00, 00, 00], - vec![01, 00, 10, 00, 00, 00, 01, 00, 00, 00, 00, 00], - vec![ - 01, 00, 10, 00, 00, 00, 01, 00, 00, 00, 00, 00, 01, 00, 02, 00, - ], - ]; - - for i in 0..items.len() { - assert_eq!( - items[i].as_ssz_bytes(), - expected_encodings[i], - "Failed on {}", - i - ); - } - } - - #[test] - fn vec_of_variable_len_struct() { - let items: Vec = vec![ - VariableLen { - a: 0, - b: vec![], - c: 0, - }, - VariableLen { - a: 255, - b: vec![0, 1, 2, 3], - c: 99, - }, - VariableLen { - a: 255, - b: vec![0], - c: 99, - }, - VariableLen { - a: 50, - b: vec![0], - c: 0, - }, - ]; - - round_trip(items); - } - - #[derive(Debug, PartialEq, Encode, Decode)] - struct ThreeVariableLen { - a: u16, - b: Vec, - c: Vec, - d: Vec, - } - - #[test] - fn three_variable_len() { - let vec: Vec = vec![ThreeVariableLen { - a: 42, - b: vec![0], - c: vec![1], - d: vec![2], - }]; - - round_trip(vec); - } - - #[test] - #[allow(clippy::zero_prefixed_literal)] - fn offsets_decreasing() { - let bytes = vec![ - // 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 - // | offset | offset | offset | variable - 01, 00, 14, 00, 00, 00, 15, 00, 00, 00, 14, 00, 00, 00, 00, 00, - ]; - - assert_eq!( - ThreeVariableLen::from_ssz_bytes(&bytes), - Err(DecodeError::OffsetsAreDecreasing(14)) - ); - } - - #[test] - fn tuple_u8_u16() { - let vec: Vec<(u8, u16)> = vec![ - (0, 0), - (0, 1), - (1, 0), - (u8::max_value(), u16::max_value()), - (0, u16::max_value()), - (u8::max_value(), 0), - (42, 12301), - ]; - - round_trip(vec); - } - - #[test] - fn tuple_vec_vec() { - let vec: Vec<(u64, Vec, Vec>)> = vec![ - (0, vec![], vec![vec![]]), - (99, vec![101], vec![vec![], vec![]]), - ( - 42, - vec![12, 13, 14], - vec![vec![99, 98, 97, 96], vec![42, 44, 46, 48, 50]], - ), - ]; - - round_trip(vec); - } - - #[test] - fn btree_map_fixed() { - let data = vec![ - BTreeMap::new(), - BTreeMap::from_iter(vec![(0u8, 0u16), (1, 2), (2, 4), (4, 6)]), - ]; - round_trip(data); - } - - #[test] - fn btree_map_variable_value() { - let data = vec![ - 
BTreeMap::new(), - BTreeMap::from_iter(vec![ - ( - 0u64, - ThreeVariableLen { - a: 1, - b: vec![3, 5, 7], - c: vec![], - d: vec![0, 0], - }, - ), - ( - 1, - ThreeVariableLen { - a: 99, - b: vec![1], - c: vec![2, 3, 4, 5, 6, 7, 8, 9, 10], - d: vec![4, 5, 6, 7, 8], - }, - ), - ( - 2, - ThreeVariableLen { - a: 0, - b: vec![], - c: vec![], - d: vec![], - }, - ), - ]), - ]; - round_trip(data); - } -} diff --git a/consensus/ssz_derive/Cargo.toml b/consensus/ssz_derive/Cargo.toml deleted file mode 100644 index d3b2865a6..000000000 --- a/consensus/ssz_derive/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "eth2_ssz_derive" -version = "0.3.1" -authors = ["Paul Hauner "] -edition = "2021" -description = "Procedural derive macros to accompany the eth2_ssz crate." -license = "Apache-2.0" - -[lib] -name = "ssz_derive" -proc-macro = true - -[dependencies] -syn = "1.0.42" -proc-macro2 = "1.0.23" -quote = "1.0.7" -darling = "0.13.0" - -[dev-dependencies] -eth2_ssz = "0.4.1" diff --git a/consensus/ssz_derive/src/lib.rs b/consensus/ssz_derive/src/lib.rs deleted file mode 100644 index 280bdb83d..000000000 --- a/consensus/ssz_derive/src/lib.rs +++ /dev/null @@ -1,1124 +0,0 @@ -//! Provides procedural derive macros for the `Encode` and `Decode` traits of the `eth2_ssz` crate. -//! -//! ## Attributes -//! -//! The following struct/enum attributes are available: -//! -//! - `#[ssz(enum_behaviour = "tag")]`: encodes and decodes an `enum` with 0 fields per variant -//! - `#[ssz(enum_behaviour = "union")]`: encodes and decodes an `enum` with a one-byte variant selector. -//! - `#[ssz(enum_behaviour = "transparent")]`: allows encoding an `enum` by serializing only the -//! value whilst ignoring outermost the `enum`. -//! - `#[ssz(struct_behaviour = "container")]`: encodes and decodes the `struct` as an SSZ -//! "container". -//! - `#[ssz(struct_behaviour = "transparent")]`: encodes and decodes a `struct` with exactly one -//! non-skipped field as if the outermost `struct` does not exist. -//! -//! The following field attributes are available: -//! -//! - `#[ssz(with = "module")]`: uses the methods in `module` to implement `ssz::Encode` and -//! `ssz::Decode`. This is useful when it's not possible to create an `impl` for that type -//! (e.g. the type is defined in another crate). -//! - `#[ssz(skip_serializing)]`: this field will not be included in the serialized SSZ vector. -//! - `#[ssz(skip_deserializing)]`: this field will not be expected in the serialized -//! SSZ vector and it will be initialized from a `Default` implementation. -//! -//! ## Examples -//! -//! ### Structs -//! -//! ```rust -//! use ssz::{Encode, Decode}; -//! use ssz_derive::{Encode, Decode}; -//! -//! /// Represented as an SSZ "list" wrapped in an SSZ "container". -//! #[derive(Debug, PartialEq, Encode, Decode)] -//! #[ssz(struct_behaviour = "container")] // "container" is the default behaviour -//! struct TypicalStruct { -//! foo: Vec -//! } -//! -//! assert_eq!( -//! TypicalStruct { foo: vec![42] }.as_ssz_bytes(), -//! vec![4, 0, 0, 0, 42] -//! ); -//! -//! assert_eq!( -//! TypicalStruct::from_ssz_bytes(&[4, 0, 0, 0, 42]).unwrap(), -//! TypicalStruct { foo: vec![42] }, -//! ); -//! -//! /// Represented as an SSZ "list" *without* an SSZ "container". -//! #[derive(Encode, Decode)] -//! #[ssz(struct_behaviour = "transparent")] -//! struct WrapperStruct { -//! foo: Vec -//! } -//! -//! assert_eq!( -//! WrapperStruct { foo: vec![42] }.as_ssz_bytes(), -//! vec![42] -//! ); -//! -//! 
/// Represented as an SSZ "list" *without* an SSZ "container". The `bar` byte is ignored. -//! #[derive(Debug, PartialEq, Encode, Decode)] -//! #[ssz(struct_behaviour = "transparent")] -//! struct WrapperStructSkippedField { -//! foo: Vec, -//! #[ssz(skip_serializing, skip_deserializing)] -//! bar: u8, -//! } -//! -//! assert_eq!( -//! WrapperStructSkippedField { foo: vec![42], bar: 99 }.as_ssz_bytes(), -//! vec![42] -//! ); -//! assert_eq!( -//! WrapperStructSkippedField::from_ssz_bytes(&[42]).unwrap(), -//! WrapperStructSkippedField { foo: vec![42], bar: 0 } -//! ); -//! -//! /// Represented as an SSZ "list" *without* an SSZ "container". -//! #[derive(Encode, Decode)] -//! #[ssz(struct_behaviour = "transparent")] -//! struct NewType(Vec); -//! -//! assert_eq!( -//! NewType(vec![42]).as_ssz_bytes(), -//! vec![42] -//! ); -//! -//! /// Represented as an SSZ "list" *without* an SSZ "container". The `bar` byte is ignored. -//! #[derive(Debug, PartialEq, Encode, Decode)] -//! #[ssz(struct_behaviour = "transparent")] -//! struct NewTypeSkippedField(Vec, #[ssz(skip_serializing, skip_deserializing)] u8); -//! -//! assert_eq!( -//! NewTypeSkippedField(vec![42], 99).as_ssz_bytes(), -//! vec![42] -//! ); -//! assert_eq!( -//! NewTypeSkippedField::from_ssz_bytes(&[42]).unwrap(), -//! NewTypeSkippedField(vec![42], 0) -//! ); -//! ``` -//! -//! ### Enums -//! -//! ```rust -//! use ssz::{Encode, Decode}; -//! use ssz_derive::{Encode, Decode}; -//! -//! /// Represented as an SSZ "union". -//! #[derive(Debug, PartialEq, Encode, Decode)] -//! #[ssz(enum_behaviour = "union")] -//! enum UnionEnum { -//! Foo(u8), -//! Bar(Vec), -//! } -//! -//! assert_eq!( -//! UnionEnum::Foo(42).as_ssz_bytes(), -//! vec![0, 42] -//! ); -//! assert_eq!( -//! UnionEnum::from_ssz_bytes(&[1, 42, 42]).unwrap(), -//! UnionEnum::Bar(vec![42, 42]), -//! ); -//! -//! /// Represented as only the value in the enum variant. -//! #[derive(Debug, PartialEq, Encode)] -//! #[ssz(enum_behaviour = "transparent")] -//! enum TransparentEnum { -//! Foo(u8), -//! Bar(Vec), -//! } -//! -//! assert_eq!( -//! TransparentEnum::Foo(42).as_ssz_bytes(), -//! vec![42] -//! ); -//! assert_eq!( -//! TransparentEnum::Bar(vec![42, 42]).as_ssz_bytes(), -//! vec![42, 42] -//! ); -//! -//! /// Representated as an SSZ "uint8" -//! #[derive(Debug, PartialEq, Encode, Decode)] -//! #[ssz(enum_behaviour = "tag")] -//! enum TagEnum { -//! Foo, -//! Bar, -//! } -//! assert_eq!( -//! TagEnum::Foo.as_ssz_bytes(), -//! vec![0] -//! ); -//! assert_eq!( -//! TagEnum::from_ssz_bytes(&[1]).unwrap(), -//! TagEnum::Bar, -//! ); -//! ``` - -use darling::{FromDeriveInput, FromMeta}; -use proc_macro::TokenStream; -use quote::quote; -use std::convert::TryInto; -use syn::{parse_macro_input, DataEnum, DataStruct, DeriveInput, Ident, Index}; - -/// The highest possible union selector value (higher values are reserved for backwards compatible -/// extensions). -const MAX_UNION_SELECTOR: u8 = 127; - -const ENUM_TRANSPARENT: &str = "transparent"; -const ENUM_UNION: &str = "union"; -const ENUM_TAG: &str = "tag"; -const NO_ENUM_BEHAVIOUR_ERROR: &str = "enums require an \"enum_behaviour\" attribute with \ - a \"transparent\", \"union\", or \"tag\" value, e.g., #[ssz(enum_behaviour = \"transparent\")]"; - -#[derive(Debug, FromDeriveInput)] -#[darling(attributes(ssz))] -struct StructOpts { - #[darling(default)] - enum_behaviour: Option, - #[darling(default)] - struct_behaviour: Option, -} - -/// Field-level configuration. 
-#[derive(Debug, Default, FromMeta)] -struct FieldOpts { - #[darling(default)] - with: Option, - #[darling(default)] - skip_serializing: bool, - #[darling(default)] - skip_deserializing: bool, -} - -enum Procedure<'a> { - Struct { - data: &'a syn::DataStruct, - behaviour: StructBehaviour, - }, - Enum { - data: &'a syn::DataEnum, - behaviour: EnumBehaviour, - }, -} - -enum StructBehaviour { - Container, - Transparent, -} - -enum EnumBehaviour { - Union, - Transparent, - Tag, -} - -impl<'a> Procedure<'a> { - fn read(item: &'a DeriveInput) -> Self { - let opts = StructOpts::from_derive_input(item).unwrap(); - - match &item.data { - syn::Data::Struct(data) => { - if opts.enum_behaviour.is_some() { - panic!("cannot use \"enum_behaviour\" for a struct"); - } - - match opts.struct_behaviour.as_deref() { - Some("container") | None => Procedure::Struct { - data, - behaviour: StructBehaviour::Container, - }, - Some("transparent") => Procedure::Struct { - data, - behaviour: StructBehaviour::Transparent, - }, - Some(other) => panic!( - "{} is not a valid struct behaviour, use \"container\" or \"transparent\"", - other - ), - } - } - syn::Data::Enum(data) => { - if opts.struct_behaviour.is_some() { - panic!("cannot use \"struct_behaviour\" for an enum"); - } - - match opts.enum_behaviour.as_deref() { - Some("union") => Procedure::Enum { - data, - behaviour: EnumBehaviour::Union, - }, - Some("transparent") => Procedure::Enum { - data, - behaviour: EnumBehaviour::Transparent, - }, - Some("tag") => Procedure::Enum { - data, - behaviour: EnumBehaviour::Tag, - }, - Some(other) => panic!( - "{} is not a valid enum behaviour, use \"union\", \"transparent\", or \"tag\"", - other - ), - None => panic!("{}", NO_ENUM_BEHAVIOUR_ERROR), - } - } - _ => panic!("ssz_derive only supports structs and enums"), - } - } -} - -fn parse_ssz_fields( - struct_data: &syn::DataStruct, -) -> Vec<(&syn::Type, Option<&syn::Ident>, FieldOpts)> { - struct_data - .fields - .iter() - .map(|field| { - let ty = &field.ty; - let ident = field.ident.as_ref(); - - let field_opts_candidates = field - .attrs - .iter() - .filter(|attr| attr.path.get_ident().map_or(false, |ident| *ident == "ssz")) - .collect::>(); - - if field_opts_candidates.len() > 1 { - panic!("more than one field-level \"ssz\" attribute provided") - } - - let field_opts = field_opts_candidates - .first() - .map(|attr| { - let meta = attr.parse_meta().unwrap(); - FieldOpts::from_meta(&meta).unwrap() - }) - .unwrap_or_default(); - - (ty, ident, field_opts) - }) - .collect() -} - -/// Implements `ssz::Encode` for some `struct` or `enum`. -#[proc_macro_derive(Encode, attributes(ssz))] -pub fn ssz_encode_derive(input: TokenStream) -> TokenStream { - let item = parse_macro_input!(input as DeriveInput); - let procedure = Procedure::read(&item); - - match procedure { - Procedure::Struct { data, behaviour } => match behaviour { - StructBehaviour::Transparent => ssz_encode_derive_struct_transparent(&item, data), - StructBehaviour::Container => ssz_encode_derive_struct(&item, data), - }, - Procedure::Enum { data, behaviour } => match behaviour { - EnumBehaviour::Transparent => ssz_encode_derive_enum_transparent(&item, data), - EnumBehaviour::Union => ssz_encode_derive_enum_union(&item, data), - EnumBehaviour::Tag => ssz_encode_derive_enum_tag(&item, data), - }, - } -} - -/// Derive `ssz::Encode` for a struct. -/// -/// Fields are encoded in the order they are defined. -/// -/// ## Field attributes -/// -/// - `#[ssz(skip_serializing)]`: the field will not be serialized.
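The container expansion that follows produces the standard SSZ layout: fixed-size fields are written in place, each variable-size field contributes a 4-byte offset, and the variable parts are appended afterwards via `ssz::SszEncoder`. A small illustration, mirroring the crate's own doc examples (the `Example` struct is hypothetical):

```rust
use ssz::Encode;
use ssz_derive::Encode;

// Default "container" behaviour: fixed fields in place, variable fields as offsets.
#[derive(Encode)]
struct Example {
    a: u16,     // fixed-size: 2 bytes, written in place
    b: Vec<u8>, // variable-size: replaced by a 4-byte offset
}

fn main() {
    let e = Example { a: 1, b: vec![9, 9] };
    // Fixed part: `a` = 01 00, then the offset 06 00 00 00 (2 bytes of `a`
    // plus 4 bytes of offset = 6), then the variable part: `b`'s raw bytes.
    assert_eq!(e.as_ssz_bytes(), vec![1, 0, 6, 0, 0, 0, 9, 9]);
}
```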
-fn ssz_encode_derive_struct(derive_input: &DeriveInput, struct_data: &DataStruct) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - - let field_is_ssz_fixed_len = &mut vec![]; - let field_fixed_len = &mut vec![]; - let field_ssz_bytes_len = &mut vec![]; - let field_encoder_append = &mut vec![]; - - for (ty, ident, field_opts) in parse_ssz_fields(struct_data) { - if field_opts.skip_serializing { - continue; - } - - let ident = match ident { - Some(ref ident) => ident, - _ => panic!( - "#[ssz(struct_behaviour = \"container\")] only supports named struct fields." - ), - }; - - if let Some(module) = field_opts.with { - let module = quote! { #module::encode }; - field_is_ssz_fixed_len.push(quote! { #module::is_ssz_fixed_len() }); - field_fixed_len.push(quote! { #module::ssz_fixed_len() }); - field_ssz_bytes_len.push(quote! { #module::ssz_bytes_len(&self.#ident) }); - field_encoder_append.push(quote! { - encoder.append_parameterized( - #module::is_ssz_fixed_len(), - |buf| #module::ssz_append(&self.#ident, buf) - ) - }); - } else { - field_is_ssz_fixed_len.push(quote! { <#ty as ssz::Encode>::is_ssz_fixed_len() }); - field_fixed_len.push(quote! { <#ty as ssz::Encode>::ssz_fixed_len() }); - field_ssz_bytes_len.push(quote! { self.#ident.ssz_bytes_len() }); - field_encoder_append.push(quote! { encoder.append(&self.#ident) }); - } - } - - let output = quote! { - impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - #( - #field_is_ssz_fixed_len && - )* - true - } - - fn ssz_fixed_len() -> usize { - if ::is_ssz_fixed_len() { - let mut len: usize = 0; - #( - len = len - .checked_add(#field_fixed_len) - .expect("encode ssz_fixed_len length overflow"); - )* - len - } else { - ssz::BYTES_PER_LENGTH_OFFSET - } - } - - fn ssz_bytes_len(&self) -> usize { - if ::is_ssz_fixed_len() { - ::ssz_fixed_len() - } else { - let mut len: usize = 0; - #( - if #field_is_ssz_fixed_len { - len = len - .checked_add(#field_fixed_len) - .expect("encode ssz_bytes_len length overflow"); - } else { - len = len - .checked_add(ssz::BYTES_PER_LENGTH_OFFSET) - .expect("encode ssz_bytes_len length overflow for offset"); - len = len - .checked_add(#field_ssz_bytes_len) - .expect("encode ssz_bytes_len length overflow for bytes"); - } - )* - - len - } - } - - fn ssz_append(&self, buf: &mut Vec) { - let mut offset: usize = 0; - #( - offset = offset - .checked_add(#field_fixed_len) - .expect("encode ssz_append offset overflow"); - )* - - let mut encoder = ssz::SszEncoder::container(buf, offset); - - #( - #field_encoder_append; - )* - - encoder.finalize(); - } - } - }; - output.into() -} - -/// Derive `ssz::Encode` "transparently" for a struct which has exactly one non-skipped field. -/// -/// The single field is encoded directly, making the outermost `struct` transparent. -/// -/// ## Field attributes -/// -/// - `#[ssz(skip_serializing)]`: the field will not be serialized. 
-fn ssz_encode_derive_struct_transparent( - derive_input: &DeriveInput, - struct_data: &DataStruct, -) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - let ssz_fields = parse_ssz_fields(struct_data); - let num_fields = ssz_fields - .iter() - .filter(|(_, _, field_opts)| !field_opts.skip_deserializing) - .count(); - - if num_fields != 1 { - panic!( - "A \"transparent\" struct must have exactly one non-skipped field ({} fields found)", - num_fields - ); - } - - let (index, (ty, ident, _field_opts)) = ssz_fields - .iter() - .enumerate() - .find(|(_, (_, _, field_opts))| !field_opts.skip_deserializing) - .expect("\"transparent\" struct must have at least one non-skipped field"); - - // Remove the `_usize` suffix from the value to avoid a compiler warning. - let index = Index::from(index); - - let output = if let Some(field_name) = ident { - quote! { - impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - <#ty as ssz::Encode>::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - <#ty as ssz::Encode>::ssz_fixed_len() - } - - fn ssz_bytes_len(&self) -> usize { - self.#field_name.ssz_bytes_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.#field_name.ssz_append(buf) - } - } - } - } else { - quote! { - impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - <#ty as ssz::Encode>::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - <#ty as ssz::Encode>::ssz_fixed_len() - } - - fn ssz_bytes_len(&self) -> usize { - self.#index.ssz_bytes_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.#index.ssz_append(buf) - } - } - } - }; - - output.into() -} - -/// Derive `ssz::Encode` for an enum in the "transparent" method. -/// -/// The "transparent" method is distinct from the "union" method specified in the SSZ specification. -/// When using "transparent", the enum will be ignored and the contained field will be serialized as -/// if the enum does not exist. Since an union variant "selector" is not serialized, it is not -/// possible to reliably decode an enum that is serialized transparently. -/// -/// ## Limitations -/// -/// Only supports: -/// - Enums with a single field per variant, where -/// - All fields are variably sized from an SSZ-perspective (not fixed size). -/// -/// ## Panics -/// -/// Will panic at compile-time if the single field requirement isn't met, but will panic *at run -/// time* if the variable-size requirement isn't met. -fn ssz_encode_derive_enum_transparent( - derive_input: &DeriveInput, - enum_data: &DataEnum, -) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - - let (patterns, assert_exprs): (Vec<_>, Vec<_>) = enum_data - .variants - .iter() - .map(|variant| { - let variant_name = &variant.ident; - - if variant.fields.len() != 1 { - panic!("ssz::Encode can only be derived for enums with 1 field per variant"); - } - - let pattern = quote! { - #name::#variant_name(ref inner) - }; - - let ty = &(&variant.fields).into_iter().next().unwrap().ty; - let type_assert = quote! { - !<#ty as ssz::Encode>::is_ssz_fixed_len() - }; - (pattern, type_assert) - }) - .unzip(); - - let output = quote! 
{ - impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - assert!( - #( - #assert_exprs && - )* true, - "not all enum variants are variably-sized" - ); - false - } - - fn ssz_bytes_len(&self) -> usize { - match self { - #( - #patterns => inner.ssz_bytes_len(), - )* - } - } - - fn ssz_append(&self, buf: &mut Vec) { - match self { - #( - #patterns => inner.ssz_append(buf), - )* - } - } - } - }; - output.into() -} - -/// Derive `ssz::Encode` for an `enum` following the "tag" method. -/// -/// The union selector will be determined based upon the order in which the enum variants are -/// defined. E.g., the top-most variant in the enum will have a selector of `0`, the variant -/// beneath it will have a selector of `1` and so on. -/// -/// # Limitations -/// -/// Only supports enums where each variant has no fields -fn ssz_encode_derive_enum_tag(derive_input: &DeriveInput, enum_data: &DataEnum) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - - let patterns: Vec<_> = enum_data - .variants - .iter() - .map(|variant| { - let variant_name = &variant.ident; - - if !variant.fields.is_empty() { - panic!("ssz::Encode tag behaviour can only be derived for enums with no fields"); - } - - quote! { - #name::#variant_name - } - }) - .collect(); - - let union_selectors = compute_union_selectors(patterns.len()); - - let output = quote! { - impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 1 - } - - fn ssz_bytes_len(&self) -> usize { - 1 - } - - fn ssz_append(&self, buf: &mut Vec) { - match self { - #( - #patterns => { - let union_selector: u8 = #union_selectors; - debug_assert!(union_selector <= ssz::MAX_UNION_SELECTOR); - buf.push(union_selector); - }, - )* - } - } - } - }; - output.into() -} - -/// Derive `ssz::Encode` for an `enum` following the "union" SSZ spec. -/// -/// The union selector will be determined based upon the order in which the enum variants are -/// defined. E.g., the top-most variant in the enum will have a selector of `0`, the variant -/// beneath it will have a selector of `1` and so on. -/// -/// # Limitations -/// -/// Only supports enums where each variant has a single field. -fn ssz_encode_derive_enum_union(derive_input: &DeriveInput, enum_data: &DataEnum) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - - let patterns: Vec<_> = enum_data - .variants - .iter() - .map(|variant| { - let variant_name = &variant.ident; - - if variant.fields.len() != 1 { - panic!("ssz::Encode can only be derived for enums with 1 field per variant"); - } - - let pattern = quote! { - #name::#variant_name(ref inner) - }; - pattern - }) - .collect(); - - let union_selectors = compute_union_selectors(patterns.len()); - - let output = quote! 
{ - impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - false - } - - fn ssz_bytes_len(&self) -> usize { - match self { - #( - #patterns => inner - .ssz_bytes_len() - .checked_add(1) - .expect("encoded length must be less than usize::max_value"), - )* - } - } - - fn ssz_append(&self, buf: &mut Vec) { - match self { - #( - #patterns => { - let union_selector: u8 = #union_selectors; - debug_assert!(union_selector <= ssz::MAX_UNION_SELECTOR); - buf.push(union_selector); - inner.ssz_append(buf) - }, - )* - } - } - } - }; - output.into() -} - -/// Derive `ssz::Decode` for a struct or enum. -#[proc_macro_derive(Decode, attributes(ssz))] -pub fn ssz_decode_derive(input: TokenStream) -> TokenStream { - let item = parse_macro_input!(input as DeriveInput); - let procedure = Procedure::read(&item); - - match procedure { - Procedure::Struct { data, behaviour } => match behaviour { - StructBehaviour::Transparent => ssz_decode_derive_struct_transparent(&item, data), - StructBehaviour::Container => ssz_decode_derive_struct(&item, data), - }, - Procedure::Enum { data, behaviour } => match behaviour { - EnumBehaviour::Union => ssz_decode_derive_enum_union(&item, data), - EnumBehaviour::Tag => ssz_decode_derive_enum_tag(&item, data), - EnumBehaviour::Transparent => panic!( - "Decode cannot be derived for enum_behaviour \"{}\", only \"{}\" and \"{}\" is valid.", - ENUM_TRANSPARENT, ENUM_UNION, ENUM_TAG, - ), - }, - } -} - -/// Implements `ssz::Decode` for some `struct`. -/// -/// Fields are decoded in the order they are defined. -/// -/// ## Field attributes -/// -/// - `#[ssz(skip_deserializing)]`: during de-serialization the field will be instantiated from a -/// `Default` implementation. The decoder will assume that the field was not serialized at all -/// (e.g., if it has been serialized, an error will be raised instead of `Default` overriding it). -fn ssz_decode_derive_struct(item: &DeriveInput, struct_data: &DataStruct) -> TokenStream { - let name = &item.ident; - let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl(); - - let mut register_types = vec![]; - let mut field_names = vec![]; - let mut fixed_decodes = vec![]; - let mut decodes = vec![]; - let mut is_fixed_lens = vec![]; - let mut fixed_lens = vec![]; - - for (ty, ident, field_opts) in parse_ssz_fields(struct_data) { - let ident = match ident { - Some(ref ident) => ident, - _ => panic!( - "#[ssz(struct_behaviour = \"container\")] only supports named struct fields." - ), - }; - - field_names.push(quote! { - #ident - }); - - // Field should not be deserialized; use a `Default` impl to instantiate. - if field_opts.skip_deserializing { - decodes.push(quote! { - let #ident = <_>::default(); - }); - - fixed_decodes.push(quote! { - let #ident = <_>::default(); - }); - - continue; - } - - let is_ssz_fixed_len; - let ssz_fixed_len; - let from_ssz_bytes; - if let Some(module) = field_opts.with { - let module = quote! { #module::decode }; - - is_ssz_fixed_len = quote! { #module::is_ssz_fixed_len() }; - ssz_fixed_len = quote! { #module::ssz_fixed_len() }; - from_ssz_bytes = quote! { #module::from_ssz_bytes(slice) }; - - register_types.push(quote! { - builder.register_type_parameterized(#is_ssz_fixed_len, #ssz_fixed_len)?; - }); - decodes.push(quote! { - let #ident = decoder.decode_next_with(|slice| #module::from_ssz_bytes(slice))?; - }); - } else { - is_ssz_fixed_len = quote! { <#ty as ssz::Decode>::is_ssz_fixed_len() }; - ssz_fixed_len = quote! 
{ <#ty as ssz::Decode>::ssz_fixed_len() }; - from_ssz_bytes = quote! { <#ty as ssz::Decode>::from_ssz_bytes(slice) }; - - register_types.push(quote! { - builder.register_type::<#ty>()?; - }); - decodes.push(quote! { - let #ident = decoder.decode_next()?; - }); - } - - fixed_decodes.push(quote! { - let #ident = { - start = end; - end = end - .checked_add(#ssz_fixed_len) - .ok_or_else(|| ssz::DecodeError::OutOfBoundsByte { - i: usize::max_value() - })?; - let slice = bytes.get(start..end) - .ok_or_else(|| ssz::DecodeError::InvalidByteLength { - len: bytes.len(), - expected: end - })?; - #from_ssz_bytes? - }; - }); - is_fixed_lens.push(is_ssz_fixed_len); - fixed_lens.push(ssz_fixed_len); - } - - let output = quote! { - impl #impl_generics ssz::Decode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - #( - #is_fixed_lens && - )* - true - } - - fn ssz_fixed_len() -> usize { - if ::is_ssz_fixed_len() { - let mut len: usize = 0; - #( - len = len - .checked_add(#fixed_lens) - .expect("decode ssz_fixed_len overflow"); - )* - len - } else { - ssz::BYTES_PER_LENGTH_OFFSET - } - } - - fn from_ssz_bytes(bytes: &[u8]) -> std::result::Result { - if ::is_ssz_fixed_len() { - if bytes.len() != ::ssz_fixed_len() { - return Err(ssz::DecodeError::InvalidByteLength { - len: bytes.len(), - expected: ::ssz_fixed_len(), - }); - } - - let mut start: usize = 0; - let mut end = start; - - #( - #fixed_decodes - )* - - Ok(Self { - #( - #field_names, - )* - }) - } else { - let mut builder = ssz::SszDecoderBuilder::new(bytes); - - #( - #register_types - )* - - let mut decoder = builder.build()?; - - #( - #decodes - )* - - - Ok(Self { - #( - #field_names, - )* - }) - } - } - } - }; - output.into() -} - -/// Implements `ssz::Decode` "transparently" for a `struct` with exactly one non-skipped field. -/// -/// The bytes will be decoded as if they are the inner field, without the outermost struct. The -/// outermost struct will then be applied artificially. -/// -/// ## Field attributes -/// -/// - `#[ssz(skip_deserializing)]`: during de-serialization the field will be instantiated from a -/// `Default` implementation. The decoder will assume that the field was not serialized at all -/// (e.g., if it has been serialized, an error will be raised instead of `Default` overriding it). -fn ssz_decode_derive_struct_transparent( - item: &DeriveInput, - struct_data: &DataStruct, -) -> TokenStream { - let name = &item.ident; - let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl(); - let ssz_fields = parse_ssz_fields(struct_data); - let num_fields = ssz_fields - .iter() - .filter(|(_, _, field_opts)| !field_opts.skip_deserializing) - .count(); - - if num_fields != 1 { - panic!( - "A \"transparent\" struct must have exactly one non-skipped field ({} fields found)", - num_fields - ); - } - - let mut fields = vec![]; - let mut wrapped_type = None; - - for (i, (ty, ident, field_opts)) in ssz_fields.into_iter().enumerate() { - if let Some(name) = ident { - if field_opts.skip_deserializing { - fields.push(quote! { - #name: <_>::default(), - }); - } else { - fields.push(quote! { - #name: <_>::from_ssz_bytes(bytes)?, - }); - wrapped_type = Some(ty); - } - } else { - let index = syn::Index::from(i); - if field_opts.skip_deserializing { - fields.push(quote! { - #index:<_>::default(), - }); - } else { - fields.push(quote! { - #index:<_>::from_ssz_bytes(bytes)?, - }); - wrapped_type = Some(ty); - } - } - } - - let ty = wrapped_type.unwrap(); - - let output = quote! 
{ - impl #impl_generics ssz::Decode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - <#ty as ssz::Decode>::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - <#ty as ssz::Decode>::ssz_fixed_len() - } - - fn from_ssz_bytes(bytes: &[u8]) -> std::result::Result { - Ok(Self { - #( - #fields - )* - - }) - } - } - }; - output.into() -} - -/// Derive `ssz::Decode` for an `enum` following the "tag" SSZ spec. -fn ssz_decode_derive_enum_tag(derive_input: &DeriveInput, enum_data: &DataEnum) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - - let patterns: Vec<_> = enum_data - .variants - .iter() - .map(|variant| { - let variant_name = &variant.ident; - - if !variant.fields.is_empty() { - panic!("ssz::Decode tag behaviour can only be derived for enums with no fields"); - } - - quote! { - #name::#variant_name - } - }) - .collect(); - - let union_selectors = compute_union_selectors(patterns.len()); - - let output = quote! { - impl #impl_generics ssz::Decode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - 1 - } - - fn from_ssz_bytes(bytes: &[u8]) -> std::result::Result { - let byte = bytes - .first() - .copied() - .ok_or(ssz::DecodeError::OutOfBoundsByte { i: 0 })?; - - match byte { - #( - #union_selectors => { - Ok(#patterns) - }, - )* - other => Err(ssz::DecodeError::UnionSelectorInvalid(other)), - } - } - } - }; - output.into() -} - -/// Derive `ssz::Decode` for an `enum` following the "union" SSZ spec. -fn ssz_decode_derive_enum_union(derive_input: &DeriveInput, enum_data: &DataEnum) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - - let (constructors, var_types): (Vec<_>, Vec<_>) = enum_data - .variants - .iter() - .map(|variant| { - let variant_name = &variant.ident; - - if variant.fields.len() != 1 { - panic!("ssz::Encode can only be derived for enums with 1 field per variant"); - } - - let constructor = quote! { - #name::#variant_name - }; - - let ty = &(&variant.fields).into_iter().next().unwrap().ty; - (constructor, ty) - }) - .unzip(); - - let union_selectors = compute_union_selectors(constructors.len()); - - let output = quote! { - impl #impl_generics ssz::Decode for #name #ty_generics #where_clause { - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - // Sanity check to ensure the definition here does not drift from the one defined in - // `ssz`. 
- debug_assert_eq!(#MAX_UNION_SELECTOR, ssz::MAX_UNION_SELECTOR); - - let (selector, body) = ssz::split_union_bytes(bytes)?; - - match selector.into() { - #( - #union_selectors => { - <#var_types as ssz::Decode>::from_ssz_bytes(body).map(#constructors) - }, - )* - other => Err(ssz::DecodeError::UnionSelectorInvalid(other)) - } - } - } - }; - output.into() -} - -fn compute_union_selectors(num_variants: usize) -> Vec { - let union_selectors = (0..num_variants) - .map(|i| { - i.try_into() - .expect("union selector exceeds u8::max_value, union has too many variants") - }) - .collect::>(); - - let highest_selector = union_selectors - .last() - .copied() - .expect("0-variant union is not permitted"); - - assert!( - highest_selector <= MAX_UNION_SELECTOR, - "union selector {} exceeds limit of {}, enum has too many variants", - highest_selector, - MAX_UNION_SELECTOR - ); - - union_selectors -} diff --git a/consensus/ssz_derive/tests/tests.rs b/consensus/ssz_derive/tests/tests.rs deleted file mode 100644 index 72192b293..000000000 --- a/consensus/ssz_derive/tests/tests.rs +++ /dev/null @@ -1,251 +0,0 @@ -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; -use std::fmt::Debug; -use std::marker::PhantomData; - -fn assert_encode(item: &T, bytes: &[u8]) { - assert_eq!(item.as_ssz_bytes(), bytes); -} - -fn assert_encode_decode(item: &T, bytes: &[u8]) { - assert_encode(item, bytes); - assert_eq!(T::from_ssz_bytes(bytes).unwrap(), *item); -} - -#[derive(PartialEq, Debug, Encode, Decode)] -#[ssz(enum_behaviour = "tag")] -enum TagEnum { - A, - B, - C, -} - -#[derive(PartialEq, Debug, Encode, Decode)] -#[ssz(enum_behaviour = "union")] -enum TwoFixedUnion { - U8(u8), - U16(u16), -} - -#[derive(PartialEq, Debug, Encode, Decode)] -struct TwoFixedUnionStruct { - a: TwoFixedUnion, -} - -#[test] -fn two_fixed_union() { - let eight = TwoFixedUnion::U8(1); - let sixteen = TwoFixedUnion::U16(1); - - assert_encode_decode(&eight, &[0, 1]); - assert_encode_decode(&sixteen, &[1, 1, 0]); - - assert_encode_decode(&TwoFixedUnionStruct { a: eight }, &[4, 0, 0, 0, 0, 1]); - assert_encode_decode(&TwoFixedUnionStruct { a: sixteen }, &[4, 0, 0, 0, 1, 1, 0]); -} - -#[derive(PartialEq, Debug, Encode, Decode)] -struct VariableA { - a: u8, - b: Vec, -} - -#[derive(PartialEq, Debug, Encode, Decode)] -struct VariableB { - a: Vec, - b: u8, -} - -#[derive(PartialEq, Debug, Encode)] -#[ssz(enum_behaviour = "transparent")] -enum TwoVariableTrans { - A(VariableA), - B(VariableB), -} - -#[derive(PartialEq, Debug, Encode)] -struct TwoVariableTransStruct { - a: TwoVariableTrans, -} - -#[derive(PartialEq, Debug, Encode, Decode)] -#[ssz(enum_behaviour = "union")] -enum TwoVariableUnion { - A(VariableA), - B(VariableB), -} - -#[derive(PartialEq, Debug, Encode, Decode)] -struct TwoVariableUnionStruct { - a: TwoVariableUnion, -} - -#[test] -fn two_variable_trans() { - let trans_a = TwoVariableTrans::A(VariableA { - a: 1, - b: vec![2, 3], - }); - let trans_b = TwoVariableTrans::B(VariableB { - a: vec![1, 2], - b: 3, - }); - - assert_encode(&trans_a, &[1, 5, 0, 0, 0, 2, 3]); - assert_encode(&trans_b, &[5, 0, 0, 0, 3, 1, 2]); - - assert_encode( - &TwoVariableTransStruct { a: trans_a }, - &[4, 0, 0, 0, 1, 5, 0, 0, 0, 2, 3], - ); - assert_encode( - &TwoVariableTransStruct { a: trans_b }, - &[4, 0, 0, 0, 5, 0, 0, 0, 3, 1, 2], - ); -} - -#[test] -fn two_variable_union() { - let union_a = TwoVariableUnion::A(VariableA { - a: 1, - b: vec![2, 3], - }); - let union_b = TwoVariableUnion::B(VariableB { - a: vec![1, 2], - b: 3, - }); - - 
assert_encode_decode(&union_a, &[0, 1, 5, 0, 0, 0, 2, 3]); - assert_encode_decode(&union_b, &[1, 5, 0, 0, 0, 3, 1, 2]); - - assert_encode_decode( - &TwoVariableUnionStruct { a: union_a }, - &[4, 0, 0, 0, 0, 1, 5, 0, 0, 0, 2, 3], - ); - assert_encode_decode( - &TwoVariableUnionStruct { a: union_b }, - &[4, 0, 0, 0, 1, 5, 0, 0, 0, 3, 1, 2], - ); -} - -#[test] -fn tag_enum() { - assert_encode_decode(&TagEnum::A, &[0]); - assert_encode_decode(&TagEnum::B, &[1]); - assert_encode_decode(&TagEnum::C, &[2]); -} - -#[derive(PartialEq, Debug, Encode, Decode)] -#[ssz(enum_behaviour = "union")] -enum TwoVecUnion { - A(Vec), - B(Vec), -} - -#[test] -fn two_vec_union() { - assert_encode_decode(&TwoVecUnion::A(vec![]), &[0]); - assert_encode_decode(&TwoVecUnion::B(vec![]), &[1]); - - assert_encode_decode(&TwoVecUnion::A(vec![0]), &[0, 0]); - assert_encode_decode(&TwoVecUnion::B(vec![0]), &[1, 0]); - - assert_encode_decode(&TwoVecUnion::A(vec![0, 1]), &[0, 0, 1]); - assert_encode_decode(&TwoVecUnion::B(vec![0, 1]), &[1, 0, 1]); -} - -#[derive(PartialEq, Debug, Encode, Decode)] -#[ssz(struct_behaviour = "transparent")] -struct TransparentStruct { - inner: Vec, -} - -impl TransparentStruct { - fn new(inner: u8) -> Self { - Self { inner: vec![inner] } - } -} - -#[test] -fn transparent_struct() { - assert_encode_decode(&TransparentStruct::new(42), &vec![42_u8].as_ssz_bytes()); -} - -#[derive(PartialEq, Debug, Encode, Decode)] -#[ssz(struct_behaviour = "transparent")] -struct TransparentStructSkippedField { - inner: Vec, - #[ssz(skip_serializing, skip_deserializing)] - skipped: PhantomData, -} - -impl TransparentStructSkippedField { - fn new(inner: u8) -> Self { - Self { - inner: vec![inner], - skipped: PhantomData, - } - } -} - -#[test] -fn transparent_struct_skipped_field() { - assert_encode_decode( - &TransparentStructSkippedField::new(42), - &vec![42_u8].as_ssz_bytes(), - ); -} - -#[derive(PartialEq, Debug, Encode, Decode)] -#[ssz(struct_behaviour = "transparent")] -struct TransparentStructNewType(Vec); - -#[test] -fn transparent_struct_newtype() { - assert_encode_decode( - &TransparentStructNewType(vec![42_u8]), - &vec![42_u8].as_ssz_bytes(), - ); -} - -#[derive(PartialEq, Debug, Encode, Decode)] -#[ssz(struct_behaviour = "transparent")] -struct TransparentStructNewTypeSkippedField( - Vec, - #[ssz(skip_serializing, skip_deserializing)] PhantomData, -); - -impl TransparentStructNewTypeSkippedField { - fn new(inner: Vec) -> Self { - Self(inner, PhantomData) - } -} - -#[test] -fn transparent_struct_newtype_skipped_field() { - assert_encode_decode( - &TransparentStructNewTypeSkippedField::new(vec![42_u8]), - &vec![42_u8].as_ssz_bytes(), - ); -} - -#[derive(PartialEq, Debug, Encode, Decode)] -#[ssz(struct_behaviour = "transparent")] -struct TransparentStructNewTypeSkippedFieldReverse( - #[ssz(skip_serializing, skip_deserializing)] PhantomData, - Vec, -); - -impl TransparentStructNewTypeSkippedFieldReverse { - fn new(inner: Vec) -> Self { - Self(PhantomData, inner) - } -} - -#[test] -fn transparent_struct_newtype_skipped_field_reverse() { - assert_encode_decode( - &TransparentStructNewTypeSkippedFieldReverse::new(vec![42_u8]), - &vec![42_u8].as_ssz_bytes(), - ); -} diff --git a/consensus/ssz_types/Cargo.toml b/consensus/ssz_types/Cargo.toml deleted file mode 100644 index 2baa8994f..000000000 --- a/consensus/ssz_types/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "eth2_ssz_types" -version = "0.2.2" -authors = ["Paul Hauner "] -edition = "2021" -description = "Provides types with unique properties 
required for SSZ serialization and Merklization." -license = "Apache-2.0" - -[lib] -name = "ssz_types" - -[dependencies] -tree_hash = "0.4.1" -serde = "1.0.116" -serde_derive = "1.0.116" -eth2_serde_utils = "0.1.1" -eth2_ssz = "0.4.1" -typenum = "1.12.0" -arbitrary = { version = "1.0", features = ["derive"], optional = true } -derivative = "2.1.1" -smallvec = "1.8.0" - -[dev-dependencies] -serde_json = "1.0.58" -tree_hash_derive = "0.4.0" diff --git a/consensus/ssz_types/src/bitfield.rs b/consensus/ssz_types/src/bitfield.rs deleted file mode 100644 index b7bde2257..000000000 --- a/consensus/ssz_types/src/bitfield.rs +++ /dev/null @@ -1,1332 +0,0 @@ -use crate::tree_hash::bitfield_bytes_tree_hash_root; -use crate::Error; -use core::marker::PhantomData; -use derivative::Derivative; -use eth2_serde_utils::hex::{encode as hex_encode, PrefixedHexVisitor}; -use serde::de::{Deserialize, Deserializer}; -use serde::ser::{Serialize, Serializer}; -use smallvec::{smallvec, SmallVec, ToSmallVec}; -use ssz::{Decode, Encode}; -use tree_hash::Hash256; -use typenum::Unsigned; - -/// Maximum number of bytes to store on the stack in a bitfield's `SmallVec`. -/// -/// The default of 32 bytes is enough to take us through to ~500K validators, as the byte length of -/// attestation bitfields is roughly `N // 32 slots // 64 committes // 8 bits`. -pub const SMALLVEC_LEN: usize = 32; - -/// A marker trait applied to `Variable` and `Fixed` that defines the behaviour of a `Bitfield`. -pub trait BitfieldBehaviour: Clone {} - -/// A marker struct used to declare SSZ `Variable` behaviour on a `Bitfield`. -/// -/// See the [`Bitfield`](struct.Bitfield.html) docs for usage. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Variable { - _phantom: PhantomData, -} - -/// A marker struct used to declare SSZ `Fixed` behaviour on a `Bitfield`. -/// -/// See the [`Bitfield`](struct.Bitfield.html) docs for usage. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Fixed { - _phantom: PhantomData, -} - -impl BitfieldBehaviour for Variable {} -impl BitfieldBehaviour for Fixed {} - -/// A heap-allocated, ordered, variable-length collection of `bool` values, limited to `N` bits. -pub type BitList = Bitfield>; - -/// A heap-allocated, ordered, fixed-length collection of `bool` values, with `N` bits. -/// -/// See [Bitfield](struct.Bitfield.html) documentation. -pub type BitVector = Bitfield>; - -/// A heap-allocated, ordered, fixed-length, collection of `bool` values. Use of -/// [`BitList`](type.BitList.html) or [`BitVector`](type.BitVector.html) type aliases is preferred -/// over direct use of this struct. -/// -/// The `T` type parameter is used to define length behaviour with the `Variable` or `Fixed` marker -/// structs. -/// -/// The length of the Bitfield is set at instantiation (i.e., runtime, not compile time). However, -/// use with a `Variable` sets a type-level (i.e., compile-time) maximum length and `Fixed` -/// provides a type-level fixed length. -/// -/// ## Example -/// -/// The example uses the following crate-level type aliases: -/// -/// - `BitList` is an alias for `Bitfield>` -/// - `BitVector` is an alias for `Bitfield>` -/// -/// ``` -/// use ssz_types::{BitVector, BitList, typenum}; -/// -/// // `BitList` has a type-level maximum length. The length of the list is specified at runtime -/// // and it must be less than or equal to `N`. After instantiation, `BitList` cannot grow or -/// // shrink. -/// type BitList8 = BitList; -/// -/// // Creating a `BitList` with a larger-than-`N` capacity returns `None`. 
-/// assert!(BitList8::with_capacity(9).is_err()); -/// -/// let mut bitlist = BitList8::with_capacity(4).unwrap(); // `BitList` permits a capacity of less than the maximum. -/// assert!(bitlist.set(3, true).is_ok()); // Setting inside the instantiation capacity is permitted. -/// assert!(bitlist.set(5, true).is_err()); // Setting outside that capacity is not. -/// -/// // `BitVector` has a type-level fixed length. Unlike `BitList`, it cannot be instantiated with a custom length -/// // or grow/shrink. -/// type BitVector8 = BitVector; -/// -/// let mut bitvector = BitVector8::new(); -/// assert_eq!(bitvector.len(), 8); // `BitVector` length is fixed at the type-level. -/// assert!(bitvector.set(7, true).is_ok()); // Setting inside the capacity is permitted. -/// assert!(bitvector.set(9, true).is_err()); // Setting outside the capacity is not. -/// -/// ``` -/// -/// ## Note -/// -/// The internal representation of the bitfield is the same as that required by SSZ. The lowest -/// byte (by `Vec` index) stores the lowest bit-indices and the right-most bit stores the lowest -/// bit-index. E.g., `smallvec![0b0000_0001, 0b0000_0010]` has bits `0, 9` set. -#[derive(Clone, Debug, Derivative)] -#[derivative(PartialEq, Eq, Hash(bound = ""))] -pub struct Bitfield { - bytes: SmallVec<[u8; SMALLVEC_LEN]>, - len: usize, - _phantom: PhantomData, -} - -impl Bitfield> { - /// Instantiate with capacity for `num_bits` boolean values. The length cannot be grown or - /// shrunk after instantiation. - /// - /// All bits are initialized to `false`. - /// - /// Returns `None` if `num_bits > N`. - pub fn with_capacity(num_bits: usize) -> Result { - if num_bits <= N::to_usize() { - Ok(Self { - bytes: smallvec![0; bytes_for_bit_len(num_bits)], - len: num_bits, - _phantom: PhantomData, - }) - } else { - Err(Error::OutOfBounds { - i: Self::max_len(), - len: Self::max_len(), - }) - } - } - - /// Equal to `N` regardless of the value supplied to `with_capacity`. - pub fn max_len() -> usize { - N::to_usize() - } - - /// Consumes `self`, returning a serialized representation. - /// - /// The output is faithful to the SSZ encoding of `self`, such that a leading `true` bit is - /// used to indicate the length of the bitfield. - /// - /// ## Example - /// ``` - /// use ssz_types::{BitList, typenum}; - /// use smallvec::SmallVec; - /// - /// type BitList8 = BitList; - /// - /// let b = BitList8::with_capacity(4).unwrap(); - /// - /// assert_eq!(b.into_bytes(), SmallVec::from_buf([0b0001_0000])); - /// ``` - pub fn into_bytes(self) -> SmallVec<[u8; SMALLVEC_LEN]> { - let len = self.len(); - let mut bytes = self.bytes; - - bytes.resize(bytes_for_bit_len(len + 1), 0); - - let mut bitfield: Bitfield> = Bitfield::from_raw_bytes(bytes, len + 1) - .unwrap_or_else(|_| { - unreachable!( - "Bitfield with {} bytes must have enough capacity for {} bits.", - bytes_for_bit_len(len + 1), - len + 1 - ) - }); - bitfield - .set(len, true) - .expect("len must be in bounds for bitfield."); - - bitfield.bytes - } - - /// Instantiates a new instance from `bytes`. Consumes the same format that `self.into_bytes()` - /// produces (SSZ). - /// - /// Returns `None` if `bytes` are not a valid encoding. - pub fn from_bytes(bytes: SmallVec<[u8; SMALLVEC_LEN]>) -> Result { - let bytes_len = bytes.len(); - let mut initial_bitfield: Bitfield> = { - let num_bits = bytes.len() * 8; - Bitfield::from_raw_bytes(bytes, num_bits)? 
- }; - - let len = initial_bitfield - .highest_set_bit() - .ok_or(Error::MissingLengthInformation)?; - - // The length bit should be in the last byte, or else it means we have too many bytes. - if len / 8 + 1 != bytes_len { - return Err(Error::InvalidByteCount { - given: bytes_len, - expected: len / 8 + 1, - }); - } - - if len <= Self::max_len() { - initial_bitfield - .set(len, false) - .expect("Bit has been confirmed to exist"); - - let mut bytes = initial_bitfield.into_raw_bytes(); - - bytes.truncate(bytes_for_bit_len(len)); - - Self::from_raw_bytes(bytes, len) - } else { - Err(Error::OutOfBounds { - i: Self::max_len(), - len: Self::max_len(), - }) - } - } - - /// Compute the intersection of two BitLists of potentially different lengths. - /// - /// Return a new BitList with length equal to the shorter of the two inputs. - pub fn intersection(&self, other: &Self) -> Self { - let min_len = std::cmp::min(self.len(), other.len()); - let mut result = Self::with_capacity(min_len).expect("min len always less than N"); - // Bitwise-and the bytes together, starting from the left of each vector. This takes care - // of masking out any entries beyond `min_len` as well, assuming the bitfield doesn't - // contain any set bits beyond its length. - for i in 0..result.bytes.len() { - result.bytes[i] = self.bytes[i] & other.bytes[i]; - } - result - } - - /// Compute the union of two BitLists of potentially different lengths. - /// - /// Return a new BitList with length equal to the longer of the two inputs. - pub fn union(&self, other: &Self) -> Self { - let max_len = std::cmp::max(self.len(), other.len()); - let mut result = Self::with_capacity(max_len).expect("max len always less than N"); - for i in 0..result.bytes.len() { - result.bytes[i] = - self.bytes.get(i).copied().unwrap_or(0) | other.bytes.get(i).copied().unwrap_or(0); - } - result - } -} - -impl Bitfield> { - /// Instantiate a new `Bitfield` with a fixed-length of `N` bits. - /// - /// All bits are initialized to `false`. - pub fn new() -> Self { - Self { - bytes: smallvec![0; bytes_for_bit_len(Self::capacity())], - len: Self::capacity(), - _phantom: PhantomData, - } - } - - /// Returns `N`, the number of bits in `Self`. - pub fn capacity() -> usize { - N::to_usize() - } - - /// Consumes `self`, returning a serialized representation. - /// - /// The output is faithful to the SSZ encoding of `self`. - /// - /// ## Example - /// ``` - /// use ssz_types::{BitVector, typenum}; - /// use smallvec::SmallVec; - /// - /// type BitVector4 = BitVector; - /// - /// assert_eq!(BitVector4::new().into_bytes(), SmallVec::from_buf([0b0000_0000])); - /// ``` - pub fn into_bytes(self) -> SmallVec<[u8; SMALLVEC_LEN]> { - self.into_raw_bytes() - } - - /// Instantiates a new instance from `bytes`. Consumes the same format that `self.into_bytes()` - /// produces (SSZ). - /// - /// Returns `None` if `bytes` are not a valid encoding. - pub fn from_bytes(bytes: SmallVec<[u8; SMALLVEC_LEN]>) -> Result { - Self::from_raw_bytes(bytes, Self::capacity()) - } - - /// Compute the intersection of two fixed-length `Bitfield`s. - /// - /// Return a new fixed-length `Bitfield`. - pub fn intersection(&self, other: &Self) -> Self { - let mut result = Self::new(); - // Bitwise-and the bytes together, starting from the left of each vector. This takes care - // of masking out any entries beyond `min_len` as well, assuming the bitfield doesn't - // contain any set bits beyond its length. 
- for i in 0..result.bytes.len() { - result.bytes[i] = self.bytes[i] & other.bytes[i]; - } - result - } - - /// Compute the union of two fixed-length `Bitfield`s. - /// - /// Return a new fixed-length `Bitfield`. - pub fn union(&self, other: &Self) -> Self { - let mut result = Self::new(); - for i in 0..result.bytes.len() { - result.bytes[i] = - self.bytes.get(i).copied().unwrap_or(0) | other.bytes.get(i).copied().unwrap_or(0); - } - result - } -} - -impl Default for Bitfield> { - fn default() -> Self { - Self::new() - } -} - -impl Bitfield { - /// Sets the `i`'th bit to `value`. - /// - /// Returns `None` if `i` is out-of-bounds of `self`. - pub fn set(&mut self, i: usize, value: bool) -> Result<(), Error> { - let len = self.len; - - if i < len { - let byte = self - .bytes - .get_mut(i / 8) - .ok_or(Error::OutOfBounds { i, len })?; - - if value { - *byte |= 1 << (i % 8) - } else { - *byte &= !(1 << (i % 8)) - } - - Ok(()) - } else { - Err(Error::OutOfBounds { i, len: self.len }) - } - } - - /// Returns the value of the `i`'th bit. - /// - /// Returns `Error` if `i` is out-of-bounds of `self`. - pub fn get(&self, i: usize) -> Result { - if i < self.len { - let byte = self - .bytes - .get(i / 8) - .ok_or(Error::OutOfBounds { i, len: self.len })?; - - Ok(*byte & 1 << (i % 8) > 0) - } else { - Err(Error::OutOfBounds { i, len: self.len }) - } - } - - /// Returns the number of bits stored in `self`. - pub fn len(&self) -> usize { - self.len - } - - /// Returns `true` if `self.len() == 0`. - pub fn is_empty(&self) -> bool { - self.len == 0 - } - - /// Returns the underlying bytes representation of the bitfield. - pub fn into_raw_bytes(self) -> SmallVec<[u8; SMALLVEC_LEN]> { - self.bytes - } - - /// Returns a view into the underlying bytes representation of the bitfield. - pub fn as_slice(&self) -> &[u8] { - &self.bytes - } - - /// Instantiates from the given `bytes`, which are the same format as output from - /// `self.into_raw_bytes()`. - /// - /// Returns `None` if: - /// - /// - `bytes` is not the minimal required bytes to represent a bitfield of `bit_len` bits. - /// - `bit_len` is not a multiple of 8 and `bytes` contains set bits that are higher than, or - /// equal to `bit_len`. - fn from_raw_bytes(bytes: SmallVec<[u8; SMALLVEC_LEN]>, bit_len: usize) -> Result { - if bit_len == 0 { - if bytes.len() == 1 && bytes[0] == 0 { - // A bitfield with `bit_len` 0 can only be represented by a single zero byte. - Ok(Self { - bytes, - len: 0, - _phantom: PhantomData, - }) - } else { - Err(Error::ExcessBits) - } - } else if bytes.len() != bytes_for_bit_len(bit_len) { - // The number of bytes must be the minimum required to represent `bit_len`. - Err(Error::InvalidByteCount { - given: bytes.len(), - expected: bytes_for_bit_len(bit_len), - }) - } else { - // Ensure there are no bits higher than `bit_len` that are set to true. - let (mask, _) = u8::max_value().overflowing_shr(8 - (bit_len as u32 % 8)); - - if (bytes.last().expect("Guarded against empty bytes") & !mask) == 0 { - Ok(Self { - bytes, - len: bit_len, - _phantom: PhantomData, - }) - } else { - Err(Error::ExcessBits) - } - } - } - - /// Returns the `Some(i)` where `i` is the highest index with a set bit. Returns `None` if - /// there are no set bits. - pub fn highest_set_bit(&self) -> Option { - self.bytes - .iter() - .enumerate() - .rev() - .find(|(_, byte)| **byte > 0) - .map(|(i, byte)| i * 8 + 7 - byte.leading_zeros() as usize) - } - - /// Returns an iterator across bitfield `bool` values, starting at the lowest index. 
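The `set`/`get` accessors and `highest_set_bit` above follow the byte layout described in the struct-level docs (the lowest byte holds the lowest bit indices). A short usage sketch, assuming the `ssz_types` helpers are used as in this crate's own tests:

```rust
use ssz_types::{typenum, BitList};

type BitList16 = BitList<typenum::U16>;

fn main() {
    let mut b = BitList16::with_capacity(16).unwrap();
    b.set(0, true).unwrap();
    b.set(9, true).unwrap();

    assert_eq!(b.get(0), Ok(true));
    assert_eq!(b.get(1), Ok(false));
    assert_eq!(b.num_set_bits(), 2);
    assert_eq!(b.highest_set_bit(), Some(9));

    // Bit `i` lives in byte `i / 8` at position `i % 8`, so bits 0 and 9
    // land in the first and second bytes respectively.
    assert_eq!(b.as_slice(), &[0b0000_0001, 0b0000_0010]);
}
```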
- pub fn iter(&self) -> BitIter<'_, T> { - BitIter { - bitfield: self, - i: 0, - } - } - - /// Returns true if no bits are set. - pub fn is_zero(&self) -> bool { - self.bytes.iter().all(|byte| *byte == 0) - } - - /// Returns the number of bits that are set to `true`. - pub fn num_set_bits(&self) -> usize { - self.bytes - .iter() - .map(|byte| byte.count_ones() as usize) - .sum() - } - - /// Compute the difference of this Bitfield and another of potentially different length. - pub fn difference(&self, other: &Self) -> Self { - let mut result = self.clone(); - result.difference_inplace(other); - result - } - - /// Compute the difference of this Bitfield and another of potentially different length. - pub fn difference_inplace(&mut self, other: &Self) { - let min_byte_len = std::cmp::min(self.bytes.len(), other.bytes.len()); - - for i in 0..min_byte_len { - self.bytes[i] &= !other.bytes[i]; - } - } - - /// Shift the bits to higher indices, filling the lower indices with zeroes. - /// - /// The amount to shift by, `n`, must be less than or equal to `self.len()`. - pub fn shift_up(&mut self, n: usize) -> Result<(), Error> { - if n <= self.len() { - // Shift the bits up (starting from the high indices to avoid overwriting) - for i in (n..self.len()).rev() { - self.set(i, self.get(i - n)?)?; - } - // Zero the low bits - for i in 0..n { - self.set(i, false).unwrap(); - } - Ok(()) - } else { - Err(Error::OutOfBounds { - i: n, - len: self.len(), - }) - } - } -} - -/// Returns the minimum required bytes to represent a given number of bits. -/// -/// `bit_len == 0` requires a single byte. -fn bytes_for_bit_len(bit_len: usize) -> usize { - std::cmp::max(1, (bit_len + 7) / 8) -} - -/// An iterator over the bits in a `Bitfield`. -pub struct BitIter<'a, T> { - bitfield: &'a Bitfield, - i: usize, -} - -impl<'a, T: BitfieldBehaviour> Iterator for BitIter<'a, T> { - type Item = bool; - - fn next(&mut self) -> Option { - let res = self.bitfield.get(self.i).ok()?; - self.i += 1; - Some(res) - } -} - -impl Encode for Bitfield> { - fn is_ssz_fixed_len() -> bool { - false - } - - fn ssz_bytes_len(&self) -> usize { - // We could likely do better than turning this into bytes and reading the length, however - // it is kept this way for simplicity. - self.clone().into_bytes().len() - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(&self.clone().into_bytes()) - } -} - -impl Decode for Bitfield> { - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - Self::from_bytes(bytes.to_smallvec()).map_err(|e| { - ssz::DecodeError::BytesInvalid(format!("BitList failed to decode: {:?}", e)) - }) - } -} - -impl Encode for Bitfield> { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_bytes_len(&self) -> usize { - self.as_slice().len() - } - - fn ssz_fixed_len() -> usize { - bytes_for_bit_len(N::to_usize()) - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.extend_from_slice(&self.clone().into_bytes()) - } -} - -impl Decode for Bitfield> { - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - bytes_for_bit_len(N::to_usize()) - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - Self::from_bytes(bytes.to_smallvec()).map_err(|e| { - ssz::DecodeError::BytesInvalid(format!("BitVector failed to decode: {:?}", e)) - }) - } -} - -impl Serialize for Bitfield> { - /// Serde serialization is compliant with the Ethereum YAML test format. 
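The `Encode`/`Decode` impls above give `BitList` and `BitVector` their SSZ wire formats: a `BitList` appends a single `true` length bit just past its last data bit (as `into_bytes` does), while a `BitVector` is serialized as its raw bytes. A round-trip sketch under those assumptions:

```rust
use ssz::{Decode, Encode};
use ssz_types::{typenum, BitList, BitVector};

type BitList8 = BitList<typenum::U8>;
type BitVector8 = BitVector<typenum::U8>;

fn main() {
    // BitList: variable length, so a length bit is appended at index `len`.
    let mut list = BitList8::with_capacity(4).unwrap();
    list.set(1, true).unwrap();
    // Data bit 1 -> 0b0000_0010, length bit at index 4 -> 0b0001_0000.
    assert_eq!(list.as_ssz_bytes(), vec![0b0001_0010]);
    assert_eq!(BitList8::from_ssz_bytes(&[0b0001_0010]).unwrap(), list);

    // BitVector: fixed length, so no length bit is added.
    let mut vector = BitVector8::new();
    vector.set(1, true).unwrap();
    assert_eq!(vector.as_ssz_bytes(), vec![0b0000_0010]);
}
```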
- fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&hex_encode(self.as_ssz_bytes())) - } -} - -impl<'de, N: Unsigned + Clone> Deserialize<'de> for Bitfield> { - /// Serde serialization is compliant with the Ethereum YAML test format. - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; - Self::from_ssz_bytes(&bytes) - .map_err(|e| serde::de::Error::custom(format!("Bitfield {:?}", e))) - } -} - -impl Serialize for Bitfield> { - /// Serde serialization is compliant with the Ethereum YAML test format. - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&hex_encode(self.as_ssz_bytes())) - } -} - -impl<'de, N: Unsigned + Clone> Deserialize<'de> for Bitfield> { - /// Serde serialization is compliant with the Ethereum YAML test format. - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; - Self::from_ssz_bytes(&bytes) - .map_err(|e| serde::de::Error::custom(format!("Bitfield {:?}", e))) - } -} - -impl tree_hash::TreeHash for Bitfield> { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::List - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("List should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("List should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - // Note: we use `as_slice` because it does _not_ have the length-delimiting bit set (or - // present). - let root = bitfield_bytes_tree_hash_root::(self.as_slice()); - tree_hash::mix_in_length(&root, self.len()) - } -} - -impl tree_hash::TreeHash for Bitfield> { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - bitfield_bytes_tree_hash_root::(self.as_slice()) - } -} - -#[cfg(feature = "arbitrary")] -impl arbitrary::Arbitrary<'_> for Bitfield> { - fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { - let size = N::to_usize(); - let mut vec = smallvec![0u8; size]; - u.fill_buffer(&mut vec)?; - Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat) - } -} - -#[cfg(feature = "arbitrary")] -impl arbitrary::Arbitrary<'_> for Bitfield> { - fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { - let max_size = N::to_usize(); - let rand = usize::arbitrary(u)?; - let size = std::cmp::min(rand, max_size); - let mut vec = smallvec![0u8; size]; - u.fill_buffer(&mut vec)?; - Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat) - } -} - -#[cfg(test)] -mod bitvector { - use super::*; - use crate::BitVector; - - pub type BitVector0 = BitVector; - pub type BitVector1 = BitVector; - pub type BitVector4 = BitVector; - pub type BitVector8 = BitVector; - pub type BitVector16 = BitVector; - pub type BitVector64 = BitVector; - - #[test] - fn ssz_encode() { - assert_eq!(BitVector0::new().as_ssz_bytes(), vec![0b0000_0000]); - assert_eq!(BitVector1::new().as_ssz_bytes(), vec![0b0000_0000]); - assert_eq!(BitVector4::new().as_ssz_bytes(), vec![0b0000_0000]); - 
assert_eq!(BitVector8::new().as_ssz_bytes(), vec![0b0000_0000]); - assert_eq!( - BitVector16::new().as_ssz_bytes(), - vec![0b0000_0000, 0b0000_0000] - ); - - let mut b = BitVector8::new(); - for i in 0..8 { - b.set(i, true).unwrap(); - } - assert_eq!(b.as_ssz_bytes(), vec![255]); - - let mut b = BitVector4::new(); - for i in 0..4 { - b.set(i, true).unwrap(); - } - assert_eq!(b.as_ssz_bytes(), vec![0b0000_1111]); - } - - #[test] - fn ssz_decode() { - assert!(BitVector0::from_ssz_bytes(&[0b0000_0000]).is_ok()); - assert!(BitVector0::from_ssz_bytes(&[0b0000_0001]).is_err()); - assert!(BitVector0::from_ssz_bytes(&[0b0000_0010]).is_err()); - - assert!(BitVector1::from_ssz_bytes(&[0b0000_0001]).is_ok()); - assert!(BitVector1::from_ssz_bytes(&[0b0000_0010]).is_err()); - assert!(BitVector1::from_ssz_bytes(&[0b0000_0100]).is_err()); - assert!(BitVector1::from_ssz_bytes(&[0b0000_0000, 0b0000_0000]).is_err()); - - assert!(BitVector8::from_ssz_bytes(&[0b0000_0000]).is_ok()); - assert!(BitVector8::from_ssz_bytes(&[1, 0b0000_0000]).is_err()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0000, 1]).is_err()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0001]).is_ok()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0010]).is_ok()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0100, 0b0000_0001]).is_err()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0100, 0b0000_0010]).is_err()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0100, 0b0000_0100]).is_err()); - - assert!(BitVector16::from_ssz_bytes(&[0b0000_0000]).is_err()); - assert!(BitVector16::from_ssz_bytes(&[0b0000_0000, 0b0000_0000]).is_ok()); - assert!(BitVector16::from_ssz_bytes(&[1, 0b0000_0000, 0b0000_0000]).is_err()); - } - - #[test] - fn intersection() { - let a = BitVector16::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); - let b = BitVector16::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - let c = BitVector16::from_raw_bytes(smallvec![0b1000, 0b0001], 16).unwrap(); - - assert_eq!(a.intersection(&b), c); - assert_eq!(b.intersection(&a), c); - assert_eq!(a.intersection(&c), c); - assert_eq!(b.intersection(&c), c); - assert_eq!(a.intersection(&a), a); - assert_eq!(b.intersection(&b), b); - assert_eq!(c.intersection(&c), c); - } - - #[test] - fn intersection_diff_length() { - let a = BitVector16::from_bytes(smallvec![0b0010_1110, 0b0010_1011]).unwrap(); - let b = BitVector16::from_bytes(smallvec![0b0010_1101, 0b0000_0001]).unwrap(); - let c = BitVector16::from_bytes(smallvec![0b0010_1100, 0b0000_0001]).unwrap(); - - assert_eq!(a.len(), 16); - assert_eq!(b.len(), 16); - assert_eq!(c.len(), 16); - assert_eq!(a.intersection(&b), c); - assert_eq!(b.intersection(&a), c); - } - - #[test] - fn union() { - let a = BitVector16::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); - let b = BitVector16::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - let c = BitVector16::from_raw_bytes(smallvec![0b1111, 0b1001], 16).unwrap(); - - assert_eq!(a.union(&b), c); - assert_eq!(b.union(&a), c); - assert_eq!(a.union(&a), a); - assert_eq!(b.union(&b), b); - assert_eq!(c.union(&c), c); - } - - #[test] - fn union_diff_length() { - let a = BitVector16::from_bytes(smallvec![0b0010_1011, 0b0010_1110]).unwrap(); - let b = BitVector16::from_bytes(smallvec![0b0000_0001, 0b0010_1101]).unwrap(); - let c = BitVector16::from_bytes(smallvec![0b0010_1011, 0b0010_1111]).unwrap(); - - assert_eq!(a.len(), c.len()); - assert_eq!(a.union(&b), c); - assert_eq!(b.union(&a), c); - } - - #[test] - fn ssz_round_trip() { - 
assert_round_trip(BitVector0::new()); - - let mut b = BitVector1::new(); - b.set(0, true).unwrap(); - assert_round_trip(b); - - let mut b = BitVector8::new(); - for j in 0..8 { - if j % 2 == 0 { - b.set(j, true).unwrap(); - } - } - assert_round_trip(b); - - let mut b = BitVector8::new(); - for j in 0..8 { - b.set(j, true).unwrap(); - } - assert_round_trip(b); - - let mut b = BitVector16::new(); - for j in 0..16 { - if j % 2 == 0 { - b.set(j, true).unwrap(); - } - } - assert_round_trip(b); - - let mut b = BitVector16::new(); - for j in 0..16 { - b.set(j, true).unwrap(); - } - assert_round_trip(b); - } - - fn assert_round_trip(t: T) { - assert_eq!(T::from_ssz_bytes(&t.as_ssz_bytes()).unwrap(), t); - } - - #[test] - fn ssz_bytes_len() { - for i in 0..64 { - let mut bitfield = BitVector64::new(); - for j in 0..i { - bitfield.set(j, true).expect("should set bit in bounds"); - } - let bytes = bitfield.as_ssz_bytes(); - assert_eq!(bitfield.ssz_bytes_len(), bytes.len(), "i = {}", i); - } - } - - #[test] - fn excess_bits_nimbus() { - let bad = vec![0b0001_1111]; - - assert!(BitVector4::from_ssz_bytes(&bad).is_err()); - } - - // Ensure that stack size of a BitVector is manageable. - #[test] - fn size_of() { - assert_eq!(std::mem::size_of::(), SMALLVEC_LEN + 24); - } -} - -#[cfg(test)] -#[allow(clippy::cognitive_complexity)] -mod bitlist { - use super::*; - use crate::BitList; - - pub type BitList0 = BitList; - pub type BitList1 = BitList; - pub type BitList8 = BitList; - pub type BitList16 = BitList; - pub type BitList1024 = BitList; - - #[test] - fn ssz_encode() { - assert_eq!( - BitList0::with_capacity(0).unwrap().as_ssz_bytes(), - vec![0b0000_0001], - ); - - assert_eq!( - BitList1::with_capacity(0).unwrap().as_ssz_bytes(), - vec![0b0000_0001], - ); - - assert_eq!( - BitList1::with_capacity(1).unwrap().as_ssz_bytes(), - vec![0b0000_0010], - ); - - assert_eq!( - BitList8::with_capacity(8).unwrap().as_ssz_bytes(), - vec![0b0000_0000, 0b0000_0001], - ); - - assert_eq!( - BitList8::with_capacity(7).unwrap().as_ssz_bytes(), - vec![0b1000_0000] - ); - - let mut b = BitList8::with_capacity(8).unwrap(); - for i in 0..8 { - b.set(i, true).unwrap(); - } - assert_eq!(b.as_ssz_bytes(), vec![255, 0b0000_0001]); - - let mut b = BitList8::with_capacity(8).unwrap(); - for i in 0..4 { - b.set(i, true).unwrap(); - } - assert_eq!(b.as_ssz_bytes(), vec![0b0000_1111, 0b0000_0001]); - - assert_eq!( - BitList16::with_capacity(16).unwrap().as_ssz_bytes(), - vec![0b0000_0000, 0b0000_0000, 0b0000_0001] - ); - } - - #[test] - fn ssz_decode() { - assert!(BitList0::from_ssz_bytes(&[]).is_err()); - assert!(BitList1::from_ssz_bytes(&[]).is_err()); - assert!(BitList8::from_ssz_bytes(&[]).is_err()); - assert!(BitList16::from_ssz_bytes(&[]).is_err()); - - assert!(BitList0::from_ssz_bytes(&[0b0000_0000]).is_err()); - assert!(BitList1::from_ssz_bytes(&[0b0000_0000, 0b0000_0000]).is_err()); - assert!(BitList8::from_ssz_bytes(&[0b0000_0000]).is_err()); - assert!(BitList16::from_ssz_bytes(&[0b0000_0000]).is_err()); - - assert!(BitList0::from_ssz_bytes(&[0b0000_0001]).is_ok()); - assert!(BitList0::from_ssz_bytes(&[0b0000_0010]).is_err()); - - assert!(BitList1::from_ssz_bytes(&[0b0000_0001]).is_ok()); - assert!(BitList1::from_ssz_bytes(&[0b0000_0010]).is_ok()); - assert!(BitList1::from_ssz_bytes(&[0b0000_0100]).is_err()); - - assert!(BitList8::from_ssz_bytes(&[0b0000_0001]).is_ok()); - assert!(BitList8::from_ssz_bytes(&[0b0000_0010]).is_ok()); - assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0001]).is_ok()); - 
assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0010]).is_err()); - assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0100]).is_err()); - } - - #[test] - fn ssz_decode_extra_bytes() { - assert!(BitList0::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err()); - assert!(BitList1::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err()); - assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err()); - assert!(BitList16::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err()); - assert!(BitList1024::from_ssz_bytes(&[0b1000_0000, 0]).is_err()); - assert!(BitList1024::from_ssz_bytes(&[0b1000_0000, 0, 0]).is_err()); - assert!(BitList1024::from_ssz_bytes(&[0b1000_0000, 0, 0, 0, 0]).is_err()); - } - - #[test] - fn ssz_round_trip() { - assert_round_trip(BitList0::with_capacity(0).unwrap()); - - for i in 0..2 { - assert_round_trip(BitList1::with_capacity(i).unwrap()); - } - for i in 0..9 { - assert_round_trip(BitList8::with_capacity(i).unwrap()); - } - for i in 0..17 { - assert_round_trip(BitList16::with_capacity(i).unwrap()); - } - - let mut b = BitList1::with_capacity(1).unwrap(); - b.set(0, true).unwrap(); - assert_round_trip(b); - - for i in 0..8 { - let mut b = BitList8::with_capacity(i).unwrap(); - for j in 0..i { - if j % 2 == 0 { - b.set(j, true).unwrap(); - } - } - assert_round_trip(b); - - let mut b = BitList8::with_capacity(i).unwrap(); - for j in 0..i { - b.set(j, true).unwrap(); - } - assert_round_trip(b); - } - - for i in 0..16 { - let mut b = BitList16::with_capacity(i).unwrap(); - for j in 0..i { - if j % 2 == 0 { - b.set(j, true).unwrap(); - } - } - assert_round_trip(b); - - let mut b = BitList16::with_capacity(i).unwrap(); - for j in 0..i { - b.set(j, true).unwrap(); - } - assert_round_trip(b); - } - } - - fn assert_round_trip(t: T) { - assert_eq!(T::from_ssz_bytes(&t.as_ssz_bytes()).unwrap(), t); - } - - #[test] - fn from_raw_bytes() { - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0000], 0).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0001], 1).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0011], 2).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0111], 3).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_1111], 4).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0001_1111], 5).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0011_1111], 6).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0111_1111], 7).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111], 8).is_ok()); - - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0001], 9).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0011], 10).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0111], 11).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_1111], 12).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0001_1111], 13).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0011_1111], 14).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0111_1111], 15).is_ok()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b1111_1111], 16).is_ok()); - - for i in 0..8 { - assert!(BitList1024::from_raw_bytes(smallvec![], i).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111], i).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0000, 0b1111_1110], i).is_err()); - } - - 
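The `from_ssz_bytes` rejections in `ssz_decode` and `ssz_decode_extra_bytes` above reduce to two rules: the position of the highest set bit (the sentinel) fixes the list length, which must not exceed `N`, and no bytes may trail the byte holding the sentinel. A sketch under the same assumptions as the previous example:

use ssz::Decode;
use ssz_types::{typenum::U8, BitList};

fn main() {
    // Sentinel in bit 1 of the second byte implies length 9, over the maximum of 8.
    assert!(BitList::<U8>::from_ssz_bytes(&[0b0000_0001, 0b0000_0010]).is_err());
    // Sentinel in bit 0 of the second byte implies length 8, which is allowed.
    assert!(BitList::<U8>::from_ssz_bytes(&[0b0000_0001, 0b0000_0001]).is_ok());
    // An all-zero byte after the sentinel is rejected as excess data.
    assert!(BitList::<U8>::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err());
}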
assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0001], 0).is_err()); - - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0001], 0).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0011], 1).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_0111], 2).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0000_1111], 3).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0001_1111], 4).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0011_1111], 5).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b0111_1111], 6).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111], 7).is_err()); - - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0001], 8).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0011], 9).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_0111], 10).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0000_1111], 11).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0001_1111], 12).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0011_1111], 13).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b0111_1111], 14).is_err()); - assert!(BitList1024::from_raw_bytes(smallvec![0b1111_1111, 0b1111_1111], 15).is_err()); - } - - fn test_set_unset(num_bits: usize) { - let mut bitfield = BitList1024::with_capacity(num_bits).unwrap(); - - for i in 0..=num_bits { - if i < num_bits { - // Starts as false - assert_eq!(bitfield.get(i), Ok(false)); - // Can be set true. - assert!(bitfield.set(i, true).is_ok()); - assert_eq!(bitfield.get(i), Ok(true)); - // Can be set false - assert!(bitfield.set(i, false).is_ok()); - assert_eq!(bitfield.get(i), Ok(false)); - } else { - assert!(bitfield.get(i).is_err()); - assert!(bitfield.set(i, true).is_err()); - assert!(bitfield.get(i).is_err()); - } - } - } - - fn test_bytes_round_trip(num_bits: usize) { - for i in 0..num_bits { - let mut bitfield = BitList1024::with_capacity(num_bits).unwrap(); - bitfield.set(i, true).unwrap(); - - let bytes = bitfield.clone().into_raw_bytes(); - assert_eq!(bitfield, Bitfield::from_raw_bytes(bytes, num_bits).unwrap()); - } - } - - #[test] - fn set_unset() { - for i in 0..8 * 5 { - test_set_unset(i) - } - } - - #[test] - fn bytes_round_trip() { - for i in 0..8 * 5 { - test_bytes_round_trip(i) - } - } - - /// Type-specialised `smallvec` macro for testing. - macro_rules! 
bytevec { - ($($x : expr),* $(,)*) => { - { - let __smallvec: SmallVec<[u8; SMALLVEC_LEN]> = smallvec!($($x),*); - __smallvec - } - }; - } - - #[test] - fn into_raw_bytes() { - let mut bitfield = BitList1024::with_capacity(9).unwrap(); - bitfield.set(0, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0000_0001, 0b0000_0000] - ); - bitfield.set(1, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0000_0011, 0b0000_0000] - ); - bitfield.set(2, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0000_0111, 0b0000_0000] - ); - bitfield.set(3, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0000_1111, 0b0000_0000] - ); - bitfield.set(4, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0001_1111, 0b0000_0000] - ); - bitfield.set(5, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0011_1111, 0b0000_0000] - ); - bitfield.set(6, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b0111_1111, 0b0000_0000] - ); - bitfield.set(7, true).unwrap(); - assert_eq!( - bitfield.clone().into_raw_bytes(), - bytevec![0b1111_1111, 0b0000_0000] - ); - bitfield.set(8, true).unwrap(); - assert_eq!( - bitfield.into_raw_bytes(), - bytevec![0b1111_1111, 0b0000_0001] - ); - } - - #[test] - fn highest_set_bit() { - assert_eq!( - BitList1024::with_capacity(16).unwrap().highest_set_bit(), - None - ); - - assert_eq!( - BitList1024::from_raw_bytes(smallvec![0b0000_0001, 0b0000_0000], 16) - .unwrap() - .highest_set_bit(), - Some(0) - ); - - assert_eq!( - BitList1024::from_raw_bytes(smallvec![0b0000_0010, 0b0000_0000], 16) - .unwrap() - .highest_set_bit(), - Some(1) - ); - - assert_eq!( - BitList1024::from_raw_bytes(smallvec![0b0000_1000], 8) - .unwrap() - .highest_set_bit(), - Some(3) - ); - - assert_eq!( - BitList1024::from_raw_bytes(smallvec![0b0000_0000, 0b1000_0000], 16) - .unwrap() - .highest_set_bit(), - Some(15) - ); - } - - #[test] - fn intersection() { - let a = BitList1024::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); - let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - let c = BitList1024::from_raw_bytes(smallvec![0b1000, 0b0001], 16).unwrap(); - - assert_eq!(a.intersection(&b), c); - assert_eq!(b.intersection(&a), c); - assert_eq!(a.intersection(&c), c); - assert_eq!(b.intersection(&c), c); - assert_eq!(a.intersection(&a), a); - assert_eq!(b.intersection(&b), b); - assert_eq!(c.intersection(&c), c); - } - - #[test] - fn intersection_diff_length() { - let a = BitList1024::from_bytes(smallvec![0b0010_1110, 0b0010_1011]).unwrap(); - let b = BitList1024::from_bytes(smallvec![0b0010_1101, 0b0000_0001]).unwrap(); - let c = BitList1024::from_bytes(smallvec![0b0010_1100, 0b0000_0001]).unwrap(); - let d = BitList1024::from_bytes(smallvec![0b0010_1110, 0b1111_1111, 0b1111_1111]).unwrap(); - - assert_eq!(a.len(), 13); - assert_eq!(b.len(), 8); - assert_eq!(c.len(), 8); - assert_eq!(d.len(), 23); - assert_eq!(a.intersection(&b), c); - assert_eq!(b.intersection(&a), c); - assert_eq!(a.intersection(&d), a); - assert_eq!(d.intersection(&a), a); - } - - #[test] - fn union() { - let a = BitList1024::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); - let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - let c = BitList1024::from_raw_bytes(smallvec![0b1111, 0b1001], 16).unwrap(); - - assert_eq!(a.union(&b), c); - assert_eq!(b.union(&a), c); - 
assert_eq!(a.union(&a), a); - assert_eq!(b.union(&b), b); - assert_eq!(c.union(&c), c); - } - - #[test] - fn union_diff_length() { - let a = BitList1024::from_bytes(smallvec![0b0010_1011, 0b0010_1110]).unwrap(); - let b = BitList1024::from_bytes(smallvec![0b0000_0001, 0b0010_1101]).unwrap(); - let c = BitList1024::from_bytes(smallvec![0b0010_1011, 0b0010_1111]).unwrap(); - let d = BitList1024::from_bytes(smallvec![0b0010_1011, 0b1011_1110, 0b1000_1101]).unwrap(); - - assert_eq!(a.len(), c.len()); - assert_eq!(a.union(&b), c); - assert_eq!(b.union(&a), c); - assert_eq!(a.union(&d), d); - assert_eq!(d.union(&a), d); - } - - #[test] - fn difference() { - let a = BitList1024::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); - let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - let a_b = BitList1024::from_raw_bytes(smallvec![0b0100, 0b0000], 16).unwrap(); - let b_a = BitList1024::from_raw_bytes(smallvec![0b0011, 0b1000], 16).unwrap(); - - assert_eq!(a.difference(&b), a_b); - assert_eq!(b.difference(&a), b_a); - assert!(a.difference(&a).is_zero()); - } - - #[test] - fn difference_diff_length() { - let a = BitList1024::from_raw_bytes(smallvec![0b0110, 0b1100, 0b0011], 24).unwrap(); - let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - let a_b = BitList1024::from_raw_bytes(smallvec![0b0100, 0b0100, 0b0011], 24).unwrap(); - let b_a = BitList1024::from_raw_bytes(smallvec![0b1001, 0b0001], 16).unwrap(); - - assert_eq!(a.difference(&b), a_b); - assert_eq!(b.difference(&a), b_a); - } - - #[test] - fn shift_up() { - let mut a = BitList1024::from_raw_bytes(smallvec![0b1100_1111, 0b1101_0110], 16).unwrap(); - let mut b = BitList1024::from_raw_bytes(smallvec![0b1001_1110, 0b1010_1101], 16).unwrap(); - - a.shift_up(1).unwrap(); - assert_eq!(a, b); - a.shift_up(15).unwrap(); - assert!(a.is_zero()); - - b.shift_up(16).unwrap(); - assert!(b.is_zero()); - assert!(b.shift_up(17).is_err()); - } - - #[test] - fn num_set_bits() { - let a = BitList1024::from_raw_bytes(smallvec![0b1100, 0b0001], 16).unwrap(); - let b = BitList1024::from_raw_bytes(smallvec![0b1011, 0b1001], 16).unwrap(); - - assert_eq!(a.num_set_bits(), 3); - assert_eq!(b.num_set_bits(), 5); - } - - #[test] - fn iter() { - let mut bitfield = BitList1024::with_capacity(9).unwrap(); - bitfield.set(2, true).unwrap(); - bitfield.set(8, true).unwrap(); - - assert_eq!( - bitfield.iter().collect::>(), - vec![false, false, true, false, false, false, false, false, true] - ); - } - - #[test] - fn ssz_bytes_len() { - for i in 1..64 { - let mut bitfield = BitList1024::with_capacity(i).unwrap(); - for j in 0..i { - bitfield.set(j, true).expect("should set bit in bounds"); - } - let bytes = bitfield.as_ssz_bytes(); - assert_eq!(bitfield.ssz_bytes_len(), bytes.len(), "i = {}", i); - } - } - - // Ensure that the stack size of a BitList is manageable. 
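The `iter`, `num_set_bits` and `highest_set_bit` tests above describe the same view of a BitList: one boolean per bit up to the list's length, with the counting helpers agreeing with plain iteration. A small standalone sketch, assuming the same public `ssz_types` API:

use ssz_types::{typenum::U16, BitList};

fn main() {
    let mut bits = BitList::<U16>::with_capacity(9).unwrap();
    bits.set(2, true).unwrap();
    bits.set(8, true).unwrap();

    // Nine items are yielded, one per bit of the list's length.
    assert_eq!(bits.iter().count(), 9);
    assert_eq!(bits.num_set_bits(), 2);
    assert_eq!(bits.highest_set_bit(), Some(8));
}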
- #[test] - fn size_of() { - assert_eq!(std::mem::size_of::(), SMALLVEC_LEN + 24); - } -} diff --git a/consensus/ssz_types/src/fixed_vector.rs b/consensus/ssz_types/src/fixed_vector.rs deleted file mode 100644 index 9625f27f3..000000000 --- a/consensus/ssz_types/src/fixed_vector.rs +++ /dev/null @@ -1,446 +0,0 @@ -use crate::tree_hash::vec_tree_hash_root; -use crate::Error; -use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; -use std::marker::PhantomData; -use std::ops::{Deref, DerefMut, Index, IndexMut}; -use std::slice::SliceIndex; -use tree_hash::Hash256; -use typenum::Unsigned; - -pub use typenum; - -/// Emulates a SSZ `Vector` (distinct from a Rust `Vec`). -/// -/// An ordered, heap-allocated, fixed-length, homogeneous collection of `T`, with `N` values. -/// -/// This struct is backed by a Rust `Vec` but constrained such that it must be instantiated with a -/// fixed number of elements and you may not add or remove elements, only modify. -/// -/// The length of this struct is fixed at the type-level using -/// [typenum](https://crates.io/crates/typenum). -/// -/// ## Note -/// -/// Whilst it is possible with this library, SSZ declares that a `FixedVector` with a length of `0` -/// is illegal. -/// -/// ## Example -/// -/// ``` -/// use ssz_types::{FixedVector, typenum}; -/// -/// let base: Vec = vec![1, 2, 3, 4]; -/// -/// // Create a `FixedVector` from a `Vec` that has the expected length. -/// let exact: FixedVector<_, typenum::U4> = FixedVector::from(base.clone()); -/// assert_eq!(&exact[..], &[1, 2, 3, 4]); -/// -/// // Create a `FixedVector` from a `Vec` that is too long and the `Vec` is truncated. -/// let short: FixedVector<_, typenum::U3> = FixedVector::from(base.clone()); -/// assert_eq!(&short[..], &[1, 2, 3]); -/// -/// // Create a `FixedVector` from a `Vec` that is too short and the missing values are created -/// // using `std::default::Default`. -/// let long: FixedVector<_, typenum::U5> = FixedVector::from(base); -/// assert_eq!(&long[..], &[1, 2, 3, 4, 0]); -/// ``` -#[derive(Debug, Clone, Serialize, Deserialize, Derivative)] -#[derivative(PartialEq, Hash(bound = "T: std::hash::Hash"))] -#[serde(transparent)] -pub struct FixedVector { - vec: Vec, - _phantom: PhantomData, -} - -impl FixedVector { - /// Returns `Ok` if the given `vec` equals the fixed length of `Self`. Otherwise returns - /// `Err`. - pub fn new(vec: Vec) -> Result { - if vec.len() == Self::capacity() { - Ok(Self { - vec, - _phantom: PhantomData, - }) - } else { - Err(Error::OutOfBounds { - i: vec.len(), - len: Self::capacity(), - }) - } - } - - /// Create a new vector filled with clones of `elem`. - pub fn from_elem(elem: T) -> Self - where - T: Clone, - { - Self { - vec: vec![elem; N::to_usize()], - _phantom: PhantomData, - } - } - - /// Identical to `self.capacity`, returns the type-level constant length. - /// - /// Exists for compatibility with `Vec`. - pub fn len(&self) -> usize { - self.vec.len() - } - - /// True if the type-level constant length of `self` is zero. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns the type-level constant length. 
- pub fn capacity() -> usize { - N::to_usize() - } -} - -impl From> for FixedVector { - fn from(mut vec: Vec) -> Self { - vec.resize_with(Self::capacity(), Default::default); - - Self { - vec, - _phantom: PhantomData, - } - } -} - -impl From> for Vec { - fn from(vector: FixedVector) -> Vec { - vector.vec - } -} - -impl Default for FixedVector { - fn default() -> Self { - Self { - vec: (0..N::to_usize()).map(|_| T::default()).collect(), - _phantom: PhantomData, - } - } -} - -impl> Index for FixedVector { - type Output = I::Output; - - #[inline] - fn index(&self, index: I) -> &Self::Output { - Index::index(&self.vec, index) - } -} - -impl> IndexMut for FixedVector { - #[inline] - fn index_mut(&mut self, index: I) -> &mut Self::Output { - IndexMut::index_mut(&mut self.vec, index) - } -} - -impl Deref for FixedVector { - type Target = [T]; - - fn deref(&self) -> &[T] { - &self.vec[..] - } -} - -// This implementation is required to use `get_mut` to access elements. -// -// It's safe because none of the methods on mutable slices allow changing the length -// of the backing vec. -impl DerefMut for FixedVector { - fn deref_mut(&mut self) -> &mut [T] { - &mut self.vec[..] - } -} - -impl tree_hash::TreeHash for FixedVector -where - T: tree_hash::TreeHash, -{ - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - vec_tree_hash_root::(&self.vec) - } -} - -impl ssz::Encode for FixedVector -where - T: ssz::Encode, -{ - fn is_ssz_fixed_len() -> bool { - T::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - if ::is_ssz_fixed_len() { - T::ssz_fixed_len() * N::to_usize() - } else { - ssz::BYTES_PER_LENGTH_OFFSET - } - } - - fn ssz_bytes_len(&self) -> usize { - self.vec.ssz_bytes_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - if T::is_ssz_fixed_len() { - buf.reserve(T::ssz_fixed_len() * self.len()); - - for item in &self.vec { - item.ssz_append(buf); - } - } else { - let mut encoder = - ssz::SszEncoder::container(buf, self.len() * ssz::BYTES_PER_LENGTH_OFFSET); - - for item in &self.vec { - encoder.append(item); - } - - encoder.finalize(); - } - } -} - -impl ssz::Decode for FixedVector -where - T: ssz::Decode, -{ - fn is_ssz_fixed_len() -> bool { - T::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - if ::is_ssz_fixed_len() { - T::ssz_fixed_len() * N::to_usize() - } else { - ssz::BYTES_PER_LENGTH_OFFSET - } - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let fixed_len = N::to_usize(); - - if bytes.is_empty() { - Err(ssz::DecodeError::InvalidByteLength { - len: 0, - expected: 1, - }) - } else if T::is_ssz_fixed_len() { - let num_items = bytes - .len() - .checked_div(T::ssz_fixed_len()) - .ok_or(ssz::DecodeError::ZeroLengthItem)?; - - if num_items != fixed_len { - return Err(ssz::DecodeError::BytesInvalid(format!( - "FixedVector of {} items has {} items", - num_items, fixed_len - ))); - } - - bytes - .chunks(T::ssz_fixed_len()) - .map(|chunk| T::from_ssz_bytes(chunk)) - .collect::, _>>() - .and_then(|vec| { - Self::new(vec).map_err(|e| { - ssz::DecodeError::BytesInvalid(format!( - "Wrong number of FixedVector elements: {:?}", - e - )) - }) - }) - } else { - let vec = ssz::decode_list_of_variable_length_items(bytes, Some(fixed_len))?; - Self::new(vec).map_err(|e| { - 
ssz::DecodeError::BytesInvalid(format!( - "Wrong number of FixedVector elements: {:?}", - e - )) - }) - } - } -} - -#[cfg(feature = "arbitrary")] -impl<'a, T: arbitrary::Arbitrary<'a>, N: 'static + Unsigned> arbitrary::Arbitrary<'a> - for FixedVector -{ - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let size = N::to_usize(); - let mut vec: Vec = Vec::with_capacity(size); - for _ in 0..size { - vec.push(::arbitrary(u)?); - } - Self::new(vec).map_err(|_| arbitrary::Error::IncorrectFormat) - } -} - -#[cfg(test)] -mod test { - use super::*; - use ssz::*; - use tree_hash::{merkle_root, TreeHash}; - use tree_hash_derive::TreeHash; - use typenum::*; - - #[test] - fn new() { - let vec = vec![42; 5]; - let fixed: Result, _> = FixedVector::new(vec); - assert!(fixed.is_err()); - - let vec = vec![42; 3]; - let fixed: Result, _> = FixedVector::new(vec); - assert!(fixed.is_err()); - - let vec = vec![42; 4]; - let fixed: Result, _> = FixedVector::new(vec); - assert!(fixed.is_ok()); - } - - #[test] - fn indexing() { - let vec = vec![1, 2]; - - let mut fixed: FixedVector = vec.clone().into(); - - assert_eq!(fixed[0], 1); - assert_eq!(&fixed[0..1], &vec[0..1]); - assert_eq!((fixed[..]).len(), 8192); - - fixed[1] = 3; - assert_eq!(fixed[1], 3); - } - - #[test] - fn length() { - let vec = vec![42; 5]; - let fixed: FixedVector = FixedVector::from(vec.clone()); - assert_eq!(&fixed[..], &vec[0..4]); - - let vec = vec![42; 3]; - let fixed: FixedVector = FixedVector::from(vec.clone()); - assert_eq!(&fixed[0..3], &vec[..]); - assert_eq!(&fixed[..], &vec![42, 42, 42, 0][..]); - - let vec = vec![]; - let fixed: FixedVector = FixedVector::from(vec); - assert_eq!(&fixed[..], &vec![0, 0, 0, 0][..]); - } - - #[test] - fn deref() { - let vec = vec![0, 2, 4, 6]; - let fixed: FixedVector = FixedVector::from(vec); - - assert_eq!(fixed.first(), Some(&0)); - assert_eq!(fixed.get(3), Some(&6)); - assert_eq!(fixed.get(4), None); - } - - #[test] - fn ssz_encode() { - let vec: FixedVector = vec![0; 2].into(); - assert_eq!(vec.as_ssz_bytes(), vec![0, 0, 0, 0]); - assert_eq!( as Encode>::ssz_fixed_len(), 4); - } - - fn ssz_round_trip(item: T) { - let encoded = &item.as_ssz_bytes(); - assert_eq!(item.ssz_bytes_len(), encoded.len()); - assert_eq!(T::from_ssz_bytes(encoded), Ok(item)); - } - - #[test] - fn ssz_round_trip_u16_len_8() { - ssz_round_trip::>(vec![42; 8].into()); - ssz_round_trip::>(vec![0; 8].into()); - } - - #[test] - fn tree_hash_u8() { - let fixed: FixedVector = FixedVector::from(vec![]); - assert_eq!(fixed.tree_hash_root(), merkle_root(&[0; 8], 0)); - - let fixed: FixedVector = FixedVector::from(vec![0; 1]); - assert_eq!(fixed.tree_hash_root(), merkle_root(&[0; 8], 0)); - - let fixed: FixedVector = FixedVector::from(vec![0; 8]); - assert_eq!(fixed.tree_hash_root(), merkle_root(&[0; 8], 0)); - - let fixed: FixedVector = FixedVector::from(vec![42; 16]); - assert_eq!(fixed.tree_hash_root(), merkle_root(&[42; 16], 0)); - - let source: Vec = (0..16).collect(); - let fixed: FixedVector = FixedVector::from(source.clone()); - assert_eq!(fixed.tree_hash_root(), merkle_root(&source, 0)); - } - - #[derive(Clone, Copy, TreeHash, Default)] - struct A { - a: u32, - b: u32, - } - - fn repeat(input: &[u8], n: usize) -> Vec { - let mut output = vec![]; - - for _ in 0..n { - output.append(&mut input.to_vec()); - } - - output - } - - #[test] - fn tree_hash_composite() { - let a = A { a: 0, b: 1 }; - - let fixed: FixedVector = FixedVector::from(vec![]); - assert_eq!(fixed.tree_hash_root(), merkle_root(&[0; 
32], 0)); - - let fixed: FixedVector = FixedVector::from(vec![a]); - assert_eq!( - fixed.tree_hash_root(), - merkle_root(a.tree_hash_root().as_bytes(), 0) - ); - - let fixed: FixedVector = FixedVector::from(vec![a; 8]); - assert_eq!( - fixed.tree_hash_root(), - merkle_root(&repeat(a.tree_hash_root().as_bytes(), 8), 0) - ); - - let fixed: FixedVector = FixedVector::from(vec![a; 13]); - assert_eq!( - fixed.tree_hash_root(), - merkle_root(&repeat(a.tree_hash_root().as_bytes(), 13), 0) - ); - - let fixed: FixedVector = FixedVector::from(vec![a; 16]); - assert_eq!( - fixed.tree_hash_root(), - merkle_root(&repeat(a.tree_hash_root().as_bytes(), 16), 0) - ); - } -} diff --git a/consensus/ssz_types/src/lib.rs b/consensus/ssz_types/src/lib.rs deleted file mode 100644 index 3e181da8c..000000000 --- a/consensus/ssz_types/src/lib.rs +++ /dev/null @@ -1,72 +0,0 @@ -//! Provides types with unique properties required for SSZ serialization and Merklization: -//! -//! - `FixedVector`: A heap-allocated list with a size that is fixed at compile time. -//! - `VariableList`: A heap-allocated list that cannot grow past a type-level maximum length. -//! - `BitList`: A heap-allocated bitfield that with a type-level _maximum_ length. -//! - `BitVector`: A heap-allocated bitfield that with a type-level _fixed__ length. -//! -//! These structs are required as SSZ serialization and Merklization rely upon type-level lengths -//! for padding and verification. -//! -//! Adheres to the Ethereum 2.0 [SSZ -//! specification](https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/ssz/simple-serialize.md) -//! at v0.12.1. -//! -//! ## Example -//! ``` -//! use ssz_types::*; -//! -//! pub struct Example { -//! bit_vector: BitVector, -//! bit_list: BitList, -//! variable_list: VariableList, -//! fixed_vector: FixedVector, -//! } -//! -//! let mut example = Example { -//! bit_vector: Bitfield::new(), -//! bit_list: Bitfield::with_capacity(4).unwrap(), -//! variable_list: <_>::from(vec![0, 1]), -//! fixed_vector: <_>::from(vec![2, 3]), -//! }; -//! -//! assert_eq!(example.bit_vector.len(), 8); -//! assert_eq!(example.bit_list.len(), 4); -//! assert_eq!(&example.variable_list[..], &[0, 1]); -//! assert_eq!(&example.fixed_vector[..], &[2, 3, 0, 0, 0, 0, 0, 0]); -//! -//! ``` - -#[macro_use] -mod bitfield; -mod fixed_vector; -pub mod serde_utils; -mod tree_hash; -mod variable_list; - -pub use bitfield::{BitList, BitVector, Bitfield}; -pub use fixed_vector::FixedVector; -pub use typenum; -pub use variable_list::VariableList; - -pub mod length { - pub use crate::bitfield::{Fixed, Variable}; -} - -/// Returned when an item encounters an error. -#[derive(PartialEq, Debug, Clone)] -pub enum Error { - OutOfBounds { - i: usize, - len: usize, - }, - /// A `BitList` does not have a set bit, therefore it's length is unknowable. - MissingLengthInformation, - /// A `BitList` has excess bits set to true. - ExcessBits, - /// A `BitList` has an invalid number of bytes for a given bit length. 
- InvalidByteCount { - given: usize, - expected: usize, - }, -} diff --git a/consensus/ssz_types/src/serde_utils/hex_fixed_vec.rs b/consensus/ssz_types/src/serde_utils/hex_fixed_vec.rs deleted file mode 100644 index 86077891b..000000000 --- a/consensus/ssz_types/src/serde_utils/hex_fixed_vec.rs +++ /dev/null @@ -1,22 +0,0 @@ -use crate::FixedVector; -use eth2_serde_utils::hex::{self, PrefixedHexVisitor}; -use serde::{Deserializer, Serializer}; -use typenum::Unsigned; - -pub fn serialize(bytes: &FixedVector, serializer: S) -> Result -where - S: Serializer, - U: Unsigned, -{ - serializer.serialize_str(&hex::encode(&bytes[..])) -} - -pub fn deserialize<'de, D, U>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, - U: Unsigned, -{ - let vec = deserializer.deserialize_string(PrefixedHexVisitor)?; - FixedVector::new(vec) - .map_err(|e| serde::de::Error::custom(format!("invalid fixed vector: {:?}", e))) -} diff --git a/consensus/ssz_types/src/serde_utils/hex_var_list.rs b/consensus/ssz_types/src/serde_utils/hex_var_list.rs deleted file mode 100644 index e3a3a14e0..000000000 --- a/consensus/ssz_types/src/serde_utils/hex_var_list.rs +++ /dev/null @@ -1,23 +0,0 @@ -//! Serialize `VariableList` as 0x-prefixed hex string. -use crate::VariableList; -use eth2_serde_utils::hex::{self, PrefixedHexVisitor}; -use serde::{Deserializer, Serializer}; -use typenum::Unsigned; - -pub fn serialize(bytes: &VariableList, serializer: S) -> Result -where - S: Serializer, - N: Unsigned, -{ - serializer.serialize_str(&hex::encode(&**bytes)) -} - -pub fn deserialize<'de, D, N>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, - N: Unsigned, -{ - let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; - VariableList::new(bytes) - .map_err(|e| serde::de::Error::custom(format!("invalid variable list: {:?}", e))) -} diff --git a/consensus/ssz_types/src/serde_utils/list_of_hex_var_list.rs b/consensus/ssz_types/src/serde_utils/list_of_hex_var_list.rs deleted file mode 100644 index e2fd8ddf3..000000000 --- a/consensus/ssz_types/src/serde_utils/list_of_hex_var_list.rs +++ /dev/null @@ -1,77 +0,0 @@ -//! Serialize `VaraibleList, N>` as list of 0x-prefixed hex string. -use crate::VariableList; -use serde::{ser::SerializeSeq, Deserialize, Deserializer, Serialize, Serializer}; -use std::marker::PhantomData; -use typenum::Unsigned; - -#[derive(Deserialize)] -#[serde(transparent)] -pub struct WrappedListOwned( - #[serde(with = "crate::serde_utils::hex_var_list")] VariableList, -); - -#[derive(Serialize)] -#[serde(transparent)] -pub struct WrappedListRef<'a, N: Unsigned>( - #[serde(with = "crate::serde_utils::hex_var_list")] &'a VariableList, -); - -pub fn serialize( - list: &VariableList, N>, - serializer: S, -) -> Result -where - S: Serializer, - M: Unsigned, - N: Unsigned, -{ - let mut seq = serializer.serialize_seq(Some(list.len()))?; - for bytes in list { - seq.serialize_element(&WrappedListRef(bytes))?; - } - seq.end() -} - -#[derive(Default)] -pub struct Visitor { - _phantom_m: PhantomData, - _phantom_n: PhantomData, -} - -impl<'a, M, N> serde::de::Visitor<'a> for Visitor -where - M: Unsigned, - N: Unsigned, -{ - type Value = VariableList, N>; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a list of 0x-prefixed hex bytes") - } - - fn visit_seq(self, mut seq: A) -> Result - where - A: serde::de::SeqAccess<'a>, - { - let mut list: VariableList, N> = <_>::default(); - - while let Some(val) = seq.next_element::>()? 
{ - list.push(val.0).map_err(|e| { - serde::de::Error::custom(format!("failed to push value to list: {:?}.", e)) - })?; - } - - Ok(list) - } -} - -pub fn deserialize<'de, D, M, N>( - deserializer: D, -) -> Result, N>, D::Error> -where - D: Deserializer<'de>, - M: Unsigned, - N: Unsigned, -{ - deserializer.deserialize_seq(Visitor::default()) -} diff --git a/consensus/ssz_types/src/serde_utils/mod.rs b/consensus/ssz_types/src/serde_utils/mod.rs deleted file mode 100644 index 4417f5ac5..000000000 --- a/consensus/ssz_types/src/serde_utils/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -pub mod hex_fixed_vec; -pub mod hex_var_list; -pub mod list_of_hex_fixed_vec; -pub mod list_of_hex_var_list; -pub mod quoted_u64_fixed_vec; -pub mod quoted_u64_var_list; diff --git a/consensus/ssz_types/src/serde_utils/quoted_u64_fixed_vec.rs b/consensus/ssz_types/src/serde_utils/quoted_u64_fixed_vec.rs deleted file mode 100644 index 0eb265adc..000000000 --- a/consensus/ssz_types/src/serde_utils/quoted_u64_fixed_vec.rs +++ /dev/null @@ -1,113 +0,0 @@ -//! Formats `FixedVector` using quotes. -//! -//! E.g., `FixedVector::from(vec![0, 1, 2])` serializes as `["0", "1", "2"]`. -//! -//! Quotes can be optional during decoding. If `N` does not equal the length deserialization will fail. - -use crate::serde_utils::quoted_u64_var_list::deserialize_max; -use crate::FixedVector; -use eth2_serde_utils::quoted_u64_vec::QuotedIntWrapper; -use serde::ser::SerializeSeq; -use serde::{Deserializer, Serializer}; -use std::marker::PhantomData; -use typenum::Unsigned; - -pub struct QuotedIntFixedVecVisitor { - _phantom: PhantomData, -} - -impl<'a, N> serde::de::Visitor<'a> for QuotedIntFixedVecVisitor -where - N: Unsigned, -{ - type Value = FixedVector; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a list of quoted or unquoted integers") - } - - fn visit_seq(self, seq: A) -> Result - where - A: serde::de::SeqAccess<'a>, - { - let vec = deserialize_max(seq, N::to_usize())?; - let fix: FixedVector = FixedVector::new(vec) - .map_err(|e| serde::de::Error::custom(format!("FixedVector: {:?}", e)))?; - Ok(fix) - } -} - -pub fn serialize(value: &[u64], serializer: S) -> Result -where - S: Serializer, -{ - let mut seq = serializer.serialize_seq(Some(value.len()))?; - for &int in value { - seq.serialize_element(&QuotedIntWrapper { int })?; - } - seq.end() -} - -pub fn deserialize<'de, D, N>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, - N: Unsigned, -{ - deserializer.deserialize_any(QuotedIntFixedVecVisitor { - _phantom: PhantomData, - }) -} - -#[cfg(test)] -mod test { - use super::*; - use serde_derive::{Deserialize, Serialize}; - use typenum::U4; - - #[derive(Debug, Serialize, Deserialize)] - struct Obj { - #[serde(with = "crate::serde_utils::quoted_u64_fixed_vec")] - values: FixedVector, - } - - #[test] - fn quoted_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": ["1", "2", "3", "4"] }"#).unwrap(); - let expected: FixedVector = FixedVector::from(vec![1, 2, 3, 4]); - assert_eq!(obj.values, expected); - } - - #[test] - fn unquoted_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": [1, 2, 3, 4] }"#).unwrap(); - let expected: FixedVector = FixedVector::from(vec![1, 2, 3, 4]); - assert_eq!(obj.values, expected); - } - - #[test] - fn mixed_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": ["1", 2, "3", "4"] }"#).unwrap(); - let expected: FixedVector = FixedVector::from(vec![1, 2, 3, 4]); - assert_eq!(obj.values, 
expected); - } - - #[test] - fn empty_list_err() { - serde_json::from_str::(r#"{ "values": [] }"#).unwrap_err(); - } - - #[test] - fn short_list_err() { - serde_json::from_str::(r#"{ "values": [1, 2] }"#).unwrap_err(); - } - - #[test] - fn long_list_err() { - serde_json::from_str::(r#"{ "values": [1, 2, 3, 4, 5] }"#).unwrap_err(); - } - - #[test] - fn whole_list_quoted_err() { - serde_json::from_str::(r#"{ "values": "[1, 2, 3, 4]" }"#).unwrap_err(); - } -} diff --git a/consensus/ssz_types/src/serde_utils/quoted_u64_var_list.rs b/consensus/ssz_types/src/serde_utils/quoted_u64_var_list.rs deleted file mode 100644 index 9e176b635..000000000 --- a/consensus/ssz_types/src/serde_utils/quoted_u64_var_list.rs +++ /dev/null @@ -1,139 +0,0 @@ -//! Formats `VariableList` using quotes. -//! -//! E.g., `VariableList::from(vec![0, 1, 2])` serializes as `["0", "1", "2"]`. -//! -//! Quotes can be optional during decoding. If the length of the `Vec` is greater than `N`, deserialization fails. - -use crate::VariableList; -use eth2_serde_utils::quoted_u64_vec::QuotedIntWrapper; -use serde::ser::SerializeSeq; -use serde::{Deserializer, Serializer}; -use std::marker::PhantomData; -use typenum::Unsigned; - -pub struct QuotedIntVarListVisitor { - _phantom: PhantomData, -} - -impl<'a, N> serde::de::Visitor<'a> for QuotedIntVarListVisitor -where - N: Unsigned, -{ - type Value = VariableList; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a list of quoted or unquoted integers") - } - - fn visit_seq(self, seq: A) -> Result - where - A: serde::de::SeqAccess<'a>, - { - let vec = deserialize_max(seq, N::to_usize())?; - let list: VariableList = VariableList::new(vec) - .map_err(|e| serde::de::Error::custom(format!("VariableList: {:?}", e)))?; - Ok(list) - } -} - -pub fn serialize(value: &[u64], serializer: S) -> Result -where - S: Serializer, -{ - let mut seq = serializer.serialize_seq(Some(value.len()))?; - for &int in value { - seq.serialize_element(&QuotedIntWrapper { int })?; - } - seq.end() -} - -pub fn deserialize<'de, D, N>(deserializer: D) -> Result, D::Error> -where - D: Deserializer<'de>, - N: Unsigned, -{ - deserializer.deserialize_any(QuotedIntVarListVisitor { - _phantom: PhantomData, - }) -} - -/// Returns a `Vec` of no more than `max_items` length. -pub(crate) fn deserialize_max<'a, A>(mut seq: A, max_items: usize) -> Result, A::Error> -where - A: serde::de::SeqAccess<'a>, -{ - let mut vec = vec![]; - let mut counter = 0; - - while let Some(val) = seq.next_element()? { - let val: QuotedIntWrapper = val; - counter += 1; - if counter > max_items { - return Err(serde::de::Error::custom(format!( - "Deserialization failed. 
Length cannot be greater than {}.", - max_items - ))); - } - - vec.push(val.int); - } - - Ok(vec) -} - -#[cfg(test)] -mod test { - use super::*; - use serde_derive::{Deserialize, Serialize}; - use typenum::U4; - - #[derive(Debug, Serialize, Deserialize)] - struct Obj { - #[serde(with = "crate::serde_utils::quoted_u64_var_list")] - values: VariableList, - } - - #[test] - fn quoted_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": ["1", "2", "3", "4"] }"#).unwrap(); - let expected: VariableList = VariableList::from(vec![1, 2, 3, 4]); - assert_eq!(obj.values, expected); - } - - #[test] - fn unquoted_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": [1, 2, 3, 4] }"#).unwrap(); - let expected: VariableList = VariableList::from(vec![1, 2, 3, 4]); - assert_eq!(obj.values, expected); - } - - #[test] - fn mixed_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": ["1", 2, "3", "4"] }"#).unwrap(); - let expected: VariableList = VariableList::from(vec![1, 2, 3, 4]); - assert_eq!(obj.values, expected); - } - - #[test] - fn empty_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": [] }"#).unwrap(); - assert!(obj.values.is_empty()); - } - - #[test] - fn short_list_success() { - let obj: Obj = serde_json::from_str(r#"{ "values": [1, 2] }"#).unwrap(); - let expected: VariableList = VariableList::from(vec![1, 2]); - assert_eq!(obj.values, expected); - } - - #[test] - fn long_list_err() { - serde_json::from_str::(r#"{ "values": [1, 2, 3, 4, 5] }"#).unwrap_err(); - } - - #[test] - fn whole_list_quoted_err() { - serde_json::from_str::(r#"{ "values": "[1, 2, 3, 4]" }"#).unwrap_err(); - } -} diff --git a/consensus/ssz_types/src/tree_hash.rs b/consensus/ssz_types/src/tree_hash.rs deleted file mode 100644 index e08c1d62f..000000000 --- a/consensus/ssz_types/src/tree_hash.rs +++ /dev/null @@ -1,58 +0,0 @@ -use tree_hash::{Hash256, MerkleHasher, TreeHash, TreeHashType, BYTES_PER_CHUNK}; -use typenum::Unsigned; - -/// A helper function providing common functionality between the `TreeHash` implementations for -/// `FixedVector` and `VariableList`. -pub fn vec_tree_hash_root(vec: &[T]) -> Hash256 -where - T: TreeHash, - N: Unsigned, -{ - match T::tree_hash_type() { - TreeHashType::Basic => { - let mut hasher = MerkleHasher::with_leaves( - (N::to_usize() + T::tree_hash_packing_factor() - 1) / T::tree_hash_packing_factor(), - ); - - for item in vec { - hasher - .write(&item.tree_hash_packed_encoding()) - .expect("ssz_types variable vec should not contain more elements than max"); - } - - hasher - .finish() - .expect("ssz_types variable vec should not have a remaining buffer") - } - TreeHashType::Container | TreeHashType::List | TreeHashType::Vector => { - let mut hasher = MerkleHasher::with_leaves(N::to_usize()); - - for item in vec { - hasher - .write(item.tree_hash_root().as_bytes()) - .expect("ssz_types vec should not contain more elements than max"); - } - - hasher - .finish() - .expect("ssz_types vec should not have a remaining buffer") - } - } -} - -/// A helper function providing common functionality for finding the Merkle root of some bytes that -/// represent a bitfield. 
-pub fn bitfield_bytes_tree_hash_root(bytes: &[u8]) -> Hash256 { - let byte_size = (N::to_usize() + 7) / 8; - let leaf_count = (byte_size + BYTES_PER_CHUNK - 1) / BYTES_PER_CHUNK; - - let mut hasher = MerkleHasher::with_leaves(leaf_count); - - hasher - .write(bytes) - .expect("bitfield should not exceed tree hash leaf limit"); - - hasher - .finish() - .expect("bitfield tree hash buffer should not exceed leaf limit") -} diff --git a/consensus/ssz_types/src/variable_list.rs b/consensus/ssz_types/src/variable_list.rs deleted file mode 100644 index 3361f7509..000000000 --- a/consensus/ssz_types/src/variable_list.rs +++ /dev/null @@ -1,477 +0,0 @@ -use crate::tree_hash::vec_tree_hash_root; -use crate::Error; -use derivative::Derivative; -use serde_derive::{Deserialize, Serialize}; -use std::marker::PhantomData; -use std::ops::{Deref, DerefMut, Index, IndexMut}; -use std::slice::SliceIndex; -use tree_hash::Hash256; -use typenum::Unsigned; - -pub use typenum; - -/// Emulates a SSZ `List`. -/// -/// An ordered, heap-allocated, variable-length, homogeneous collection of `T`, with no more than -/// `N` values. -/// -/// This struct is backed by a Rust `Vec` but constrained such that it must be instantiated with a -/// fixed number of elements and you may not add or remove elements, only modify. -/// -/// The length of this struct is fixed at the type-level using -/// [typenum](https://crates.io/crates/typenum). -/// -/// ## Example -/// -/// ``` -/// use ssz_types::{VariableList, typenum}; -/// -/// let base: Vec = vec![1, 2, 3, 4]; -/// -/// // Create a `VariableList` from a `Vec` that has the expected length. -/// let exact: VariableList<_, typenum::U4> = VariableList::from(base.clone()); -/// assert_eq!(&exact[..], &[1, 2, 3, 4]); -/// -/// // Create a `VariableList` from a `Vec` that is too long and the `Vec` is truncated. -/// let short: VariableList<_, typenum::U3> = VariableList::from(base.clone()); -/// assert_eq!(&short[..], &[1, 2, 3]); -/// -/// // Create a `VariableList` from a `Vec` that is shorter than the maximum. -/// let mut long: VariableList<_, typenum::U5> = VariableList::from(base); -/// assert_eq!(&long[..], &[1, 2, 3, 4]); -/// -/// // Push a value to if it does not exceed the maximum -/// long.push(5).unwrap(); -/// assert_eq!(&long[..], &[1, 2, 3, 4, 5]); -/// -/// // Push a value to if it _does_ exceed the maximum. -/// assert!(long.push(6).is_err()); -/// ``` -#[derive(Debug, Clone, Serialize, Deserialize, Derivative)] -#[derivative(PartialEq, Eq, Hash(bound = "T: std::hash::Hash"))] -#[serde(transparent)] -pub struct VariableList { - vec: Vec, - _phantom: PhantomData, -} - -impl VariableList { - /// Returns `Some` if the given `vec` equals the fixed length of `Self`. Otherwise returns - /// `None`. - pub fn new(vec: Vec) -> Result { - if vec.len() <= N::to_usize() { - Ok(Self { - vec, - _phantom: PhantomData, - }) - } else { - Err(Error::OutOfBounds { - i: vec.len(), - len: Self::max_len(), - }) - } - } - - /// Create an empty list. - pub fn empty() -> Self { - Self { - vec: vec![], - _phantom: PhantomData, - } - } - - /// Returns the number of values presently in `self`. - pub fn len(&self) -> usize { - self.vec.len() - } - - /// True if `self` does not contain any values. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns the type-level maximum length. - pub fn max_len() -> usize { - N::to_usize() - } - - /// Appends `value` to the back of `self`. - /// - /// Returns `Err(())` when appending `value` would exceed the maximum length. 
- pub fn push(&mut self, value: T) -> Result<(), Error> { - if self.vec.len() < Self::max_len() { - self.vec.push(value); - Ok(()) - } else { - Err(Error::OutOfBounds { - i: self.vec.len() + 1, - len: Self::max_len(), - }) - } - } -} - -impl From> for VariableList { - fn from(mut vec: Vec) -> Self { - vec.truncate(N::to_usize()); - - Self { - vec, - _phantom: PhantomData, - } - } -} - -impl From> for Vec { - fn from(list: VariableList) -> Vec { - list.vec - } -} - -impl Default for VariableList { - fn default() -> Self { - Self { - vec: Vec::default(), - _phantom: PhantomData, - } - } -} - -impl> Index for VariableList { - type Output = I::Output; - - #[inline] - fn index(&self, index: I) -> &Self::Output { - Index::index(&self.vec, index) - } -} - -impl> IndexMut for VariableList { - #[inline] - fn index_mut(&mut self, index: I) -> &mut Self::Output { - IndexMut::index_mut(&mut self.vec, index) - } -} - -impl Deref for VariableList { - type Target = [T]; - - fn deref(&self) -> &[T] { - &self.vec[..] - } -} - -impl DerefMut for VariableList { - fn deref_mut(&mut self) -> &mut [T] { - &mut self.vec[..] - } -} - -impl<'a, T, N: Unsigned> IntoIterator for &'a VariableList { - type Item = &'a T; - type IntoIter = std::slice::Iter<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl IntoIterator for VariableList { - type Item = T; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.vec.into_iter() - } -} - -impl tree_hash::TreeHash for VariableList -where - T: tree_hash::TreeHash, -{ - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::List - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("List should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("List should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - let root = vec_tree_hash_root::(&self.vec); - - tree_hash::mix_in_length(&root, self.len()) - } -} - -impl ssz::Encode for VariableList -where - T: ssz::Encode, -{ - fn is_ssz_fixed_len() -> bool { - >::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - >::ssz_fixed_len() - } - - fn ssz_bytes_len(&self) -> usize { - self.vec.ssz_bytes_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.vec.ssz_append(buf) - } -} - -impl ssz::Decode for VariableList -where - T: ssz::Decode, - N: Unsigned, -{ - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let max_len = N::to_usize(); - - if bytes.is_empty() { - Ok(vec![].into()) - } else if T::is_ssz_fixed_len() { - let num_items = bytes - .len() - .checked_div(T::ssz_fixed_len()) - .ok_or(ssz::DecodeError::ZeroLengthItem)?; - - if num_items > max_len { - return Err(ssz::DecodeError::BytesInvalid(format!( - "VariableList of {} items exceeds maximum of {}", - num_items, max_len - ))); - } - - bytes - .chunks(T::ssz_fixed_len()) - .try_fold(Vec::with_capacity(num_items), |mut vec, chunk| { - vec.push(T::from_ssz_bytes(chunk)?); - Ok(vec) - }) - .map(Into::into) - } else { - ssz::decode_list_of_variable_length_items(bytes, Some(max_len)) - .map(|vec: Vec<_>| vec.into()) - } - } -} - -#[cfg(feature = "arbitrary")] -impl<'a, T: arbitrary::Arbitrary<'a>, N: 'static + Unsigned> arbitrary::Arbitrary<'a> - for VariableList -{ - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let max_size = N::to_usize(); - let rand = usize::arbitrary(u)?; - let size = std::cmp::min(rand, max_size); - let mut vec: 
Vec = Vec::with_capacity(size); - for _ in 0..size { - vec.push(::arbitrary(u)?); - } - Self::new(vec).map_err(|_| arbitrary::Error::IncorrectFormat) - } -} - -#[cfg(test)] -mod test { - use super::*; - use ssz::*; - use tree_hash::{merkle_root, TreeHash}; - use tree_hash_derive::TreeHash; - use typenum::*; - - #[test] - fn new() { - let vec = vec![42; 5]; - let fixed: Result, _> = VariableList::new(vec); - assert!(fixed.is_err()); - - let vec = vec![42; 3]; - let fixed: Result, _> = VariableList::new(vec); - assert!(fixed.is_ok()); - - let vec = vec![42; 4]; - let fixed: Result, _> = VariableList::new(vec); - assert!(fixed.is_ok()); - } - - #[test] - fn indexing() { - let vec = vec![1, 2]; - - let mut fixed: VariableList = vec.clone().into(); - - assert_eq!(fixed[0], 1); - assert_eq!(&fixed[0..1], &vec[0..1]); - assert_eq!((fixed[..]).len(), 2); - - fixed[1] = 3; - assert_eq!(fixed[1], 3); - } - - #[test] - fn length() { - let vec = vec![42; 5]; - let fixed: VariableList = VariableList::from(vec.clone()); - assert_eq!(&fixed[..], &vec[0..4]); - - let vec = vec![42; 3]; - let fixed: VariableList = VariableList::from(vec.clone()); - assert_eq!(&fixed[0..3], &vec[..]); - assert_eq!(&fixed[..], &vec![42, 42, 42][..]); - - let vec = vec![]; - let fixed: VariableList = VariableList::from(vec); - assert_eq!(&fixed[..], &[] as &[u64]); - } - - #[test] - fn deref() { - let vec = vec![0, 2, 4, 6]; - let fixed: VariableList = VariableList::from(vec); - - assert_eq!(fixed.first(), Some(&0)); - assert_eq!(fixed.get(3), Some(&6)); - assert_eq!(fixed.get(4), None); - } - - #[test] - fn encode() { - let vec: VariableList = vec![0; 2].into(); - assert_eq!(vec.as_ssz_bytes(), vec![0, 0, 0, 0]); - assert_eq!( as Encode>::ssz_fixed_len(), 4); - } - - fn round_trip(item: T) { - let encoded = &item.as_ssz_bytes(); - assert_eq!(item.ssz_bytes_len(), encoded.len()); - assert_eq!(T::from_ssz_bytes(encoded), Ok(item)); - } - - #[test] - fn u16_len_8() { - round_trip::>(vec![42; 8].into()); - round_trip::>(vec![0; 8].into()); - } - - fn root_with_length(bytes: &[u8], len: usize) -> Hash256 { - let root = merkle_root(bytes, 0); - tree_hash::mix_in_length(&root, len) - } - - #[test] - fn tree_hash_u8() { - let fixed: VariableList = VariableList::from(vec![]); - assert_eq!(fixed.tree_hash_root(), root_with_length(&[0; 8], 0)); - - for i in 0..=1 { - let fixed: VariableList = VariableList::from(vec![0; i]); - assert_eq!(fixed.tree_hash_root(), root_with_length(&vec![0; i], i)); - } - - for i in 0..=8 { - let fixed: VariableList = VariableList::from(vec![0; i]); - assert_eq!(fixed.tree_hash_root(), root_with_length(&vec![0; i], i)); - } - - for i in 0..=13 { - let fixed: VariableList = VariableList::from(vec![0; i]); - assert_eq!(fixed.tree_hash_root(), root_with_length(&vec![0; i], i)); - } - - for i in 0..=16 { - let fixed: VariableList = VariableList::from(vec![0; i]); - assert_eq!(fixed.tree_hash_root(), root_with_length(&vec![0; i], i)); - } - - let source: Vec = (0..16).collect(); - let fixed: VariableList = VariableList::from(source.clone()); - assert_eq!(fixed.tree_hash_root(), root_with_length(&source, 16)); - } - - #[derive(Clone, Copy, TreeHash, Default)] - struct A { - a: u32, - b: u32, - } - - fn repeat(input: &[u8], n: usize) -> Vec { - let mut output = vec![]; - - for _ in 0..n { - output.append(&mut input.to_vec()); - } - - output - } - - fn padded_root_with_length(bytes: &[u8], len: usize, min_nodes: usize) -> Hash256 { - let root = merkle_root(bytes, min_nodes); - tree_hash::mix_in_length(&root, len) - 
} - - #[test] - fn tree_hash_composite() { - let a = A { a: 0, b: 1 }; - - let fixed: VariableList = VariableList::from(vec![]); - assert_eq!( - fixed.tree_hash_root(), - padded_root_with_length(&[0; 32], 0, 0), - ); - - for i in 0..=1 { - let fixed: VariableList = VariableList::from(vec![a; i]); - assert_eq!( - fixed.tree_hash_root(), - padded_root_with_length(&repeat(a.tree_hash_root().as_bytes(), i), i, 1), - "U1 {}", - i - ); - } - - for i in 0..=8 { - let fixed: VariableList = VariableList::from(vec![a; i]); - assert_eq!( - fixed.tree_hash_root(), - padded_root_with_length(&repeat(a.tree_hash_root().as_bytes(), i), i, 8), - "U8 {}", - i - ); - } - - for i in 0..=13 { - let fixed: VariableList = VariableList::from(vec![a; i]); - assert_eq!( - fixed.tree_hash_root(), - padded_root_with_length(&repeat(a.tree_hash_root().as_bytes(), i), i, 13), - "U13 {}", - i - ); - } - - for i in 0..=16 { - let fixed: VariableList = VariableList::from(vec![a; i]); - assert_eq!( - fixed.tree_hash_root(), - padded_root_with_length(&repeat(a.tree_hash_root().as_bytes(), i), i, 16), - "U16 {}", - i - ); - } - } -} diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index ccb41830b..c16742782 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -13,15 +13,15 @@ tokio = { version = "1.14.0", features = ["rt-multi-thread"] } bls = { path = "../../crypto/bls" } integer-sqrt = "0.1.5" itertools = "0.10.0" -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" -eth2_ssz_types = "0.2.2" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" +ssz_types = "0.5.0" merkle_proof = { path = "../merkle_proof" } safe_arith = { path = "../safe_arith" } -tree_hash = "0.4.1" +tree_hash = "0.5.0" types = { path = "../types", default-features = false } rayon = "1.4.1" -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" int_to_bytes = { path = "../int_to_bytes" } smallvec = "1.6.1" arbitrary = { version = "1.0", features = ["derive"], optional = true } @@ -39,7 +39,7 @@ arbitrary-fuzz = [ "types/arbitrary-fuzz", "bls/arbitrary", "merkle_proof/arbitrary", - "eth2_ssz/arbitrary", - "eth2_ssz_types/arbitrary", + "ethereum_ssz/arbitrary", + "ssz_types/arbitrary", "tree_hash/arbitrary", ] diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index cc7bd17c5..ed5e64294 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -29,7 +29,7 @@ pub struct BlockReplayer< > { state: BeaconState, spec: &'a ChainSpec, - state_root_strategy: StateRootStrategy, + state_processing_strategy: StateProcessingStrategy, block_sig_strategy: BlockSignatureStrategy, verify_block_root: Option, pre_block_hook: Option>, @@ -60,13 +60,13 @@ impl From for BlockReplayError { } } -/// Defines how state roots should be computed during block replay. -#[derive(PartialEq)] -pub enum StateRootStrategy { +/// Defines how state roots should be computed and whether to perform all state transitions during block replay. +#[derive(PartialEq, Clone, Copy)] +pub enum StateProcessingStrategy { /// Perform all transitions faithfully to the specification. Accurate, - /// Don't compute state roots, eventually computing an invalid beacon state that can only be - /// used for obtaining shuffling. + /// Don't compute state roots and process withdrawals, eventually computing an invalid beacon + /// state that can only be used for obtaining shuffling. 
Inconsistent, } @@ -87,7 +87,7 @@ where Self { state, spec, - state_root_strategy: StateRootStrategy::Accurate, + state_processing_strategy: StateProcessingStrategy::Accurate, block_sig_strategy: BlockSignatureStrategy::VerifyBulk, verify_block_root: Some(VerifyBlockRoot::True), pre_block_hook: None, @@ -100,12 +100,15 @@ where } } - /// Set the replayer's state root strategy different from the default. - pub fn state_root_strategy(mut self, state_root_strategy: StateRootStrategy) -> Self { - if state_root_strategy == StateRootStrategy::Inconsistent { + /// Set the replayer's state processing strategy different from the default. + pub fn state_processing_strategy( + mut self, + state_processing_strategy: StateProcessingStrategy, + ) -> Self { + if state_processing_strategy == StateProcessingStrategy::Inconsistent { self.verify_block_root = None; } - self.state_root_strategy = state_root_strategy; + self.state_processing_strategy = state_processing_strategy; self } @@ -182,7 +185,7 @@ where i: usize, ) -> Result, Error> { // If we don't care about state roots then return immediately. - if self.state_root_strategy == StateRootStrategy::Inconsistent { + if self.state_processing_strategy == StateProcessingStrategy::Inconsistent { return Ok(Some(Hash256::zero())); } @@ -249,7 +252,7 @@ where // If no explicit policy is set, verify only the first 1 or 2 block roots if using // accurate state roots. Inaccurate state roots require block root verification to // be off. - if i <= 1 && self.state_root_strategy == StateRootStrategy::Accurate { + if i <= 1 && self.state_processing_strategy == StateProcessingStrategy::Accurate { VerifyBlockRoot::True } else { VerifyBlockRoot::False @@ -263,6 +266,7 @@ where &mut self.state, block, self.block_sig_strategy, + self.state_processing_strategy, verify_block_root, &mut ctxt, self.spec, diff --git a/consensus/state_processing/src/common/deposit_data_tree.rs b/consensus/state_processing/src/common/deposit_data_tree.rs index aaad96fbd..2e86556b0 100644 --- a/consensus/state_processing/src/common/deposit_data_tree.rs +++ b/consensus/state_processing/src/common/deposit_data_tree.rs @@ -1,4 +1,4 @@ -use eth2_hashing::hash; +use ethereum_hashing::hash; use int_to_bytes::int_to_bytes32; use merkle_proof::{MerkleTree, MerkleTreeError}; use safe_arith::SafeArith; diff --git a/consensus/state_processing/src/lib.rs b/consensus/state_processing/src/lib.rs index 9641e8f96..7340206a3 100644 --- a/consensus/state_processing/src/lib.rs +++ b/consensus/state_processing/src/lib.rs @@ -27,7 +27,7 @@ pub mod state_advance; pub mod upgrade; pub mod verify_operation; -pub use block_replayer::{BlockReplayError, BlockReplayer, StateRootStrategy}; +pub use block_replayer::{BlockReplayError, BlockReplayer, StateProcessingStrategy}; pub use consensus_context::{ConsensusContext, ContextError}; pub use genesis::{ eth2_genesis_time, initialize_beacon_state_from_eth1, is_valid_genesis_state, diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index d81665dbc..53bfbe306 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -41,6 +41,7 @@ mod verify_exit; mod verify_proposer_slashing; use crate::common::decrease_balance; +use crate::StateProcessingStrategy; #[cfg(feature = "arbitrary-fuzz")] use arbitrary::Arbitrary; @@ -98,6 +99,7 @@ pub fn per_block_processing>( state: &mut BeaconState, signed_block: &SignedBeaconBlock, 
block_signature_strategy: BlockSignatureStrategy, + state_processing_strategy: StateProcessingStrategy, verify_block_root: VerifyBlockRoot, ctxt: &mut ConsensusContext, spec: &ChainSpec, @@ -162,7 +164,9 @@ pub fn per_block_processing>( // previous block. if is_execution_enabled(state, block.body()) { let payload = block.body().execution_payload()?; - process_withdrawals::(state, payload, spec)?; + if state_processing_strategy == StateProcessingStrategy::Accurate { + process_withdrawals::(state, payload, spec)?; + } process_execution_payload::(state, payload, spec)?; } diff --git a/consensus/state_processing/src/per_block_processing/deneb/deneb.rs b/consensus/state_processing/src/per_block_processing/deneb/deneb.rs index 8ab213a4d..aacb6b83f 100644 --- a/consensus/state_processing/src/per_block_processing/deneb/deneb.rs +++ b/consensus/state_processing/src/per_block_processing/deneb/deneb.rs @@ -1,5 +1,5 @@ use crate::{BlockProcessingError, ConsensusContext}; -use eth2_hashing::hash_fixed; +use ethereum_hashing::hash_fixed; use itertools::{EitherOrBoth, Itertools}; use safe_arith::SafeArith; use ssz::Decode; diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 63f71915f..81641b590 100644 --- a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -1,11 +1,11 @@ #![cfg(all(test, not(feature = "fake_crypto")))] -use crate::per_block_processing; use crate::per_block_processing::errors::{ AttestationInvalid, AttesterSlashingInvalid, BlockOperationError, BlockProcessingError, DepositInvalid, HeaderInvalid, IndexedAttestationInvalid, IntoWithIndex, ProposerSlashingInvalid, }; +use crate::{per_block_processing, StateProcessingStrategy}; use crate::{ per_block_processing::{process_operations, verify_exit::verify_exit}, BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, VerifySignatures, @@ -72,6 +72,7 @@ async fn valid_block_ok() { &mut state, &block, BlockSignatureStrategy::VerifyIndividual, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &spec, @@ -97,6 +98,7 @@ async fn invalid_block_header_state_slot() { &mut state, &SignedBeaconBlock::from_block(block, signature), BlockSignatureStrategy::VerifyIndividual, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &spec, @@ -129,6 +131,7 @@ async fn invalid_parent_block_root() { &mut state, &SignedBeaconBlock::from_block(block, signature), BlockSignatureStrategy::VerifyIndividual, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &spec, @@ -162,6 +165,7 @@ async fn invalid_block_signature() { &mut state, &SignedBeaconBlock::from_block(block, Signature::empty()), BlockSignatureStrategy::VerifyIndividual, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &spec, @@ -195,6 +199,7 @@ async fn invalid_randao_reveal_signature() { &mut state, &signed_block, BlockSignatureStrategy::VerifyIndividual, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &spec, diff --git a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs index bb2679925..731a82aa9 100644 --- a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs +++ b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs @@ -1,7 +1,7 @@ use 
super::errors::{BlockOperationError, BlsExecutionChangeInvalid as Invalid}; use crate::per_block_processing::signature_sets::bls_execution_change_signature_set; use crate::VerifySignatures; -use eth2_hashing::hash; +use ethereum_hashing::hash; use types::*; type Result = std::result::Result>; diff --git a/consensus/swap_or_not_shuffle/Cargo.toml b/consensus/swap_or_not_shuffle/Cargo.toml index 9a7d58b77..303e5cfba 100644 --- a/consensus/swap_or_not_shuffle/Cargo.toml +++ b/consensus/swap_or_not_shuffle/Cargo.toml @@ -12,7 +12,7 @@ harness = false criterion = "0.3.3" [dependencies] -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" ethereum-types = "0.14.1" [features] diff --git a/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs b/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs index f43edfe86..e71f3ca18 100644 --- a/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs +++ b/consensus/swap_or_not_shuffle/src/compute_shuffled_index.rs @@ -1,5 +1,5 @@ use crate::Hash256; -use eth2_hashing::{Context, Sha256Context}; +use ethereum_hashing::{Context, Sha256Context}; use std::cmp::max; /// Return `p(index)` in a pseudorandom permutation `p` of `0...list_size-1` with ``seed`` as entropy. diff --git a/consensus/swap_or_not_shuffle/src/shuffle_list.rs b/consensus/swap_or_not_shuffle/src/shuffle_list.rs index edc6dd637..2b9a25655 100644 --- a/consensus/swap_or_not_shuffle/src/shuffle_list.rs +++ b/consensus/swap_or_not_shuffle/src/shuffle_list.rs @@ -1,5 +1,5 @@ use crate::Hash256; -use eth2_hashing::hash_fixed; +use ethereum_hashing::hash_fixed; use std::mem; const SEED_SIZE: usize = 32; diff --git a/consensus/tree_hash/Cargo.toml b/consensus/tree_hash/Cargo.toml deleted file mode 100644 index b2630d4bf..000000000 --- a/consensus/tree_hash/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "tree_hash" -version = "0.4.1" -authors = ["Paul Hauner "] -edition = "2021" -license = "Apache-2.0" -description = "Efficient Merkle-hashing as used in Ethereum 2.0" - -[dev-dependencies] -rand = "0.8.5" -tree_hash_derive = "0.4.0" -types = { path = "../types" } -beacon_chain = { path = "../../beacon_node/beacon_chain" } -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" - -[dependencies] -ethereum-types = "0.14.1" -eth2_hashing = "0.3.0" -smallvec = "1.6.1" - -[features] -arbitrary = ["ethereum-types/arbitrary"] diff --git a/consensus/tree_hash/examples/flamegraph_beacon_state.rs b/consensus/tree_hash/examples/flamegraph_beacon_state.rs deleted file mode 100644 index e5b505bb9..000000000 --- a/consensus/tree_hash/examples/flamegraph_beacon_state.rs +++ /dev/null @@ -1,50 +0,0 @@ -use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; -use types::{BeaconState, EthSpec, MainnetEthSpec}; - -const TREE_HASH_LOOPS: usize = 1_000; -const VALIDATOR_COUNT: usize = 1_000; - -fn get_harness() -> BeaconChainHarness> { - let harness = BeaconChainHarness::builder(T::default()) - .default_spec() - .deterministic_keypairs(VALIDATOR_COUNT) - .fresh_ephemeral_store() - .build(); - - harness.advance_slot(); - - harness -} - -fn build_state() -> BeaconState { - let state = get_harness::().chain.head_beacon_state_cloned(); - - assert_eq!(state.as_base().unwrap().validators.len(), VALIDATOR_COUNT); - assert_eq!(state.as_base().unwrap().balances.len(), VALIDATOR_COUNT); - assert!(state - .as_base() - .unwrap() - .previous_epoch_attestations - .is_empty()); - assert!(state - .as_base() - .unwrap() - .current_epoch_attestations - .is_empty()); - 
assert!(state.as_base().unwrap().eth1_data_votes.is_empty()); - assert!(state.as_base().unwrap().historical_roots.is_empty()); - - state -} - -fn main() { - let state = build_state::(); - - // This vec is an attempt to ensure the compiler doesn't optimize-out the hashing. - let mut vec = Vec::with_capacity(TREE_HASH_LOOPS); - - for _ in 0..TREE_HASH_LOOPS { - let root = state.canonical_root(); - vec.push(root[0]); - } -} diff --git a/consensus/tree_hash/src/impls.rs b/consensus/tree_hash/src/impls.rs deleted file mode 100644 index 134be4021..000000000 --- a/consensus/tree_hash/src/impls.rs +++ /dev/null @@ -1,241 +0,0 @@ -use super::*; -use ethereum_types::{H160, H256, U128, U256}; -use std::sync::Arc; - -fn int_to_hash256(int: u64) -> Hash256 { - let mut bytes = [0; HASHSIZE]; - bytes[0..8].copy_from_slice(&int.to_le_bytes()); - Hash256::from_slice(&bytes) -} - -macro_rules! impl_for_bitsize { - ($type: ident, $bit_size: expr) => { - impl TreeHash for $type { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Basic - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - PackedEncoding::from_slice(&self.to_le_bytes()) - } - - fn tree_hash_packing_factor() -> usize { - HASHSIZE / ($bit_size / 8) - } - - #[allow(clippy::cast_lossless)] // Lint does not apply to all uses of this macro. - fn tree_hash_root(&self) -> Hash256 { - int_to_hash256(*self as u64) - } - } - }; -} - -impl_for_bitsize!(u8, 8); -impl_for_bitsize!(u16, 16); -impl_for_bitsize!(u32, 32); -impl_for_bitsize!(u64, 64); -impl_for_bitsize!(usize, 64); - -impl TreeHash for bool { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Basic - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - (*self as u8).tree_hash_packed_encoding() - } - - fn tree_hash_packing_factor() -> usize { - u8::tree_hash_packing_factor() - } - - fn tree_hash_root(&self) -> Hash256 { - int_to_hash256(*self as u64) - } -} - -/// Only valid for byte types less than 32 bytes. -macro_rules! 
impl_for_lt_32byte_u8_array { - ($len: expr) => { - impl TreeHash for [u8; $len] { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - unreachable!("bytesN should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("bytesN should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - let mut result = [0; 32]; - result[0..$len].copy_from_slice(&self[..]); - Hash256::from_slice(&result) - } - } - }; -} - -impl_for_lt_32byte_u8_array!(4); -impl_for_lt_32byte_u8_array!(32); - -impl TreeHash for [u8; 48] { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - let values_per_chunk = BYTES_PER_CHUNK; - let minimum_chunk_count = (48 + values_per_chunk - 1) / values_per_chunk; - merkle_root(self, minimum_chunk_count) - } -} - -impl TreeHash for U128 { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Basic - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - let mut result = [0; 16]; - self.to_little_endian(&mut result); - PackedEncoding::from_slice(&result) - } - - fn tree_hash_packing_factor() -> usize { - 2 - } - - fn tree_hash_root(&self) -> Hash256 { - let mut result = [0; HASHSIZE]; - self.to_little_endian(&mut result[0..16]); - Hash256::from_slice(&result) - } -} - -impl TreeHash for U256 { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Basic - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - let mut result = [0; 32]; - self.to_little_endian(&mut result); - PackedEncoding::from_slice(&result) - } - - fn tree_hash_packing_factor() -> usize { - 1 - } - - fn tree_hash_root(&self) -> Hash256 { - let mut result = [0; 32]; - self.to_little_endian(&mut result[..]); - Hash256::from_slice(&result) - } -} - -impl TreeHash for H160 { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - let mut result = [0; 32]; - result[0..20].copy_from_slice(self.as_bytes()); - PackedEncoding::from_slice(&result) - } - - fn tree_hash_packing_factor() -> usize { - 1 - } - - fn tree_hash_root(&self) -> Hash256 { - let mut result = [0; 32]; - result[0..20].copy_from_slice(self.as_bytes()); - Hash256::from_slice(&result) - } -} - -impl TreeHash for H256 { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - PackedEncoding::from_slice(self.as_bytes()) - } - - fn tree_hash_packing_factor() -> usize { - 1 - } - - fn tree_hash_root(&self) -> Hash256 { - *self - } -} - -impl TreeHash for Arc { - fn tree_hash_type() -> TreeHashType { - T::tree_hash_type() - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - self.as_ref().tree_hash_packed_encoding() - } - - fn tree_hash_packing_factor() -> usize { - T::tree_hash_packing_factor() - } - - fn tree_hash_root(&self) -> Hash256 { - self.as_ref().tree_hash_root() - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn bool() { - let mut true_bytes: Vec = vec![1]; - true_bytes.append(&mut vec![0; 31]); - - let false_bytes: Vec = vec![0; 32]; - - assert_eq!(true.tree_hash_root().as_bytes(), true_bytes.as_slice()); - assert_eq!(false.tree_hash_root().as_bytes(), false_bytes.as_slice()); - } - 
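// Illustrative sketch (names here are assumed, not part of the tree_hash crate or of
// this diff): the basic-type impls above pack an integer as its little-endian bytes
// padded out to a single 32-byte chunk, which is all the deleted `int_to_hash256`
// helper does. A minimal standalone version of that packing, runnable on its own:
fn u64_tree_hash_root(value: u64) -> [u8; 32] {
    let mut chunk = [0u8; 32];
    // Little-endian bytes of the integer occupy the first 8 bytes; the rest stay zero.
    chunk[0..8].copy_from_slice(&value.to_le_bytes());
    chunk
}

fn main() {
    // Mirrors the expectations of the surrounding tests: 1u64 becomes a chunk whose
    // first byte is 1, zero becomes the all-zero chunk, and u64::MAX fills the first
    // eight bytes with 0xff.
    assert_eq!(u64_tree_hash_root(1)[0], 1);
    assert_eq!(&u64_tree_hash_root(0), &[0u8; 32]);
    assert_eq!(&u64_tree_hash_root(u64::MAX)[0..8], &[255u8; 8]);
}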
- #[test] - fn int_to_bytes() { - assert_eq!(int_to_hash256(0).as_bytes(), &[0; 32]); - assert_eq!( - int_to_hash256(1).as_bytes(), - &[ - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0 - ] - ); - assert_eq!( - int_to_hash256(u64::max_value()).as_bytes(), - &[ - 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 - ] - ); - } -} diff --git a/consensus/tree_hash/src/lib.rs b/consensus/tree_hash/src/lib.rs deleted file mode 100644 index d9ab4bd70..000000000 --- a/consensus/tree_hash/src/lib.rs +++ /dev/null @@ -1,208 +0,0 @@ -pub mod impls; -mod merkle_hasher; -mod merkleize_padded; -mod merkleize_standard; - -pub use merkle_hasher::{Error, MerkleHasher}; -pub use merkleize_padded::merkleize_padded; -pub use merkleize_standard::merkleize_standard; - -use eth2_hashing::{hash_fixed, ZERO_HASHES, ZERO_HASHES_MAX_INDEX}; -use smallvec::SmallVec; - -pub const BYTES_PER_CHUNK: usize = 32; -pub const HASHSIZE: usize = 32; -pub const MERKLE_HASH_CHUNK: usize = 2 * BYTES_PER_CHUNK; -pub const MAX_UNION_SELECTOR: u8 = 127; -pub const SMALLVEC_SIZE: usize = 32; - -pub type Hash256 = ethereum_types::H256; -pub type PackedEncoding = SmallVec<[u8; SMALLVEC_SIZE]>; - -/// Convenience method for `MerkleHasher` which also provides some fast-paths for small trees. -/// -/// `minimum_leaf_count` will only be used if it is greater than or equal to the minimum number of leaves that can be created from `bytes`. -pub fn merkle_root(bytes: &[u8], minimum_leaf_count: usize) -> Hash256 { - let leaves = std::cmp::max( - (bytes.len() + (HASHSIZE - 1)) / HASHSIZE, - minimum_leaf_count, - ); - - if leaves == 0 { - // If there are no bytes then the hash is always zero. - Hash256::zero() - } else if leaves == 1 { - // If there is only one leaf, the hash is always those leaf bytes padded out to 32-bytes. - let mut hash = [0; HASHSIZE]; - hash[0..bytes.len()].copy_from_slice(bytes); - Hash256::from_slice(&hash) - } else if leaves == 2 { - // If there are only two leaves (this is common with BLS pubkeys), we can avoid some - // overhead with `MerkleHasher` and just do a simple 3-node tree here. - let mut leaves = [0; HASHSIZE * 2]; - leaves[0..bytes.len()].copy_from_slice(bytes); - - Hash256::from_slice(&hash_fixed(&leaves)) - } else { - // If there are 3 or more leaves, use `MerkleHasher`. - let mut hasher = MerkleHasher::with_leaves(leaves); - hasher - .write(bytes) - .expect("the number of leaves is adequate for the number of bytes"); - hasher - .finish() - .expect("the number of leaves is adequate for the number of bytes") - } -} - -/// Returns the node created by hashing `root` and `length`. -/// -/// Used in `TreeHash` for inserting the length of a list above it's root. -pub fn mix_in_length(root: &Hash256, length: usize) -> Hash256 { - let usize_len = std::mem::size_of::(); - - let mut length_bytes = [0; BYTES_PER_CHUNK]; - length_bytes[0..usize_len].copy_from_slice(&length.to_le_bytes()); - - Hash256::from_slice(ð2_hashing::hash32_concat(root.as_bytes(), &length_bytes)[..]) -} - -/// Returns `Some(root)` created by hashing `root` and `selector`, if `selector <= -/// MAX_UNION_SELECTOR`. Otherwise, returns `None`. -/// -/// Used in `TreeHash` for the "union" type. -/// -/// ## Specification -/// -/// ```ignore,text -/// mix_in_selector: Given a Merkle root root and a type selector selector ("uint256" little-endian -/// serialization) return hash(root + selector). 
-/// ``` -/// -/// https://github.com/ethereum/consensus-specs/blob/v1.1.0-beta.3/ssz/simple-serialize.md#union -pub fn mix_in_selector(root: &Hash256, selector: u8) -> Option { - if selector > MAX_UNION_SELECTOR { - return None; - } - - let mut chunk = [0; BYTES_PER_CHUNK]; - chunk[0] = selector; - - let root = eth2_hashing::hash32_concat(root.as_bytes(), &chunk); - Some(Hash256::from_slice(&root)) -} - -/// Returns a cached padding node for a given height. -fn get_zero_hash(height: usize) -> &'static [u8] { - if height <= ZERO_HASHES_MAX_INDEX { - &ZERO_HASHES[height] - } else { - panic!("Tree exceeds MAX_TREE_DEPTH of {ZERO_HASHES_MAX_INDEX}") - } -} - -#[derive(Debug, PartialEq, Clone)] -pub enum TreeHashType { - Basic, - Vector, - List, - Container, -} - -pub trait TreeHash { - fn tree_hash_type() -> TreeHashType; - - fn tree_hash_packed_encoding(&self) -> PackedEncoding; - - fn tree_hash_packing_factor() -> usize; - - fn tree_hash_root(&self) -> Hash256; -} - -/// Punch through references. -impl<'a, T> TreeHash for &'a T -where - T: TreeHash, -{ - fn tree_hash_type() -> TreeHashType { - T::tree_hash_type() - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - T::tree_hash_packed_encoding(*self) - } - - fn tree_hash_packing_factor() -> usize { - T::tree_hash_packing_factor() - } - - fn tree_hash_root(&self) -> Hash256 { - T::tree_hash_root(*self) - } -} - -#[macro_export] -macro_rules! tree_hash_ssz_encoding_as_vector { - ($type: ident) => { - impl tree_hash::TreeHash for $type { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_root(&self) -> Vec { - tree_hash::merkle_root(&ssz::ssz_encode(self)) - } - } - }; -} - -#[macro_export] -macro_rules! tree_hash_ssz_encoding_as_list { - ($type: ident) => { - impl tree_hash::TreeHash for $type { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::List - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - unreachable!("List should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("List should never be packed.") - } - - fn tree_hash_root(&self) -> Vec { - ssz::ssz_encode(self).tree_hash_root() - } - } - }; -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn mix_length() { - let hash = { - let mut preimage = vec![42; BYTES_PER_CHUNK]; - preimage.append(&mut vec![42]); - preimage.append(&mut vec![0; BYTES_PER_CHUNK - 1]); - eth2_hashing::hash(&preimage) - }; - - assert_eq!( - mix_in_length(&Hash256::from_slice(&[42; BYTES_PER_CHUNK]), 42).as_bytes(), - &hash[..] - ); - } -} diff --git a/consensus/tree_hash/src/merkle_hasher.rs b/consensus/tree_hash/src/merkle_hasher.rs deleted file mode 100644 index 2acaf1c3b..000000000 --- a/consensus/tree_hash/src/merkle_hasher.rs +++ /dev/null @@ -1,573 +0,0 @@ -use crate::{get_zero_hash, Hash256, HASHSIZE}; -use eth2_hashing::{Context, Sha256Context, HASH_LEN}; -use smallvec::{smallvec, SmallVec}; -use std::mem; - -type SmallVec8 = SmallVec<[T; 8]>; - -#[derive(Clone, Debug, PartialEq)] -pub enum Error { - /// The maximum number of leaves defined by the initialization `depth` has been exceed. - MaximumLeavesExceeded { max_leaves: usize }, -} - -/// Helper struct to store either a hash digest or a slice. 
-/// -/// Should be used as a left or right value for some node. -enum Preimage<'a> { - Digest([u8; HASH_LEN]), - Slice(&'a [u8]), -} - -impl<'a> Preimage<'a> { - /// Returns a 32-byte slice. - fn as_bytes(&self) -> &[u8] { - match self { - Preimage::Digest(digest) => digest.as_ref(), - Preimage::Slice(slice) => slice, - } - } -} - -/// A node that has had a left child supplied, but not a right child. -struct HalfNode { - /// The hasher context. - context: Context, - /// The tree id of the node. The root node has in id of `1` and ids increase moving down the - /// tree from left to right. - id: usize, -} - -impl HalfNode { - /// Create a new half-node from the given `left` value. - fn new(id: usize, left: Preimage) -> Self { - let mut context = Context::new(); - context.update(left.as_bytes()); - - Self { context, id } - } - - /// Complete the half-node by providing a `right` value. Returns a digest of the left and right - /// nodes. - fn finish(mut self, right: Preimage) -> [u8; HASH_LEN] { - self.context.update(right.as_bytes()); - self.context.finalize() - } -} - -/// Provides a Merkle-root hasher that allows for streaming bytes (i.e., providing any-length byte -/// slices without need to separate into leaves). Efficiently handles cases where not all leaves -/// have been provided by assuming all non-provided leaves are `[0; 32]` and pre-computing the -/// zero-value hashes at all depths of the tree. -/// -/// This algorithm aims to allocate as little memory as possible and it does this by "folding" up -/// the tree as each leaf is provided. Consider this step-by-step functional diagram of hashing a -/// tree with depth three: -/// -/// ## Functional Diagram -/// -/// Nodes that are `-` have not been defined and do not occupy memory. Nodes that are `L` are -/// leaves that are provided but are not stored. Nodes that have integers (`1`, `2`) are stored in -/// our struct. Finally, nodes that are `X` were stored, but are now removed. -/// -/// ### Start -/// -/// ```ignore -/// - -/// / \ -/// - - -/// / \ / \ -/// - - - - -/// ``` -/// -/// ### Provide first leaf -/// -/// ```ignore -/// - -/// / \ -/// 2 - -/// / \ / \ -/// L - - - -/// ``` -/// -/// ### Provide second leaf -/// -/// ```ignore -/// 1 -/// / \ -/// X - -/// / \ / \ -/// L L - - -/// ``` -/// -/// ### Provide third leaf -/// -/// ```ignore -/// 1 -/// / \ -/// X 3 -/// / \ / \ -/// L L L - -/// ``` -/// -/// ### Provide fourth and final leaf -/// -/// ```ignore -/// 1 -/// / \ -/// X X -/// / \ / \ -/// L L L L -/// ``` -/// -pub struct MerkleHasher { - /// Stores the nodes that are half-complete and awaiting a right node. - /// - /// A smallvec of size 8 means we can hash a tree with 256 leaves without allocating on the - /// heap. Each half-node is 232 bytes, so this smallvec may store 1856 bytes on the stack. - half_nodes: SmallVec8, - /// The depth of the tree that will be produced. - /// - /// Depth is counted top-down (i.e., the root node is at depth 0). A tree with 1 leaf has a - /// depth of 1, a tree with 4 leaves has a depth of 3. - depth: usize, - /// The next leaf that we are expecting to process. - next_leaf: usize, - /// A buffer of bytes that are waiting to be written to a leaf. - buffer: SmallVec<[u8; 32]>, - /// Set to Some(root) when the root of the tree is known. - root: Option, -} - -/// Returns the parent of node with id `i`. -fn get_parent(i: usize) -> usize { - i / 2 -} - -/// Gets the depth of a node with an id of `i`. -/// -/// It is a logic error to provide `i == 0`. 
-/// -/// E.g., if `i` is 1, depth is 0. If `i` is is 1, depth is 1. -fn get_depth(i: usize) -> usize { - let total_bits = mem::size_of::() * 8; - total_bits - i.leading_zeros() as usize - 1 -} - -impl MerkleHasher { - /// Instantiate a hasher for a tree with a given number of leaves. - /// - /// `num_leaves` will be rounded to the next power of two. E.g., if `num_leaves == 6`, then the - /// tree will _actually_ be able to accomodate 8 leaves and the resulting hasher is exactly the - /// same as one that was instantiated with `Self::with_leaves(8)`. - /// - /// ## Notes - /// - /// If `num_leaves == 0`, a tree of depth 1 will be created. If no leaves are provided it will - /// return a root of `[0; 32]`. - pub fn with_leaves(num_leaves: usize) -> Self { - let depth = get_depth(num_leaves.next_power_of_two()) + 1; - Self::with_depth(depth) - } - - /// Instantiates a new, empty hasher for a tree with `depth` layers which will have capacity - /// for `1 << (depth - 1)` leaf nodes. - /// - /// It is not possible to grow the depth of the tree after instantiation. - /// - /// ## Panics - /// - /// Panics if `depth == 0`. - fn with_depth(depth: usize) -> Self { - assert!(depth > 0, "merkle tree cannot have a depth of zero"); - - Self { - half_nodes: SmallVec::with_capacity(depth - 1), - depth, - next_leaf: 1 << (depth - 1), - buffer: SmallVec::with_capacity(32), - root: None, - } - } - - /// Write some bytes to the hasher. - /// - /// ## Errors - /// - /// Returns an error if the given bytes would create a leaf that would exceed the maximum - /// permissible number of leaves defined by the initialization `depth`. E.g., a tree of `depth - /// == 2` can only accept 2 leaves. A tree of `depth == 14` can only accept 8,192 leaves. - pub fn write(&mut self, bytes: &[u8]) -> Result<(), Error> { - let mut ptr = 0; - while ptr <= bytes.len() { - let slice = &bytes[ptr..std::cmp::min(bytes.len(), ptr + HASHSIZE)]; - - if self.buffer.is_empty() && slice.len() == HASHSIZE { - self.process_leaf(slice)?; - ptr += HASHSIZE - } else if self.buffer.len() + slice.len() < HASHSIZE { - self.buffer.extend_from_slice(slice); - ptr += HASHSIZE - } else { - let buf_len = self.buffer.len(); - let required = HASHSIZE - buf_len; - - let mut leaf = [0; HASHSIZE]; - leaf[..buf_len].copy_from_slice(&self.buffer); - leaf[buf_len..].copy_from_slice(&slice[0..required]); - - self.process_leaf(&leaf)?; - self.buffer = smallvec![]; - - ptr += required - } - } - - Ok(()) - } - - /// Process the next leaf in the tree. - /// - /// ## Errors - /// - /// Returns an error if the given leaf would exceed the maximum permissible number of leaves - /// defined by the initialization `depth`. E.g., a tree of `depth == 2` can only accept 2 - /// leaves. A tree of `depth == 14` can only accept 8,192 leaves. - fn process_leaf(&mut self, leaf: &[u8]) -> Result<(), Error> { - assert_eq!(leaf.len(), HASHSIZE, "a leaf must be 32 bytes"); - - let max_leaves = 1 << (self.depth + 1); - - if self.next_leaf > max_leaves { - return Err(Error::MaximumLeavesExceeded { max_leaves }); - } else if self.next_leaf == 1 { - // A tree of depth one has a root that is equal to the first given leaf. - self.root = Some(Hash256::from_slice(leaf)) - } else if self.next_leaf % 2 == 0 { - self.process_left_node(self.next_leaf, Preimage::Slice(leaf)) - } else { - self.process_right_node(self.next_leaf, Preimage::Slice(leaf)) - } - - self.next_leaf += 1; - - Ok(()) - } - - /// Returns the root of the Merkle tree. 
- /// - /// If not all leaves have been provided, the tree will be efficiently completed under the - /// assumption that all not-yet-provided leaves are equal to `[0; 32]`. - /// - /// ## Errors - /// - /// Returns an error if the bytes remaining in the buffer would create a leaf that would exceed - /// the maximum permissible number of leaves defined by the initialization `depth`. - pub fn finish(mut self) -> Result { - if !self.buffer.is_empty() { - let mut leaf = [0; HASHSIZE]; - leaf[..self.buffer.len()].copy_from_slice(&self.buffer); - self.process_leaf(&leaf)? - } - - // If the tree is incomplete, we must complete it by providing zero-hashes. - loop { - if let Some(root) = self.root { - break Ok(root); - } else if let Some(node) = self.half_nodes.last() { - let right_child = node.id * 2 + 1; - self.process_right_node(right_child, self.zero_hash(right_child)); - } else if self.next_leaf == 1 { - // The next_leaf can only be 1 if the tree has a depth of one. If have been no - // leaves supplied, assume a root of zero. - break Ok(Hash256::zero()); - } else { - // The only scenario where there are (a) no half nodes and (b) a tree of depth - // two or more is where no leaves have been supplied at all. - // - // Once we supply this first zero-hash leaf then all future operations will be - // triggered via the `process_right_node` branch. - self.process_left_node(self.next_leaf, self.zero_hash(self.next_leaf)) - } - } - } - - /// Process a node that will become the left-hand node of some parent. The supplied `id` is - /// that of the node (not the parent). The `preimage` is the value of the node (i.e., if this - /// is a leaf node it will be the value of that leaf). - /// - /// In this scenario, the only option is to push a new half-node. - fn process_left_node(&mut self, id: usize, preimage: Preimage) { - self.half_nodes - .push(HalfNode::new(get_parent(id), preimage)) - } - - /// Process a node that will become the right-hand node of some parent. The supplied `id` is - /// that of the node (not the parent). The `preimage` is the value of the node (i.e., if this - /// is a leaf node it will be the value of that leaf). - /// - /// This operation will always complete one node, then it will attempt to crawl up the tree and - /// collapse all other completed nodes. For example, consider a tree of depth 3 (see diagram - /// below). When providing the node with id `7`, the node with id `3` will be completed which - /// will also provide the right-node for the `1` node. This function will complete both of - /// those nodes and ultimately find the root of the tree. - /// - /// ```ignore - /// 1 <-- completed - /// / \ - /// 2 3 <-- completed - /// / \ / \ - /// 4 5 6 7 <-- supplied right node - /// ``` - fn process_right_node(&mut self, id: usize, mut preimage: Preimage) { - let mut parent = get_parent(id); - - loop { - match self.half_nodes.last() { - Some(node) if node.id == parent => { - preimage = Preimage::Digest( - self.half_nodes - .pop() - .expect("if .last() is Some then .pop() must succeed") - .finish(preimage), - ); - if parent == 1 { - self.root = Some(Hash256::from_slice(preimage.as_bytes())); - break; - } else { - parent = get_parent(parent); - } - } - _ => { - self.half_nodes.push(HalfNode::new(parent, preimage)); - break; - } - } - } - } - - /// Returns a "zero hash" from a pre-computed set for the given node. - /// - /// Note: this node is not always zero, instead it is the result of hashing up a tree where the - /// leaves are all zeros. 
E.g., in a tree of depth 2, the `zero_hash` of a node at depth 1 - /// will be `[0; 32]`. However, the `zero_hash` for a node at depth 0 will be - /// `hash(concat([0; 32], [0; 32])))`. - fn zero_hash(&self, id: usize) -> Preimage<'static> { - Preimage::Slice(get_zero_hash(self.depth - (get_depth(id) + 1))) - } -} - -#[cfg(test)] -mod test { - use super::*; - use crate::merkleize_padded; - - /// This test is just to ensure that the stack size of the `Context` remains the same. We choose - /// our smallvec size based upon this, so it's good to know if it suddenly changes in size. - #[test] - fn context_size() { - assert_eq!( - mem::size_of::(), - 224, - "Halfnode size should be as expected" - ); - } - - fn compare_with_reference(leaves: &[Hash256], depth: usize) { - let reference_bytes = leaves - .iter() - .flat_map(|hash| hash.as_bytes()) - .copied() - .collect::>(); - - let reference_root = merkleize_padded(&reference_bytes, 1 << (depth - 1)); - - let merklizer_root_32_bytes = { - let mut m = MerkleHasher::with_depth(depth); - for leaf in leaves.iter() { - m.write(leaf.as_bytes()).expect("should process leaf"); - } - m.finish().expect("should finish") - }; - - assert_eq!( - reference_root, merklizer_root_32_bytes, - "32 bytes should match reference root" - ); - - let merklizer_root_individual_3_bytes = { - let mut m = MerkleHasher::with_depth(depth); - for bytes in reference_bytes.chunks(3) { - m.write(bytes).expect("should process byte"); - } - m.finish().expect("should finish") - }; - - assert_eq!( - reference_root, merklizer_root_individual_3_bytes, - "3 bytes should match reference root" - ); - - let merklizer_root_individual_single_bytes = { - let mut m = MerkleHasher::with_depth(depth); - for byte in reference_bytes.iter() { - m.write(&[*byte]).expect("should process byte"); - } - m.finish().expect("should finish") - }; - - assert_eq!( - reference_root, merklizer_root_individual_single_bytes, - "single bytes should match reference root" - ); - } - - /// A simple wrapper to compare MerkleHasher to the reference function by just giving a number - /// of leaves and a depth. - fn compare_reference_with_len(leaves: u64, depth: usize) { - let leaves = (0..leaves) - .map(Hash256::from_low_u64_be) - .collect::>(); - compare_with_reference(&leaves, depth) - } - - /// Compares the `MerkleHasher::with_depth` and `MerkleHasher::with_leaves` generate consistent - /// results. 
- fn compare_new_with_leaf_count(num_leaves: u64, depth: usize) { - let leaves = (0..num_leaves) - .map(Hash256::from_low_u64_be) - .collect::>(); - - let from_depth = { - let mut m = MerkleHasher::with_depth(depth); - for leaf in leaves.iter() { - m.write(leaf.as_bytes()).expect("should process leaf"); - } - m.finish() - }; - - let from_num_leaves = { - let mut m = MerkleHasher::with_leaves(num_leaves as usize); - for leaf in leaves.iter() { - m.process_leaf(leaf.as_bytes()) - .expect("should process leaf"); - } - m.finish() - }; - - assert_eq!( - from_depth, from_num_leaves, - "hash generated by depth should match that from num leaves" - ); - } - - #[test] - fn with_leaves() { - compare_new_with_leaf_count(1, 1); - compare_new_with_leaf_count(2, 2); - compare_new_with_leaf_count(3, 3); - compare_new_with_leaf_count(4, 3); - compare_new_with_leaf_count(5, 4); - compare_new_with_leaf_count(6, 4); - compare_new_with_leaf_count(7, 4); - compare_new_with_leaf_count(8, 4); - compare_new_with_leaf_count(9, 5); - compare_new_with_leaf_count(10, 5); - compare_new_with_leaf_count(11, 5); - compare_new_with_leaf_count(12, 5); - compare_new_with_leaf_count(13, 5); - compare_new_with_leaf_count(14, 5); - compare_new_with_leaf_count(15, 5); - } - - #[test] - fn depth() { - assert_eq!(get_depth(1), 0); - assert_eq!(get_depth(2), 1); - assert_eq!(get_depth(3), 1); - assert_eq!(get_depth(4), 2); - assert_eq!(get_depth(5), 2); - assert_eq!(get_depth(6), 2); - assert_eq!(get_depth(7), 2); - assert_eq!(get_depth(8), 3); - } - - #[test] - fn with_0_leaves() { - let hasher = MerkleHasher::with_leaves(0); - assert_eq!(hasher.finish().unwrap(), Hash256::zero()); - } - - #[test] - #[should_panic] - fn too_many_leaves() { - compare_reference_with_len(2, 1); - } - - #[test] - fn full_trees() { - compare_reference_with_len(1, 1); - compare_reference_with_len(2, 2); - compare_reference_with_len(4, 3); - compare_reference_with_len(8, 4); - compare_reference_with_len(16, 5); - compare_reference_with_len(32, 6); - compare_reference_with_len(64, 7); - compare_reference_with_len(128, 8); - compare_reference_with_len(256, 9); - compare_reference_with_len(256, 9); - compare_reference_with_len(8192, 14); - } - - #[test] - fn incomplete_trees() { - compare_reference_with_len(0, 1); - - compare_reference_with_len(0, 2); - compare_reference_with_len(1, 2); - - for i in 0..=4 { - compare_reference_with_len(i, 3); - } - - for i in 0..=7 { - compare_reference_with_len(i, 4); - } - - for i in 0..=15 { - compare_reference_with_len(i, 5); - } - - for i in 0..=32 { - compare_reference_with_len(i, 6); - } - - for i in 0..=64 { - compare_reference_with_len(i, 7); - } - - compare_reference_with_len(0, 14); - compare_reference_with_len(13, 14); - compare_reference_with_len(8191, 14); - } - - #[test] - fn remaining_buffer() { - let a = { - let mut m = MerkleHasher::with_leaves(2); - m.write(&[1]).expect("should write"); - m.finish().expect("should finish") - }; - - let b = { - let mut m = MerkleHasher::with_leaves(2); - let mut leaf = vec![1]; - leaf.extend_from_slice(&[0; 31]); - m.write(&leaf).expect("should write"); - m.write(&[0; 32]).expect("should write"); - m.finish().expect("should finish") - }; - - assert_eq!(a, b, "should complete buffer"); - } -} diff --git a/consensus/tree_hash/src/merkleize_padded.rs b/consensus/tree_hash/src/merkleize_padded.rs deleted file mode 100644 index f7dce3994..000000000 --- a/consensus/tree_hash/src/merkleize_padded.rs +++ /dev/null @@ -1,330 +0,0 @@ -use super::{get_zero_hash, Hash256, 
BYTES_PER_CHUNK}; -use eth2_hashing::{hash32_concat, hash_fixed}; - -/// Merkleize `bytes` and return the root, optionally padding the tree out to `min_leaves` number of -/// leaves. -/// -/// **Note**: This function is generally worse than using the `crate::merkle_root` which uses -/// `MerkleHasher`. We only keep this function around for reference testing. -/// -/// First all nodes are extracted from `bytes` and then a padding node is added until the number of -/// leaf chunks is greater than or equal to `min_leaves`. Callers may set `min_leaves` to `0` if no -/// adding additional chunks should be added to the given `bytes`. -/// -/// If `bytes.len() <= BYTES_PER_CHUNK`, no hashing is done and `bytes` is returned, potentially -/// padded out to `BYTES_PER_CHUNK` length with `0`. -/// -/// ## CPU Performance -/// -/// A cache of `MAX_TREE_DEPTH` hashes are stored to avoid re-computing the hashes of padding nodes -/// (or their parents). Therefore, adding padding nodes only incurs one more hash per additional -/// height of the tree. -/// -/// ## Memory Performance -/// -/// This algorithm has two interesting memory usage properties: -/// -/// 1. The maximum memory footprint is roughly `O(V / 2)` memory, where `V` is the number of leaf -/// chunks with values (i.e., leaves that are not padding). The means adding padding nodes to -/// the tree does not increase the memory footprint. -/// 2. At each height of the tree half of the memory is freed until only a single chunk is stored. -/// 3. The input `bytes` are not copied into another list before processing. -/// -/// _Note: there are some minor memory overheads, including a handful of usizes and a list of -/// `MAX_TREE_DEPTH` hashes as `lazy_static` constants._ -pub fn merkleize_padded(bytes: &[u8], min_leaves: usize) -> Hash256 { - // If the bytes are just one chunk or less, pad to one chunk and return without hashing. - if bytes.len() <= BYTES_PER_CHUNK && min_leaves <= 1 { - let mut o = bytes.to_vec(); - o.resize(BYTES_PER_CHUNK, 0); - return Hash256::from_slice(&o); - } - - assert!( - bytes.len() > BYTES_PER_CHUNK || min_leaves > 1, - "Merkle hashing only needs to happen if there is more than one chunk" - ); - - // The number of leaves that can be made directly from `bytes`. - let leaves_with_values = (bytes.len() + (BYTES_PER_CHUNK - 1)) / BYTES_PER_CHUNK; - - // The number of parents that have at least one non-padding leaf. - // - // Since there is more than one node in this tree (see prior assertion), there should always be - // one or more initial parent nodes. - let initial_parents_with_values = std::cmp::max(1, next_even_number(leaves_with_values) / 2); - - // The number of leaves in the full tree (including padding nodes). - let num_leaves = std::cmp::max(leaves_with_values, min_leaves).next_power_of_two(); - - // The number of levels in the tree. - // - // A tree with a single node has `height == 1`. - let height = num_leaves.trailing_zeros() as usize + 1; - - assert!(height >= 2, "The tree should have two or more heights"); - - // A buffer/scratch-space used for storing each round of hashes at each height. - // - // This buffer is kept as small as possible; it will shrink so it never stores a padding node. - let mut chunks = ChunkStore::with_capacity(initial_parents_with_values); - - // Create a parent in the `chunks` buffer for every two chunks in `bytes`. - // - // I.e., do the first round of hashing, hashing from the `bytes` slice and filling the `chunks` - // struct. 
- for i in 0..initial_parents_with_values { - let start = i * BYTES_PER_CHUNK * 2; - - // Hash two chunks, creating a parent chunk. - let hash = match bytes.get(start..start + BYTES_PER_CHUNK * 2) { - // All bytes are available, hash as usual. - Some(slice) => hash_fixed(slice), - // Unable to get all the bytes, get a small slice and pad it out. - None => { - let mut preimage = bytes - .get(start..) - .expect("`i` can only be larger than zero if there are bytes to read") - .to_vec(); - preimage.resize(BYTES_PER_CHUNK * 2, 0); - hash_fixed(&preimage) - } - }; - - assert_eq!( - hash.len(), - BYTES_PER_CHUNK, - "Hashes should be exactly one chunk" - ); - - // Store the parent node. - chunks - .set(i, &hash) - .expect("Buffer should always have capacity for parent nodes") - } - - // Iterate through all heights above the leaf nodes and either (a) hash two children or, (b) - // hash a left child and a right padding node. - // - // Skip the 0'th height because the leaves have already been processed. Skip the highest-height - // in the tree as it is the root does not require hashing. - // - // The padding nodes for each height are cached via `lazy static` to simulate non-adjacent - // padding nodes (i.e., avoid doing unnecessary hashing). - for height in 1..height - 1 { - let child_nodes = chunks.len(); - let parent_nodes = next_even_number(child_nodes) / 2; - - // For each pair of nodes stored in `chunks`: - // - // - If two nodes are available, hash them to form a parent. - // - If one node is available, hash it and a cached padding node to form a parent. - for i in 0..parent_nodes { - let (left, right) = match (chunks.get(i * 2), chunks.get(i * 2 + 1)) { - (Ok(left), Ok(right)) => (left, right), - (Ok(left), Err(_)) => (left, get_zero_hash(height)), - // Deriving `parent_nodes` from `chunks.len()` has ensured that we never encounter the - // scenario where we expect two nodes but there are none. - (Err(_), Err(_)) => unreachable!("Parent must have one child"), - // `chunks` is a contiguous array so it is impossible for an index to be missing - // when a higher index is present. - (Err(_), Ok(_)) => unreachable!("Parent must have a left child"), - }; - - assert!( - left.len() == right.len() && right.len() == BYTES_PER_CHUNK, - "Both children should be `BYTES_PER_CHUNK` bytes." - ); - - let hash = hash32_concat(left, right); - - // Store a parent node. - chunks - .set(i, &hash) - .expect("Buf is adequate size for parent"); - } - - // Shrink the buffer so it neatly fits the number of new nodes created in this round. - // - // The number of `parent_nodes` is either decreasing or stable. It never increases. - chunks.truncate(parent_nodes); - } - - // There should be a single chunk left in the buffer and it is the Merkle root. - let root = chunks.into_vec(); - - assert_eq!(root.len(), BYTES_PER_CHUNK, "Only one chunk should remain"); - - Hash256::from_slice(&root) -} - -/// A helper struct for storing words of `BYTES_PER_CHUNK` size in a flat byte array. -#[derive(Debug)] -struct ChunkStore(Vec); - -impl ChunkStore { - /// Creates a new instance with `chunks` padding nodes. - fn with_capacity(chunks: usize) -> Self { - Self(vec![0; chunks * BYTES_PER_CHUNK]) - } - - /// Set the `i`th chunk to `value`. - /// - /// Returns `Err` if `value.len() != BYTES_PER_CHUNK` or `i` is out-of-bounds. 
- fn set(&mut self, i: usize, value: &[u8]) -> Result<(), ()> { - if i < self.len() && value.len() == BYTES_PER_CHUNK { - let slice = &mut self.0[i * BYTES_PER_CHUNK..i * BYTES_PER_CHUNK + BYTES_PER_CHUNK]; - slice.copy_from_slice(value); - Ok(()) - } else { - Err(()) - } - } - - /// Gets the `i`th chunk. - /// - /// Returns `Err` if `i` is out-of-bounds. - fn get(&self, i: usize) -> Result<&[u8], ()> { - if i < self.len() { - Ok(&self.0[i * BYTES_PER_CHUNK..i * BYTES_PER_CHUNK + BYTES_PER_CHUNK]) - } else { - Err(()) - } - } - - /// Returns the number of chunks presently stored in `self`. - fn len(&self) -> usize { - self.0.len() / BYTES_PER_CHUNK - } - - /// Truncates 'self' to `num_chunks` chunks. - /// - /// Functionally identical to `Vec::truncate`. - fn truncate(&mut self, num_chunks: usize) { - self.0.truncate(num_chunks * BYTES_PER_CHUNK) - } - - /// Consumes `self`, returning the underlying byte array. - fn into_vec(self) -> Vec { - self.0 - } -} - -/// Returns the next even number following `n`. If `n` is even, `n` is returned. -fn next_even_number(n: usize) -> usize { - n + n % 2 -} - -#[cfg(test)] -mod test { - use super::*; - use crate::ZERO_HASHES_MAX_INDEX; - - pub fn reference_root(bytes: &[u8]) -> Hash256 { - crate::merkleize_standard(bytes) - } - - macro_rules! common_tests { - ($get_bytes: ident) => { - #[test] - fn zero_value_0_nodes() { - test_against_reference(&$get_bytes(0 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_1_nodes() { - test_against_reference(&$get_bytes(1 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_2_nodes() { - test_against_reference(&$get_bytes(2 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_3_nodes() { - test_against_reference(&$get_bytes(3 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_4_nodes() { - test_against_reference(&$get_bytes(4 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_8_nodes() { - test_against_reference(&$get_bytes(8 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_9_nodes() { - test_against_reference(&$get_bytes(9 * BYTES_PER_CHUNK), 0); - } - - #[test] - fn zero_value_8_nodes_varying_min_length() { - for i in 0..64 { - test_against_reference(&$get_bytes(8 * BYTES_PER_CHUNK), i); - } - } - - #[test] - fn zero_value_range_of_nodes() { - for i in 0..32 * BYTES_PER_CHUNK { - test_against_reference(&$get_bytes(i), 0); - } - } - - #[test] - fn max_tree_depth_min_nodes() { - let input = vec![0; 10 * BYTES_PER_CHUNK]; - let min_nodes = 2usize.pow(ZERO_HASHES_MAX_INDEX as u32); - assert_eq!( - merkleize_padded(&input, min_nodes).as_bytes(), - get_zero_hash(ZERO_HASHES_MAX_INDEX) - ); - } - }; - } - - mod zero_value { - use super::*; - - fn zero_bytes(bytes: usize) -> Vec { - vec![0; bytes] - } - - common_tests!(zero_bytes); - } - - mod random_value { - use super::*; - use rand::RngCore; - - fn random_bytes(bytes: usize) -> Vec { - let mut bytes = Vec::with_capacity(bytes); - rand::thread_rng().fill_bytes(&mut bytes); - bytes - } - - common_tests!(random_bytes); - } - - fn test_against_reference(input: &[u8], min_nodes: usize) { - let mut reference_input = input.to_vec(); - reference_input.resize( - std::cmp::max( - reference_input.len(), - min_nodes.next_power_of_two() * BYTES_PER_CHUNK, - ), - 0, - ); - - assert_eq!( - reference_root(&reference_input), - merkleize_padded(input, min_nodes), - "input.len(): {:?}", - input.len() - ); - } -} diff --git a/consensus/tree_hash/src/merkleize_standard.rs b/consensus/tree_hash/src/merkleize_standard.rs deleted file mode 100644 index 
6dd046991..000000000 --- a/consensus/tree_hash/src/merkleize_standard.rs +++ /dev/null @@ -1,81 +0,0 @@ -use super::*; -use eth2_hashing::hash; - -/// Merkleizes bytes and returns the root, using a simple algorithm that does not optimize to avoid -/// processing or storing padding bytes. -/// -/// **Note**: This function is generally worse than using the `crate::merkle_root` which uses -/// `MerkleHasher`. We only keep this function around for reference testing. -/// -/// The input `bytes` will be padded to ensure that the number of leaves is a power-of-two. -/// -/// ## CPU Performance -/// -/// Will hash all nodes in the tree, even if they are padding and pre-determined. -/// -/// ## Memory Performance -/// -/// - Duplicates the input `bytes`. -/// - Stores all internal nodes, even if they are padding. -/// - Does not free up unused memory during operation. -pub fn merkleize_standard(bytes: &[u8]) -> Hash256 { - // If the bytes are just one chunk (or less than one chunk) just return them. - if bytes.len() <= HASHSIZE { - let mut o = bytes.to_vec(); - o.resize(HASHSIZE, 0); - return Hash256::from_slice(&o[0..HASHSIZE]); - } - - let leaves = num_sanitized_leaves(bytes.len()); - let nodes = num_nodes(leaves); - let internal_nodes = nodes - leaves; - - let num_bytes = std::cmp::max(internal_nodes, 1) * HASHSIZE + bytes.len(); - - let mut o: Vec = vec![0; internal_nodes * HASHSIZE]; - - o.append(&mut bytes.to_vec()); - - assert_eq!(o.len(), num_bytes); - - let empty_chunk_hash = hash(&[0; MERKLE_HASH_CHUNK]); - - let mut i = nodes * HASHSIZE; - let mut j = internal_nodes * HASHSIZE; - - while i >= MERKLE_HASH_CHUNK { - i -= MERKLE_HASH_CHUNK; - - j -= HASHSIZE; - let hash = match o.get(i..i + MERKLE_HASH_CHUNK) { - // All bytes are available, hash as usual. - Some(slice) => hash(slice), - // Unable to get all the bytes. - None => { - match o.get(i..) { - // Able to get some of the bytes, pad them out. - Some(slice) => { - let mut bytes = slice.to_vec(); - bytes.resize(MERKLE_HASH_CHUNK, 0); - hash(&bytes) - } - // Unable to get any bytes, use the empty-chunk hash. 
- None => empty_chunk_hash.clone(), - } - } - }; - - o[j..j + HASHSIZE].copy_from_slice(&hash); - } - - Hash256::from_slice(&o[0..HASHSIZE]) -} - -fn num_sanitized_leaves(num_bytes: usize) -> usize { - let leaves = (num_bytes + HASHSIZE - 1) / HASHSIZE; - leaves.next_power_of_two() -} - -fn num_nodes(num_leaves: usize) -> usize { - 2 * num_leaves - 1 -} diff --git a/consensus/tree_hash/tests/tests.rs b/consensus/tree_hash/tests/tests.rs deleted file mode 100644 index 8b2a4b21b..000000000 --- a/consensus/tree_hash/tests/tests.rs +++ /dev/null @@ -1,128 +0,0 @@ -use ssz_derive::Encode; -use tree_hash::{Hash256, MerkleHasher, PackedEncoding, TreeHash, BYTES_PER_CHUNK}; -use tree_hash_derive::TreeHash; - -#[derive(Encode)] -struct HashVec { - vec: Vec, -} - -impl From> for HashVec { - fn from(vec: Vec) -> Self { - Self { vec } - } -} - -impl tree_hash::TreeHash for HashVec { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::List - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - unreachable!("List should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("List should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - let mut hasher = - MerkleHasher::with_leaves((self.vec.len() + BYTES_PER_CHUNK - 1) / BYTES_PER_CHUNK); - - for item in &self.vec { - hasher.write(&item.tree_hash_packed_encoding()).unwrap() - } - - let root = hasher.finish().unwrap(); - - tree_hash::mix_in_length(&root, self.vec.len()) - } -} - -fn mix_in_selector(a: Hash256, selector: u8) -> Hash256 { - let mut b = [0; 32]; - b[0] = selector; - - Hash256::from_slice(ð2_hashing::hash32_concat(a.as_bytes(), &b)) -} - -fn u8_hash_concat(v1: u8, v2: u8) -> Hash256 { - let mut a = [0; 32]; - let mut b = [0; 32]; - - a[0] = v1; - b[0] = v2; - - Hash256::from_slice(ð2_hashing::hash32_concat(&a, &b)) -} - -fn u8_hash(x: u8) -> Hash256 { - let mut a = [0; 32]; - a[0] = x; - Hash256::from_slice(&a) -} - -#[derive(TreeHash)] -#[tree_hash(enum_behaviour = "transparent")] -enum FixedTrans { - A(u8), - B(u8), -} - -#[test] -fn fixed_trans() { - assert_eq!(FixedTrans::A(2).tree_hash_root(), u8_hash(2)); - assert_eq!(FixedTrans::B(2).tree_hash_root(), u8_hash(2)); -} - -#[derive(TreeHash)] -#[tree_hash(enum_behaviour = "union")] -enum FixedUnion { - A(u8), - B(u8), -} - -#[test] -fn fixed_union() { - assert_eq!(FixedUnion::A(2).tree_hash_root(), u8_hash_concat(2, 0)); - assert_eq!(FixedUnion::B(2).tree_hash_root(), u8_hash_concat(2, 1)); -} - -#[derive(TreeHash)] -#[tree_hash(enum_behaviour = "transparent")] -enum VariableTrans { - A(HashVec), - B(HashVec), -} - -#[test] -fn variable_trans() { - assert_eq!( - VariableTrans::A(HashVec::from(vec![2])).tree_hash_root(), - u8_hash_concat(2, 1) - ); - assert_eq!( - VariableTrans::B(HashVec::from(vec![2])).tree_hash_root(), - u8_hash_concat(2, 1) - ); -} - -#[derive(TreeHash)] -#[tree_hash(enum_behaviour = "union")] -enum VariableUnion { - A(HashVec), - B(HashVec), -} - -#[test] -fn variable_union() { - assert_eq!( - VariableUnion::A(HashVec::from(vec![2])).tree_hash_root(), - mix_in_selector(u8_hash_concat(2, 1), 0) - ); - assert_eq!( - VariableUnion::B(HashVec::from(vec![2])).tree_hash_root(), - mix_in_selector(u8_hash_concat(2, 1), 1) - ); -} diff --git a/consensus/tree_hash_derive/Cargo.toml b/consensus/tree_hash_derive/Cargo.toml deleted file mode 100644 index 5f3396eb1..000000000 --- a/consensus/tree_hash_derive/Cargo.toml +++ /dev/null @@ -1,15 +0,0 @@ -[package] -name = "tree_hash_derive" -version = 
"0.4.0" -authors = ["Paul Hauner "] -edition = "2021" -description = "Procedural derive macros to accompany the tree_hash crate." -license = "Apache-2.0" - -[lib] -proc-macro = true - -[dependencies] -syn = "1.0.42" -quote = "1.0.7" -darling = "0.13.0" diff --git a/consensus/tree_hash_derive/src/lib.rs b/consensus/tree_hash_derive/src/lib.rs deleted file mode 100644 index 85ece80fb..000000000 --- a/consensus/tree_hash_derive/src/lib.rs +++ /dev/null @@ -1,336 +0,0 @@ -use darling::FromDeriveInput; -use proc_macro::TokenStream; -use quote::quote; -use std::convert::TryInto; -use syn::{parse_macro_input, Attribute, DataEnum, DataStruct, DeriveInput, Meta}; - -/// The highest possible union selector value (higher values are reserved for backwards compatible -/// extensions). -const MAX_UNION_SELECTOR: u8 = 127; - -#[derive(Debug, FromDeriveInput)] -#[darling(attributes(tree_hash))] -struct StructOpts { - #[darling(default)] - enum_behaviour: Option, -} - -const ENUM_TRANSPARENT: &str = "transparent"; -const ENUM_UNION: &str = "union"; -const ENUM_VARIANTS: &[&str] = &[ENUM_TRANSPARENT, ENUM_UNION]; -const NO_ENUM_BEHAVIOUR_ERROR: &str = "enums require an \"enum_behaviour\" attribute, \ - e.g., #[tree_hash(enum_behaviour = \"transparent\")]"; - -enum EnumBehaviour { - Transparent, - Union, -} - -impl EnumBehaviour { - pub fn new(s: Option) -> Option { - s.map(|s| match s.as_ref() { - ENUM_TRANSPARENT => EnumBehaviour::Transparent, - ENUM_UNION => EnumBehaviour::Union, - other => panic!( - "{} is an invalid enum_behaviour, use either {:?}", - other, ENUM_VARIANTS - ), - }) - } -} - -/// Return a Vec of `syn::Ident` for each named field in the struct, whilst filtering out fields -/// that should not be hashed. -/// -/// # Panics -/// Any unnamed struct field (like in a tuple struct) will raise a panic at compile time. -fn get_hashable_fields(struct_data: &syn::DataStruct) -> Vec<&syn::Ident> { - get_hashable_fields_and_their_caches(struct_data) - .into_iter() - .map(|(ident, _, _)| ident) - .collect() -} - -/// Return a Vec of the hashable fields of a struct, and each field's type and optional cache field. -fn get_hashable_fields_and_their_caches( - struct_data: &syn::DataStruct, -) -> Vec<(&syn::Ident, syn::Type, Option)> { - struct_data - .fields - .iter() - .filter_map(|f| { - if should_skip_hashing(f) { - None - } else { - let ident = f - .ident - .as_ref() - .expect("tree_hash_derive only supports named struct fields"); - let opt_cache_field = get_cache_field_for(f); - Some((ident, f.ty.clone(), opt_cache_field)) - } - }) - .collect() -} - -/// Parse the cached_tree_hash attribute for a field. -/// -/// Extract the cache field name from `#[cached_tree_hash(cache_field_name)]` -/// -/// Return `Some(cache_field_name)` if the field has a cached tree hash attribute, -/// or `None` otherwise. -fn get_cache_field_for(field: &syn::Field) -> Option { - use syn::{MetaList, NestedMeta}; - - let parsed_attrs = cached_tree_hash_attr_metas(&field.attrs); - if let [Meta::List(MetaList { nested, .. })] = &parsed_attrs[..] { - nested.iter().find_map(|x| match x { - NestedMeta::Meta(Meta::Path(path)) => path.get_ident().cloned(), - _ => None, - }) - } else { - None - } -} - -/// Process the `cached_tree_hash` attributes from a list of attributes into structured `Meta`s. 
-fn cached_tree_hash_attr_metas(attrs: &[Attribute]) -> Vec { - attrs - .iter() - .filter(|attr| attr.path.is_ident("cached_tree_hash")) - .flat_map(|attr| attr.parse_meta()) - .collect() -} - -/// Returns true if some field has an attribute declaring it should not be hashed. -/// -/// The field attribute is: `#[tree_hash(skip_hashing)]` -fn should_skip_hashing(field: &syn::Field) -> bool { - field.attrs.iter().any(|attr| { - attr.path.is_ident("tree_hash") - && attr.tokens.to_string().replace(' ', "") == "(skip_hashing)" - }) -} - -/// Implements `tree_hash::TreeHash` for some `struct`. -/// -/// Fields are hashed in the order they are defined. -#[proc_macro_derive(TreeHash, attributes(tree_hash))] -pub fn tree_hash_derive(input: TokenStream) -> TokenStream { - let item = parse_macro_input!(input as DeriveInput); - let opts = StructOpts::from_derive_input(&item).unwrap(); - let enum_opt = EnumBehaviour::new(opts.enum_behaviour); - - match &item.data { - syn::Data::Struct(s) => { - if enum_opt.is_some() { - panic!("enum_behaviour is invalid for structs"); - } - tree_hash_derive_struct(&item, s) - } - syn::Data::Enum(s) => match enum_opt.expect(NO_ENUM_BEHAVIOUR_ERROR) { - EnumBehaviour::Transparent => tree_hash_derive_enum_transparent(&item, s), - EnumBehaviour::Union => tree_hash_derive_enum_union(&item, s), - }, - _ => panic!("tree_hash_derive only supports structs and enums."), - } -} - -fn tree_hash_derive_struct(item: &DeriveInput, struct_data: &DataStruct) -> TokenStream { - let name = &item.ident; - let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl(); - - let idents = get_hashable_fields(struct_data); - let num_leaves = idents.len(); - - let output = quote! { - impl #impl_generics tree_hash::TreeHash for #name #ty_generics #where_clause { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::Container - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("Struct should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Struct should never be packed.") - } - - fn tree_hash_root(&self) -> tree_hash::Hash256 { - let mut hasher = tree_hash::MerkleHasher::with_leaves(#num_leaves); - - #( - hasher.write(self.#idents.tree_hash_root().as_bytes()) - .expect("tree hash derive should not apply too many leaves"); - )* - - hasher.finish().expect("tree hash derive should not have a remaining buffer") - } - } - }; - output.into() -} - -/// Derive `TreeHash` for an enum in the "transparent" method. -/// -/// The "transparent" method is distinct from the "union" method specified in the SSZ specification. -/// When using "transparent", the enum will be ignored and the contained field will be hashed as if -/// the enum does not exist. -/// -///## Limitations -/// -/// Only supports: -/// - Enums with a single field per variant, where -/// - All fields are "container" types. -/// -/// ## Panics -/// -/// Will panic at compile-time if the single field requirement isn't met, but will panic *at run -/// time* if the container type requirement isn't met. 
-fn tree_hash_derive_enum_transparent( - derive_input: &DeriveInput, - enum_data: &DataEnum, -) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - - let (patterns, type_exprs): (Vec<_>, Vec<_>) = enum_data - .variants - .iter() - .map(|variant| { - let variant_name = &variant.ident; - - if variant.fields.len() != 1 { - panic!("TreeHash can only be derived for enums with 1 field per variant"); - } - - let pattern = quote! { - #name::#variant_name(ref inner) - }; - - let ty = &(&variant.fields).into_iter().next().unwrap().ty; - let type_expr = quote! { - <#ty as tree_hash::TreeHash>::tree_hash_type() - }; - (pattern, type_expr) - }) - .unzip(); - - let output = quote! { - impl #impl_generics tree_hash::TreeHash for #name #ty_generics #where_clause { - fn tree_hash_type() -> tree_hash::TreeHashType { - #( - assert_eq!( - #type_exprs, - tree_hash::TreeHashType::Container, - "all variants must be of container type" - ); - )* - tree_hash::TreeHashType::Container - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("Enum should never be packed") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Enum should never be packed") - } - - fn tree_hash_root(&self) -> tree_hash::Hash256 { - match self { - #( - #patterns => inner.tree_hash_root(), - )* - } - } - } - }; - output.into() -} - -/// Derive `TreeHash` for an `enum` following the "union" SSZ spec. -/// -/// The union selector will be determined based upon the order in which the enum variants are -/// defined. E.g., the top-most variant in the enum will have a selector of `0`, the variant -/// beneath it will have a selector of `1` and so on. -/// -/// # Limitations -/// -/// Only supports enums where each variant has a single field. -fn tree_hash_derive_enum_union(derive_input: &DeriveInput, enum_data: &DataEnum) -> TokenStream { - let name = &derive_input.ident; - let (impl_generics, ty_generics, where_clause) = &derive_input.generics.split_for_impl(); - - let patterns: Vec<_> = enum_data - .variants - .iter() - .map(|variant| { - let variant_name = &variant.ident; - - if variant.fields.len() != 1 { - panic!("TreeHash can only be derived for enums with 1 field per variant"); - } - - quote! { - #name::#variant_name(ref inner) - } - }) - .collect(); - - let union_selectors = compute_union_selectors(patterns.len()); - - let output = quote! 
{ - impl #impl_generics tree_hash::TreeHash for #name #ty_generics #where_clause { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::Container - } - - fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { - unreachable!("Enum should never be packed") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Enum should never be packed") - } - - fn tree_hash_root(&self) -> tree_hash::Hash256 { - match self { - #( - #patterns => { - let root = inner.tree_hash_root(); - let selector = #union_selectors; - tree_hash::mix_in_selector(&root, selector) - .expect("derive macro should prevent out-of-bounds selectors") - }, - )* - } - } - } - }; - output.into() -} - -fn compute_union_selectors(num_variants: usize) -> Vec<u8> { - let union_selectors = (0..num_variants) - .map(|i| { - i.try_into() - .expect("union selector exceeds u8::max_value, union has too many variants") - }) - .collect::<Vec<u8>>(); - - let highest_selector = union_selectors - .last() - .copied() - .expect("0-variant union is not permitted"); - - assert!( - highest_selector <= MAX_UNION_SELECTOR, - "union selector {} exceeds limit of {}, enum has too many variants", - highest_selector, - MAX_UNION_SELECTOR - ); - - union_selectors -} diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 1ddf6058e..65883bd27 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -16,7 +16,7 @@ compare_fields = { path = "../../common/compare_fields" } compare_fields_derive = { path = "../../common/compare_fields_derive" } eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } ethereum-types = { version = "0.14.1", features = ["arbitrary"] } -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" hex = "0.4.2" int_to_bytes = { path = "../int_to_bytes" } log = "0.4.11" @@ -26,13 +26,13 @@ safe_arith = { path = "../safe_arith" } serde = {version = "1.0.116" , features = ["rc"] } serde_derive = "1.0.116" slog = "2.5.2" -eth2_ssz = { version = "0.4.1", features = ["arbitrary"] } -eth2_ssz_derive = "0.3.1" -eth2_ssz_types = { version = "0.2.2", features = ["arbitrary"] } +ethereum_ssz = { version = "0.5.0", features = ["arbitrary"] } +ethereum_ssz_derive = "0.5.0" +ssz_types = { version = "0.5.0", features = ["arbitrary"] } swap_or_not_shuffle = { path = "../swap_or_not_shuffle", features = ["arbitrary"] } test_random_derive = { path = "../../common/test_random_derive" } -tree_hash = { version = "0.4.1", features = ["arbitrary"] } -tree_hash_derive = "0.4.0" +tree_hash = { version = "0.5.0", features = ["arbitrary"] } +tree_hash_derive = "0.5.0" rand_xorshift = "0.3.0" cached_tree_hash = { path = "../cached_tree_hash" } serde_yaml = "0.8.13" @@ -41,8 +41,8 @@ derivative = "2.1.1" # The arbitrary dependency is enabled by default since Capella to avoid complexity introduced by # `AbstractExecPayload` arbitrary = { version = "1.0", features = ["derive"] } +ethereum_serde_utils = "0.5.0" rusqlite = { version = "0.28.0", features = ["bundled"], optional = true } -eth2_serde_utils = "0.1.1" regex = "1.5.5" lazy_static = "1.4.0" parking_lot = "0.12.0" diff --git a/consensus/types/presets/gnosis/capella.yaml b/consensus/types/presets/gnosis/capella.yaml index 913c2956b..fb36f9463 100644 --- a/consensus/types/presets/gnosis/capella.yaml +++ b/consensus/types/presets/gnosis/capella.yaml @@ -9,9 +9,9 @@ MAX_BLS_TO_EXECUTION_CHANGES: 16 # Execution # --------------------------------------------------------------- # 2**4 (= 16) withdrawals -MAX_WITHDRAWALS_PER_PAYLOAD: 16
+MAX_WITHDRAWALS_PER_PAYLOAD: 8 # Withdrawals processing # --------------------------------------------------------------- # 2**14 (= 16384) validators -MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: 16384 +MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: 8192 \ No newline at end of file diff --git a/consensus/types/src/aggregate_and_proof.rs b/consensus/types/src/aggregate_and_proof.rs index 39a0a28c0..20d66cd44 100644 --- a/consensus/types/src/aggregate_and_proof.rs +++ b/consensus/types/src/aggregate_and_proof.rs @@ -27,7 +27,7 @@ use tree_hash_derive::TreeHash; #[arbitrary(bound = "T: EthSpec")] pub struct AggregateAndProof { /// The index of the validator that created the attestation. - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub aggregator_index: u64, /// The aggregate attestation. pub aggregate: Attestation, diff --git a/consensus/types/src/attestation_data.rs b/consensus/types/src/attestation_data.rs index c6a661c85..286502b44 100644 --- a/consensus/types/src/attestation_data.rs +++ b/consensus/types/src/attestation_data.rs @@ -27,7 +27,7 @@ use tree_hash_derive::TreeHash; )] pub struct AttestationData { pub slot: Slot, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, // LMD GHOST vote diff --git a/consensus/types/src/attestation_duty.rs b/consensus/types/src/attestation_duty.rs index 87a9c932a..93a4c147b 100644 --- a/consensus/types/src/attestation_duty.rs +++ b/consensus/types/src/attestation_duty.rs @@ -12,6 +12,6 @@ pub struct AttestationDuty { /// The total number of attesters in the committee. pub committee_len: usize, /// The committee count at `attestation_slot`. - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub committees_at_slot: u64, } diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 090a361cd..0d52d7080 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -58,7 +58,7 @@ pub struct BeaconBlock = FullPayload #[superstruct(getter(copy))] pub slot: Slot, #[superstruct(getter(copy))] - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, #[superstruct(getter(copy))] pub parent_root: Hash256, diff --git a/consensus/types/src/beacon_block_header.rs b/consensus/types/src/beacon_block_header.rs index c6d6678f3..f2ef0a3dc 100644 --- a/consensus/types/src/beacon_block_header.rs +++ b/consensus/types/src/beacon_block_header.rs @@ -26,7 +26,7 @@ use tree_hash_derive::TreeHash; )] pub struct BeaconBlockHeader { pub slot: Slot, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, pub parent_root: Hash256, pub state_root: Hash256, diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 58c0eed33..ca333048a 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -5,7 +5,7 @@ use crate::*; use compare_fields::CompareFields; use compare_fields_derive::CompareFields; use derivative::Derivative; -use eth2_hashing::hash; +use ethereum_hashing::hash; use int_to_bytes::{int_to_bytes4, int_to_bytes8}; use pubkey_cache::PubkeyCache; use safe_arith::{ArithError, SafeArith}; @@ -210,7 +210,7 @@ where { // Versioning #[superstruct(getter(copy))] - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub genesis_time: u64, 
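The serde churn in this file and in the files that follow is purely the `eth2_serde_utils` → `serde_utils` rename; the helpers behave exactly as before. As a reminder of what `quoted_u64` buys, a small round-trip sketch (assuming the renamed `ethereum_serde_utils` crate is on the path as `serde_utils`, plus `serde` with derive and `serde_json`):

    use serde::{Deserialize, Serialize};

    #[derive(Debug, PartialEq, Serialize, Deserialize)]
    struct Duty {
        // Serialized as a JSON string so large values survive JSON parsers
        // that lose precision on big integers.
        #[serde(with = "serde_utils::quoted_u64")]
        validator_index: u64,
    }

    fn main() {
        let duty = Duty { validator_index: 42 };
        let json = serde_json::to_string(&duty).unwrap();
        assert_eq!(json, r#"{"validator_index":"42"}"#);

        // Deserialization accepts the quoted form (and, per the crate, bare numbers too).
        let back: Duty = serde_json::from_str(&json).unwrap();
        assert_eq!(back, duty);
    }
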
#[superstruct(getter(copy))] pub genesis_validators_root: Hash256, @@ -232,7 +232,7 @@ where pub eth1_data: Eth1Data, pub eth1_data_votes: VariableList, #[superstruct(getter(copy))] - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub eth1_deposit_index: u64, // Registry @@ -301,10 +301,10 @@ where // Capella #[superstruct(only(Capella, Deneb), partial_getter(copy))] - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub next_withdrawal_index: u64, #[superstruct(only(Capella, Deneb), partial_getter(copy))] - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub next_withdrawal_validator_index: u64, // Deep history valid from Capella onwards. #[superstruct(only(Capella, Deneb))] diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/blob_sidecar.rs index f0a1f0ee4..dbc250de5 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/blob_sidecar.rs @@ -48,11 +48,11 @@ impl Ord for BlobIdentifier { #[derivative(PartialEq, Eq, Hash(bound = "T: EthSpec"))] pub struct BlobSidecar { pub block_root: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, pub slot: Slot, pub block_parent_root: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, #[serde(with = "ssz_types::serde_utils::hex_fixed_vec")] pub blob: Blob, diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/bls_to_execution_change.rs index b279515bd..3ed9ee925 100644 --- a/consensus/types/src/bls_to_execution_change.rs +++ b/consensus/types/src/bls_to_execution_change.rs @@ -21,7 +21,7 @@ use tree_hash_derive::TreeHash; TestRandom, )] pub struct BlsToExecutionChange { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, pub from_bls_pubkey: PublicKeyBytes, pub to_execution_address: Address, diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index e922e81c7..8723c2afe 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -16,7 +16,7 @@ use tree_hash_derive::TreeHash; pub struct BuilderBid> { #[serde_as(as = "BlindedPayloadAsHeader")] pub header: Payload, - #[serde(with = "eth2_serde_utils::quoted_u256")] + #[serde(with = "serde_utils::quoted_u256")] pub value: Uint256, pub pubkey: PublicKeyBytes, #[serde(skip)] @@ -50,7 +50,7 @@ impl> ForkVersionDeserialize #[derive(Deserialize)] struct Helper { header: serde_json::Value, - #[serde(with = "eth2_serde_utils::quoted_u256")] + #[serde(with = "serde_utils::quoted_u256")] value: Uint256, pubkey: PublicKeyBytes, } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 1c07c9a02..9d2f230b5 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -1,9 +1,9 @@ use crate::application_domain::{ApplicationDomain, APPLICATION_DOMAIN_BUILDER}; use crate::*; -use eth2_serde_utils::quoted_u64::MaybeQuoted; use int_to_bytes::int_to_bytes4; use serde::{Deserializer, Serialize, Serializer}; use serde_derive::Deserialize; +use serde_utils::quoted_u64::MaybeQuoted; use std::fs::File; use std::path::Path; use tree_hash::TreeHash; @@ -871,7 +871,7 @@ impl ChainSpec { */ capella_fork_version: [0x03, 0x00, 0x00, 0x64], capella_fork_epoch: None, - 
max_validators_per_withdrawals_sweep: 16384, + max_validators_per_withdrawals_sweep: 8192, /* * Deneb hard fork params @@ -929,33 +929,33 @@ pub struct Config { pub preset_base: String, #[serde(default = "default_terminal_total_difficulty")] - #[serde(with = "eth2_serde_utils::quoted_u256")] + #[serde(with = "serde_utils::quoted_u256")] pub terminal_total_difficulty: Uint256, #[serde(default = "default_terminal_block_hash")] pub terminal_block_hash: ExecutionBlockHash, #[serde(default = "default_terminal_block_hash_activation_epoch")] pub terminal_block_hash_activation_epoch: Epoch, #[serde(default = "default_safe_slots_to_import_optimistically")] - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub safe_slots_to_import_optimistically: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] min_genesis_active_validator_count: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] min_genesis_time: u64, - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] genesis_fork_version: [u8; 4], - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] genesis_delay: u64, - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] altair_fork_version: [u8; 4], #[serde(serialize_with = "serialize_fork_epoch")] #[serde(deserialize_with = "deserialize_fork_epoch")] pub altair_fork_epoch: Option>, #[serde(default = "default_bellatrix_fork_version")] - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] bellatrix_fork_version: [u8; 4], #[serde(default)] #[serde(serialize_with = "serialize_fork_epoch")] @@ -963,7 +963,7 @@ pub struct Config { pub bellatrix_fork_epoch: Option>, #[serde(default = "default_capella_fork_version")] - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] capella_fork_version: [u8; 4], #[serde(default)] #[serde(serialize_with = "serialize_fork_epoch")] @@ -971,41 +971,41 @@ pub struct Config { pub capella_fork_epoch: Option>, #[serde(default = "default_deneb_fork_version")] - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] deneb_fork_version: [u8; 4], #[serde(default)] #[serde(serialize_with = "serialize_fork_epoch")] #[serde(deserialize_with = "deserialize_fork_epoch")] pub deneb_fork_epoch: Option>, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] seconds_per_slot: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] seconds_per_eth1_block: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] min_validator_withdrawability_delay: Epoch, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] shard_committee_period: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] eth1_follow_distance: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] inactivity_score_bias: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] inactivity_score_recovery_rate: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] ejection_balance: u64, - #[serde(with = 
"eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] min_per_epoch_churn_limit: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] churn_limit_quotient: u64, #[serde(skip_serializing_if = "Option::is_none")] proposer_score_boost: Option>, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] deposit_chain_id: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] deposit_network_id: u64, deposit_contract_address: Address, } diff --git a/consensus/types/src/contribution_and_proof.rs b/consensus/types/src/contribution_and_proof.rs index 167b0857c..7e757f89b 100644 --- a/consensus/types/src/contribution_and_proof.rs +++ b/consensus/types/src/contribution_and_proof.rs @@ -25,7 +25,7 @@ use tree_hash_derive::TreeHash; #[arbitrary(bound = "T: EthSpec")] pub struct ContributionAndProof { /// The index of the validator that created the sync contribution. - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub aggregator_index: u64, /// The aggregate contribution. pub contribution: SyncCommitteeContribution, diff --git a/consensus/types/src/deposit_data.rs b/consensus/types/src/deposit_data.rs index 196931167..d75643f65 100644 --- a/consensus/types/src/deposit_data.rs +++ b/consensus/types/src/deposit_data.rs @@ -26,7 +26,7 @@ use tree_hash_derive::TreeHash; pub struct DepositData { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, pub signature: SignatureBytes, } diff --git a/consensus/types/src/deposit_message.rs b/consensus/types/src/deposit_message.rs index 63073401c..1096cfaa2 100644 --- a/consensus/types/src/deposit_message.rs +++ b/consensus/types/src/deposit_message.rs @@ -25,7 +25,7 @@ use tree_hash_derive::TreeHash; pub struct DepositMessage { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, } diff --git a/consensus/types/src/deposit_tree_snapshot.rs b/consensus/types/src/deposit_tree_snapshot.rs index 21bbab81f..aea4677f2 100644 --- a/consensus/types/src/deposit_tree_snapshot.rs +++ b/consensus/types/src/deposit_tree_snapshot.rs @@ -1,5 +1,5 @@ use crate::*; -use eth2_hashing::{hash32_concat, ZERO_HASHES}; +use ethereum_hashing::{hash32_concat, ZERO_HASHES}; use int_to_bytes::int_to_bytes32; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; diff --git a/consensus/types/src/enr_fork_id.rs b/consensus/types/src/enr_fork_id.rs index 3556e31a9..409383c90 100644 --- a/consensus/types/src/enr_fork_id.rs +++ b/consensus/types/src/enr_fork_id.rs @@ -24,9 +24,9 @@ use tree_hash_derive::TreeHash; TestRandom, )] pub struct EnrForkId { - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] pub fork_digest: [u8; 4], - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] pub next_fork_version: [u8; 4], pub next_fork_epoch: Epoch, } diff --git a/consensus/types/src/eth1_data.rs b/consensus/types/src/eth1_data.rs index 6b2396e11..d8f476b99 100644 --- a/consensus/types/src/eth1_data.rs +++ b/consensus/types/src/eth1_data.rs @@ -26,7 +26,7 @@ use tree_hash_derive::TreeHash; )] pub struct Eth1Data { pub deposit_root: Hash256, - #[serde(with = 
"eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub deposit_count: u64, pub block_hash: Hash256, } diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 03b767a17..e7c00d195 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -397,7 +397,7 @@ impl EthSpec for GnosisEthSpec { type MaxPendingAttestations = U2048; // 128 max attestations * 16 slots per epoch type SlotsPerEth1VotingPeriod = U1024; // 64 epochs * 16 slots per epoch type MaxBlsToExecutionChanges = U16; - type MaxWithdrawalsPerPayload = U16; + type MaxWithdrawalsPerPayload = U8; type MaxBlobsPerBlock = U4; type FieldElementsPerBlob = U4096; type BytesPerFieldElement = U32; diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 823483b01..448c1bf23 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -60,21 +60,21 @@ pub struct ExecutionPayload { pub logs_bloom: FixedVector, #[superstruct(getter(copy))] pub prev_randao: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub block_number: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub gas_limit: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub gas_used: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, - #[serde(with = "eth2_serde_utils::quoted_u256")] + #[serde(with = "serde_utils::quoted_u256")] #[superstruct(getter(copy))] pub base_fee_per_gas: Uint256, #[superstruct(getter(copy))] @@ -84,7 +84,7 @@ pub struct ExecutionPayload { #[superstruct(only(Capella, Deneb))] pub withdrawals: Withdrawals, #[superstruct(only(Deneb))] - #[serde(with = "eth2_serde_utils::quoted_u256")] + #[serde(with = "serde_utils::quoted_u256")] #[superstruct(getter(copy))] pub excess_data_gas: Uint256, } diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index bc1acc0ba..57381b532 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -53,21 +53,21 @@ pub struct ExecutionPayloadHeader { pub logs_bloom: FixedVector, #[superstruct(getter(copy))] pub prev_randao: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub block_number: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub gas_limit: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub gas_used: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] #[superstruct(getter(copy))] pub timestamp: u64, #[serde(with = "ssz_types::serde_utils::hex_var_list")] pub extra_data: VariableList, - #[serde(with = "eth2_serde_utils::quoted_u256")] + #[serde(with = "serde_utils::quoted_u256")] #[superstruct(getter(copy))] pub base_fee_per_gas: Uint256, #[superstruct(getter(copy))] @@ -78,7 +78,7 @@ pub struct ExecutionPayloadHeader { 
#[superstruct(getter(copy))] pub withdrawals_root: Hash256, #[superstruct(only(Deneb))] - #[serde(with = "eth2_serde_utils::quoted_u256")] + #[serde(with = "serde_utils::quoted_u256")] #[superstruct(getter(copy))] pub excess_data_gas: Uint256, } diff --git a/consensus/types/src/fork.rs b/consensus/types/src/fork.rs index de332f0ca..4650881f7 100644 --- a/consensus/types/src/fork.rs +++ b/consensus/types/src/fork.rs @@ -24,9 +24,9 @@ use tree_hash_derive::TreeHash; TestRandom, )] pub struct Fork { - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] pub previous_version: [u8; 4], - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] pub current_version: [u8; 4], pub epoch: Epoch, } diff --git a/consensus/types/src/fork_data.rs b/consensus/types/src/fork_data.rs index cc7903931..bf9c48cd7 100644 --- a/consensus/types/src/fork_data.rs +++ b/consensus/types/src/fork_data.rs @@ -23,7 +23,7 @@ use tree_hash_derive::TreeHash; TestRandom, )] pub struct ForkData { - #[serde(with = "eth2_serde_utils::bytes_4_hex")] + #[serde(with = "serde_utils::bytes_4_hex")] pub current_version: [u8; 4], pub genesis_validators_root: Hash256, } diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index 6d52e0abb..e87b6d61a 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -28,6 +28,11 @@ impl ForkName { ] } + pub fn latest() -> ForkName { + // This unwrap is safe as long as we have 1+ forks. It is tested below. + *ForkName::list_all().last().unwrap() + } + /// Set the activation slots in the given `ChainSpec` so that the fork named by `self` /// is the only fork in effect from genesis. pub fn make_genesis_spec(&self, mut spec: ChainSpec) -> ChainSpec { @@ -201,7 +206,7 @@ mod test { #[test] fn previous_and_next_fork_consistent() { - assert_eq!(ForkName::Deneb.next_fork(), None); + assert_eq!(ForkName::latest().next_fork(), None); assert_eq!(ForkName::Base.previous_fork(), None); for (prev_fork, fork) in ForkName::list_all().into_iter().tuple_windows() { @@ -234,4 +239,15 @@ mod test { assert_eq!(ForkName::from_str("merge"), Ok(ForkName::Merge)); assert_eq!(ForkName::Merge.to_string(), "bellatrix"); } + + #[test] + fn fork_name_latest() { + assert_eq!(ForkName::latest(), *ForkName::list_all().last().unwrap()); + + let mut fork = ForkName::Base; + while let Some(next_fork) = fork.next_fork() { + fork = next_fork; + } + assert_eq!(ForkName::latest(), fork); + } } diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs index 6288cdbe8..bd4abe37d 100644 --- a/consensus/types/src/graffiti.rs +++ b/consensus/types/src/graffiti.rs @@ -27,7 +27,7 @@ impl Graffiti { impl fmt::Display for Graffiti { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", eth2_serde_utils::hex::encode(self.0)) + write!(f, "{}", serde_utils::hex::encode(self.0)) } } @@ -96,7 +96,7 @@ pub mod serde_graffiti { where S: Serializer, { - serializer.serialize_str(ð2_serde_utils::hex::encode(bytes)) + serializer.serialize_str(&serde_utils::hex::encode(bytes)) } pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; GRAFFITI_BYTES_LEN], D::Error> @@ -105,7 +105,7 @@ pub mod serde_graffiti { { let s: String = Deserialize::deserialize(deserializer)?; - let bytes = eth2_serde_utils::hex::decode(&s).map_err(D::Error::custom)?; + let bytes = serde_utils::hex::decode(&s).map_err(D::Error::custom)?; if bytes.len() != GRAFFITI_BYTES_LEN { return 
Err(D::Error::custom(format!( diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/indexed_attestation.rs index 16ffb1ad8..c59cbef30 100644 --- a/consensus/types/src/indexed_attestation.rs +++ b/consensus/types/src/indexed_attestation.rs @@ -72,9 +72,9 @@ impl Hash for IndexedAttestation { mod quoted_variable_list_u64 { use super::*; use crate::Unsigned; - use eth2_serde_utils::quoted_u64_vec::{QuotedIntVecVisitor, QuotedIntWrapper}; use serde::ser::SerializeSeq; use serde::{Deserializer, Serializer}; + use serde_utils::quoted_u64_vec::{QuotedIntVecVisitor, QuotedIntWrapper}; pub fn serialize(value: &VariableList, serializer: S) -> Result where diff --git a/consensus/types/src/participation_flags.rs b/consensus/types/src/participation_flags.rs index bd98f8da0..4f170a60b 100644 --- a/consensus/types/src/participation_flags.rs +++ b/consensus/types/src/participation_flags.rs @@ -9,7 +9,7 @@ use tree_hash::{PackedEncoding, TreeHash, TreeHashType}; #[serde(transparent)] #[derive(arbitrary::Arbitrary)] pub struct ParticipationFlags { - #[serde(with = "eth2_serde_utils::quoted_u8")] + #[serde(with = "serde_utils::quoted_u8")] bits: u8, } diff --git a/consensus/types/src/pending_attestation.rs b/consensus/types/src/pending_attestation.rs index 1b9903ebb..88db0ec4d 100644 --- a/consensus/types/src/pending_attestation.rs +++ b/consensus/types/src/pending_attestation.rs @@ -25,9 +25,9 @@ use tree_hash_derive::TreeHash; pub struct PendingAttestation { pub aggregation_bits: BitList, pub data: AttestationData, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub inclusion_delay: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_index: u64, } diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index 20c78f051..e65dd8f60 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -12,71 +12,71 @@ use serde_derive::{Deserialize, Serialize}; #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub struct BasePreset { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_committees_per_slot: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub target_committee_size: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_validators_per_committee: u64, - #[serde(with = "eth2_serde_utils::quoted_u8")] + #[serde(with = "serde_utils::quoted_u8")] pub shuffle_round_count: u8, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub hysteresis_quotient: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub hysteresis_downward_multiplier: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub hysteresis_upward_multiplier: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub safe_slots_to_update_justified: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_deposit_amount: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_effective_balance: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub 
effective_balance_increment: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_attestation_inclusion_delay: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub slots_per_epoch: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_seed_lookahead: Epoch, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_seed_lookahead: Epoch, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub epochs_per_eth1_voting_period: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub slots_per_historical_root: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_epochs_to_inactivity_penalty: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub epochs_per_historical_vector: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub epochs_per_slashings_vector: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub historical_roots_limit: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_registry_limit: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub base_reward_factor: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub whistleblower_reward_quotient: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proposer_reward_quotient: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub inactivity_penalty_quotient: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_slashing_penalty_quotient: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proportional_slashing_multiplier: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_proposer_slashings: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_attester_slashings: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_attestations: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_deposits: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_voluntary_exits: u64, } @@ -123,17 +123,17 @@ impl BasePreset { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub struct AltairPreset { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub inactivity_penalty_quotient_altair: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_slashing_penalty_quotient_altair: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proportional_slashing_multiplier_altair: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = 
"serde_utils::quoted_u64")] pub sync_committee_size: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub epochs_per_sync_committee_period: Epoch, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_sync_committee_participants: u64, } @@ -153,19 +153,19 @@ impl AltairPreset { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub struct BellatrixPreset { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub inactivity_penalty_quotient_bellatrix: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub min_slashing_penalty_quotient_bellatrix: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub proportional_slashing_multiplier_bellatrix: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_bytes_per_transaction: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_transactions_per_payload: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub bytes_per_logs_bloom: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_extra_data_bytes: u64, } @@ -187,11 +187,11 @@ impl BellatrixPreset { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub struct CapellaPreset { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_bls_to_execution_changes: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_withdrawals_per_payload: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub max_validators_per_withdrawals_sweep: u64, } diff --git a/consensus/types/src/proposer_preparation_data.rs b/consensus/types/src/proposer_preparation_data.rs index 6179828a9..2828b0d4d 100644 --- a/consensus/types/src/proposer_preparation_data.rs +++ b/consensus/types/src/proposer_preparation_data.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] pub struct ProposerPreparationData { /// The validators index. - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, /// The fee-recipient address. 
pub fee_recipient: Address, diff --git a/consensus/types/src/selection_proof.rs b/consensus/types/src/selection_proof.rs index f8bc8ba69..2a404b3b9 100644 --- a/consensus/types/src/selection_proof.rs +++ b/consensus/types/src/selection_proof.rs @@ -1,7 +1,7 @@ use crate::{ ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, SecretKey, Signature, SignedRoot, Slot, }; -use eth2_hashing::hash; +use ethereum_hashing::hash; use safe_arith::{ArithError, SafeArith}; use ssz::Encode; use std::cmp; diff --git a/consensus/types/src/slot_epoch.rs b/consensus/types/src/slot_epoch.rs index 06f99b988..991261d16 100644 --- a/consensus/types/src/slot_epoch.rs +++ b/consensus/types/src/slot_epoch.rs @@ -38,7 +38,7 @@ use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssi Deserialize, )] #[serde(transparent)] -pub struct Slot(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); +pub struct Slot(#[serde(with = "serde_utils::quoted_u64")] u64); #[derive( arbitrary::Arbitrary, @@ -54,7 +54,7 @@ pub struct Slot(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); Deserialize, )] #[serde(transparent)] -pub struct Epoch(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); +pub struct Epoch(#[serde(with = "serde_utils::quoted_u64")] u64); impl_common!(Slot); impl_common!(Epoch); diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/subnet_id.rs index fd06eb78a..b885f89f7 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/subnet_id.rs @@ -20,7 +20,7 @@ lazy_static! { #[derive(arbitrary::Arbitrary, Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(transparent)] -pub struct SubnetId(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); +pub struct SubnetId(#[serde(with = "serde_utils::quoted_u64")] u64); pub fn subnet_id_to_string(i: u64) -> &'static str { if i < MAX_SUBNET_ID as u64 { @@ -85,7 +85,7 @@ impl SubnetId { let subscription_event_idx = epoch.as_u64() / spec.epochs_per_subnet_subscription; let permutation_seed = - eth2_hashing::hash(&int_to_bytes::int_to_bytes8(subscription_event_idx)); + ethereum_hashing::hash(&int_to_bytes::int_to_bytes8(subscription_event_idx)); let num_subnets = 1 << spec.attestation_subnet_prefix_bits(); diff --git a/consensus/types/src/sync_aggregator_selection_data.rs b/consensus/types/src/sync_aggregator_selection_data.rs index 9e72438be..b10106812 100644 --- a/consensus/types/src/sync_aggregator_selection_data.rs +++ b/consensus/types/src/sync_aggregator_selection_data.rs @@ -21,7 +21,7 @@ use tree_hash_derive::TreeHash; )] pub struct SyncAggregatorSelectionData { pub slot: Slot, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub subcommittee_index: u64, } diff --git a/consensus/types/src/sync_committee_contribution.rs b/consensus/types/src/sync_committee_contribution.rs index ef8b52bec..425f8f116 100644 --- a/consensus/types/src/sync_committee_contribution.rs +++ b/consensus/types/src/sync_committee_contribution.rs @@ -32,7 +32,7 @@ pub enum Error { pub struct SyncCommitteeContribution { pub slot: Slot, pub beacon_block_root: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub subcommittee_index: u64, pub aggregation_bits: BitVector, pub signature: AggregateSignature, diff --git a/consensus/types/src/sync_committee_message.rs b/consensus/types/src/sync_committee_message.rs index 5c2fb0837..d0301cdf6 100644 --- a/consensus/types/src/sync_committee_message.rs +++ 
b/consensus/types/src/sync_committee_message.rs @@ -23,7 +23,7 @@ use tree_hash_derive::TreeHash; pub struct SyncCommitteeMessage { pub slot: Slot, pub beacon_block_root: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, // Signature by the validator over `beacon_block_root`. pub signature: Signature, diff --git a/consensus/types/src/sync_committee_subscription.rs b/consensus/types/src/sync_committee_subscription.rs index 7f5ed063f..8e040279d 100644 --- a/consensus/types/src/sync_committee_subscription.rs +++ b/consensus/types/src/sync_committee_subscription.rs @@ -7,10 +7,10 @@ use ssz_derive::{Decode, Encode}; #[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] pub struct SyncCommitteeSubscription { /// The validators index. - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, /// The sync committee indices. - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] pub sync_committee_indices: Vec, /// Epoch until which this subscription is required. pub until_epoch: Epoch, diff --git a/consensus/types/src/sync_duty.rs b/consensus/types/src/sync_duty.rs index bdb078459..e3ffe62bf 100644 --- a/consensus/types/src/sync_duty.rs +++ b/consensus/types/src/sync_duty.rs @@ -7,9 +7,9 @@ use std::collections::HashSet; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct SyncDuty { pub pubkey: PublicKeyBytes, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64_vec")] + #[serde(with = "serde_utils::quoted_u64_vec")] pub validator_sync_committee_indices: Vec, } diff --git a/consensus/types/src/sync_selection_proof.rs b/consensus/types/src/sync_selection_proof.rs index 570abace1..7cae3946c 100644 --- a/consensus/types/src/sync_selection_proof.rs +++ b/consensus/types/src/sync_selection_proof.rs @@ -5,7 +5,7 @@ use crate::{ ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, SecretKey, Signature, SignedRoot, Slot, SyncAggregatorSelectionData, }; -use eth2_hashing::hash; +use ethereum_hashing::hash; use safe_arith::{ArithError, SafeArith}; use ssz::Encode; use ssz_types::typenum::Unsigned; diff --git a/consensus/types/src/sync_subnet_id.rs b/consensus/types/src/sync_subnet_id.rs index 11bcf2689..5af756ae0 100644 --- a/consensus/types/src/sync_subnet_id.rs +++ b/consensus/types/src/sync_subnet_id.rs @@ -21,7 +21,7 @@ lazy_static! 
{ #[derive(arbitrary::Arbitrary, Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(transparent)] -pub struct SyncSubnetId(#[serde(with = "eth2_serde_utils::quoted_u64")] u64); +pub struct SyncSubnetId(#[serde(with = "serde_utils::quoted_u64")] u64); pub fn sync_subnet_id_to_string(i: u64) -> &'static str { if i < SYNC_COMMITTEE_SUBNET_COUNT { diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 43b892cdf..6860397fb 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -25,7 +25,7 @@ use tree_hash_derive::TreeHash; pub struct Validator { pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub effective_balance: u64, pub slashed: bool, pub activation_eligibility_epoch: Epoch, diff --git a/consensus/types/src/validator_registration_data.rs b/consensus/types/src/validator_registration_data.rs index 5a3450df0..de7f26cc6 100644 --- a/consensus/types/src/validator_registration_data.rs +++ b/consensus/types/src/validator_registration_data.rs @@ -13,9 +13,9 @@ pub struct SignedValidatorRegistrationData { #[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode, TreeHash)] pub struct ValidatorRegistrationData { pub fee_recipient: Address, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub gas_limit: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub timestamp: u64, pub pubkey: PublicKeyBytes, } diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/voluntary_exit.rs index 20c84986c..02686fef9 100644 --- a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/voluntary_exit.rs @@ -27,7 +27,7 @@ use tree_hash_derive::TreeHash; pub struct VoluntaryExit { /// Earliest epoch when voluntary exit can be processed. 
pub epoch: Epoch, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, } diff --git a/consensus/types/src/withdrawal.rs b/consensus/types/src/withdrawal.rs index 5221ff63f..eed7c7e27 100644 --- a/consensus/types/src/withdrawal.rs +++ b/consensus/types/src/withdrawal.rs @@ -20,12 +20,12 @@ use tree_hash_derive::TreeHash; TestRandom, )] pub struct Withdrawal { - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub index: u64, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub validator_index: u64, pub address: Address, - #[serde(with = "eth2_serde_utils::quoted_u64")] + #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, } diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index c3331824d..a610f257c 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -5,15 +5,15 @@ authors = ["Paul Hauner "] edition = "2021" [dependencies] -eth2_ssz = "0.4.1" -tree_hash = "0.4.1" +ethereum_ssz = "0.5.0" +tree_hash = "0.5.0" milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v1.4.2", optional = true } rand = "0.7.3" serde = "1.0.116" serde_derive = "1.0.116" -eth2_serde_utils = "0.1.1" +ethereum_serde_utils = "0.5.0" hex = "0.4.2" -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" ethereum-types = "0.14.1" arbitrary = { version = "1.0", features = ["derive"], optional = true } zeroize = { version = "1.4.2", features = ["zeroize_derive"] } diff --git a/crypto/bls/src/generic_aggregate_signature.rs b/crypto/bls/src/generic_aggregate_signature.rs index a61529af2..e6e53253f 100644 --- a/crypto/bls/src/generic_aggregate_signature.rs +++ b/crypto/bls/src/generic_aggregate_signature.rs @@ -4,9 +4,9 @@ use crate::{ generic_signature::{GenericSignature, TSignature}, Error, Hash256, INFINITY_SIGNATURE, SIGNATURE_BYTES_LEN, }; -use eth2_serde_utils::hex::encode as hex_encode; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::fmt; use std::hash::{Hash, Hasher}; diff --git a/crypto/bls/src/generic_public_key.rs b/crypto/bls/src/generic_public_key.rs index 847d039c6..462e4cb2c 100644 --- a/crypto/bls/src/generic_public_key.rs +++ b/crypto/bls/src/generic_public_key.rs @@ -1,8 +1,8 @@ use crate::generic_public_key_bytes::GenericPublicKeyBytes; use crate::Error; -use eth2_serde_utils::hex::encode as hex_encode; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::fmt; use std::hash::{Hash, Hasher}; diff --git a/crypto/bls/src/generic_public_key_bytes.rs b/crypto/bls/src/generic_public_key_bytes.rs index c2f318ab6..59b0ffc43 100644 --- a/crypto/bls/src/generic_public_key_bytes.rs +++ b/crypto/bls/src/generic_public_key_bytes.rs @@ -2,9 +2,9 @@ use crate::{ generic_public_key::{GenericPublicKey, TPublicKey}, Error, PUBLIC_KEY_BYTES_LEN, }; -use eth2_serde_utils::hex::encode as hex_encode; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::convert::TryInto; use std::fmt; diff --git a/crypto/bls/src/generic_signature.rs b/crypto/bls/src/generic_signature.rs index 01e5ed1d4..05e0a222b 100644 --- a/crypto/bls/src/generic_signature.rs +++ b/crypto/bls/src/generic_signature.rs @@ -2,9 
+2,9 @@ use crate::{ generic_public_key::{GenericPublicKey, TPublicKey}, Error, Hash256, }; -use eth2_serde_utils::hex::encode as hex_encode; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::fmt; use std::hash::{Hash, Hasher}; diff --git a/crypto/bls/src/generic_signature_bytes.rs b/crypto/bls/src/generic_signature_bytes.rs index aa33c90d0..8f9f2a4d8 100644 --- a/crypto/bls/src/generic_signature_bytes.rs +++ b/crypto/bls/src/generic_signature_bytes.rs @@ -3,9 +3,9 @@ use crate::{ generic_signature::{GenericSignature, TSignature}, Error, INFINITY_SIGNATURE, SIGNATURE_BYTES_LEN, }; -use eth2_serde_utils::hex::encode as hex_encode; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; +use serde_utils::hex::encode as hex_encode; use ssz::{Decode, Encode}; use std::convert::TryInto; use std::fmt; diff --git a/crypto/bls/src/get_withdrawal_credentials.rs b/crypto/bls/src/get_withdrawal_credentials.rs index 98106434f..d5e647050 100644 --- a/crypto/bls/src/get_withdrawal_credentials.rs +++ b/crypto/bls/src/get_withdrawal_credentials.rs @@ -1,5 +1,5 @@ use crate::PublicKey; -use eth2_hashing::hash; +use ethereum_hashing::hash; use ssz::Encode; /// Returns the withdrawal credentials for a given public key. diff --git a/crypto/eth2_hashing/.cargo/config b/crypto/eth2_hashing/.cargo/config deleted file mode 100644 index 4ec2f3b86..000000000 --- a/crypto/eth2_hashing/.cargo/config +++ /dev/null @@ -1,2 +0,0 @@ -[target.wasm32-unknown-unknown] -runner = 'wasm-bindgen-test-runner' diff --git a/crypto/eth2_hashing/Cargo.toml b/crypto/eth2_hashing/Cargo.toml deleted file mode 100644 index db296c70f..000000000 --- a/crypto/eth2_hashing/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "eth2_hashing" -version = "0.3.0" -authors = ["Paul Hauner "] -edition = "2021" -license = "Apache-2.0" -description = "Hashing primitives used in Ethereum 2.0" - -[dependencies] -lazy_static = { version = "1.4.0", optional = true } -cpufeatures = { version = "0.2.5", optional = true } -ring = "0.16.19" -sha2 = "0.10" - -[dev-dependencies] -rustc-hex = "2.1.0" - -[target.'cfg(target_arch = "wasm32")'.dev-dependencies] -wasm-bindgen-test = "0.3.18" - -[features] -default = ["zero_hash_cache", "detect-cpufeatures"] -zero_hash_cache = ["lazy_static"] -detect-cpufeatures = ["cpufeatures"] diff --git a/crypto/eth2_hashing/src/lib.rs b/crypto/eth2_hashing/src/lib.rs deleted file mode 100644 index 36a3d1413..000000000 --- a/crypto/eth2_hashing/src/lib.rs +++ /dev/null @@ -1,251 +0,0 @@ -//! Optimized SHA256 for use in Ethereum 2.0. -//! -//! The initial purpose of this crate was to provide an abstraction over the hash function used in -//! Ethereum 2.0. The hash function changed during the specification process, so defining it once in -//! this crate made it easy to replace. -//! -//! Now this crate serves primarily as a wrapper over two SHA256 crates: `sha2` and `ring` – -//! which it switches between at runtime based on the availability of SHA intrinsics. - -pub use self::DynamicContext as Context; -use sha2::Digest; - -#[cfg(feature = "zero_hash_cache")] -use lazy_static::lazy_static; - -/// Length of a SHA256 hash in bytes. -pub const HASH_LEN: usize = 32; - -/// Returns the digest of `input` using the best available implementation. 
-pub fn hash(input: &[u8]) -> Vec { - DynamicImpl::best().hash(input) -} - -/// Hash function returning a fixed-size array (to save on allocations). -/// -/// Uses the best available implementation based on CPU features. -pub fn hash_fixed(input: &[u8]) -> [u8; HASH_LEN] { - DynamicImpl::best().hash_fixed(input) -} - -/// Compute the hash of two slices concatenated. -pub fn hash32_concat(h1: &[u8], h2: &[u8]) -> [u8; 32] { - let mut ctxt = DynamicContext::new(); - ctxt.update(h1); - ctxt.update(h2); - ctxt.finalize() -} - -/// Context trait for abstracting over implementation contexts. -pub trait Sha256Context { - fn new() -> Self; - - fn update(&mut self, bytes: &[u8]); - - fn finalize(self) -> [u8; HASH_LEN]; -} - -/// Top-level trait implemented by both `sha2` and `ring` implementations. -pub trait Sha256 { - type Context: Sha256Context; - - fn hash(&self, input: &[u8]) -> Vec; - - fn hash_fixed(&self, input: &[u8]) -> [u8; HASH_LEN]; -} - -/// Implementation of SHA256 using the `sha2` crate (fastest on CPUs with SHA extensions). -struct Sha2CrateImpl; - -impl Sha256Context for sha2::Sha256 { - fn new() -> Self { - sha2::Digest::new() - } - - fn update(&mut self, bytes: &[u8]) { - sha2::Digest::update(self, bytes) - } - - fn finalize(self) -> [u8; HASH_LEN] { - sha2::Digest::finalize(self).into() - } -} - -impl Sha256 for Sha2CrateImpl { - type Context = sha2::Sha256; - - fn hash(&self, input: &[u8]) -> Vec { - Self::Context::digest(input).into_iter().collect() - } - - fn hash_fixed(&self, input: &[u8]) -> [u8; HASH_LEN] { - Self::Context::digest(input).into() - } -} - -/// Implementation of SHA256 using the `ring` crate (fastest on CPUs without SHA extensions). -pub struct RingImpl; - -impl Sha256Context for ring::digest::Context { - fn new() -> Self { - Self::new(&ring::digest::SHA256) - } - - fn update(&mut self, bytes: &[u8]) { - self.update(bytes) - } - - fn finalize(self) -> [u8; HASH_LEN] { - let mut output = [0; HASH_LEN]; - output.copy_from_slice(self.finish().as_ref()); - output - } -} - -impl Sha256 for RingImpl { - type Context = ring::digest::Context; - - fn hash(&self, input: &[u8]) -> Vec { - ring::digest::digest(&ring::digest::SHA256, input) - .as_ref() - .into() - } - - fn hash_fixed(&self, input: &[u8]) -> [u8; HASH_LEN] { - let mut ctxt = Self::Context::new(&ring::digest::SHA256); - ctxt.update(input); - ctxt.finalize() - } -} - -/// Default dynamic implementation that switches between available implementations. -pub enum DynamicImpl { - Sha2, - Ring, -} - -// Runtime latch for detecting the availability of SHA extensions on x86_64. -// -// Inspired by the runtime switch within the `sha2` crate itself. -#[cfg(all(feature = "detect-cpufeatures", target_arch = "x86_64"))] -cpufeatures::new!(x86_sha_extensions, "sha", "sse2", "ssse3", "sse4.1"); - -#[inline(always)] -pub fn have_sha_extensions() -> bool { - #[cfg(all(feature = "detect-cpufeatures", target_arch = "x86_64"))] - return x86_sha_extensions::get(); - - #[cfg(not(all(feature = "detect-cpufeatures", target_arch = "x86_64")))] - return false; -} - -impl DynamicImpl { - /// Choose the best available implementation based on the currently executing CPU. 
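This whole file is deleted in favour of the external `ethereum_hashing` crate (see the `1.0.0-beta.2` dependency bumps above). A short usage sketch of the surface defined here, assuming the external crate keeps `hash`, `hash_fixed`, `hash32_concat` and `ZERO_HASHES` unchanged, with the default `zero_hash_cache` feature enabled:

    use ethereum_hashing::{hash, hash32_concat, hash_fixed, ZERO_HASHES};

    fn main() {
        // Both entry points pick the sha2/ring backend at runtime, depending on
        // whether the CPU exposes SHA extensions (see `best()` below).
        let dynamic: Vec<u8> = hash(b"hello world");
        let fixed: [u8; 32] = hash_fixed(b"hello world");
        assert_eq!(dynamic, fixed.to_vec());

        // `hash32_concat(h1, h2)` is SHA-256(h1 || h2); it is what populates the
        // ZERO_HASHES cache of empty-subtree roots.
        let parent = hash32_concat(&ZERO_HASHES[0], &ZERO_HASHES[0]);
        assert_eq!(parent.to_vec(), ZERO_HASHES[1]);
    }
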
- #[inline(always)] - pub fn best() -> Self { - if have_sha_extensions() { - Self::Sha2 - } else { - Self::Ring - } - } -} - -impl Sha256 for DynamicImpl { - type Context = DynamicContext; - - #[inline(always)] - fn hash(&self, input: &[u8]) -> Vec { - match self { - Self::Sha2 => Sha2CrateImpl.hash(input), - Self::Ring => RingImpl.hash(input), - } - } - - #[inline(always)] - fn hash_fixed(&self, input: &[u8]) -> [u8; HASH_LEN] { - match self { - Self::Sha2 => Sha2CrateImpl.hash_fixed(input), - Self::Ring => RingImpl.hash_fixed(input), - } - } -} - -/// Context encapsulating all implemenation contexts. -/// -/// This enum ends up being 8 bytes larger than the largest inner context. -pub enum DynamicContext { - Sha2(sha2::Sha256), - Ring(ring::digest::Context), -} - -impl Sha256Context for DynamicContext { - fn new() -> Self { - match DynamicImpl::best() { - DynamicImpl::Sha2 => Self::Sha2(Sha256Context::new()), - DynamicImpl::Ring => Self::Ring(Sha256Context::new()), - } - } - - fn update(&mut self, bytes: &[u8]) { - match self { - Self::Sha2(ctxt) => Sha256Context::update(ctxt, bytes), - Self::Ring(ctxt) => Sha256Context::update(ctxt, bytes), - } - } - - fn finalize(self) -> [u8; HASH_LEN] { - match self { - Self::Sha2(ctxt) => Sha256Context::finalize(ctxt), - Self::Ring(ctxt) => Sha256Context::finalize(ctxt), - } - } -} - -/// The max index that can be used with `ZERO_HASHES`. -#[cfg(feature = "zero_hash_cache")] -pub const ZERO_HASHES_MAX_INDEX: usize = 48; - -#[cfg(feature = "zero_hash_cache")] -lazy_static! { - /// Cached zero hashes where `ZERO_HASHES[i]` is the hash of a Merkle tree with 2^i zero leaves. - pub static ref ZERO_HASHES: Vec> = { - let mut hashes = vec![vec![0; 32]; ZERO_HASHES_MAX_INDEX + 1]; - - for i in 0..ZERO_HASHES_MAX_INDEX { - hashes[i + 1] = hash32_concat(&hashes[i], &hashes[i])[..].to_vec(); - } - - hashes - }; -} - -#[cfg(test)] -mod tests { - use super::*; - use rustc_hex::FromHex; - - #[cfg(target_arch = "wasm32")] - use wasm_bindgen_test::*; - - #[cfg_attr(not(target_arch = "wasm32"), test)] - #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)] - fn test_hashing() { - let input: Vec = b"hello world".as_ref().into(); - - let output = hash(input.as_ref()); - let expected_hex = "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9"; - let expected: Vec = expected_hex.from_hex().unwrap(); - assert_eq!(expected, output); - } - - #[cfg(feature = "zero_hash_cache")] - mod zero_hash { - use super::*; - - #[test] - fn zero_hash_zero() { - assert_eq!(ZERO_HASHES[0], vec![0; 32]); - } - } -} diff --git a/crypto/kzg/Cargo.toml b/crypto/kzg/Cargo.toml index 5c4a499e9..0fed5b419 100644 --- a/crypto/kzg/Cargo.toml +++ b/crypto/kzg/Cargo.toml @@ -7,15 +7,15 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" -tree_hash = "0.4.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" +tree_hash = "0.5.0" derivative = "2.1.1" serde = "1.0.116" serde_derive = "1.0.116" -eth2_serde_utils = "0.1.1" +ethereum_serde_utils = "0.5.0" hex = "0.4.2" -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" c-kzg = {git = "https://github.com/ethereum/c-kzg-4844", rev = "fd24cf8e1e2f09a96b4e62a595b4e49f046ce6cf" } arbitrary = { version = "1.0", features = ["derive"], optional = true } diff --git a/crypto/kzg/src/kzg_commitment.rs b/crypto/kzg/src/kzg_commitment.rs index 267f70462..561bb10cd 100644 --- a/crypto/kzg/src/kzg_commitment.rs +++ 
b/crypto/kzg/src/kzg_commitment.rs @@ -1,6 +1,6 @@ use c_kzg::{Bytes48, BYTES_PER_COMMITMENT}; use derivative::Derivative; -use eth2_hashing::hash_fixed; +use ethereum_hashing::hash_fixed; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use ssz_derive::{Decode, Encode}; @@ -32,7 +32,7 @@ impl From for Bytes48 { impl Display for KzgCommitment { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "{}", eth2_serde_utils::hex::encode(self.0)) + write!(f, "{}", serde_utils::hex::encode(self.0)) } } @@ -121,7 +121,7 @@ impl FromStr for KzgCommitment { impl Debug for KzgCommitment { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", eth2_serde_utils::hex::encode(self.0)) + write!(f, "{}", serde_utils::hex::encode(self.0)) } } diff --git a/crypto/kzg/src/kzg_proof.rs b/crypto/kzg/src/kzg_proof.rs index 7c6eb59ab..76035a4a8 100644 --- a/crypto/kzg/src/kzg_proof.rs +++ b/crypto/kzg/src/kzg_proof.rs @@ -27,7 +27,7 @@ impl KzgProof { impl fmt::Display for KzgProof { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", eth2_serde_utils::hex::encode(self.0)) + write!(f, "{}", serde_utils::hex::encode(self.0)) } } @@ -128,7 +128,7 @@ impl FromStr for KzgProof { impl Debug for KzgProof { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", eth2_serde_utils::hex::encode(self.0)) + write!(f, "{}", serde_utils::hex::encode(self.0)) } } diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 1da3b0c48..077d81eaf 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "4.1.0" +version = "4.2.0" authors = ["Paul Hauner "] edition = "2021" @@ -22,13 +22,13 @@ env_logger = "0.9.0" types = { path = "../consensus/types" } state_processing = { path = "../consensus/state_processing" } int_to_bytes = { path = "../consensus/int_to_bytes" } -eth2_ssz = "0.4.1" -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" +ethereum_ssz = "0.5.0" environment = { path = "../lighthouse/environment" } eth2_network_config = { path = "../common/eth2_network_config" } genesis = { path = "../beacon_node/genesis" } deposit_contract = { path = "../common/deposit_contract" } -tree_hash = "0.4.1" +tree_hash = "0.5.0" clap_utils = { path = "../common/clap_utils" } lighthouse_network = { path = "../beacon_node/lighthouse_network" } validator_dir = { path = "../common/validator_dir", features = ["insecure_keys"] } @@ -36,7 +36,6 @@ lighthouse_version = { path = "../common/lighthouse_version" } directory = { path = "../common/directory" } account_utils = { path = "../common/account_utils" } eth2_wallet = { path = "../crypto/eth2_wallet" } -web3 = { version = "0.18.0", default-features = false, features = ["http-tls", "signing", "ws-tls-tokio"] } eth1_test_rig = { path = "../testing/eth1_test_rig" } sensitive_url = { path = "../common/sensitive_url" } eth2 = { path = "../common/eth2" } @@ -44,6 +43,7 @@ snap = "1.0.1" beacon_chain = { path = "../beacon_node/beacon_chain" } store = { path = "../beacon_node/store" } malloc_utils = { path = "../common/malloc_utils" } +rayon = "1.7.0" [package.metadata.cargo-udeps.ignore] normal = ["malloc_utils"] diff --git a/lcli/src/deploy_deposit_contract.rs b/lcli/src/deploy_deposit_contract.rs index 1128eb52a..8919ebdaf 100644 --- a/lcli/src/deploy_deposit_contract.rs +++ b/lcli/src/deploy_deposit_contract.rs @@ -2,19 +2,18 @@ use clap::ArgMatches; use environment::Environment; use types::EthSpec; 
-use web3::{transports::Http, Web3}; +use eth1_test_rig::{Http, Provider}; pub fn run<T: EthSpec>(env: Environment<T>, matches: &ArgMatches<'_>) -> Result<(), String> { let eth1_http: String = clap_utils::parse_required(matches, "eth1-http")?; let confirmations: usize = clap_utils::parse_required(matches, "confirmations")?; let validator_count: Option<usize> = clap_utils::parse_optional(matches, "validator-count")?; - let transport = - Http::new(&eth1_http).map_err(|e| format!("Unable to connect to eth1 HTTP: {:?}", e))?; - let web3 = Web3::new(transport); + let client = Provider::<Http>::try_from(&eth1_http) + .map_err(|e| format!("Unable to connect to eth1 HTTP: {:?}", e))?; env.runtime().block_on(async { - let contract = eth1_test_rig::DepositContract::deploy(web3, confirmations, None) + let contract = eth1_test_rig::DepositContract::deploy(client, confirmations, None) .await .map_err(|e| format!("Failed to deploy deposit contract: {:?}", e))?; diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 2d6507d42..18695d277 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -10,6 +10,7 @@ mod generate_bootnode_enr; mod indexed_attestations; mod insecure_validators; mod interop_genesis; +mod mnemonic_validators; mod new_testnet; mod parse_ssz; mod replace_state_pubkeys; @@ -449,6 +450,22 @@ fn main() { "If present, a interop-style genesis.ssz file will be generated.", ), ) + .arg( + Arg::with_name("derived-genesis-state") + .long("derived-genesis-state") + .takes_value(false) + .help( + "If present, a genesis.ssz file will be generated with keys generated from a given mnemonic.", + ), + ) + .arg( + Arg::with_name("mnemonic-phrase") + .long("mnemonic-phrase") + .value_name("MNEMONIC_PHRASE") + .takes_value(true) + .requires("derived-genesis-state") + .help("The mnemonic with which we generate the validator keys for a derived genesis state"), + ) .arg( Arg::with_name("min-genesis-time") .long("min-genesis-time") @@ -722,6 +739,7 @@ fn main() { .long("count") .value_name("COUNT") .takes_value(true) + .required(true) .help("Produces validators in the range of 0..count."), ) .arg( @@ -729,6 +747,7 @@ .long("base-dir") .value_name("BASE_DIR") .takes_value(true) + .required(true) .help("The base directory where validator keypairs and secrets are stored"), ) .arg( @@ -739,6 +758,43 @@ .help("The number of nodes to divide the validator keys to"), ) ) + .subcommand( + SubCommand::with_name("mnemonic-validators") + .about("Produces validator directories by deriving the keys from \ + a mnemonic.
For testing purposes only, DO NOT USE IN \ + PRODUCTION!") + .arg( + Arg::with_name("count") + .long("count") + .value_name("COUNT") + .takes_value(true) + .required(true) + .help("Produces validators in the range of 0..count."), + ) + .arg( + Arg::with_name("base-dir") + .long("base-dir") + .value_name("BASE_DIR") + .takes_value(true) + .required(true) + .help("The base directory where validator keypairs and secrets are stored"), + ) + .arg( + Arg::with_name("node-count") + .long("node-count") + .value_name("NODE_COUNT") + .takes_value(true) + .help("The number of nodes to divide the validator keys to"), + ) + .arg( + Arg::with_name("mnemonic-phrase") + .long("mnemonic-phrase") + .value_name("MNEMONIC_PHRASE") + .takes_value(true) + .required(true) + .help("The mnemonic with which we generate the validator keys"), + ) + ) .subcommand( SubCommand::with_name("indexed-attestations") .about("Convert attestations to indexed form, using the committees from a state.") @@ -834,6 +890,7 @@ fn run( max_log_number: 0, compression: false, is_restricted: true, + sse_logging: false, // No SSE Logging in LCLI }) .map_err(|e| format!("should start logger: {:?}", e))? .build() @@ -880,6 +937,8 @@ fn run( .map_err(|e| format!("Failed to run generate-bootnode-enr command: {}", e)), ("insecure-validators", Some(matches)) => insecure_validators::run(matches) .map_err(|e| format!("Failed to run insecure-validators command: {}", e)), + ("mnemonic-validators", Some(matches)) => mnemonic_validators::run(matches) + .map_err(|e| format!("Failed to run mnemonic-validators command: {}", e)), ("indexed-attestations", Some(matches)) => indexed_attestations::run::(matches) .map_err(|e| format!("Failed to run indexed-attestations command: {}", e)), ("block-root", Some(matches)) => block_root::run::(env, matches) diff --git a/lcli/src/mnemonic_validators.rs b/lcli/src/mnemonic_validators.rs new file mode 100644 index 000000000..2653aee14 --- /dev/null +++ b/lcli/src/mnemonic_validators.rs @@ -0,0 +1,104 @@ +use account_utils::eth2_keystore::{keypair_from_secret, Keystore, KeystoreBuilder}; +use account_utils::random_password; +use clap::ArgMatches; +use eth2_wallet::bip39::Seed; +use eth2_wallet::bip39::{Language, Mnemonic}; +use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType}; +use rayon::prelude::*; +use std::fs; +use std::path::PathBuf; +use validator_dir::Builder as ValidatorBuilder; + +/// Generates validator directories with keys derived from the given mnemonic. 
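The module below boils down to one derivation flow per validator index. A condensed, hedged sketch of that flow for a single index, using only the APIs imported above (the sample mnemonic in `vars.env` elsewhere in this diff would be a typical input; the function name here is illustrative):

```rust
// Hedged sketch of the per-index derivation performed by `generate_validator_dirs` below;
// all APIs are the ones imported in this module.
use account_utils::eth2_keystore::{keypair_from_secret, Keystore, KeystoreBuilder};
use account_utils::random_password;
use eth2_wallet::bip39::{Language, Mnemonic, Seed};
use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType};

fn derive_voting_keystore(phrase: &str, index: u32) -> Result<Keystore, String> {
    let mnemonic = Mnemonic::from_phrase(phrase, Language::English)
        .map_err(|e| format!("Invalid mnemonic: {:?}", e))?;
    // Empty BIP-39 passphrase, matching the module below.
    let seed = Seed::new(&mnemonic, "");
    // Recover the voting secret for this index (EIP-2334 style derivation path).
    let (secret, path) =
        recover_validator_secret_from_mnemonic(seed.as_bytes(), index, KeyType::Voting)
            .map_err(|e| format!("Unable to recover validator key: {:?}", e))?;
    let keypair = keypair_from_secret(secret.as_bytes())
        .map_err(|e| format!("Unable to build keypair: {:?}", e))?;
    let password = random_password();
    KeystoreBuilder::new(&keypair, password.as_bytes(), format!("{}", path))
        .map_err(|e| format!("Unable to create keystore builder: {:?}", e))?
        .build()
        .map_err(|e| format!("Unable to build keystore: {:?}", e))
}
```

The module below then hands the resulting keystore, together with the same random password, to `ValidatorBuilder`, so the keypair and its secret end up in the `validators` and `secrets` directories respectively.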
+pub fn generate_validator_dirs( + indices: &[usize], + mnemonic_phrase: &str, + validators_dir: PathBuf, + secrets_dir: PathBuf, +) -> Result<(), String> { + if !validators_dir.exists() { + fs::create_dir_all(&validators_dir) + .map_err(|e| format!("Unable to create validators dir: {:?}", e))?; + } + + if !secrets_dir.exists() { + fs::create_dir_all(&secrets_dir) + .map_err(|e| format!("Unable to create secrets dir: {:?}", e))?; + } + let mnemonic = Mnemonic::from_phrase(mnemonic_phrase, Language::English).map_err(|e| { + format!( + "Unable to derive mnemonic from string {:?}: {:?}", + mnemonic_phrase, e + ) + })?; + + let seed = Seed::new(&mnemonic, ""); + + let _: Vec<_> = indices + .par_iter() + .map(|index| { + let voting_password = random_password(); + + let derive = |key_type: KeyType, password: &[u8]| -> Result { + let (secret, path) = recover_validator_secret_from_mnemonic( + seed.as_bytes(), + *index as u32, + key_type, + ) + .map_err(|e| format!("Unable to recover validator keys: {:?}", e))?; + + let keypair = keypair_from_secret(secret.as_bytes()) + .map_err(|e| format!("Unable build keystore: {:?}", e))?; + + KeystoreBuilder::new(&keypair, password, format!("{}", path)) + .map_err(|e| format!("Unable build keystore: {:?}", e))? + .build() + .map_err(|e| format!("Unable build keystore: {:?}", e)) + }; + + let voting_keystore = derive(KeyType::Voting, voting_password.as_bytes()).unwrap(); + + println!("Validator {}", index + 1); + + ValidatorBuilder::new(validators_dir.clone()) + .password_dir(secrets_dir.clone()) + .store_withdrawal_keystore(false) + .voting_keystore(voting_keystore, voting_password.as_bytes()) + .build() + .map_err(|e| format!("Unable to build validator: {:?}", e)) + .unwrap() + }) + .collect(); + + Ok(()) +} + +pub fn run(matches: &ArgMatches) -> Result<(), String> { + let validator_count: usize = clap_utils::parse_required(matches, "count")?; + let base_dir: PathBuf = clap_utils::parse_required(matches, "base-dir")?; + let node_count: Option = clap_utils::parse_optional(matches, "node-count")?; + let mnemonic_phrase: String = clap_utils::parse_required(matches, "mnemonic-phrase")?; + if let Some(node_count) = node_count { + let validators_per_node = validator_count / node_count; + let validator_range = (0..validator_count).collect::>(); + let indices_range = validator_range + .chunks(validators_per_node) + .collect::>(); + + for (i, indices) in indices_range.iter().enumerate() { + let validators_dir = base_dir.join(format!("node_{}", i + 1)).join("validators"); + let secrets_dir = base_dir.join(format!("node_{}", i + 1)).join("secrets"); + generate_validator_dirs(indices, &mnemonic_phrase, validators_dir, secrets_dir)?; + } + } else { + let validators_dir = base_dir.join("validators"); + let secrets_dir = base_dir.join("secrets"); + generate_validator_dirs( + (0..validator_count).collect::>().as_slice(), + &mnemonic_phrase, + validators_dir, + secrets_dir, + )?; + } + Ok(()) +} diff --git a/lcli/src/new_testnet.rs b/lcli/src/new_testnet.rs index 8aef32f1a..dfb9a283a 100644 --- a/lcli/src/new_testnet.rs +++ b/lcli/src/new_testnet.rs @@ -1,7 +1,11 @@ +use account_utils::eth2_keystore::keypair_from_secret; use clap::ArgMatches; use clap_utils::{parse_optional, parse_required, parse_ssz_optional}; -use eth2_hashing::hash; use eth2_network_config::{Eth2NetworkConfig, TRUSTED_SETUP}; +use eth2_wallet::bip39::Seed; +use eth2_wallet::bip39::{Language, Mnemonic}; +use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType}; +use ethereum_hashing::hash; use 
kzg::TrustedSetup; use ssz::Decode; use ssz::Encode; @@ -16,8 +20,8 @@ use types::ExecutionBlockHash; use types::{ test_utils::generate_deterministic_keypairs, Address, BeaconState, ChainSpec, Config, Epoch, Eth1Data, EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, - ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderMerge, ForkName, Hash256, Keypair, - PublicKey, Validator, + ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderMerge, ExecutionPayloadHeaderRefMut, + ForkName, Hash256, Keypair, PublicKey, Validator, }; pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> { @@ -90,61 +94,59 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul spec.terminal_total_difficulty = ttd; } - let genesis_state_bytes = if matches.is_present("interop-genesis-state") { - let execution_payload_header: Option> = - parse_optional(matches, "execution-payload-header")? - .map(|filename: String| { - let mut bytes = vec![]; - let mut file = File::open(filename.as_str()) - .map_err(|e| format!("Unable to open {}: {}", filename, e))?; - file.read_to_end(&mut bytes) - .map_err(|e| format!("Unable to read {}: {}", filename, e))?; - let fork_name = spec.fork_name_at_epoch(Epoch::new(0)); - match fork_name { - ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid( - "genesis fork must be post-merge".to_string(), - )), - ForkName::Merge => { - ExecutionPayloadHeaderMerge::::from_ssz_bytes(bytes.as_slice()) - .map(ExecutionPayloadHeader::Merge) - } - ForkName::Capella => { - ExecutionPayloadHeaderCapella::::from_ssz_bytes(bytes.as_slice()) - .map(ExecutionPayloadHeader::Capella) - } - ForkName::Deneb => { - ExecutionPayloadHeaderDeneb::::from_ssz_bytes(bytes.as_slice()) - .map(ExecutionPayloadHeader::Deneb) - } + let validator_count = parse_required(matches, "validator-count")?; + let execution_payload_header: Option> = + parse_optional(matches, "execution-payload-header")? 
+ .map(|filename: String| { + let mut bytes = vec![]; + let mut file = File::open(filename.as_str()) + .map_err(|e| format!("Unable to open {}: {}", filename, e))?; + file.read_to_end(&mut bytes) + .map_err(|e| format!("Unable to read {}: {}", filename, e))?; + let fork_name = spec.fork_name_at_epoch(Epoch::new(0)); + match fork_name { + ForkName::Base | ForkName::Altair => Err(ssz::DecodeError::BytesInvalid( + "genesis fork must be post-merge".to_string(), + )), + ForkName::Merge => { + ExecutionPayloadHeaderMerge::::from_ssz_bytes(bytes.as_slice()) + .map(ExecutionPayloadHeader::Merge) } - .map_err(|e| format!("SSZ decode failed: {:?}", e)) - }) - .transpose()?; + ForkName::Capella => { + ExecutionPayloadHeaderCapella::::from_ssz_bytes(bytes.as_slice()) + .map(ExecutionPayloadHeader::Capella) + } + ForkName::Deneb => { + ExecutionPayloadHeaderDeneb::::from_ssz_bytes(bytes.as_slice()) + .map(ExecutionPayloadHeader::Deneb) + } + } + .map_err(|e| format!("SSZ decode failed: {:?}", e)) + }) + .transpose()?; - let (eth1_block_hash, genesis_time) = if let Some(payload) = - execution_payload_header.as_ref() - { - let eth1_block_hash = - parse_optional(matches, "eth1-block-hash")?.unwrap_or_else(|| payload.block_hash()); - let genesis_time = - parse_optional(matches, "genesis-time")?.unwrap_or_else(|| payload.timestamp()); - (eth1_block_hash, genesis_time) - } else { - let eth1_block_hash = parse_required(matches, "eth1-block-hash").map_err(|_| { - "One of `--execution-payload-header` or `--eth1-block-hash` must be set".to_string() - })?; - let genesis_time = parse_optional(matches, "genesis-time")?.unwrap_or( - SystemTime::now() - .duration_since(UNIX_EPOCH) - .map_err(|e| format!("Unable to get time: {:?}", e))? - .as_secs(), - ); - (eth1_block_hash, genesis_time) - }; - - let validator_count = parse_required(matches, "validator-count")?; + let (eth1_block_hash, genesis_time) = if let Some(payload) = execution_payload_header.as_ref() { + let eth1_block_hash = + parse_optional(matches, "eth1-block-hash")?.unwrap_or_else(|| payload.block_hash()); + let genesis_time = + parse_optional(matches, "genesis-time")?.unwrap_or_else(|| payload.timestamp()); + (eth1_block_hash, genesis_time) + } else { + let eth1_block_hash = parse_required(matches, "eth1-block-hash").map_err(|_| { + "One of `--execution-payload-header` or `--eth1-block-hash` must be set".to_string() + })?; + let genesis_time = parse_optional(matches, "genesis-time")?.unwrap_or( + SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|e| format!("Unable to get time: {:?}", e))? 
+ .as_secs(), + ); + (eth1_block_hash, genesis_time) + }; + let genesis_state_bytes = if matches.is_present("interop-genesis-state") { let keypairs = generate_deterministic_keypairs(validator_count); + let keypairs: Vec<_> = keypairs.into_iter().map(|kp| (kp.clone(), kp)).collect(); let genesis_state = initialize_state_with_validators::( &keypairs, @@ -154,6 +156,41 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul &spec, )?; + Some(genesis_state.as_ssz_bytes()) + } else if matches.is_present("derived-genesis-state") { + let mnemonic_phrase: String = clap_utils::parse_required(matches, "mnemonic-phrase")?; + let mnemonic = Mnemonic::from_phrase(&mnemonic_phrase, Language::English).map_err(|e| { + format!( + "Unable to derive mnemonic from string {:?}: {:?}", + mnemonic_phrase, e + ) + })?; + let seed = Seed::new(&mnemonic, ""); + let keypairs = (0..validator_count as u32) + .map(|index| { + let (secret, _) = + recover_validator_secret_from_mnemonic(seed.as_bytes(), index, KeyType::Voting) + .unwrap(); + + let voting_keypair = keypair_from_secret(secret.as_bytes()).unwrap(); + + let (secret, _) = recover_validator_secret_from_mnemonic( + seed.as_bytes(), + index, + KeyType::Withdrawal, + ) + .unwrap(); + let withdrawal_keypair = keypair_from_secret(secret.as_bytes()).unwrap(); + (voting_keypair, withdrawal_keypair) + }) + .collect::>(); + let genesis_state = initialize_state_with_validators::( + &keypairs, + genesis_time, + eth1_block_hash.into_root(), + execution_payload_header, + &spec, + )?; Some(genesis_state.as_ssz_bytes()) } else { None @@ -182,25 +219,30 @@ pub fn run(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul testnet.write_to_file(testnet_dir_path, overwrite_files) } +/// Returns a `BeaconState` with the given validator keypairs embedded into the +/// genesis state. This allows us to start testnets without having to deposit validators +/// manually. +/// +/// The optional `execution_payload_header` allows us to start a network from the bellatrix +/// fork without the need to transition to altair and bellatrix. +/// +/// We need to ensure that `eth1_block_hash` is equal to the genesis block hash that is +/// generated from the execution side `genesis.json`. 
fn initialize_state_with_validators( - keypairs: &[Keypair], + keypairs: &[(Keypair, Keypair)], // Voting and Withdrawal keypairs genesis_time: u64, eth1_block_hash: Hash256, execution_payload_header: Option>, spec: &ChainSpec, ) -> Result, String> { - let default_header = ExecutionPayloadHeaderMerge { - gas_limit: 10, - base_fee_per_gas: 10.into(), - timestamp: genesis_time, - block_hash: ExecutionBlockHash(eth1_block_hash), - prev_randao: Hash256::random(), - parent_hash: ExecutionBlockHash::zero(), - transactions_root: Hash256::random(), - ..ExecutionPayloadHeaderMerge::default() - }; - let execution_payload_header = - execution_payload_header.or(Some(ExecutionPayloadHeader::Merge(default_header))); + // If no header is provided, then start from a Bellatrix state by default + let default_header: ExecutionPayloadHeader = + ExecutionPayloadHeader::Merge(ExecutionPayloadHeaderMerge { + block_hash: ExecutionBlockHash::from_root(eth1_block_hash), + parent_hash: ExecutionBlockHash::zero(), + ..ExecutionPayloadHeaderMerge::default() + }); + let execution_payload_header = execution_payload_header.unwrap_or(default_header); // Empty eth1 data let eth1_data = Eth1Data { block_hash: eth1_block_hash, @@ -224,8 +266,8 @@ fn initialize_state_with_validators( let amount = spec.max_effective_balance; // Create a new validator. let validator = Validator { - pubkey: keypair.pk.clone().into(), - withdrawal_credentials: withdrawal_credentials(&keypair.pk), + pubkey: keypair.0.pk.clone().into(), + withdrawal_credentials: withdrawal_credentials(&keypair.1.pk), activation_eligibility_epoch: spec.far_future_epoch, activation_epoch: spec.far_future_epoch, exit_epoch: spec.far_future_epoch, @@ -264,12 +306,24 @@ fn initialize_state_with_validators( // Override latest execution payload header. // See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/merge/beacon-chain.md#testing - if let Some(ExecutionPayloadHeader::Merge(ref header)) = execution_payload_header { - *state - .latest_execution_payload_header_merge_mut() - .map_err(|_| { - "State must contain bellatrix execution payload header".to_string() - })? = header.clone(); + // Currently, we only support starting from a bellatrix state + match state + .latest_execution_payload_header_mut() + .map_err(|e| format!("Failed to get execution payload header: {:?}", e))? 
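The `withdrawal_credentials` helper called above is not part of this hunk. A hypothetical sketch of what such a helper computes, following the consensus-spec BLS (0x00) withdrawal credential format: hash the SSZ-encoded pubkey and overwrite the first byte with the withdrawal prefix.

```rust
// Hypothetical sketch only; the real helper lives elsewhere in lcli and may differ,
// e.g. by reading the prefix byte from the ChainSpec rather than hard-coding 0x00.
use ethereum_hashing::hash;
use ssz::Encode;
use types::{Hash256, PublicKey};

fn withdrawal_credentials(pubkey: &PublicKey) -> Hash256 {
    let mut credentials = hash(&pubkey.as_ssz_bytes());
    credentials[0] = 0x00; // BLS_WITHDRAWAL_PREFIX
    Hash256::from_slice(&credentials)
}
```

With the tuple change above, the credentials are now derived from the withdrawal keypair (`keypair.1`) while the validator's `pubkey` comes from the voting keypair (`keypair.0`).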
+ { + ExecutionPayloadHeaderRefMut::Merge(header_mut) => { + if let ExecutionPayloadHeader::Merge(eph) = execution_payload_header { + *header_mut = eph; + } else { + return Err("Execution payload header must be a bellatrix header".to_string()); + } + } + ExecutionPayloadHeaderRefMut::Capella(_) => { + return Err("Cannot start genesis from a capella state".to_string()) + } + ExecutionPayloadHeaderRefMut::Deneb(_) => { + return Err("Cannot start genesis from a deneb state".to_string()) + } } } diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index 44a1772cc..cf971c69f 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -74,7 +74,7 @@ use eth2::{ use ssz::Encode; use state_processing::{ block_signature_verifier::BlockSignatureVerifier, per_block_processing, per_slot_processing, - BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, + BlockSignatureStrategy, ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, }; use std::borrow::Cow; use std::fs::File; @@ -381,6 +381,7 @@ fn do_transition( &mut pre_state, &block, BlockSignatureStrategy::NoVerification, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, spec, diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 9ba9517c3..b698a2e4d 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "4.1.0" +version = "4.2.0" authors = ["Sigma Prime "] edition = "2021" autotests = false @@ -33,7 +33,7 @@ slog = { version = "2.5.2", features = ["max_level_trace"] } sloggers = { version = "2.1.1", features = ["json"] } types = { "path" = "../consensus/types" } bls = { path = "../crypto/bls" } -eth2_hashing = "0.3.0" +ethereum_hashing = "1.0.0-beta.2" clap = "2.33.3" env_logger = "0.9.0" environment = { path = "./environment" } diff --git a/lighthouse/build.rs b/lighthouse/build.rs new file mode 100644 index 000000000..3d8a25ec8 --- /dev/null +++ b/lighthouse/build.rs @@ -0,0 +1,2 @@ +// This is a stub for determining the build profile, see `build_profile_name`. +fn main() {} diff --git a/lighthouse/environment/src/lib.rs b/lighthouse/environment/src/lib.rs index 8ef67e82d..53915b52d 100644 --- a/lighthouse/environment/src/lib.rs +++ b/lighthouse/environment/src/lib.rs @@ -12,6 +12,7 @@ use eth2_network_config::Eth2NetworkConfig; use futures::channel::mpsc::{channel, Receiver, Sender}; use futures::{future, StreamExt}; +use logging::SSELoggingComponents; use serde_derive::{Deserialize, Serialize}; use slog::{error, info, o, warn, Drain, Duplicate, Level, Logger}; use sloggers::{file::FileLoggerBuilder, types::Format, types::Severity, Build}; @@ -36,6 +37,7 @@ use {futures::channel::oneshot, std::cell::RefCell}; pub use task_executor::test_utils::null_logger; const LOG_CHANNEL_SIZE: usize = 2048; +const SSE_LOG_CHANNEL_SIZE: usize = 2048; /// The maximum time in seconds the client will wait for all internal tasks to shutdown. const MAXIMUM_SHUTDOWN_TIME: u64 = 15; @@ -57,6 +59,7 @@ pub struct LoggerConfig { pub max_log_number: usize, pub compression: bool, pub is_restricted: bool, + pub sse_logging: bool, } impl Default for LoggerConfig { fn default() -> Self { @@ -72,14 +75,54 @@ impl Default for LoggerConfig { max_log_number: 5, compression: false, is_restricted: true, + sse_logging: false, } } } +/// An execution context that can be used by a service. 
+/// +/// Distinct from an `Environment` because a `Context` is not able to give a mutable reference to a +/// `Runtime`, instead it only has access to a `Runtime`. +#[derive(Clone)] +pub struct RuntimeContext { + pub executor: TaskExecutor, + pub eth_spec_instance: E, + pub eth2_config: Eth2Config, + pub eth2_network_config: Option>, + pub sse_logging_components: Option, +} + +impl RuntimeContext { + /// Returns a sub-context of this context. + /// + /// The generated service will have the `service_name` in all it's logs. + pub fn service_context(&self, service_name: String) -> Self { + Self { + executor: self.executor.clone_with_name(service_name), + eth_spec_instance: self.eth_spec_instance.clone(), + eth2_config: self.eth2_config.clone(), + eth2_network_config: self.eth2_network_config.clone(), + sse_logging_components: self.sse_logging_components.clone(), + } + } + + /// Returns the `eth2_config` for this service. + pub fn eth2_config(&self) -> &Eth2Config { + &self.eth2_config + } + + /// Returns a reference to the logger for this service. + pub fn log(&self) -> &slog::Logger { + self.executor.log() + } +} + /// Builds an `Environment`. pub struct EnvironmentBuilder { runtime: Option>, log: Option, + sse_logging_components: Option, eth_spec_instance: E, eth2_config: Eth2Config, eth2_network_config: Option, @@ -91,6 +134,7 @@ impl EnvironmentBuilder { Self { runtime: None, log: None, + sse_logging_components: None, eth_spec_instance: MinimalEthSpec, eth2_config: Eth2Config::minimal(), eth2_network_config: None, @@ -104,6 +148,7 @@ impl EnvironmentBuilder { Self { runtime: None, log: None, + sse_logging_components: None, eth_spec_instance: MainnetEthSpec, eth2_config: Eth2Config::mainnet(), eth2_network_config: None, @@ -117,6 +162,7 @@ impl EnvironmentBuilder { Self { runtime: None, log: None, + sse_logging_components: None, eth_spec_instance: GnosisEthSpec, eth2_config: Eth2Config::gnosis(), eth2_network_config: None, @@ -265,7 +311,7 @@ impl EnvironmentBuilder { .build() .map_err(|e| format!("Unable to build file logger: {}", e))?; - let log = Logger::root(Duplicate::new(stdout_logger, file_logger).fuse(), o!()); + let mut log = Logger::root(Duplicate::new(stdout_logger, file_logger).fuse(), o!()); info!( log, @@ -273,6 +319,14 @@ impl EnvironmentBuilder { "path" => format!("{:?}", path) ); + // If the http API is enabled, we may need to send logs to be consumed by subscribers. + if config.sse_logging { + let sse_logger = SSELoggingComponents::new(SSE_LOG_CHANNEL_SIZE); + self.sse_logging_components = Some(sse_logger.clone()); + + log = Logger::root(Duplicate::new(log, sse_logger).fuse(), o!()); + } + self.log = Some(log); Ok(self) @@ -315,6 +369,7 @@ impl EnvironmentBuilder { signal: Some(signal), exit, log: self.log.ok_or("Cannot build environment without log")?, + sse_logging_components: self.sse_logging_components, eth_spec_instance: self.eth_spec_instance, eth2_config: self.eth2_config, eth2_network_config: self.eth2_network_config.map(Arc::new), @@ -322,42 +377,6 @@ impl EnvironmentBuilder { } } -/// An execution context that can be used by a service. -/// -/// Distinct from an `Environment` because a `Context` is not able to give a mutable reference to a -/// `Runtime`, instead it only has access to a `Runtime`. -#[derive(Clone)] -pub struct RuntimeContext { - pub executor: TaskExecutor, - pub eth_spec_instance: E, - pub eth2_config: Eth2Config, - pub eth2_network_config: Option>, -} - -impl RuntimeContext { - /// Returns a sub-context of this context. 
- /// - /// The generated service will have the `service_name` in all it's logs. - pub fn service_context(&self, service_name: String) -> Self { - Self { - executor: self.executor.clone_with_name(service_name), - eth_spec_instance: self.eth_spec_instance.clone(), - eth2_config: self.eth2_config.clone(), - eth2_network_config: self.eth2_network_config.clone(), - } - } - - /// Returns the `eth2_config` for this service. - pub fn eth2_config(&self) -> &Eth2Config { - &self.eth2_config - } - - /// Returns a reference to the logger for this service. - pub fn log(&self) -> &slog::Logger { - self.executor.log() - } -} - /// An environment where Lighthouse services can run. Used to start a production beacon node or /// validator client, or to run tests that involve logging and async task execution. pub struct Environment { @@ -369,6 +388,7 @@ pub struct Environment { signal: Option, exit: exit_future::Exit, log: Logger, + sse_logging_components: Option, eth_spec_instance: E, pub eth2_config: Eth2Config, pub eth2_network_config: Option>, @@ -395,6 +415,7 @@ impl Environment { eth_spec_instance: self.eth_spec_instance.clone(), eth2_config: self.eth2_config.clone(), eth2_network_config: self.eth2_network_config.clone(), + sse_logging_components: self.sse_logging_components.clone(), } } @@ -410,6 +431,7 @@ impl Environment { eth_spec_instance: self.eth_spec_instance.clone(), eth2_config: self.eth2_config.clone(), eth2_network_config: self.eth2_network_config.clone(), + sse_logging_components: self.sse_logging_components.clone(), } } diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index b05e78fe5..b814639ce 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -6,8 +6,8 @@ use clap_utils::{flags::DISABLE_MALLOC_TUNING_FLAG, get_eth2_network_config}; use directory::{parse_path_or_default, DEFAULT_BEACON_NODE_DIR, DEFAULT_VALIDATOR_DIR}; use env_logger::{Builder, Env}; use environment::{EnvironmentBuilder, LoggerConfig}; -use eth2_hashing::have_sha_extensions; use eth2_network_config::{Eth2NetworkConfig, DEFAULT_HARDCODED_NETWORK, HARDCODED_NET_NAMES}; +use ethereum_hashing::have_sha_extensions; use lighthouse_version::VERSION; use malloc_utils::configure_memory_allocator; use slog::{crit, info, warn}; @@ -37,6 +37,17 @@ fn allocator_name() -> &'static str { } } +fn build_profile_name() -> String { + // Nice hack from https://stackoverflow.com/questions/73595435/how-to-get-profile-from-cargo-toml-in-build-rs-or-at-runtime + // The profile name is always the 3rd last part of the path (with 1 based indexing). + // e.g. /code/core/target/cli/build/my-build-info-9f91ba6f99d7a061/out + std::env!("OUT_DIR") + .split(std::path::MAIN_SEPARATOR) + .nth_back(3) + .unwrap_or_else(|| "unknown") + .to_string() +} + fn main() { // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. if std::env::var("RUST_BACKTRACE").is_err() { @@ -58,11 +69,13 @@ fn main() { BLS library: {}\n\ SHA256 hardware acceleration: {}\n\ Allocator: {}\n\ + Profile: {}\n\ Specs: mainnet (true), minimal ({}), gnosis ({})", VERSION.replace("Lighthouse/", ""), bls_library_name(), have_sha_extensions(), allocator_name(), + build_profile_name(), cfg!(feature = "spec-minimal"), cfg!(feature = "gnosis"), ).as_str() @@ -152,7 +165,8 @@ fn main() { .help( "If present, log files will be generated as world-readable meaning they can be read by \ any user on the machine. 
Note that logs can often contain sensitive information \ - about your validator and so this flag should be used with caution.") + about your validator and so this flag should be used with caution. For Windows users, \ + the log file permissions will be inherited from the parent folder.") .global(true), ) .arg( @@ -469,6 +483,16 @@ fn run( }; } + let sse_logging = { + if let Some(bn_matches) = matches.subcommand_matches("beacon_node") { + bn_matches.is_present("gui") + } else if let Some(vc_matches) = matches.subcommand_matches("validator_client") { + vc_matches.is_present("http") + } else { + false + } + }; + let logger_config = LoggerConfig { path: log_path, debug_level: String::from(debug_level), @@ -481,6 +505,7 @@ fn run( max_log_number: logfile_max_number, compression: logfile_compress, is_restricted: logfile_restricted, + sse_logging, }; let builder = environment_builder.initialize_logger(logger_config.clone())?; diff --git a/lighthouse/tests/account_manager.rs b/lighthouse/tests/account_manager.rs index 696830a0d..63d79fceb 100644 --- a/lighthouse/tests/account_manager.rs +++ b/lighthouse/tests/account_manager.rs @@ -28,10 +28,6 @@ use tempfile::{tempdir, TempDir}; use types::{Keypair, PublicKey}; use validator_dir::ValidatorDir; -// TODO: create tests for the `lighthouse account validator deposit` command. This involves getting -// access to an IPC endpoint during testing or adding support for deposit submission via HTTP and -// using ganache. - /// Returns the `lighthouse account` command. fn account_cmd() -> Command { let lighthouse_bin = env!("CARGO_BIN_EXE_lighthouse"); diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 754a01478..d1d4f3e40 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -345,6 +345,23 @@ fn trusted_peers_flag() { }); } +#[test] +fn genesis_backfill_flag() { + CommandLineTest::new() + .flag("genesis-backfill", None) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.chain.genesis_backfill, true)); +} + +/// The genesis backfill flag should be enabled if historic states flag is set. 
+#[test] +fn genesis_backfill_with_historic_flag() { + CommandLineTest::new() + .flag("reconstruct-historic-states", None) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.chain.genesis_backfill, true)); +} + #[test] fn always_prefer_builder_payload_flag() { CommandLineTest::new() @@ -1669,6 +1686,25 @@ fn block_cache_size_flag() { .with_config(|config| assert_eq!(config.store.block_cache_size, 4_usize)); } #[test] +fn historic_state_cache_size_flag() { + CommandLineTest::new() + .flag("historic-state-cache-size", Some("4")) + .run_with_zero_port() + .with_config(|config| assert_eq!(config.store.historic_state_cache_size, 4_usize)); +} +#[test] +fn historic_state_cache_size_default() { + use beacon_node::beacon_chain::store::config::DEFAULT_HISTORIC_STATE_CACHE_SIZE; + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.store.historic_state_cache_size, + DEFAULT_HISTORIC_STATE_CACHE_SIZE + ); + }); +} +#[test] fn auto_compact_db_flag() { CommandLineTest::new() .flag("auto-compact-db", Some("false")) @@ -2202,3 +2238,24 @@ fn disable_optimistic_finalized_sync() { assert!(!config.chain.optimistic_finalized_sync); }); } + +#[test] +fn invalid_gossip_verified_blocks_path_default() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert_eq!(config.network.invalid_block_storage, None)); +} + +#[test] +fn invalid_gossip_verified_blocks_path() { + let path = "/home/karlm/naughty-blocks"; + CommandLineTest::new() + .flag("invalid-gossip-verified-blocks-path", Some(path)) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.network.invalid_block_storage, + Some(PathBuf::from(path)) + ) + }); +} diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 45cd989a4..8c1f0477c 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -103,10 +103,8 @@ fn beacon_nodes_flag() { #[test] fn allow_unsynced_flag() { - CommandLineTest::new() - .flag("allow-unsynced", None) - .run() - .with_config(|config| assert!(config.allow_unsynced_beacon_node)); + // No-op, but doesn't crash. + CommandLineTest::new().flag("allow-unsynced", None).run(); } #[test] diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md index c4050ac93..f261ea67f 100644 --- a/scripts/local_testnet/README.md +++ b/scripts/local_testnet/README.md @@ -1,11 +1,16 @@ # Simple Local Testnet -These scripts allow for running a small local testnet with multiple beacon nodes and validator clients. +These scripts allow for running a small local testnet with multiple beacon nodes and validator clients and a geth execution client. This setup can be useful for testing and development. ## Requirements -The scripts require `lcli` and `lighthouse` to be installed on `PATH`. From the +The scripts require `lcli`, `lighthouse`, `geth`, `bootnode` to be installed on `PATH`. + + +MacOS users need to install GNU `sed` and GNU `grep`, and add them both to `PATH` as well. + +From the root of this repository, run: ```bash @@ -17,17 +22,23 @@ make install-lcli Modify `vars.env` as desired. -Start a local eth1 ganache server plus boot node along with `BN_COUNT` -number of beacon nodes and `VC_COUNT` validator clients. +The testnet starts with a post-merge genesis state. +Start a consensus layer and execution layer boot node along with `BN_COUNT` +number of beacon nodes each connected to a geth execution client and `VC_COUNT` validator clients. 
+ +The `start_local_testnet.sh` script takes four options `-v VC_COUNT`, `-d DEBUG_LEVEL`, `-p` to enable builder proposals and `-h` for help. It also takes a mandatory `GENESIS_FILE` for initialising geth's state. +A sample `genesis.json` is provided in this directory. + +The `ETH1_BLOCK_HASH` environment variable is set to the block_hash of the genesis execution layer block which depends on the contents of `genesis.json`. Users of these scripts need to ensure that the `ETH1_BLOCK_HASH` variable is updated if genesis file is modified. -The `start_local_testnet.sh` script takes four options `-v VC_COUNT`, `-d DEBUG_LEVEL`, `-p` to enable builder proposals and `-h` for help. The options may be in any order or absent in which case they take the default value specified. - VC_COUNT: the number of validator clients to create, default: `BN_COUNT` - DEBUG_LEVEL: one of { error, warn, info, debug, trace }, default: `info` + ```bash -./start_local_testnet.sh +./start_local_testnet.sh genesis.json ``` ## Stopping the testnet @@ -41,31 +52,38 @@ This is not necessary before `start_local_testnet.sh` as it invokes `stop_local_ These scripts are used by ./start_local_testnet.sh and may be used to manually -Start a local eth1 ganache server -```bash -./ganache_test_node.sh -``` - -Assuming you are happy with the configuration in `vars.env`, deploy the deposit contract, make deposits, -create the testnet directory, genesis state and validator keys with: +Assuming you are happy with the configuration in `vars.env`, +create the testnet directory, genesis state with embedded validators and validator keys with: ```bash ./setup.sh ``` -Generate bootnode enr and start a discv5 bootnode so that multiple beacon nodes can find each other +Note: The generated genesis validators are embedded into the genesis state as genesis validators and hence do not require manual deposits to activate. + +Generate bootnode enr and start an EL and CL bootnode so that multiple nodes can find each other ```bash ./bootnode.sh +./el_bootnode.sh +``` + +Start a geth node: +```bash +./geth.sh +``` +e.g. +```bash +./geth.sh $HOME/.lighthouse/local-testnet/geth_1 5000 6000 7000 genesis.json ``` Start a beacon node: ```bash -./beacon_node.sh +./beacon_node.sh ``` e.g. 
```bash -./beacon_node.sh $HOME/.lighthouse/local-testnet/node_1 9000 8000 +./beacon_node.sh $HOME/.lighthouse/local-testnet/node_1 9000 8000 http://localhost:6000 ~/.lighthouse/local-testnet/geth_1/geth/jwtsecret ``` In a new terminal, start the validator client which will attach to the first diff --git a/scripts/local_testnet/anvil_test_node.sh b/scripts/local_testnet/anvil_test_node.sh new file mode 100755 index 000000000..41be91756 --- /dev/null +++ b/scripts/local_testnet/anvil_test_node.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +set -Eeuo pipefail + +source ./vars.env + +exec anvil \ + --balance 1000000000 \ + --gas-limit 1000000000 \ + --accounts 10 \ + --mnemonic "$ETH1_NETWORK_MNEMONIC" \ + --block-time $SECONDS_PER_ETH1_BLOCK \ + --port 8545 \ + --chain-id "$CHAIN_ID" diff --git a/scripts/local_testnet/el_bootnode.sh b/scripts/local_testnet/el_bootnode.sh index ee0b43b82..d73a463f6 100755 --- a/scripts/local_testnet/el_bootnode.sh +++ b/scripts/local_testnet/el_bootnode.sh @@ -1,5 +1,3 @@ priv_key="02fd74636e96a8ffac8e7b01b0de8dea94d6bcf4989513b38cf59eb32163ff91" - source ./vars.env - $EL_BOOTNODE_BINARY --nodekeyhex $priv_key \ No newline at end of file diff --git a/scripts/local_testnet/ganache_test_node.sh b/scripts/local_testnet/ganache_test_node.sh deleted file mode 100755 index a489c3322..000000000 --- a/scripts/local_testnet/ganache_test_node.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash - -set -Eeuo pipefail - -source ./vars.env - -exec ganache \ - --defaultBalanceEther 1000000000 \ - --gasLimit 1000000000 \ - --accounts 10 \ - --mnemonic "$ETH1_NETWORK_MNEMONIC" \ - --port 8545 \ - --blockTime $SECONDS_PER_ETH1_BLOCK \ - --chain.chainId "$CHAIN_ID" diff --git a/scripts/local_testnet/genesis.json b/scripts/local_testnet/genesis.json index 751176048..64f4e80b1 100644 --- a/scripts/local_testnet/genesis.json +++ b/scripts/local_testnet/genesis.json @@ -20,6 +20,12 @@ "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { "balance": "0x6d6172697573766477000000" }, + "0x7b8C3a386C0eea54693fFB0DA17373ffC9228139": { + "balance": "10000000000000000000000000" + }, + "0xdA2DD7560DB7e212B945fC72cEB54B7D8C886D77": { + "balance": "10000000000000000000000000" + }, "0x0000000000000000000000000000000000000000": { "balance": "1" }, diff --git a/scripts/local_testnet/setup.sh b/scripts/local_testnet/setup.sh index d405698f3..60a5c98bd 100755 --- a/scripts/local_testnet/setup.sh +++ b/scripts/local_testnet/setup.sh @@ -1,7 +1,6 @@ #!/usr/bin/env bash # -# Deploys the deposit contract and makes deposits for $VALIDATOR_COUNT insecure deterministic validators. # Produces a testnet specification and a genesis state where the genesis time # is now + $GENESIS_DELAY. # @@ -51,12 +50,3 @@ lcli \ --node-count $BN_COUNT echo Validators generated with keystore passwords at $DATADIR. 
- -GENESIS_TIME=$(lcli pretty-ssz state_merge ~/.lighthouse/local-testnet/testnet/genesis.ssz | jq | grep -Po 'genesis_time": "\K.*\d') -CAPELLA_TIME=$((GENESIS_TIME + (CAPELLA_FORK_EPOCH * 32 * SECONDS_PER_SLOT))) -DENEB_TIME=$((GENESIS_TIME + (DENEB_FORK_EPOCH * 32 * SECONDS_PER_SLOT))) - -CURR_DIR=`pwd` - -sed -i 's/"shanghaiTime".*$/"shanghaiTime": '"$CAPELLA_TIME"',/g' $CURR_DIR/genesis.json -sed -i 's/"shardingForkTime".*$/"shardingForkTime": '"$DENEB_TIME"',/g' $CURR_DIR/genesis.json diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index ce36966e2..b4631c907 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -102,6 +102,17 @@ execute_command_add_PID() { echo "executing: ./setup.sh >> $LOG_DIR/setup.log" ./setup.sh >> $LOG_DIR/setup.log 2>&1 +# Update future hardforks time in the EL genesis file based on the CL genesis time +GENESIS_TIME=$(lcli pretty-ssz state_merge $TESTNET_DIR/genesis.ssz | jq | grep -Po 'genesis_time": "\K.*\d') +echo $GENESIS_TIME +CAPELLA_TIME=$((GENESIS_TIME + (CAPELLA_FORK_EPOCH * 32 * SECONDS_PER_SLOT))) +echo $CAPELLA_TIME +sed -i 's/"shanghaiTime".*$/"shanghaiTime": '"$CAPELLA_TIME"',/g' $genesis_file +DENEB_TIME=$((GENESIS_TIME + (DENEB_FORK_EPOCH * 32 * SECONDS_PER_SLOT))) +echo $DENEB_TIME +sed -i 's/"shardingForkTime".*$/"shardingForkTime": '"$DENEB_TIME"',/g' $genesis_file +cat $genesis_file + # Delay to let boot_enr.yaml to be created execute_command_add_PID bootnode.log ./bootnode.sh sleeping 3 @@ -109,6 +120,9 @@ sleeping 3 execute_command_add_PID el_bootnode.log ./el_bootnode.sh sleeping 3 +execute_command_add_PID el_bootnode.log ./el_bootnode.sh +sleeping 1 + # Start beacon nodes BN_udp_tcp_base=9000 BN_http_port_base=8000 @@ -126,8 +140,8 @@ done sleeping 20 # Reset the `genesis.json` config file fork times. 
-sed -i 's/"shanghaiTime".*$/"shanghaiTime": 0,/g' genesis.json -sed -i 's/"shardingForkTime".*$/"shardingForkTime": 0,/g' genesis.json +sed -i 's/"shanghaiTime".*$/"shanghaiTime": 0,/g' $genesis_file +sed -i 's/"shardingForkTime".*$/"shardingForkTime": 0,/g' $genesis_file for (( bn=1; bn<=$BN_COUNT; bn++ )); do secret=$DATADIR/geth_datadir$bn/geth/jwtsecret diff --git a/scripts/local_testnet/vars.env b/scripts/local_testnet/vars.env index 475bedea3..9daf7d236 100644 --- a/scripts/local_testnet/vars.env +++ b/scripts/local_testnet/vars.env @@ -1,3 +1,4 @@ +# Path to the geth binary GETH_BINARY=geth EL_BOOTNODE_BINARY=bootnode @@ -7,6 +8,9 @@ DATADIR=~/.lighthouse/local-testnet # Directory for the eth2 config TESTNET_DIR=$DATADIR/testnet +# Mnemonic for generating validator keys +MNEMONIC_PHRASE="vast thought differ pull jewel broom cook wrist tribe word before omit" + EL_BOOTNODE_ENODE="enode://51ea9bb34d31efc3491a842ed13b8cab70e753af108526b57916d716978b380ed713f4336a80cdb85ec2a115d5a8c0ae9f3247bed3c84d3cb025c6bab311062c@127.0.0.1:0?discport=30301" # Hardcoded deposit contract @@ -15,7 +19,7 @@ DEPOSIT_CONTRACT_ADDRESS=4242424242424242424242424242424242424242 GENESIS_FORK_VERSION=0x42424242 # Block hash generated from genesis.json in directory -ETH1_BLOCK_HASH=4c2221e15760fd06c8c7a5202258c67e3d9e4aedf6db3a886ce9dc36938ad8d0 +ETH1_BLOCK_HASH=4b0e17cf5c04616d64526d292b80a1f2720cf2195d990006e4ea6950c5bbcb9f VALIDATOR_COUNT=80 GENESIS_VALIDATOR_COUNT=80 diff --git a/scripts/tests/doppelganger_protection.sh b/scripts/tests/doppelganger_protection.sh index 722d85a28..174f62df6 100755 --- a/scripts/tests/doppelganger_protection.sh +++ b/scripts/tests/doppelganger_protection.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Requires `lighthouse`, ``lcli`, `geth`, `curl`, `jq` +# Requires `lighthouse`, `lcli`, `geth`, `bootnode`, `curl`, `jq` BEHAVIOR=$1 @@ -49,12 +49,14 @@ exit_if_fails ../local_testnet/geth.sh $HOME/.lighthouse/local-testnet/geth_data sleep 20 -# Reset the `genesis.json` config file fork times. 
-sed -i 's/"shanghaiTime".*$/"shanghaiTime": 0,/g' genesis.json -sed -i 's/"shardingForkTime".*$/"shardingForkTime": 0,/g' genesis.json +echo "Starting local execution nodes" -exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_1 9000 8000 http://localhost:5000 $HOME/.lighthouse/local-testnet/geth_datadir1/geth/jwtsecret &> /dev/null & -exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_2 9100 8100 http://localhost:5100 $HOME/.lighthouse/local-testnet/geth_datadir2/geth/jwtsecret &> beacon1.log & +exit_if_fails ../local_testnet/geth.sh $HOME/.lighthouse/local-testnet/geth_datadir1 7000 6000 5000 $genesis_file &> geth.log & +exit_if_fails ../local_testnet/geth.sh $HOME/.lighthouse/local-testnet/geth_datadir2 7100 6100 5100 $genesis_file &> /dev/null & +exit_if_fails ../local_testnet/geth.sh $HOME/.lighthouse/local-testnet/geth_datadir3 7200 6200 5200 $genesis_file &> /dev/null & + +exit_if_fails ../local_testnet/beacon_node.sh -d debug $HOME/.lighthouse/local-testnet/node_1 9000 8000 http://localhost:5000 $HOME/.lighthouse/local-testnet/geth_datadir1/geth/jwtsecret &> beacon1.log & +exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_2 9100 8100 http://localhost:5100 $HOME/.lighthouse/local-testnet/geth_datadir2/geth/jwtsecret &> /dev/null & exit_if_fails ../local_testnet/beacon_node.sh $HOME/.lighthouse/local-testnet/node_3 9200 8200 http://localhost:5200 $HOME/.lighthouse/local-testnet/geth_datadir3/geth/jwtsecret &> /dev/null & echo "Starting local validator clients" @@ -156,4 +158,4 @@ if [[ "$BEHAVIOR" == "success" ]]; then fi fi -exit 0 +exit 0 \ No newline at end of file diff --git a/scripts/tests/genesis.json b/scripts/tests/genesis.json index 751176048..306f28dab 100644 --- a/scripts/tests/genesis.json +++ b/scripts/tests/genesis.json @@ -11,7 +11,7 @@ "istanbulBlock": 0, "berlinBlock": 0, "londonBlock": 0, - "mergeNetsplitBlock": 0, + "mergeForkBlock": 0, "shanghaiTime": 0, "shardingForkTime": 0, "terminalTotalDifficulty": 0 diff --git a/scripts/tests/vars.env b/scripts/tests/vars.env index 6c39aeb99..14707283c 100644 --- a/scripts/tests/vars.env +++ b/scripts/tests/vars.env @@ -16,7 +16,7 @@ DEPOSIT_CONTRACT_ADDRESS=4242424242424242424242424242424242424242 GENESIS_FORK_VERSION=0x42424242 # Block hash generated from genesis.json in directory -ETH1_BLOCK_HASH=4c2221e15760fd06c8c7a5202258c67e3d9e4aedf6db3a886ce9dc36938ad8d0 +ETH1_BLOCK_HASH=add7865f8346031c72287e2edc4a4952fd34fc0a8642403e8c1bce67f215c92b VALIDATOR_COUNT=80 GENESIS_VALIDATOR_COUNT=80 @@ -56,7 +56,7 @@ SECONDS_PER_SLOT=3 SECONDS_PER_ETH1_BLOCK=1 # Proposer score boost percentage -PROPOSER_SCORE_BOOST=40 +PROPOSER_SCORE_BOOST=70 # Enable doppelganger detection -VC_ARGS=" --enable-doppelganger-protection " +VC_ARGS=" --enable-doppelganger-protection " \ No newline at end of file diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index c5ce8793a..7f2ac456b 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -12,8 +12,8 @@ lmdb = ["lmdb-rkv", "lmdb-rkv-sys"] [dependencies] bincode = "1.3.1" byteorder = "1.3.4" -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } lazy_static = "1.4.0" lighthouse_metrics = { path = "../common/lighthouse_metrics" } @@ -26,8 +26,8 @@ serde = "1.0" serde_derive = "1.0" slog = "2.5.2" sloggers = { version = "2.1.1", features = ["json"] } -tree_hash = "0.4.1" 
-tree_hash_derive = "0.4.0" +tree_hash = "0.5.0" +tree_hash_derive = "0.5.0" types = { path = "../consensus/types" } strum = { version = "0.24.1", features = ["derive"] } diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index be34446d9..c10ea7a47 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -25,11 +25,11 @@ serde_json = "1.0.58" serde_repr = "0.1.6" serde_yaml = "0.8.13" eth2_network_config = { path = "../../common/eth2_network_config" } -eth2_serde_utils = { path = "../../consensus/serde_utils" } -eth2_ssz = "0.4.1" -eth2_ssz_derive = "0.3.1" -tree_hash = "0.4.1" -tree_hash_derive = "0.4.0" +ethereum_serde_utils = "0.5.0" +ethereum_ssz = "0.5.0" +ethereum_ssz_derive = "0.5.0" +tree_hash = "0.5.0" +tree_hash_derive = "0.5.0" cached_tree_hash = { path = "../../consensus/cached_tree_hash" } state_processing = { path = "../../consensus/state_processing" } swap_or_not_shuffle = { path = "../../consensus/swap_or_not_shuffle" } diff --git a/testing/ef_tests/src/cases/sanity_blocks.rs b/testing/ef_tests/src/cases/sanity_blocks.rs index 8a7578972..e51fed190 100644 --- a/testing/ef_tests/src/cases/sanity_blocks.rs +++ b/testing/ef_tests/src/cases/sanity_blocks.rs @@ -5,7 +5,7 @@ use crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::{ per_block_processing, per_slot_processing, BlockProcessingError, BlockSignatureStrategy, - ConsensusContext, VerifyBlockRoot, + ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, }; use types::{BeaconState, EthSpec, ForkName, RelativeEpoch, SignedBeaconBlock}; @@ -96,6 +96,7 @@ impl Case for SanityBlocks { &mut indiv_state, signed_block, BlockSignatureStrategy::VerifyIndividual, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, spec, @@ -106,6 +107,7 @@ impl Case for SanityBlocks { &mut bulk_state, signed_block, BlockSignatureStrategy::VerifyBulk, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, spec, diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs index 4974eb881..5c6da900e 100644 --- a/testing/ef_tests/src/cases/transition.rs +++ b/testing/ef_tests/src/cases/transition.rs @@ -4,7 +4,7 @@ use crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::{ per_block_processing, state_advance::complete_state_advance, BlockSignatureStrategy, - ConsensusContext, VerifyBlockRoot, + ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, }; use std::str::FromStr; use types::{BeaconState, Epoch, ForkName, SignedBeaconBlock}; @@ -107,6 +107,7 @@ impl Case for TransitionTest { &mut state, block, BlockSignatureStrategy::VerifyBulk, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, spec, diff --git a/testing/eth1_test_rig/Cargo.toml b/testing/eth1_test_rig/Cargo.toml index 08766f14f..5c78c0902 100644 --- a/testing/eth1_test_rig/Cargo.toml +++ b/testing/eth1_test_rig/Cargo.toml @@ -6,8 +6,11 @@ edition = "2021" [dependencies] tokio = { version = "1.14.0", features = ["time"] } -web3 = { version = "0.18.0", default-features = false, features = ["http-tls", "signing", "ws-tls-tokio"] } +ethers-core = "1.0.2" +ethers-providers = "1.0.2" +ethers-contract = "1.0.2" types = { path = "../../consensus/types"} serde_json = "1.0.58" deposit_contract = { path = "../../common/deposit_contract"} unused_port = { path = "../../common/unused_port" } +hex = "0.4.2" diff --git 
a/testing/eth1_test_rig/src/anvil.rs b/testing/eth1_test_rig/src/anvil.rs new file mode 100644 index 000000000..1b86711c2 --- /dev/null +++ b/testing/eth1_test_rig/src/anvil.rs @@ -0,0 +1,101 @@ +use ethers_core::utils::{Anvil, AnvilInstance}; +use ethers_providers::{Http, Middleware, Provider}; +use serde_json::json; +use std::convert::TryFrom; +use unused_port::unused_tcp4_port; + +/// Provides a dedicated `anvil` instance. +/// +/// Requires that `anvil` is installed and available on `PATH`. +pub struct AnvilCliInstance { + pub port: u16, + pub anvil: AnvilInstance, + pub client: Provider, + chain_id: u64, +} + +impl AnvilCliInstance { + fn new_from_child(anvil_instance: Anvil, chain_id: u64, port: u16) -> Result { + let client = Provider::::try_from(&endpoint(port)) + .map_err(|e| format!("Failed to start HTTP transport connected to anvil: {:?}", e))?; + Ok(Self { + port, + anvil: anvil_instance.spawn(), + client, + chain_id, + }) + } + pub fn new(chain_id: u64) -> Result { + let port = unused_tcp4_port()?; + + let anvil = Anvil::new() + .port(port) + .mnemonic("vast thought differ pull jewel broom cook wrist tribe word before omit") + .arg("--balance") + .arg("1000000000") + .arg("--gas-limit") + .arg("1000000000") + .arg("--accounts") + .arg("10") + .arg("--chain-id") + .arg(format!("{}", chain_id)); + + Self::new_from_child(anvil, chain_id, port) + } + + pub fn fork(&self) -> Result { + let port = unused_tcp4_port()?; + + let anvil = Anvil::new() + .port(port) + .arg("--chain-id") + .arg(format!("{}", self.chain_id())) + .fork(self.endpoint()); + + Self::new_from_child(anvil, self.chain_id, port) + } + + /// Returns the endpoint that this instance is listening on. + pub fn endpoint(&self) -> String { + endpoint(self.port) + } + + /// Returns the chain id of the anvil instance + pub fn chain_id(&self) -> u64 { + self.chain_id + } + + /// Increase the timestamp on future blocks by `increase_by` seconds. + pub async fn increase_time(&self, increase_by: u64) -> Result<(), String> { + self.client + .request("evm_increaseTime", vec![json!(increase_by)]) + .await + .map(|_json_value: u64| ()) + .map_err(|e| format!("Failed to increase time on EVM (is this anvil?): {:?}", e)) + } + + /// Returns the current block number, as u64 + pub async fn block_number(&self) -> Result { + self.client + .get_block_number() + .await + .map(|v| v.as_u64()) + .map_err(|e| format!("Failed to get block number: {:?}", e)) + } + + /// Mines a single block. + pub async fn evm_mine(&self) -> Result<(), String> { + self.client + .request("evm_mine", ()) + .await + .map(|_: String| ()) + .map_err(|_| { + "utils should mine new block with evm_mine (only works with anvil/ganache!)" + .to_string() + }) + } +} + +fn endpoint(port: u16) -> String { + format!("http://127.0.0.1:{}", port) +} diff --git a/testing/eth1_test_rig/src/ganache.rs b/testing/eth1_test_rig/src/ganache.rs deleted file mode 100644 index 898a089ba..000000000 --- a/testing/eth1_test_rig/src/ganache.rs +++ /dev/null @@ -1,193 +0,0 @@ -use serde_json::json; -use std::io::prelude::*; -use std::io::BufReader; -use std::process::{Child, Command, Stdio}; -use std::time::{Duration, Instant}; -use unused_port::unused_tcp4_port; -use web3::{transports::Http, Transport, Web3}; - -/// How long we will wait for ganache to indicate that it is ready. -const GANACHE_STARTUP_TIMEOUT_MILLIS: u64 = 10_000; - -/// Provides a dedicated `ganachi-cli` instance with a connected `Web3` instance. -/// -/// Requires that `ganachi-cli` is installed and available on `PATH`. 
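Since the new wrapper is only consumed inside the crate (`mod anvil;` is private, as the `lib.rs` hunk later in this diff shows), here is a hedged sketch of how it might be exercised from crate-internal code. The chain id, the time increment and the surrounding async runtime are illustrative assumptions, not taken from the diff:

```rust
// Hedged, crate-internal sketch: spawn an anvil instance, mine a block and advance time.
use crate::anvil::AnvilCliInstance;

async fn demo_anvil() -> Result<(), String> {
    let anvil = AnvilCliInstance::new(1337)?;
    assert_eq!(anvil.chain_id(), 1337);
    println!("anvil listening on {}", anvil.endpoint());

    let before = anvil.block_number().await?;
    anvil.evm_mine().await?; // mines exactly one block
    anvil.increase_time(12).await?; // future blocks are timestamped 12s later
    assert_eq!(anvil.block_number().await?, before + 1);
    Ok(())
}
```

The higher-level `AnvilEth1Instance` introduced later in this diff wraps the same type and additionally deploys the deposit contract.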
-pub struct GanacheInstance { - pub port: u16, - child: Child, - pub web3: Web3, - chain_id: u64, -} - -impl GanacheInstance { - fn new_from_child(mut child: Child, port: u16, chain_id: u64) -> Result { - let stdout = child - .stdout - .ok_or("Unable to get stdout for ganache child process")?; - - let start = Instant::now(); - let mut reader = BufReader::new(stdout); - loop { - if start + Duration::from_millis(GANACHE_STARTUP_TIMEOUT_MILLIS) <= Instant::now() { - break Err( - "Timed out waiting for ganache to start. Is ganache installed?".to_string(), - ); - } - - let mut line = String::new(); - if let Err(e) = reader.read_line(&mut line) { - break Err(format!("Failed to read line from ganache process: {:?}", e)); - } else if line.starts_with("RPC Listening on") { - break Ok(()); - } else { - continue; - } - }?; - - let transport = Http::new(&endpoint(port)).map_err(|e| { - format!( - "Failed to start HTTP transport connected to ganache: {:?}", - e - ) - })?; - let web3 = Web3::new(transport); - - child.stdout = Some(reader.into_inner()); - - Ok(Self { - port, - child, - web3, - chain_id, - }) - } - - /// Start a new `ganache` process, waiting until it indicates that it is ready to accept - /// RPC connections. - pub fn new(chain_id: u64) -> Result { - let port = unused_tcp4_port()?; - let binary = match cfg!(windows) { - true => "ganache.cmd", - false => "ganache", - }; - let child = Command::new(binary) - .stdout(Stdio::piped()) - .arg("--defaultBalanceEther") - .arg("1000000000") - .arg("--gasLimit") - .arg("1000000000") - .arg("--accounts") - .arg("10") - .arg("--port") - .arg(format!("{}", port)) - .arg("--mnemonic") - .arg("\"vast thought differ pull jewel broom cook wrist tribe word before omit\"") - .arg("--chain.chainId") - .arg(format!("{}", chain_id)) - .spawn() - .map_err(|e| { - format!( - "Failed to start {}. \ - Is it installed and available on $PATH? Error: {:?}", - binary, e - ) - })?; - - Self::new_from_child(child, port, chain_id) - } - - pub fn fork(&self) -> Result { - let port = unused_tcp4_port()?; - let binary = match cfg!(windows) { - true => "ganache.cmd", - false => "ganache", - }; - let child = Command::new(binary) - .stdout(Stdio::piped()) - .arg("--fork") - .arg(self.endpoint()) - .arg("--port") - .arg(format!("{}", port)) - .arg("--chain.chainId") - .arg(format!("{}", self.chain_id)) - .spawn() - .map_err(|e| { - format!( - "Failed to start {}. \ - Is it installed and available on $PATH? Error: {:?}", - binary, e - ) - })?; - - Self::new_from_child(child, port, self.chain_id) - } - - /// Returns the endpoint that this instance is listening on. - pub fn endpoint(&self) -> String { - endpoint(self.port) - } - - /// Returns the chain id of the ganache instance - pub fn chain_id(&self) -> u64 { - self.chain_id - } - - /// Increase the timestamp on future blocks by `increase_by` seconds. - pub async fn increase_time(&self, increase_by: u64) -> Result<(), String> { - self.web3 - .transport() - .execute("evm_increaseTime", vec![json!(increase_by)]) - .await - .map(|_json_value| ()) - .map_err(|e| format!("Failed to increase time on EVM (is this ganache?): {:?}", e)) - } - - /// Returns the current block number, as u64 - pub async fn block_number(&self) -> Result { - self.web3 - .eth() - .block_number() - .await - .map(|v| v.as_u64()) - .map_err(|e| format!("Failed to get block number: {:?}", e)) - } - - /// Mines a single block. 
- pub async fn evm_mine(&self) -> Result<(), String> { - self.web3 - .transport() - .execute("evm_mine", vec![]) - .await - .map(|_| ()) - .map_err(|_| { - "utils should mine new block with evm_mine (only works with ganache!)".to_string() - }) - } -} - -fn endpoint(port: u16) -> String { - format!("http://127.0.0.1:{}", port) -} - -impl Drop for GanacheInstance { - fn drop(&mut self) { - if cfg!(windows) { - // Calling child.kill() in Windows will only kill the process - // that spawned ganache, leaving the actual ganache process - // intact. You have to kill the whole process tree. What's more, - // if you don't spawn ganache with --keepAliveTimeout=0, Windows - // will STILL keep the server running even after you've ended - // the process tree and it's disappeared from the task manager. - // Unbelievable... - Command::new("taskkill") - .arg("/pid") - .arg(self.child.id().to_string()) - .arg("/T") - .arg("/F") - .output() - .expect("failed to execute taskkill"); - } else { - let _ = self.child.kill(); - } - } -} diff --git a/testing/eth1_test_rig/src/lib.rs b/testing/eth1_test_rig/src/lib.rs index 42081a60e..0063975ee 100644 --- a/testing/eth1_test_rig/src/lib.rs +++ b/testing/eth1_test_rig/src/lib.rs @@ -1,77 +1,79 @@ //! Provides utilities for deploying and manipulating the eth2 deposit contract on the eth1 chain. //! -//! Presently used with [`ganache`](https://github.com/trufflesuite/ganache) to simulate +//! Presently used with [`anvil`](https://github.com/foundry-rs/foundry/tree/master/anvil) to simulate //! the deposit contract for testing beacon node eth1 integration. //! //! Not tested to work with actual clients (e.g., geth). It should work fine, however there may be //! some initial issues. -mod ganache; +mod anvil; +use anvil::AnvilCliInstance; use deposit_contract::{ encode_eth1_tx_data, testnet, ABI, BYTECODE, CONTRACT_DEPLOY_GAS, DEPOSIT_GAS, }; -use ganache::GanacheInstance; +use ethers_contract::Contract; +use ethers_core::{ + abi::Abi, + types::{transaction::eip2718::TypedTransaction, Address, Bytes, TransactionRequest, U256}, +}; +pub use ethers_providers::{Http, Middleware, Provider}; use std::time::Duration; use tokio::time::sleep; use types::DepositData; use types::{test_utils::generate_deterministic_keypair, EthSpec, Hash256, Keypair, Signature}; -use web3::contract::{Contract, Options}; -use web3::transports::Http; -use web3::types::{Address, TransactionRequest, U256}; -use web3::Web3; pub const DEPLOYER_ACCOUNTS_INDEX: usize = 0; pub const DEPOSIT_ACCOUNTS_INDEX: usize = 0; -/// Provides a dedicated ganache instance with the deposit contract already deployed. -pub struct GanacheEth1Instance { - pub ganache: GanacheInstance, +/// Provides a dedicated anvil instance with the deposit contract already deployed. 
+pub struct AnvilEth1Instance { + pub anvil: AnvilCliInstance, pub deposit_contract: DepositContract, } -impl GanacheEth1Instance { +impl AnvilEth1Instance { pub async fn new(chain_id: u64) -> Result { - let ganache = GanacheInstance::new(chain_id)?; - DepositContract::deploy(ganache.web3.clone(), 0, None) + let anvil = AnvilCliInstance::new(chain_id)?; + DepositContract::deploy(anvil.client.clone(), 0, None) .await .map(|deposit_contract| Self { - ganache, + anvil, deposit_contract, }) } pub fn endpoint(&self) -> String { - self.ganache.endpoint() + self.anvil.endpoint() } - pub fn web3(&self) -> Web3 { - self.ganache.web3.clone() + pub fn json_rpc_client(&self) -> Provider { + self.anvil.client.clone() } } /// Deploys and provides functions for the eth2 deposit contract, deployed on the eth1 chain. #[derive(Clone, Debug)] pub struct DepositContract { - web3: Web3, - contract: Contract, + client: Provider, + contract: Contract>, } impl DepositContract { pub async fn deploy( - web3: Web3, + client: Provider, confirmations: usize, password: Option, ) -> Result { - Self::deploy_bytecode(web3, confirmations, BYTECODE, ABI, password).await + Self::deploy_bytecode(client, confirmations, BYTECODE, ABI, password).await } pub async fn deploy_testnet( - web3: Web3, + client: Provider, confirmations: usize, password: Option, ) -> Result { Self::deploy_bytecode( - web3, + client, confirmations, testnet::BYTECODE, testnet::ABI, @@ -81,29 +83,25 @@ impl DepositContract { } async fn deploy_bytecode( - web3: Web3, + client: Provider, confirmations: usize, bytecode: &[u8], abi: &[u8], password: Option, ) -> Result { - let address = deploy_deposit_contract( - web3.clone(), - confirmations, - bytecode.to_vec(), - abi.to_vec(), - password, - ) - .await - .map_err(|e| { - format!( - "Failed to deploy contract: {}. Is scripts/ganache_tests_node.sh running?.", - e - ) - })?; - Contract::from_json(web3.clone().eth(), address, ABI) - .map_err(|e| format!("Failed to init contract: {:?}", e)) - .map(move |contract| Self { web3, contract }) + let abi = Abi::load(abi).map_err(|e| format!("Invalid deposit contract abi: {:?}", e))?; + let address = + deploy_deposit_contract(client.clone(), confirmations, bytecode.to_vec(), password) + .await + .map_err(|e| { + format!( + "Failed to deploy contract: {}. Is scripts/anvil_tests_node.sh running?.", + e + ) + })?; + + let contract = Contract::new(address, abi, client.clone()); + Ok(Self { client, contract }) } /// The deposit contract's address in `0x00ab...` format. @@ -178,9 +176,8 @@ impl DepositContract { /// Performs a non-blocking deposit. pub async fn deposit_async(&self, deposit_data: DepositData) -> Result<(), String> { let from = self - .web3 - .eth() - .accounts() + .client + .get_accounts() .await .map_err(|e| format!("Failed to get accounts: {:?}", e)) .and_then(|accounts| { @@ -189,32 +186,33 @@ impl DepositContract { .cloned() .ok_or_else(|| "Insufficient accounts for deposit".to_string()) })?; - let tx_request = TransactionRequest { - from, - to: Some(self.contract.address()), - gas: Some(U256::from(DEPOSIT_GAS)), - gas_price: None, - max_fee_per_gas: None, - max_priority_fee_per_gas: None, - value: Some(from_gwei(deposit_data.amount)), - // Note: the reason we use this `TransactionRequest` instead of just using the - // function in `self.contract` is so that the `eth1_tx_data` function gets used - // during testing. - // - // It's important that `eth1_tx_data` stays correct and does not suffer from - // code-rot. 
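The new rig exposes the anvil process, an ethers `Provider<Http>`, and the already-deployed deposit contract. A hedged usage sketch based only on the API visible in this diff; the chain id and error handling are illustrative:

```rust
// Hedged usage sketch; `Middleware` is re-exported by the crate per this diff.
use eth1_test_rig::{AnvilEth1Instance, Middleware};

async fn demo() -> Result<(), String> {
    // Spawns anvil and deploys the deposit contract in one step.
    let eth1 = AnvilEth1Instance::new(1337).await?;

    // `json_rpc_client()` replaces the old `web3()` accessor and hands back an
    // ethers `Provider<Http>`.
    let client = eth1.json_rpc_client();
    let accounts = client
        .get_accounts()
        .await
        .map_err(|e| format!("Failed to get accounts: {:?}", e))?;

    println!(
        "anvil at {} with {} dev accounts, deposit contract at {}",
        eth1.endpoint(),
        accounts.len(),
        eth1.deposit_contract.address()
    );
    Ok(())
}
```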
- data: encode_eth1_tx_data(&deposit_data).map(Into::into).ok(), - nonce: None, - condition: None, - transaction_type: None, - access_list: None, - }; + // Note: the reason we use this `TransactionRequest` instead of just using the + // function in `self.contract` is so that the `eth1_tx_data` function gets used + // during testing. + // + // It's important that `eth1_tx_data` stays correct and does not suffer from + // code-rot. + let tx_request = TransactionRequest::new() + .from(from) + .to(self.contract.address()) + .gas(DEPOSIT_GAS) + .value(from_gwei(deposit_data.amount)) + .data(Bytes::from(encode_eth1_tx_data(&deposit_data).map_err( + |e| format!("Failed to encode deposit data: {:?}", e), + )?)); - self.web3 - .eth() - .send_transaction(tx_request) + let pending_tx = self + .client + .send_transaction(tx_request, None) .await .map_err(|e| format!("Failed to call deposit fn: {:?}", e))?; + + pending_tx + .interval(Duration::from_millis(10)) + .confirmations(0) + .await + .map_err(|e| format!("Transaction failed to resolve: {:?}", e))? + .ok_or_else(|| "Transaction dropped from mempool".to_string())?; Ok(()) } @@ -245,17 +243,13 @@ fn from_gwei(gwei: u64) -> U256 { /// Deploys the deposit contract to the given web3 instance using the account with index /// `DEPLOYER_ACCOUNTS_INDEX`. async fn deploy_deposit_contract( - web3: Web3, + client: Provider, confirmations: usize, bytecode: Vec, - abi: Vec, password_opt: Option, ) -> Result { - let bytecode = String::from_utf8(bytecode).expect("bytecode must be valid utf8"); - - let from_address = web3 - .eth() - .accounts() + let from_address = client + .get_accounts() .await .map_err(|e| format!("Failed to get accounts: {:?}", e)) .and_then(|accounts| { @@ -266,30 +260,42 @@ async fn deploy_deposit_contract( })?; let deploy_address = if let Some(password) = password_opt { - let result = web3 - .personal() - .unlock_account(from_address, &password, None) + let result = client + .request( + "personal_unlockAccount", + vec![from_address.to_string(), password], + ) .await; + match result { - Ok(true) => return Ok(from_address), + Ok(true) => from_address, Ok(false) => return Err("Eth1 node refused to unlock account".to_string()), Err(e) => return Err(format!("Eth1 unlock request failed: {:?}", e)), - }; + } } else { from_address }; - let pending_contract = Contract::deploy(web3.eth(), &abi) - .map_err(|e| format!("Unable to build contract deployer: {:?}", e))? 
- .confirmations(confirmations) - .options(Options { - gas: Some(U256::from(CONTRACT_DEPLOY_GAS)), - ..Options::default() - }) - .execute(bytecode, (), deploy_address); + let mut bytecode = String::from_utf8(bytecode).unwrap(); + bytecode.retain(|c| c.is_ascii_hexdigit()); + let bytecode = hex::decode(&bytecode[1..]).unwrap(); - pending_contract + let deploy_tx: TypedTransaction = TransactionRequest::new() + .from(deploy_address) + .data(Bytes::from(bytecode)) + .gas(CONTRACT_DEPLOY_GAS) + .into(); + + let pending_tx = client + .send_transaction(deploy_tx, None) .await - .map(|contract| contract.address()) - .map_err(|e| format!("Unable to resolve pending contract: {:?}", e)) + .map_err(|e| format!("Failed to send tx: {:?}", e))?; + + let tx = pending_tx + .interval(Duration::from_millis(500)) + .confirmations(confirmations) + .await + .map_err(|e| format!("Failed to fetch tx receipt: {:?}", e))?; + tx.and_then(|tx| tx.contract_address) + .ok_or_else(|| "Deposit contract not deployed successfully".to_string()) } diff --git a/testing/simulator/src/cli.rs b/testing/simulator/src/cli.rs index f1196502f..5dc2d5ec8 100644 --- a/testing/simulator/src/cli.rs +++ b/testing/simulator/src/cli.rs @@ -10,7 +10,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .about( "Lighthouse Beacon Chain Simulator creates `n` beacon node and validator clients, \ each with `v` validators. A deposit contract is deployed at the start of the \ - simulation using a local `ganache` instance (you must have `ganache` \ + simulation using a local `anvil` instance (you must have `anvil` \ installed and avaliable on your path). All beacon nodes independently listen \ for genesis from the deposit contract, then start operating. \ \ @@ -24,6 +24,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .default_value("4") .help("Number of beacon nodes")) + .arg(Arg::with_name("proposer-nodes") + .short("n") + .long("nodes") + .takes_value(true) + .default_value("2") + .help("Number of proposer-only beacon nodes")) .arg(Arg::with_name("validators_per_node") .short("v") .long("validators_per_node") @@ -57,6 +63,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .default_value("4") .help("Number of beacon nodes")) + .arg(Arg::with_name("proposer-nodes") + .short("n") + .long("nodes") + .takes_value(true) + .default_value("2") + .help("Number of proposer-only beacon nodes")) .arg(Arg::with_name("validators_per_node") .short("v") .long("validators_per_node") diff --git a/testing/simulator/src/eth1_sim.rs b/testing/simulator/src/eth1_sim.rs index 43e8a5cf4..3e764d27d 100644 --- a/testing/simulator/src/eth1_sim.rs +++ b/testing/simulator/src/eth1_sim.rs @@ -2,7 +2,7 @@ use crate::local_network::{EXECUTION_PORT, TERMINAL_BLOCK, TERMINAL_DIFFICULTY}; use crate::{checks, LocalNetwork, E}; use clap::ArgMatches; use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID}; -use eth1_test_rig::GanacheEth1Instance; +use eth1_test_rig::AnvilEth1Instance; use execution_layer::http::deposit_methods::Eth1Id; use futures::prelude::*; @@ -27,6 +27,8 @@ const SUGGESTED_FEE_RECIPIENT: [u8; 20] = pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let node_count = value_t!(matches, "nodes", usize).expect("missing nodes default"); + let proposer_nodes = value_t!(matches, "proposer-nodes", usize).unwrap_or(0); + println!("PROPOSER-NODES: {}", proposer_nodes); let validators_per_node = value_t!(matches, "validators_per_node", usize) .expect("missing validators_per_node default"); let speed_up_factor = @@ -35,7 +37,8 @@ pub fn 
run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let post_merge_sim = matches.is_present("post-merge"); println!("Beacon Chain Simulator:"); - println!(" nodes:{}", node_count); + println!(" nodes:{}, proposer_nodes: {}", node_count, proposer_nodes); + println!(" validators_per_node:{}", validators_per_node); println!(" post merge simulation:{}", post_merge_sim); println!(" continue_after_checks:{}", continue_after_checks); @@ -69,6 +72,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { max_log_number: 0, compression: false, is_restricted: true, + sse_logging: false, })? .multi_threaded_tokio_runtime()? .build()?; @@ -107,12 +111,12 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { * Deploy the deposit contract, spawn tasks to keep creating new blocks and deposit * validators. */ - let ganache_eth1_instance = GanacheEth1Instance::new(DEFAULT_CHAIN_ID.into()).await?; - let deposit_contract = ganache_eth1_instance.deposit_contract; - let chain_id = ganache_eth1_instance.ganache.chain_id(); - let ganache = ganache_eth1_instance.ganache; - let eth1_endpoint = SensitiveUrl::parse(ganache.endpoint().as_str()) - .expect("Unable to parse ganache endpoint."); + let anvil_eth1_instance = AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into()).await?; + let deposit_contract = anvil_eth1_instance.deposit_contract; + let chain_id = anvil_eth1_instance.anvil.chain_id(); + let anvil = anvil_eth1_instance.anvil; + let eth1_endpoint = SensitiveUrl::parse(anvil.endpoint().as_str()) + .expect("Unable to parse anvil endpoint."); let deposit_contract_address = deposit_contract.address(); // Start a timer that produces eth1 blocks on an interval. @@ -120,7 +124,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { let mut interval = tokio::time::interval(eth1_block_time); loop { interval.tick().await; - let _ = ganache.evm_mine().await; + let _ = anvil.evm_mine().await; } }); @@ -147,7 +151,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { beacon_config.sync_eth1_chain = true; beacon_config.eth1.auto_update_interval_millis = eth1_block_time.as_millis() as u64; beacon_config.eth1.chain_id = Eth1Id::from(chain_id); - beacon_config.network.target_peers = node_count - 1; + beacon_config.network.target_peers = node_count + proposer_nodes - 1; beacon_config.network.enr_address = (Some(Ipv4Addr::LOCALHOST), None); @@ -173,7 +177,17 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { * One by one, add beacon nodes to the network. */ for _ in 0..node_count - 1 { - network.add_beacon_node(beacon_config.clone()).await?; + network + .add_beacon_node(beacon_config.clone(), false) + .await?; + } + + /* + * One by one, add proposer nodes to the network. + */ + for _ in 0..proposer_nodes - 1 { + println!("Adding a proposer node"); + network.add_beacon_node(beacon_config.clone(), true).await?; } /* @@ -310,7 +324,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> { */ println!( "Simulation complete. 
Finished with {} beacon nodes and {} validator clients", - network.beacon_node_count(), + network.beacon_node_count() + network.proposer_node_count(), network.validator_client_count() ); diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index 3e481df88..e35870d12 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -25,6 +25,7 @@ pub const TERMINAL_BLOCK: u64 = 64; pub struct Inner { pub context: RuntimeContext, pub beacon_nodes: RwLock>>, + pub proposer_nodes: RwLock>>, pub validator_clients: RwLock>>, pub execution_nodes: RwLock>>, } @@ -97,6 +98,7 @@ impl LocalNetwork { inner: Arc::new(Inner { context, beacon_nodes: RwLock::new(vec![beacon_node]), + proposer_nodes: RwLock::new(vec![]), execution_nodes: RwLock::new(execution_node), validator_clients: RwLock::new(vec![]), }), @@ -111,6 +113,14 @@ impl LocalNetwork { self.beacon_nodes.read().len() } + /// Returns the number of proposer nodes in the network. + /// + /// Note: does not count nodes that are external to this `LocalNetwork` that may have connected + /// (e.g., another Lighthouse process on the same machine.) + pub fn proposer_node_count(&self) -> usize { + self.proposer_nodes.read().len() + } + /// Returns the number of validator clients in the network. /// /// Note: does not count nodes that are external to this `LocalNetwork` that may have connected @@ -120,7 +130,11 @@ impl LocalNetwork { } /// Adds a beacon node to the network, connecting to the 0'th beacon node via ENR. - pub async fn add_beacon_node(&self, mut beacon_config: ClientConfig) -> Result<(), String> { + pub async fn add_beacon_node( + &self, + mut beacon_config: ClientConfig, + is_proposer: bool, + ) -> Result<(), String> { let self_1 = self.clone(); let count = self.beacon_node_count() as u16; println!("Adding beacon node.."); @@ -135,6 +149,7 @@ impl LocalNetwork { .enr() .expect("bootnode must have a network"), ); + let count = (self.beacon_node_count() + self.proposer_node_count()) as u16; beacon_config.network.set_ipv4_listening_address( std::net::Ipv4Addr::UNSPECIFIED, BOOTNODE_PORT + count, @@ -143,6 +158,7 @@ impl LocalNetwork { beacon_config.network.enr_udp4_port = Some(BOOTNODE_PORT + count); beacon_config.network.enr_tcp4_port = Some(BOOTNODE_PORT + count); beacon_config.network.discv5_config.table_filter = |_| true; + beacon_config.network.proposer_only = is_proposer; } if let Some(el_config) = &mut beacon_config.execution_layer { let config = MockExecutionConfig { @@ -173,7 +189,11 @@ impl LocalNetwork { beacon_config, ) .await?; - self_1.beacon_nodes.write().push(beacon_node); + if is_proposer { + self_1.proposer_nodes.write().push(beacon_node); + } else { + self_1.beacon_nodes.write().push(beacon_node); + } Ok(()) } @@ -200,6 +220,16 @@ impl LocalNetwork { .http_api_listen_addr() .expect("Must have http started") }; + // If there is a proposer node for the same index, we will use that for proposing + let proposer_socket_addr = { + let read_lock = self.proposer_nodes.read(); + read_lock.get(beacon_node).map(|proposer_node| { + proposer_node + .client + .http_api_listen_addr() + .expect("Must have http started") + }) + }; let beacon_node = SensitiveUrl::parse( format!("http://{}:{}", socket_addr.ip(), socket_addr.port()).as_str(), @@ -210,6 +240,21 @@ impl LocalNetwork { } else { vec![beacon_node] }; + + // If we have a proposer node established, use it. 
+ if let Some(proposer_socket_addr) = proposer_socket_addr { + let url = SensitiveUrl::parse( + format!( + "http://{}:{}", + proposer_socket_addr.ip(), + proposer_socket_addr.port() + ) + .as_str(), + ) + .unwrap(); + validator_config.proposer_nodes = vec![url]; + } + let validator_client = LocalValidatorClient::production_with_insecure_keypairs( context, validator_config, @@ -223,9 +268,11 @@ impl LocalNetwork { /// For all beacon nodes in `Self`, return a HTTP client to access each nodes HTTP API. pub fn remote_nodes(&self) -> Result, String> { let beacon_nodes = self.beacon_nodes.read(); + let proposer_nodes = self.proposer_nodes.read(); beacon_nodes .iter() + .chain(proposer_nodes.iter()) .map(|beacon_node| beacon_node.remote_node()) .collect() } diff --git a/testing/simulator/src/main.rs b/testing/simulator/src/main.rs index 922149537..a19777c5a 100644 --- a/testing/simulator/src/main.rs +++ b/testing/simulator/src/main.rs @@ -1,6 +1,6 @@ //! This crate provides a simluation that creates `n` beacon node and validator clients, each with //! `v` validators. A deposit contract is deployed at the start of the simulation using a local -//! `ganache` instance (you must have `ganache` installed and avaliable on your path). All +//! `anvil` instance (you must have `anvil` installed and avaliable on your path). All //! beacon nodes independently listen for genesis from the deposit contract, then start operating. //! //! As the simulation runs, there are checks made to ensure that all components are running diff --git a/testing/simulator/src/no_eth1_sim.rs b/testing/simulator/src/no_eth1_sim.rs index f1f6dc442..fc18b1cd4 100644 --- a/testing/simulator/src/no_eth1_sim.rs +++ b/testing/simulator/src/no_eth1_sim.rs @@ -54,6 +54,7 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { max_log_number: 0, compression: false, is_restricted: true, + sse_logging: false, })? .multi_threaded_tokio_runtime()? .build()?; @@ -100,7 +101,9 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { */ for _ in 0..node_count - 1 { - network.add_beacon_node(beacon_config.clone()).await?; + network + .add_beacon_node(beacon_config.clone(), false) + .await?; } /* @@ -151,7 +154,7 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> { */ println!( "Simulation complete. Finished with {} beacon nodes and {} validator clients", - network.beacon_node_count(), + network.beacon_node_count() + network.proposer_node_count(), network.validator_client_count() ); diff --git a/testing/simulator/src/sync_sim.rs b/testing/simulator/src/sync_sim.rs index c437457c2..78f7e1ee9 100644 --- a/testing/simulator/src/sync_sim.rs +++ b/testing/simulator/src/sync_sim.rs @@ -58,6 +58,7 @@ fn syncing_sim( max_log_number: 0, compression: false, is_restricted: true, + sse_logging: false, })? .multi_threaded_tokio_runtime()? 
.build()?; @@ -228,7 +229,7 @@ pub async fn verify_one_node_sync( ) .await; // Add a beacon node - network.add_beacon_node(beacon_config).await?; + network.add_beacon_node(beacon_config, false).await?; // Check every `epoch_duration` if nodes are synced // limited to at most `sync_timeout` epochs let mut interval = tokio::time::interval(epoch_duration); @@ -265,8 +266,10 @@ pub async fn verify_two_nodes_sync( ) .await; // Add beacon nodes - network.add_beacon_node(beacon_config.clone()).await?; - network.add_beacon_node(beacon_config).await?; + network + .add_beacon_node(beacon_config.clone(), false) + .await?; + network.add_beacon_node(beacon_config, false).await?; // Check every `epoch_duration` if nodes are synced // limited to at most `sync_timeout` epochs let mut interval = tokio::time::interval(epoch_duration); @@ -305,8 +308,10 @@ pub async fn verify_in_between_sync( ) .await; // Add two beacon nodes - network.add_beacon_node(beacon_config.clone()).await?; - network.add_beacon_node(beacon_config).await?; + network + .add_beacon_node(beacon_config.clone(), false) + .await?; + network.add_beacon_node(beacon_config, false).await?; // Delay before adding additional syncing nodes. epoch_delay( Epoch::new(sync_timeout - 5), @@ -315,7 +320,7 @@ pub async fn verify_in_between_sync( ) .await; // Add a beacon node - network.add_beacon_node(config1.clone()).await?; + network.add_beacon_node(config1.clone(), false).await?; // Check every `epoch_duration` if nodes are synced // limited to at most `sync_timeout` epochs let mut interval = tokio::time::interval(epoch_duration); diff --git a/testing/state_transition_vectors/Cargo.toml b/testing/state_transition_vectors/Cargo.toml index 6da9f2f4a..a25b3c31c 100644 --- a/testing/state_transition_vectors/Cargo.toml +++ b/testing/state_transition_vectors/Cargo.toml @@ -9,7 +9,7 @@ edition = "2021" [dependencies] state_processing = { path = "../../consensus/state_processing" } types = { path = "../../consensus/types" } -eth2_ssz = "0.4.1" +ethereum_ssz = "0.5.0" beacon_chain = { path = "../../beacon_node/beacon_chain" } lazy_static = "1.4.0" tokio = { version = "1.14.0", features = ["rt-multi-thread"] } diff --git a/testing/state_transition_vectors/src/exit.rs b/testing/state_transition_vectors/src/exit.rs index d581eba96..7e7fd23e0 100644 --- a/testing/state_transition_vectors/src/exit.rs +++ b/testing/state_transition_vectors/src/exit.rs @@ -2,7 +2,7 @@ use super::*; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use state_processing::{ per_block_processing, per_block_processing::errors::ExitInvalid, BlockProcessingError, - BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, + BlockSignatureStrategy, ConsensusContext, StateProcessingStrategy, VerifyBlockRoot, }; use types::{BeaconBlock, BeaconState, Epoch, EthSpec, SignedBeaconBlock}; @@ -69,6 +69,7 @@ impl ExitTest { state, block, BlockSignatureStrategy::VerifyIndividual, + StateProcessingStrategy::Accurate, VerifyBlockRoot::True, &mut ctxt, &E::default_spec(), diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index ada023f8c..494ebcb3d 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -13,7 +13,7 @@ tokio = { version = "1.14.0", features = ["time", "rt-multi-thread", "macros"] } logging = { path = "../common/logging" } [dependencies] -tree_hash = "0.4.1" +tree_hash = "0.5.0" clap = "2.33.3" slashing_protection = { path = "./slashing_protection" } slot_clock = { path = "../common/slot_clock" } @@ -25,6 +25,7 @@ bincode 
= "1.3.1" serde_json = "1.0.58" slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } tokio = { version = "1.14.0", features = ["time"] } +tokio-stream = { version = "0.1.3", features = ["sync"] } futures = "0.3.7" dirs = "3.0.1" directory = { path = "../common/directory" } @@ -46,7 +47,7 @@ lighthouse_version = { path = "../common/lighthouse_version" } warp_utils = { path = "../common/warp_utils" } warp = "0.3.2" hyper = "0.14.4" -eth2_serde_utils = "0.1.1" +ethereum_serde_utils = "0.5.0" libsecp256k1 = "0.7.0" ring = "0.16.19" rand = { version = "0.8.5", features = ["small_rng"] } @@ -61,4 +62,5 @@ url = "2.2.2" malloc_utils = { path = "../common/malloc_utils" } sysinfo = "0.26.5" system_health = { path = "../common/system_health" } +logging = { path = "../common/logging" } diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index 631e54dc4..278dc22d0 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -18,7 +18,7 @@ r2d2_sqlite = "0.21.0" serde = "1.0.116" serde_derive = "1.0.116" serde_json = "1.0.58" -eth2_serde_utils = "0.1.1" +ethereum_serde_utils = "0.5.0" filesystem = { path = "../../common/filesystem" } arbitrary = { version = "1.0", features = ["derive"], optional = true } diff --git a/validator_client/slashing_protection/src/interchange.rs b/validator_client/slashing_protection/src/interchange.rs index 3793766b6..99d37c38b 100644 --- a/validator_client/slashing_protection/src/interchange.rs +++ b/validator_client/slashing_protection/src/interchange.rs @@ -9,7 +9,7 @@ use types::{Epoch, Hash256, PublicKeyBytes, Slot}; #[serde(deny_unknown_fields)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct InterchangeMetadata { - #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")] + #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub interchange_format_version: u64, pub genesis_validators_root: Hash256, } @@ -27,7 +27,7 @@ pub struct InterchangeData { #[serde(deny_unknown_fields)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct SignedBlock { - #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")] + #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub slot: Slot, #[serde(skip_serializing_if = "Option::is_none")] pub signing_root: Option, @@ -37,9 +37,9 @@ pub struct SignedBlock { #[serde(deny_unknown_fields)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct SignedAttestation { - #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")] + #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub source_epoch: Epoch, - #[serde(with = "eth2_serde_utils::quoted_u64::require_quotes")] + #[serde(with = "serde_utils::quoted_u64::require_quotes")] pub target_epoch: Epoch, #[serde(skip_serializing_if = "Option::is_none")] pub signing_root: Option, diff --git a/validator_client/src/beacon_node_fallback.rs b/validator_client/src/beacon_node_fallback.rs index 3e667429b..531cec08a 100644 --- a/validator_client/src/beacon_node_fallback.rs +++ b/validator_client/src/beacon_node_fallback.rs @@ -28,7 +28,7 @@ const UPDATE_REQUIRED_LOG_HINT: &str = "this VC or the remote BN may need updati /// too early, we risk switching nodes between the time of publishing an attestation and publishing /// an aggregate; this may result in a missed aggregation. 
If we set this time too late, we risk not /// having the correct nodes up and running prior to the start of the slot. -const SLOT_LOOKAHEAD: Duration = Duration::from_secs(1); +const SLOT_LOOKAHEAD: Duration = Duration::from_secs(2); /// Indicates a measurement of latency between the VC and a BN. pub struct LatencyMeasurement { @@ -52,7 +52,7 @@ pub fn start_fallback_updater_service( let future = async move { loop { - beacon_nodes.update_unready_candidates().await; + beacon_nodes.update_all_candidates().await; let sleep_time = beacon_nodes .slot_clock @@ -182,7 +182,10 @@ impl CandidateBeaconNode { spec: &ChainSpec, log: &Logger, ) -> Result<(), CandidateError> { - let new_status = if let Err(e) = self.is_online(log).await { + let previous_status = self.status(RequireSynced::Yes).await; + let was_offline = matches!(previous_status, Err(CandidateError::Offline)); + + let new_status = if let Err(e) = self.is_online(was_offline, log).await { Err(e) } else if let Err(e) = self.is_compatible(spec, log).await { Err(e) @@ -202,7 +205,7 @@ impl CandidateBeaconNode { } /// Checks if the node is reachable. - async fn is_online(&self, log: &Logger) -> Result<(), CandidateError> { + async fn is_online(&self, was_offline: bool, log: &Logger) -> Result<(), CandidateError> { let result = self .beacon_node .get_node_version() @@ -211,12 +214,14 @@ impl CandidateBeaconNode { match result { Ok(version) => { - info!( - log, - "Connected to beacon node"; - "version" => version, - "endpoint" => %self.beacon_node, - ); + if was_offline { + info!( + log, + "Connected to beacon node"; + "version" => version, + "endpoint" => %self.beacon_node, + ); + } Ok(()) } Err(e) => { @@ -385,33 +390,21 @@ impl BeaconNodeFallback { n } - /// Loop through any `self.candidates` that we don't think are online, compatible or synced and - /// poll them to see if their status has changed. + /// Loop through ALL candidates in `self.candidates` and update their sync status. /// - /// We do not poll nodes that are synced to avoid sending additional requests when everything is - /// going smoothly. - pub async fn update_unready_candidates(&self) { - let mut futures = Vec::new(); - for candidate in &self.candidates { - // There is a potential race condition between having the read lock and the write - // lock. The worst case of this race is running `try_become_ready` twice, which is - // acceptable. - // - // Note: `RequireSynced` is always set to false here. This forces us to recheck the sync - // status of nodes that were previously not-synced. - if candidate.status(RequireSynced::Yes).await.is_err() { - // There exists a race-condition that could result in `refresh_status` being called - // when the status does not require refreshing anymore. This is deemed an - // acceptable inefficiency. - futures.push(candidate.refresh_status( - self.slot_clock.as_ref(), - &self.spec, - &self.log, - )); - } - } + /// It is possible for a node to return an unsynced status while continuing to serve + /// low quality responses. To route around this it's best to poll all connected beacon nodes. + /// A previous implementation of this function polled only the unavailable BNs. 
+ pub async fn update_all_candidates(&self) { + let futures = self + .candidates + .iter() + .map(|candidate| { + candidate.refresh_status(self.slot_clock.as_ref(), &self.spec, &self.log) + }) + .collect::>(); - //run all updates concurrently and ignore results + // run all updates concurrently and ignore errors let _ = future::join_all(futures).await; } diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 5fa32d3f4..f7b6d0935 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -12,6 +12,8 @@ use eth2::types::{BlockContents, SignedBlockContents}; use eth2::BeaconNodeHttpClient; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; +use std::fmt::Debug; +use std::future::Future; use std::ops::Deref; use std::sync::Arc; use std::time::Duration; @@ -48,6 +50,7 @@ pub struct BlockServiceBuilder { validator_store: Option>>, slot_clock: Option>, beacon_nodes: Option>>, + proposer_nodes: Option>>, context: Option>, graffiti: Option, graffiti_file: Option, @@ -60,6 +63,7 @@ impl BlockServiceBuilder { validator_store: None, slot_clock: None, beacon_nodes: None, + proposer_nodes: None, context: None, graffiti: None, graffiti_file: None, @@ -82,6 +86,11 @@ impl BlockServiceBuilder { self } + pub fn proposer_nodes(mut self, proposer_nodes: Arc>) -> Self { + self.proposer_nodes = Some(proposer_nodes); + self + } + pub fn runtime_context(mut self, context: RuntimeContext) -> Self { self.context = Some(context); self @@ -117,6 +126,7 @@ impl BlockServiceBuilder { context: self .context .ok_or("Cannot build BlockService without runtime_context")?, + proposer_nodes: self.proposer_nodes, graffiti: self.graffiti, graffiti_file: self.graffiti_file, block_delay: self.block_delay, @@ -125,11 +135,81 @@ impl BlockServiceBuilder { } } +// Combines a set of non-block-proposing `beacon_nodes` and only-block-proposing +// `proposer_nodes`. +pub struct ProposerFallback { + beacon_nodes: Arc>, + proposer_nodes: Option>>, +} + +impl ProposerFallback { + // Try `func` on `self.proposer_nodes` first. If that doesn't work, try `self.beacon_nodes`. + pub async fn first_success_try_proposers_first<'a, F, O, Err, R>( + &'a self, + require_synced: RequireSynced, + offline_on_failure: OfflineOnFailure, + func: F, + ) -> Result> + where + F: Fn(&'a BeaconNodeHttpClient) -> R + Clone, + R: Future>, + Err: Debug, + { + // If there are proposer nodes, try calling `func` on them and return early if they are successful. + if let Some(proposer_nodes) = &self.proposer_nodes { + if let Ok(result) = proposer_nodes + .first_success(require_synced, offline_on_failure, func.clone()) + .await + { + return Ok(result); + } + } + + // If the proposer nodes failed, try on the non-proposer nodes. + self.beacon_nodes + .first_success(require_synced, offline_on_failure, func) + .await + } + + // Try `func` on `self.beacon_nodes` first. If that doesn't work, try `self.proposer_nodes`. + pub async fn first_success_try_proposers_last<'a, F, O, Err, R>( + &'a self, + require_synced: RequireSynced, + offline_on_failure: OfflineOnFailure, + func: F, + ) -> Result> + where + F: Fn(&'a BeaconNodeHttpClient) -> R + Clone, + R: Future>, + Err: Debug, + { + // Try running `func` on the non-proposer beacon nodes. 
+ let beacon_nodes_result = self + .beacon_nodes + .first_success(require_synced, offline_on_failure, func.clone()) + .await; + + match (beacon_nodes_result, &self.proposer_nodes) { + // The non-proposer node call succeed, return the result. + (Ok(success), _) => Ok(success), + // The non-proposer node call failed, but we don't have any proposer nodes. Return an error. + (Err(e), None) => Err(e), + // The non-proposer node call failed, try the same call on the proposer nodes. + (Err(_), Some(proposer_nodes)) => { + proposer_nodes + .first_success(require_synced, offline_on_failure, func) + .await + } + } + } +} + /// Helper to minimise `Arc` usage. pub struct Inner { validator_store: Arc>, slot_clock: Arc, beacon_nodes: Arc>, + proposer_nodes: Option>>, context: RuntimeContext, graffiti: Option, graffiti_file: Option, @@ -337,16 +417,23 @@ impl BlockService { let self_ref = &self; let proposer_index = self.validator_store.validator_index(&validator_pubkey); let validator_pubkey_ref = &validator_pubkey; + let proposer_fallback = ProposerFallback { + beacon_nodes: self.beacon_nodes.clone(), + proposer_nodes: self.proposer_nodes.clone(), + }; info!( log, "Requesting unsigned block"; "slot" => slot.as_u64(), ); + // Request block from first responsive beacon node. - let block_contents = self - .beacon_nodes - .first_success( + // + // Try the proposer nodes last, since it's likely that they don't have a + // great view of attestations on the network. + let block_contents = proposer_fallback + .first_success_try_proposers_last( RequireSynced::No, OfflineOnFailure::Yes, move |beacon_node| { @@ -397,8 +484,12 @@ impl BlockService { let signed_block_contents = SignedBlockContents::from((signed_block, maybe_signed_blobs)); // Publish block with first available beacon node. - self.beacon_nodes - .first_success( + // + // Try the proposer nodes first, since we've likely gone to efforts to + // protect them from DoS attacks and they're most likely to successfully + // publish a block. + proposer_fallback + .first_success_try_proposers_first( RequireSynced::No, OfflineOnFailure::Yes, |beacon_node| async { diff --git a/validator_client/src/check_synced.rs b/validator_client/src/check_synced.rs index c31457e28..fb88d33da 100644 --- a/validator_client/src/check_synced.rs +++ b/validator_client/src/check_synced.rs @@ -36,7 +36,10 @@ pub async fn check_synced( } }; - let is_synced = !resp.data.is_syncing || (resp.data.sync_distance.as_u64() < SYNC_TOLERANCE); + // Default EL status to "online" for backwards-compatibility with BNs that don't include it. + let el_offline = resp.data.el_offline.unwrap_or(false); + let bn_is_synced = !resp.data.is_syncing || (resp.data.sync_distance.as_u64() < SYNC_TOLERANCE); + let is_synced = bn_is_synced && !el_offline; if let Some(log) = log_opt { if !is_synced { @@ -52,6 +55,7 @@ pub async fn check_synced( "sync_distance" => resp.data.sync_distance.as_u64(), "head_slot" => resp.data.head_slot.as_u64(), "endpoint" => %beacon_node, + "el_offline" => el_offline, ); } diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index fd96aa1f5..6e199cb17 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -26,6 +26,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { ) .takes_value(true), ) + .arg( + Arg::with_name("proposer-nodes") + .long("proposer-nodes") + .value_name("NETWORK_ADDRESSES") + .help("Comma-separated addresses to one or more beacon node HTTP APIs. \ + These specify nodes that are used to send beacon block proposals. 
A failure will revert back to the standard beacon nodes specified in --beacon-nodes." + ) + .takes_value(true), + ) .arg( Arg::with_name("disable-run-on-all") .long("disable-run-on-all") @@ -100,10 +109,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .arg( Arg::with_name("allow-unsynced") .long("allow-unsynced") - .help( - "If present, the validator client will still poll for duties if the beacon - node is not synced.", - ), + .help("DEPRECATED: this flag does nothing"), ) .arg( Arg::with_name("use-long-timeouts") @@ -118,7 +124,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .value_name("CERTIFICATE-FILES") .takes_value(true) .help("Comma-separated paths to custom TLS certificates to use when connecting \ - to a beacon node. These certificates must be in PEM format and are used \ + to a beacon node (and/or proposer node). These certificates must be in PEM format and are used \ in addition to the OS trust store. Commas must only be used as a \ delimiter, and must not be part of the certificate path.") ) diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 724d6c74f..fa297dcfe 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -29,6 +29,8 @@ pub struct Config { /// /// Should be similar to `["http://localhost:8080"]` pub beacon_nodes: Vec, + /// An optional beacon node used for block proposals only. + pub proposer_nodes: Vec, /// If true, the validator client will still poll for duties and produce blocks even if the /// beacon node is not synced at startup. pub allow_unsynced_beacon_node: bool, @@ -95,6 +97,7 @@ impl Default for Config { validator_dir, secrets_dir, beacon_nodes, + proposer_nodes: Vec::new(), allow_unsynced_beacon_node: false, disable_auto_discover: false, init_slashing_protection: false, @@ -186,6 +189,14 @@ impl Config { .map_err(|e| format!("Unable to parse beacon node URL: {:?}", e))?]; } + if let Some(proposer_nodes) = parse_optional::(cli_args, "proposer_nodes")? 
{ + config.proposer_nodes = proposer_nodes + .split(',') + .map(SensitiveUrl::parse) + .collect::>() + .map_err(|e| format!("Unable to parse proposer node URL: {:?}", e))?; + } + if cli_args.is_present("delete-lockfiles") { warn!( log, @@ -194,7 +205,13 @@ impl Config { ); } - config.allow_unsynced_beacon_node = cli_args.is_present("allow-unsynced"); + if cli_args.is_present("allow-unsynced") { + warn!( + log, + "The --allow-unsynced flag is deprecated"; + "msg" => "it no longer has any effect", + ); + } config.disable_run_on_all = cli_args.is_present("disable-run-on-all"); config.disable_auto_discover = cli_args.is_present("disable-auto-discover"); config.init_slashing_protection = cli_args.is_present("init-slashing-protection"); diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index c335c67ab..83cdb936a 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -16,12 +16,15 @@ use crate::{ validator_store::{DoppelgangerStatus, Error as ValidatorStoreError, ValidatorStore}, }; use environment::RuntimeContext; -use eth2::types::{AttesterData, BeaconCommitteeSubscription, ProposerData, StateId, ValidatorId}; +use eth2::types::{ + AttesterData, BeaconCommitteeSubscription, DutiesResponse, ProposerData, StateId, ValidatorId, +}; use futures::{stream, StreamExt}; use parking_lot::RwLock; use safe_arith::ArithError; use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; +use std::cmp::min; use std::collections::{hash_map, BTreeMap, HashMap, HashSet}; use std::sync::Arc; use std::time::Duration; @@ -54,6 +57,11 @@ const SELECTION_PROOF_SCHEDULE_DENOM: u32 = 2; /// flag in the cli to enable collection of per validator metrics. const VALIDATOR_METRICS_MIN_COUNT: usize = 64; +/// The number of validators to request duty information for in the initial request. +/// The initial request is used to determine if further requests are required, so that it +/// reduces the amount of data that needs to be transferred. +const INITIAL_DUTIES_QUERY_SIZE: usize = 1; + #[derive(Debug)] pub enum Error { UnableToReadSlotClock, @@ -139,11 +147,6 @@ pub struct DutiesService { pub slot_clock: T, /// Provides HTTP access to remote beacon nodes. pub beacon_nodes: Arc>, - /// Controls whether or not this function will refuse to interact with non-synced beacon nodes. - /// - /// This functionality is a little redundant since most BNs will likely reject duties when they - /// aren't synced, but we keep it around for an emergency. - pub require_synced: RequireSynced, pub enable_high_validator_count_metrics: bool, pub context: RuntimeContext, pub spec: ChainSpec, @@ -413,7 +416,7 @@ async fn poll_validator_indices( let download_result = duties_service .beacon_nodes .first_success( - duties_service.require_synced, + RequireSynced::No, OfflineOnFailure::Yes, |beacon_node| async move { let _timer = metrics::start_timer_vec( @@ -531,7 +534,6 @@ async fn poll_beacon_attesters( current_epoch, &local_indices, &local_pubkeys, - current_slot, ) .await { @@ -544,6 +546,8 @@ async fn poll_beacon_attesters( ) } + update_per_validator_duty_metrics::(duties_service, current_epoch, current_slot); + drop(current_epoch_timer); let next_epoch_timer = metrics::start_timer_vec( &metrics::DUTIES_SERVICE_TIMES, @@ -551,14 +555,9 @@ async fn poll_beacon_attesters( ); // Download the duties and update the duties for the next epoch. 
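The `--proposer-nodes` handling added to `config.rs` above mirrors the existing `--beacon-nodes` parsing, and the field defaults to an empty `Vec`, so the proposer-node path stays opt-in. A standalone sketch of that parsing, purely for illustration (the example URLs are made up):

```rust
// Illustrative only: mirrors the `--proposer-nodes` parsing above.
use sensitive_url::SensitiveUrl;

fn parse_proposer_nodes(flag: &str) -> Result<Vec<SensitiveUrl>, String> {
    flag.split(',')
        .map(SensitiveUrl::parse)
        .collect::<Result<Vec<_>, _>>()
        .map_err(|e| format!("Unable to parse proposer node URL: {:?}", e))
}

// e.g. parse_proposer_nodes("http://127.0.0.1:5052,http://10.0.0.2:5052")
```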
- if let Err(e) = poll_beacon_attesters_for_epoch( - duties_service, - next_epoch, - &local_indices, - &local_pubkeys, - current_slot, - ) - .await + if let Err(e) = + poll_beacon_attesters_for_epoch(duties_service, next_epoch, &local_indices, &local_pubkeys) + .await { error!( log, @@ -569,6 +568,8 @@ async fn poll_beacon_attesters( ) } + update_per_validator_duty_metrics::(duties_service, next_epoch, current_slot); + drop(next_epoch_timer); let subscriptions_timer = metrics::start_timer_vec(&metrics::DUTIES_SERVICE_TIMES, &[metrics::SUBSCRIPTIONS]); @@ -612,7 +613,7 @@ async fn poll_beacon_attesters( if let Err(e) = duties_service .beacon_nodes .run( - duties_service.require_synced, + RequireSynced::No, OfflineOnFailure::Yes, |beacon_node| async move { let _timer = metrics::start_timer_vec( @@ -655,7 +656,6 @@ async fn poll_beacon_attesters_for_epoch( epoch: Epoch, local_indices: &[u64], local_pubkeys: &HashSet, - current_slot: Slot, ) -> Result<(), Error> { let log = duties_service.context.log(); @@ -674,84 +674,69 @@ async fn poll_beacon_attesters_for_epoch( &[metrics::UPDATE_ATTESTERS_FETCH], ); - let response = duties_service - .beacon_nodes - .first_success( - duties_service.require_synced, - OfflineOnFailure::Yes, - |beacon_node| async move { - let _timer = metrics::start_timer_vec( - &metrics::DUTIES_SERVICE_TIMES, - &[metrics::ATTESTER_DUTIES_HTTP_POST], - ); - beacon_node - .post_validator_duties_attester(epoch, local_indices) - .await - }, - ) - .await - .map_err(|e| Error::FailedToDownloadAttesters(e.to_string()))?; + // Request duties for all uninitialized validators. If there isn't any, we will just request for + // `INITIAL_DUTIES_QUERY_SIZE` validators. We use the `dependent_root` in the response to + // determine whether validator duties need to be updated. This is to ensure that we don't + // request for extra data unless necessary in order to save on network bandwidth. + let uninitialized_validators = + get_uninitialized_validators(duties_service, &epoch, local_pubkeys); + let indices_to_request = if !uninitialized_validators.is_empty() { + uninitialized_validators.as_slice() + } else { + &local_indices[0..min(INITIAL_DUTIES_QUERY_SIZE, local_indices.len())] + }; + + let response = + post_validator_duties_attester(duties_service, epoch, indices_to_request).await?; + let dependent_root = response.dependent_root; + + // Find any validators which have conflicting (epoch, dependent_root) values or missing duties for the epoch. + let validators_to_update: Vec<_> = { + // Avoid holding the read-lock for any longer than required. + let attesters = duties_service.attesters.read(); + local_pubkeys + .iter() + .filter(|pubkey| { + attesters.get(pubkey).map_or(true, |duties| { + duties + .get(&epoch) + .map_or(true, |(prior, _)| *prior != dependent_root) + }) + }) + .collect::>() + }; + + if validators_to_update.is_empty() { + // No validators have conflicting (epoch, dependent_root) values or missing duties for the epoch. + return Ok(()); + } + + // Filter out validators which have already been requested. + let initial_duties = &response.data; + let indices_to_request = validators_to_update + .iter() + .filter(|&&&pubkey| !initial_duties.iter().any(|duty| duty.pubkey == pubkey)) + .filter_map(|pubkey| duties_service.validator_store.validator_index(pubkey)) + .collect::>(); + + let new_duties = if !indices_to_request.is_empty() { + post_validator_duties_attester(duties_service, epoch, indices_to_request.as_slice()) + .await? 
+ .data + .into_iter() + .chain(response.data) + .collect::>() + } else { + response.data + }; drop(fetch_timer); + let _store_timer = metrics::start_timer_vec( &metrics::DUTIES_SERVICE_TIMES, &[metrics::UPDATE_ATTESTERS_STORE], ); - let dependent_root = response.dependent_root; - - // Filter any duties that are not relevant or already known. - let new_duties = { - // Avoid holding the read-lock for any longer than required. - let attesters = duties_service.attesters.read(); - response - .data - .into_iter() - .filter(|duty| { - if duties_service.per_validator_metrics() { - let validator_index = duty.validator_index; - let duty_slot = duty.slot; - if let Some(existing_slot_gauge) = - get_int_gauge(&ATTESTATION_DUTY, &[&validator_index.to_string()]) - { - let existing_slot = Slot::new(existing_slot_gauge.get() as u64); - let existing_epoch = existing_slot.epoch(E::slots_per_epoch()); - - // First condition ensures that we switch to the next epoch duty slot - // once the current epoch duty slot passes. - // Second condition is to ensure that next epoch duties don't override - // current epoch duties. - if existing_slot < current_slot - || (duty_slot.epoch(E::slots_per_epoch()) <= existing_epoch - && duty_slot > current_slot - && duty_slot != existing_slot) - { - existing_slot_gauge.set(duty_slot.as_u64() as i64); - } - } else { - set_int_gauge( - &ATTESTATION_DUTY, - &[&validator_index.to_string()], - duty_slot.as_u64() as i64, - ); - } - } - - local_pubkeys.contains(&duty.pubkey) && { - // Only update the duties if either is true: - // - // - There were no known duties for this epoch. - // - The dependent root has changed, signalling a re-org. - attesters.get(&duty.pubkey).map_or(true, |duties| { - duties - .get(&epoch) - .map_or(true, |(prior, _)| *prior != dependent_root) - }) - } - }) - .collect::>() - }; - debug!( log, "Downloaded attester duties"; @@ -799,6 +784,89 @@ async fn poll_beacon_attesters_for_epoch( Ok(()) } +/// Get a filtered list of local validators for which we don't already know their duties for that epoch +fn get_uninitialized_validators( + duties_service: &Arc>, + epoch: &Epoch, + local_pubkeys: &HashSet, +) -> Vec { + let attesters = duties_service.attesters.read(); + local_pubkeys + .iter() + .filter(|pubkey| { + attesters + .get(pubkey) + .map_or(true, |duties| !duties.contains_key(epoch)) + }) + .filter_map(|pubkey| duties_service.validator_store.validator_index(pubkey)) + .collect::>() +} + +fn update_per_validator_duty_metrics( + duties_service: &Arc>, + epoch: Epoch, + current_slot: Slot, +) { + if duties_service.per_validator_metrics() { + let attesters = duties_service.attesters.read(); + attesters.values().for_each(|attester_duties_by_epoch| { + if let Some((_, duty_and_proof)) = attester_duties_by_epoch.get(&epoch) { + let duty = &duty_and_proof.duty; + let validator_index = duty.validator_index; + let duty_slot = duty.slot; + if let Some(existing_slot_gauge) = + get_int_gauge(&ATTESTATION_DUTY, &[&validator_index.to_string()]) + { + let existing_slot = Slot::new(existing_slot_gauge.get() as u64); + let existing_epoch = existing_slot.epoch(E::slots_per_epoch()); + + // First condition ensures that we switch to the next epoch duty slot + // once the current epoch duty slot passes. + // Second condition is to ensure that next epoch duties don't override + // current epoch duties. 
+ if existing_slot < current_slot + || (duty_slot.epoch(E::slots_per_epoch()) <= existing_epoch + && duty_slot > current_slot + && duty_slot != existing_slot) + { + existing_slot_gauge.set(duty_slot.as_u64() as i64); + } + } else { + set_int_gauge( + &ATTESTATION_DUTY, + &[&validator_index.to_string()], + duty_slot.as_u64() as i64, + ); + } + } + }); + } +} + +async fn post_validator_duties_attester( + duties_service: &Arc>, + epoch: Epoch, + validator_indices: &[u64], +) -> Result>, Error> { + duties_service + .beacon_nodes + .first_success( + RequireSynced::No, + OfflineOnFailure::Yes, + |beacon_node| async move { + let _timer = metrics::start_timer_vec( + &metrics::DUTIES_SERVICE_TIMES, + &[metrics::ATTESTER_DUTIES_HTTP_POST], + ); + beacon_node + .post_validator_duties_attester(epoch, validator_indices) + .await + }, + ) + .await + .map_err(|e| Error::FailedToDownloadAttesters(e.to_string())) +} + /// Compute the attestation selection proofs for the `duties` and add them to the `attesters` map. /// /// Duties are computed in batches each slot. If a re-org is detected then the process will @@ -990,7 +1058,7 @@ async fn poll_beacon_proposers( let download_result = duties_service .beacon_nodes .first_success( - duties_service.require_synced, + RequireSynced::No, OfflineOnFailure::Yes, |beacon_node| async move { let _timer = metrics::start_timer_vec( diff --git a/validator_client/src/duties_service/sync.rs b/validator_client/src/duties_service/sync.rs index b9d4d7030..7a852091a 100644 --- a/validator_client/src/duties_service/sync.rs +++ b/validator_client/src/duties_service/sync.rs @@ -1,4 +1,4 @@ -use crate::beacon_node_fallback::OfflineOnFailure; +use crate::beacon_node_fallback::{OfflineOnFailure, RequireSynced}; use crate::{ doppelganger_service::DoppelgangerStatus, duties_service::{DutiesService, Error}, @@ -422,7 +422,7 @@ pub async fn poll_sync_committee_duties_for_period String { - eth2_serde_utils::hex::encode(&self.pk.serialize_compressed()[..]) + serde_utils::hex::encode(&self.pk.serialize_compressed()[..]) } /// Returns the API token. 
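The reworked attester-duty polling above now probes a small set of indices first (uninitialized validators, or just `INITIAL_DUTIES_QUERY_SIZE` of them) and only issues the full `post_validator_duties_attester` request when the returned `dependent_root` disagrees with what is already cached. A toy walk-through of that two-phase flow, with made-up types standing in for the real API; everything here is illustrative:

```rust
// Toy illustration with made-up types; the real flow uses the beacon node's
// `post_validator_duties_attester` endpoint and per-validator cached roots.
struct Duties {
    dependent_root: u64,
    data: Vec<u64>, // indices that received duties
}

async fn fetch_duties(indices: &[u64]) -> Duties {
    // Stand-in for the HTTP request.
    Duties { dependent_root: 42, data: indices.to_vec() }
}

async fn poll_attesters(all_indices: &[u64], cached_root: Option<u64>) -> Vec<u64> {
    // Phase 1: cheap probe, here just one index.
    let probe = &all_indices[..all_indices.len().min(1)];
    let first = fetch_duties(probe).await;

    // If the cached entries already match this dependent root, nothing has
    // re-orged and the expensive full query is skipped.
    if cached_root == Some(first.dependent_root) {
        return first.data;
    }

    // Phase 2: fetch the remaining indices and merge with the probe response.
    let rest: Vec<u64> = all_indices
        .iter()
        .copied()
        .filter(|i| !first.data.contains(i))
        .collect();
    let second = fetch_duties(&rest).await;
    first.data.into_iter().chain(second.data).collect()
}
```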
@@ -205,7 +205,7 @@ impl ApiSecret { let message = Message::parse_slice(digest(&SHA256, input).as_ref()).expect("sha256 is 32 bytes"); let (signature, _) = libsecp256k1::sign(&message, &sk); - eth2_serde_utils::hex::encode(signature.serialize_der().as_ref()) + serde_utils::hex::encode(signature.serialize_der().as_ref()) } } } diff --git a/validator_client/src/http_api/create_validator.rs b/validator_client/src/http_api/create_validator.rs index a32ccce62..f3107cfed 100644 --- a/validator_client/src/http_api/create_validator.rs +++ b/validator_client/src/http_api/create_validator.rs @@ -159,7 +159,7 @@ pub async fn create_validators_mnemonic, T: 'static + SlotClock, gas_limit: request.gas_limit, builder_proposals: request.builder_proposals, voting_pubkey, - eth1_deposit_tx_data: eth2_serde_utils::hex::encode(ð1_deposit_data.rlp), + eth1_deposit_tx_data: serde_utils::hex::encode(ð1_deposit_data.rlp), deposit_gwei: request.deposit_gwei, }); } diff --git a/validator_client/src/http_api/mod.rs b/validator_client/src/http_api/mod.rs index 15b3f9fe0..fa6cde3ed 100644 --- a/validator_client/src/http_api/mod.rs +++ b/validator_client/src/http_api/mod.rs @@ -18,6 +18,7 @@ use eth2::lighthouse_vc::{ types::{self as api_types, GenericResponse, Graffiti, PublicKey, PublicKeyBytes}, }; use lighthouse_version::version_with_platform; +use logging::SSELoggingComponents; use parking_lot::RwLock; use serde::{Deserialize, Serialize}; use slog::{crit, info, warn, Logger}; @@ -31,6 +32,7 @@ use std::sync::Arc; use sysinfo::{System, SystemExt}; use system_health::observe_system_health_vc; use task_executor::TaskExecutor; +use tokio_stream::{wrappers::BroadcastStream, StreamExt}; use types::{ChainSpec, ConfigAndPreset, EthSpec}; use validator_dir::Builder as ValidatorDirBuilder; use warp::{ @@ -39,6 +41,7 @@ use warp::{ response::Response, StatusCode, }, + sse::Event, Filter, }; @@ -73,6 +76,7 @@ pub struct Context { pub spec: ChainSpec, pub config: Config, pub log: Logger, + pub sse_logging_components: Option, pub slot_clock: T, pub _phantom: PhantomData, } @@ -201,6 +205,10 @@ pub fn serve( let api_token_path_inner = api_token_path.clone(); let api_token_path_filter = warp::any().map(move || api_token_path_inner.clone()); + // Filter for SEE Logging events + let inner_components = ctx.sse_logging_components.clone(); + let sse_component_filter = warp::any().map(move || inner_components.clone()); + // Create a `warp` filter that provides access to local system information. 
let system_info = Arc::new(RwLock::new(sysinfo::System::new())); { @@ -1021,6 +1029,49 @@ pub fn serve( }) }); + // Subscribe to get VC logs via Server side events + // /lighthouse/logs + let get_log_events = warp::path("lighthouse") + .and(warp::path("logs")) + .and(warp::path::end()) + .and(sse_component_filter) + .and_then(|sse_component: Option| { + warp_utils::task::blocking_task(move || { + if let Some(logging_components) = sse_component { + // Build a JSON stream + let s = + BroadcastStream::new(logging_components.sender.subscribe()).map(|msg| { + match msg { + Ok(data) => { + // Serialize to json + match data.to_json_string() { + // Send the json as a Server Sent Event + Ok(json) => Event::default().json_data(json).map_err(|e| { + warp_utils::reject::server_sent_event_error(format!( + "{:?}", + e + )) + }), + Err(e) => Err(warp_utils::reject::server_sent_event_error( + format!("Unable to serialize to JSON {}", e), + )), + } + } + Err(e) => Err(warp_utils::reject::server_sent_event_error( + format!("Unable to receive event {}", e), + )), + } + }); + + Ok::<_, warp::Rejection>(warp::sse::reply(warp::sse::keep_alive().stream(s))) + } else { + Err(warp_utils::reject::custom_server_error( + "SSE Logging is not enabled".to_string(), + )) + } + }) + }); + let routes = warp::any() .and(authorization_header_filter) // Note: it is critical that the `authorization_header_filter` is applied to all routes. @@ -1061,8 +1112,8 @@ pub fn serve( .or(delete_std_remotekeys), )), ) - // The auth route is the only route that is allowed to be accessed without the API token. - .or(warp::get().and(get_auth)) + // The auth route and logs are the only routes that are allowed to be accessed without the API token. + .or(warp::get().and(get_auth.or(get_log_events.boxed()))) // Maps errors into HTTP responses. .recover(warp_utils::reject::handle_rejection) // Add a `Server` header. 
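The new `/lighthouse/logs` route streams VC log records as Server-Sent Events and, per the routing change above, is reachable without the API token; it returns an error when SSE logging is not enabled. A hedged sketch of consuming it from Rust, assuming the VC HTTP API listens on its default `127.0.0.1:5062` and that `reqwest` is built with its `stream` feature:

```rust
// Hedged consumer sketch; port and crate features are assumptions.
use futures::StreamExt;

async fn tail_vc_logs() -> Result<(), reqwest::Error> {
    let resp = reqwest::Client::new()
        .get("http://127.0.0.1:5062/lighthouse/logs")
        .header("accept", "text/event-stream")
        .send()
        .await?;

    // Each SSE frame carries a JSON-serialized log record.
    let mut frames = resp.bytes_stream();
    while let Some(frame) = frames.next().await {
        print!("{}", String::from_utf8_lossy(&frame?));
    }
    Ok(())
}
```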
diff --git a/validator_client/src/http_api/tests.rs b/validator_client/src/http_api/tests.rs
index df0e48044..84d2fe437 100644
--- a/validator_client/src/http_api/tests.rs
+++ b/validator_client/src/http_api/tests.rs
@@ -134,7 +134,8 @@ impl ApiTester {
                 listen_port: 0,
                 allow_origin: None,
             },
-            log: log.clone(),
+            sse_logging_components: None,
+            log,
             slot_clock: slot_clock.clone(),
             _phantom: PhantomData,
         });
@@ -365,7 +366,7 @@ impl ApiTester {
            let withdrawal_keypair = keypairs.withdrawal.decrypt_keypair(PASSWORD_BYTES).unwrap();
 
            let deposit_bytes =
-                eth2_serde_utils::hex::decode(&response[i].eth1_deposit_tx_data).unwrap();
+                serde_utils::hex::decode(&response[i].eth1_deposit_tx_data).unwrap();
 
            let (deposit_data, _) =
                decode_eth1_tx_data(&deposit_bytes, E::default_spec().max_effective_balance)
diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs
index 556fdef26..3dde49f22 100644
--- a/validator_client/src/lib.rs
+++ b/validator_client/src/lib.rs
@@ -24,6 +24,7 @@ pub use config::Config;
 use initialized_validators::InitializedValidators;
 use lighthouse_metrics::set_gauge;
 use monitoring_api::{MonitoringHttpClient, ProcessType};
+use sensitive_url::SensitiveUrl;
 pub use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME};
 
 use crate::beacon_node_fallback::{
@@ -263,60 +264,70 @@ impl ProductionValidatorClient {
            .checked_sub(1)
            .ok_or_else(|| "No beacon nodes defined.".to_string())?;
 
+        let beacon_node_setup = |x: (usize, &SensitiveUrl)| {
+            let i = x.0;
+            let url = x.1;
+            let slot_duration = Duration::from_secs(context.eth2_config.spec.seconds_per_slot);
+
+            let mut beacon_node_http_client_builder = ClientBuilder::new();
+
+            // Add new custom root certificates if specified.
+            if let Some(certificates) = &config.beacon_nodes_tls_certs {
+                for cert in certificates {
+                    beacon_node_http_client_builder = beacon_node_http_client_builder
+                        .add_root_certificate(load_pem_certificate(cert)?);
+                }
+            }
+
+            let beacon_node_http_client = beacon_node_http_client_builder
+                // Set default timeout to be the full slot duration.
+                .timeout(slot_duration)
+                .build()
+                .map_err(|e| format!("Unable to build HTTP client: {:?}", e))?;
+
+            // Use quicker timeouts if a fallback beacon node exists.
+            let timeouts = if i < last_beacon_node_index && !config.use_long_timeouts {
+                info!(
+                    log,
+                    "Fallback endpoints are available, using optimized timeouts.";
+                );
+                Timeouts {
+                    attestation: slot_duration / HTTP_ATTESTATION_TIMEOUT_QUOTIENT,
+                    attester_duties: slot_duration / HTTP_ATTESTER_DUTIES_TIMEOUT_QUOTIENT,
+                    liveness: slot_duration / HTTP_LIVENESS_TIMEOUT_QUOTIENT,
+                    proposal: slot_duration / HTTP_PROPOSAL_TIMEOUT_QUOTIENT,
+                    proposer_duties: slot_duration / HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT,
+                    sync_committee_contribution: slot_duration
+                        / HTTP_SYNC_COMMITTEE_CONTRIBUTION_TIMEOUT_QUOTIENT,
+                    sync_duties: slot_duration / HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT,
+                    get_beacon_blocks_ssz: slot_duration
+                        / HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT,
+                    get_debug_beacon_states: slot_duration / HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT,
+                    get_deposit_snapshot: slot_duration / HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT,
+                }
+            } else {
+                Timeouts::set_all(slot_duration)
+            };
+
+            Ok(BeaconNodeHttpClient::from_components(
+                url.clone(),
+                beacon_node_http_client,
+                timeouts,
+            ))
+        };
+
         let beacon_nodes: Vec<BeaconNodeHttpClient> = config
             .beacon_nodes
             .iter()
             .enumerate()
-            .map(|(i, url)| {
-                let slot_duration = Duration::from_secs(context.eth2_config.spec.seconds_per_slot);
+            .map(beacon_node_setup)
+            .collect::<Result<Vec<_>, String>>()?;
 
-                let mut beacon_node_http_client_builder = ClientBuilder::new();
-
-                // Add new custom root certificates if specified.
-                if let Some(certificates) = &config.beacon_nodes_tls_certs {
-                    for cert in certificates {
-                        beacon_node_http_client_builder = beacon_node_http_client_builder
-                            .add_root_certificate(load_pem_certificate(cert)?);
-                    }
-                }
-
-                let beacon_node_http_client = beacon_node_http_client_builder
-                    // Set default timeout to be the full slot duration.
-                    .timeout(slot_duration)
-                    .build()
-                    .map_err(|e| format!("Unable to build HTTP client: {:?}", e))?;
-
-                // Use quicker timeouts if a fallback beacon node exists.
-                let timeouts = if i < last_beacon_node_index && !config.use_long_timeouts {
-                    info!(
-                        log,
-                        "Fallback endpoints are available, using optimized timeouts.";
-                    );
-                    Timeouts {
-                        attestation: slot_duration / HTTP_ATTESTATION_TIMEOUT_QUOTIENT,
-                        attester_duties: slot_duration / HTTP_ATTESTER_DUTIES_TIMEOUT_QUOTIENT,
-                        liveness: slot_duration / HTTP_LIVENESS_TIMEOUT_QUOTIENT,
-                        proposal: slot_duration / HTTP_PROPOSAL_TIMEOUT_QUOTIENT,
-                        proposer_duties: slot_duration / HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT,
-                        sync_committee_contribution: slot_duration
-                            / HTTP_SYNC_COMMITTEE_CONTRIBUTION_TIMEOUT_QUOTIENT,
-                        sync_duties: slot_duration / HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT,
-                        get_beacon_blocks_ssz: slot_duration
-                            / HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT,
-                        get_debug_beacon_states: slot_duration
-                            / HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT,
-                        get_deposit_snapshot: slot_duration / HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT,
-                    }
-                } else {
-                    Timeouts::set_all(slot_duration)
-                };
-
-                Ok(BeaconNodeHttpClient::from_components(
-                    url.clone(),
-                    beacon_node_http_client,
-                    timeouts,
-                ))
-            })
+        let proposer_nodes: Vec<BeaconNodeHttpClient> = config
+            .proposer_nodes
+            .iter()
+            .enumerate()
+            .map(beacon_node_setup)
             .collect::<Result<Vec<_>, String>>()?;
 
         let num_nodes = beacon_nodes.len();
@@ -325,6 +336,12 @@ impl ProductionValidatorClient {
             .map(CandidateBeaconNode::new)
             .collect();
 
+        let proposer_nodes_num = proposer_nodes.len();
+        let proposer_candidates = proposer_nodes
+            .into_iter()
+            .map(CandidateBeaconNode::new)
+            .collect();
+
         // Set the count for beacon node fallbacks excluding the primary beacon node.
         set_gauge(
             &http_metrics::metrics::ETH2_FALLBACK_CONFIGURED,
@@ -349,9 +366,16 @@ impl ProductionValidatorClient {
             log.clone(),
         );
 
+        let mut proposer_nodes: BeaconNodeFallback<_, T> = BeaconNodeFallback::new(
+            proposer_candidates,
+            config.disable_run_on_all,
+            context.eth2_config.spec.clone(),
+            log.clone(),
+        );
+
         // Perform some potentially long-running initialization tasks.
         let (genesis_time, genesis_validators_root) = tokio::select! {
-            tuple = init_from_beacon_node(&beacon_nodes, &context) => tuple?,
+            tuple = init_from_beacon_node(&beacon_nodes, &proposer_nodes, &context) => tuple?,
             () = context.executor.exit() => return Err("Shutting down".to_string())
         };
 
@@ -367,9 +391,14 @@ impl ProductionValidatorClient {
         );
 
         beacon_nodes.set_slot_clock(slot_clock.clone());
+        proposer_nodes.set_slot_clock(slot_clock.clone());
+
         let beacon_nodes = Arc::new(beacon_nodes);
         start_fallback_updater_service(context.clone(), beacon_nodes.clone())?;
 
+        let proposer_nodes = Arc::new(proposer_nodes);
+        start_fallback_updater_service(context.clone(), proposer_nodes.clone())?;
+
         let doppelganger_service = if config.enable_doppelganger_protection {
             Some(Arc::new(DoppelgangerService::new(
                 context
@@ -417,11 +446,6 @@ impl ProductionValidatorClient {
             slot_clock: slot_clock.clone(),
             beacon_nodes: beacon_nodes.clone(),
             validator_store: validator_store.clone(),
-            require_synced: if config.allow_unsynced_beacon_node {
-                RequireSynced::Yes
-            } else {
-                RequireSynced::No
-            },
             spec: context.eth2_config.spec.clone(),
             context: duties_context,
             enable_high_validator_count_metrics: config.enable_high_validator_count_metrics,
@@ -433,15 +457,21 @@ impl ProductionValidatorClient {
             ctx.shared.write().duties_service = Some(duties_service.clone());
         }
 
-        let block_service = BlockServiceBuilder::new()
+        let mut block_service_builder = BlockServiceBuilder::new()
             .slot_clock(slot_clock.clone())
             .validator_store(validator_store.clone())
             .beacon_nodes(beacon_nodes.clone())
             .runtime_context(context.service_context("block".into()))
             .graffiti(config.graffiti)
             .graffiti_file(config.graffiti_file.clone())
-            .block_delay(config.block_delay)
-            .build()?;
+            .block_delay(config.block_delay);
+
+        // If we have proposer nodes, add them to the block service builder.
+        if proposer_nodes_num > 0 {
+            block_service_builder = block_service_builder.proposer_nodes(proposer_nodes.clone());
+        }
+
+        let block_service = block_service_builder.build()?;
 
         let attestation_service = AttestationServiceBuilder::new()
             .duties_service(duties_service.clone())
@@ -546,6 +576,7 @@ impl ProductionValidatorClient {
             graffiti_flag: self.config.graffiti,
             spec: self.context.eth2_config.spec.clone(),
             config: self.config.http_api.clone(),
+            sse_logging_components: self.context.sse_logging_components.clone(),
             slot_clock: self.slot_clock.clone(),
             log: log.clone(),
             _phantom: PhantomData,
@@ -581,13 +612,32 @@ impl ProductionValidatorClient {
 
 async fn init_from_beacon_node(
     beacon_nodes: &BeaconNodeFallback,
+    proposer_nodes: &BeaconNodeFallback,
     context: &RuntimeContext,
 ) -> Result<(u64, Hash256), String> {
     loop {
-        beacon_nodes.update_unready_candidates().await;
+        beacon_nodes.update_all_candidates().await;
+        proposer_nodes.update_all_candidates().await;
+
         let num_available = beacon_nodes.num_available().await;
         let num_total = beacon_nodes.num_total();
-        if num_available > 0 {
+
+        let proposer_available = beacon_nodes.num_available().await;
+        let proposer_total = beacon_nodes.num_total();
+
+        if proposer_total > 0 && proposer_available == 0 {
+            warn!(
+                context.log(),
+                "Unable to connect to a proposer node";
+                "retry in" => format!("{} seconds", RETRY_DELAY.as_secs()),
+                "total_proposers" => proposer_total,
+                "available_proposers" => proposer_available,
+                "total_beacon_nodes" => num_total,
+                "available_beacon_nodes" => num_available,
+            );
+        }
+
+        if num_available > 0 && proposer_available == 0 {
             info!(
                 context.log(),
                 "Initialized beacon node connections";
@@ -595,6 +645,16 @@ async fn init_from_beacon_node(
                 "available" => num_available,
             );
             break;
+        } else if num_available > 0 {
+            info!(
+                context.log(),
+                "Initialized beacon node connections";
+                "total" => num_total,
+                "available" => num_available,
+                "proposers_available" => proposer_available,
+                "proposers_total" => proposer_total,
+            );
+            break;
         } else {
             warn!(
                 context.log(),
diff --git a/validator_client/src/preparation_service.rs b/validator_client/src/preparation_service.rs
index fc80f2ded..5bd93a505 100644
--- a/validator_client/src/preparation_service.rs
+++ b/validator_client/src/preparation_service.rs
@@ -332,7 +332,7 @@ impl PreparationService {
         match self
             .beacon_nodes
             .run(
-                RequireSynced::Yes,
+                RequireSynced::No,
                 OfflineOnFailure::Yes,
                 |beacon_node| async move {
                     beacon_node
@@ -451,7 +451,7 @@ impl PreparationService {
         match self
             .beacon_nodes
             .first_success(
-                RequireSynced::Yes,
+                RequireSynced::No,
                 OfflineOnFailure::No,
                 |beacon_node| async move {
                     beacon_node.post_validator_register_validator(batch).await
diff --git a/validator_client/src/signing_method/web3signer.rs b/validator_client/src/signing_method/web3signer.rs
index 33d08f9d6..d7d74c944 100644
--- a/validator_client/src/signing_method/web3signer.rs
+++ b/validator_client/src/signing_method/web3signer.rs
@@ -55,9 +55,9 @@ pub enum Web3SignerObject<'a, T: EthSpec, Payload: AbstractExecPayload> {
     Deposit {
         pubkey: PublicKeyBytes,
         withdrawal_credentials: Hash256,
-        #[serde(with = "eth2_serde_utils::quoted_u64")]
+        #[serde(with = "serde_utils::quoted_u64")]
         amount: u64,
-        #[serde(with = "eth2_serde_utils::bytes_4_hex")]
+        #[serde(with = "serde_utils::bytes_4_hex")]
         genesis_fork_version: [u8; 4],
     },
     RandaoReveal {
diff --git a/validator_client/src/sync_committee_service.rs b/validator_client/src/sync_committee_service.rs
index 3647396ed..cc20cedfc 100644
--- a/validator_client/src/sync_committee_service.rs
+++ b/validator_client/src/sync_committee_service.rs
@@ -178,7 +178,7 @@ impl SyncCommitteeService {
         let response = self
             .beacon_nodes
             .first_success(
-                RequireSynced::Yes,
+                RequireSynced::No,
                 OfflineOnFailure::Yes,
                 |beacon_node| async move {
                     match beacon_node.get_beacon_blocks_root(BlockId::Head).await {