Merge branch 'unstable' into eip4844

This commit is contained in:
Diva M 2023-04-04 12:07:09 -05:00
commit cb818152f3
No known key found for this signature in database
GPG Key ID: 1BAE5E01126680FE
80 changed files with 7663 additions and 236 deletions

699
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -89,6 +89,8 @@ members = [
"validator_client", "validator_client",
"validator_client/slashing_protection", "validator_client/slashing_protection",
"watch",
] ]
resolver = "2" resolver = "2"

View File

@ -38,15 +38,15 @@ system_health = { path = "../../common/system_health" }
directory = { path = "../../common/directory" } directory = { path = "../../common/directory" }
eth2_serde_utils = "0.1.1" eth2_serde_utils = "0.1.1"
operation_pool = { path = "../operation_pool" } operation_pool = { path = "../operation_pool" }
sensitive_url = { path = "../../common/sensitive_url" }
unused_port = {path = "../../common/unused_port"}
logging = { path = "../../common/logging" }
store = { path = "../store" }
[dev-dependencies] [dev-dependencies]
store = { path = "../store" }
environment = { path = "../../lighthouse/environment" } environment = { path = "../../lighthouse/environment" }
sensitive_url = { path = "../../common/sensitive_url" }
logging = { path = "../../common/logging" }
serde_json = "1.0.58" serde_json = "1.0.58"
proto_array = { path = "../../consensus/proto_array" } proto_array = { path = "../../consensus/proto_array" }
unused_port = {path = "../../common/unused_port"}
genesis = { path = "../genesis" } genesis = { path = "../genesis" }
[[test]] [[test]]

View File

@ -18,6 +18,7 @@ mod standard_block_rewards;
mod state_id; mod state_id;
mod sync_committee_rewards; mod sync_committee_rewards;
mod sync_committees; mod sync_committees;
pub mod test_utils;
mod ui; mod ui;
mod validator_inclusion; mod validator_inclusion;
mod version; mod version;

View File

@ -1,10 +1,10 @@
use crate::{Config, Context};
use beacon_chain::{ use beacon_chain::{
test_utils::{BeaconChainHarness, BoxedMutator, Builder, EphemeralHarnessType}, test_utils::{BeaconChainHarness, BoxedMutator, Builder, EphemeralHarnessType},
BeaconChain, BeaconChainTypes, BeaconChain, BeaconChainTypes,
}; };
use directory::DEFAULT_ROOT_DIR; use directory::DEFAULT_ROOT_DIR;
use eth2::{BeaconNodeHttpClient, Timeouts}; use eth2::{BeaconNodeHttpClient, Timeouts};
use http_api::{Config, Context};
use lighthouse_network::{ use lighthouse_network::{
discv5::enr::{CombinedKey, EnrBuilder}, discv5::enr::{CombinedKey, EnrBuilder},
libp2p::{ libp2p::{
@ -179,7 +179,7 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>(
let eth1_service = let eth1_service =
eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()).unwrap(); eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()).unwrap();
let context = Arc::new(Context { let ctx = Arc::new(Context {
config: Config { config: Config {
enabled: true, enabled: true,
listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
@ -190,19 +190,19 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>(
data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR), data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR),
spec_fork_name: None, spec_fork_name: None,
}, },
chain: Some(chain.clone()), chain: Some(chain),
network_senders: Some(network_senders), network_senders: Some(network_senders),
network_globals: Some(network_globals), network_globals: Some(network_globals),
eth1_service: Some(eth1_service), eth1_service: Some(eth1_service),
log, log,
}); });
let ctx = context.clone();
let (shutdown_tx, shutdown_rx) = oneshot::channel(); let (shutdown_tx, shutdown_rx) = oneshot::channel();
let server_shutdown = async { let server_shutdown = async {
// It's not really interesting why this triggered, just that it happened. // It's not really interesting why this triggered, just that it happened.
let _ = shutdown_rx.await; let _ = shutdown_rx.await;
}; };
let (listening_socket, server) = http_api::serve(ctx, server_shutdown).unwrap(); let (listening_socket, server) = crate::serve(ctx, server_shutdown).unwrap();
ApiServer { ApiServer {
server, server,

View File

@ -1,11 +1,11 @@
//! Tests for API behaviour across fork boundaries. //! Tests for API behaviour across fork boundaries.
use crate::common::*;
use beacon_chain::{ use beacon_chain::{
test_utils::{RelativeSyncCommittee, DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME}, test_utils::{RelativeSyncCommittee, DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME},
StateSkipConfig, StateSkipConfig,
}; };
use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee}; use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee};
use genesis::{bls_withdrawal_credentials, interop_genesis_state_with_withdrawal_credentials}; use genesis::{bls_withdrawal_credentials, interop_genesis_state_with_withdrawal_credentials};
use http_api::test_utils::*;
use std::collections::HashSet; use std::collections::HashSet;
use types::{ use types::{
test_utils::{generate_deterministic_keypair, generate_deterministic_keypairs}, test_utils::{generate_deterministic_keypair, generate_deterministic_keypairs},

View File

@ -1,11 +1,11 @@
//! Generic tests that make use of the (newer) `InteractiveApiTester` //! Generic tests that make use of the (newer) `InteractiveApiTester`
use crate::common::*;
use beacon_chain::{ use beacon_chain::{
chain_config::ReOrgThreshold, chain_config::ReOrgThreshold,
test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy}, test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy},
}; };
use eth2::types::DepositContractData; use eth2::types::DepositContractData;
use execution_layer::{ForkchoiceState, PayloadAttributes}; use execution_layer::{ForkchoiceState, PayloadAttributes};
use http_api::test_utils::InteractiveTester;
use parking_lot::Mutex; use parking_lot::Mutex;
use slot_clock::SlotClock; use slot_clock::SlotClock;
use state_processing::{ use state_processing::{

View File

@ -1,6 +1,5 @@
#![cfg(not(debug_assertions))] // Tests are too slow in debug. #![cfg(not(debug_assertions))] // Tests are too slow in debug.
pub mod common;
pub mod fork_tests; pub mod fork_tests;
pub mod interactive_tests; pub mod interactive_tests;
pub mod tests; pub mod tests;

View File

@ -1,4 +1,3 @@
use crate::common::{create_api_server, create_api_server_on_port, ApiServer};
use beacon_chain::test_utils::RelativeSyncCommittee; use beacon_chain::test_utils::RelativeSyncCommittee;
use beacon_chain::{ use beacon_chain::{
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType},
@ -18,7 +17,10 @@ use execution_layer::test_utils::{
}; };
use futures::stream::{Stream, StreamExt}; use futures::stream::{Stream, StreamExt};
use futures::FutureExt; use futures::FutureExt;
use http_api::{BlockId, StateId}; use http_api::{
test_utils::{create_api_server, create_api_server_on_port, ApiServer},
BlockId, StateId,
};
use lighthouse_network::{Enr, EnrExt, PeerId}; use lighthouse_network::{Enr, EnrExt, PeerId};
use network::NetworkReceivers; use network::NetworkReceivers;
use proto_array::ExecutionStatus; use proto_array::ExecutionStatus;

View File

@ -22,7 +22,7 @@ use lighthouse_network::PeerId;
pub use reqwest; pub use reqwest;
use reqwest::{IntoUrl, RequestBuilder, Response}; use reqwest::{IntoUrl, RequestBuilder, Response};
pub use reqwest::{StatusCode, Url}; pub use reqwest::{StatusCode, Url};
pub use sensitive_url::SensitiveUrl; pub use sensitive_url::{SensitiveError, SensitiveUrl};
use serde::{de::DeserializeOwned, Serialize}; use serde::{de::DeserializeOwned, Serialize};
use std::convert::TryFrom; use std::convert::TryFrom;
use std::fmt; use std::fmt;

View File

@ -13,7 +13,7 @@ use crate::{
BeaconState, ChainSpec, DepositTreeSnapshot, Epoch, EthSpec, FinalizedExecutionBlock, BeaconState, ChainSpec, DepositTreeSnapshot, Epoch, EthSpec, FinalizedExecutionBlock,
GenericResponse, ValidatorId, GenericResponse, ValidatorId,
}, },
BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, StateId, StatusCode, BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, Slot, StateId, StatusCode,
}; };
use proto_array::core::ProtoArray; use proto_array::core::ProtoArray;
use reqwest::IntoUrl; use reqwest::IntoUrl;
@ -566,4 +566,73 @@ impl BeaconNodeHttpClient {
self.post_with_response(path, &()).await self.post_with_response(path, &()).await
} }
///
/// Analysis endpoints.
///
/// `GET` lighthouse/analysis/block_rewards?start_slot,end_slot
pub async fn get_lighthouse_analysis_block_rewards(
&self,
start_slot: Slot,
end_slot: Slot,
) -> Result<Vec<BlockReward>, Error> {
let mut path = self.server.full.clone();
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("lighthouse")
.push("analysis")
.push("block_rewards");
path.query_pairs_mut()
.append_pair("start_slot", &start_slot.to_string())
.append_pair("end_slot", &end_slot.to_string());
self.get(path).await
}
/// `GET` lighthouse/analysis/block_packing_efficiency?start_epoch,end_epoch
///
/// Fetches block-packing-efficiency analysis for the inclusive epoch range
/// `start_epoch..=end_epoch`.
pub async fn get_lighthouse_analysis_block_packing(
    &self,
    start_epoch: Epoch,
    end_epoch: Epoch,
) -> Result<Vec<BlockPackingEfficiency>, Error> {
    let mut path = self.server.full.clone();

    path.path_segments_mut()
        .map_err(|()| Error::InvalidUrl(self.server.clone()))?
        .push("lighthouse")
        .push("analysis")
        // The URL segment is `block_packing_efficiency` (the method name is
        // shortened to `block_packing` for brevity).
        .push("block_packing_efficiency");

    path.query_pairs_mut()
        .append_pair("start_epoch", &start_epoch.to_string())
        .append_pair("end_epoch", &end_epoch.to_string());

    self.get(path).await
}
/// `GET` lighthouse/analysis/attestation_performance/{index}?start_epoch,end_epoch
///
/// Fetches attestation-performance analysis for the inclusive epoch range
/// `start_epoch..=end_epoch`. `target` is substituted for the `{index}` path
/// segment verbatim (it is percent-encoded by `push`).
/// NOTE(review): valid `target` values are defined by the server-side handler
/// — confirm against the HTTP API implementation.
pub async fn get_lighthouse_analysis_attestation_performance(
    &self,
    start_epoch: Epoch,
    end_epoch: Epoch,
    target: String,
) -> Result<Vec<AttestationPerformance>, Error> {
    let mut path = self.server.full.clone();

    path.path_segments_mut()
        .map_err(|()| Error::InvalidUrl(self.server.clone()))?
        .push("lighthouse")
        .push("analysis")
        .push("attestation_performance")
        // The target becomes the final path segment.
        .push(&target);

    path.query_pairs_mut()
        .append_pair("start_epoch", &start_epoch.to_string())
        .append_pair("end_epoch", &end_epoch.to_string());

    self.get(path).await
}
} }

1
watch/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
config.yaml

45
watch/Cargo.toml Normal file
View File

@ -0,0 +1,45 @@
[package]
name = "watch"
version = "0.1.0"
edition = "2018"
[lib]
name = "watch"
path = "src/lib.rs"
[[bin]]
name = "watch"
path = "src/main.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
clap = "2.33.3"
log = "0.4.14"
env_logger = "0.9.0"
types = { path = "../consensus/types" }
eth2 = { path = "../common/eth2" }
beacon_node = { path = "../beacon_node"}
tokio = { version = "1.14.0", features = ["time"] }
axum = "0.5.15"
hyper = "0.14.20"
serde = "1.0.116"
serde_json = "1.0.58"
reqwest = { version = "0.11.0", features = ["json","stream"] }
url = "2.2.2"
rand = "0.7.3"
diesel = { version = "2.0.2", features = ["postgres", "r2d2"] }
diesel_migrations = { version = "2.0.0", features = ["postgres"] }
byteorder = "1.4.3"
bls = { path = "../crypto/bls" }
hex = "0.4.2"
r2d2 = "0.8.9"
serde_yaml = "0.8.24"
[dev-dependencies]
tokio-postgres = "0.7.5"
http_api = { path = "../beacon_node/http_api" }
beacon_chain = { path = "../beacon_node/beacon_chain" }
network = { path = "../beacon_node/network" }
testcontainers = "0.14.0"
unused_port = { path = "../common/unused_port" }

460
watch/README.md Normal file
View File

@ -0,0 +1,460 @@
## beacon.watch
>beacon.watch is pre-MVP and still under active development and subject to change.
beacon.watch is an Ethereum Beacon Chain monitoring platform whose goal is to provide fast access to
data which is:
1. Not already stored natively in the Beacon Chain
2. Too specialized for Block Explorers
3. Too sensitive for public Block Explorers
### Requirements
- `git`
- `rust` : https://rustup.rs/
- `libpq` : https://www.postgresql.org/download/
- `diesel_cli` :
```
cargo install diesel_cli --no-default-features --features postgres
```
- `docker` : https://docs.docker.com/engine/install/
- `docker-compose` : https://docs.docker.com/compose/install/
### Setup
1. Setup the database:
```
cd postgres_docker_compose
docker-compose up
```
1. Ensure the tests pass:
```
cargo test --release
```
1. Drop the database (if it already exists) and run the required migrations:
```
diesel database reset --database-url postgres://postgres:postgres@localhost/dev
```
1. Ensure a synced Lighthouse beacon node with historical states is available
at `localhost:5052`.
The smaller the value of `--slots-per-restore-point` the faster beacon.watch
will be able to sync to the beacon node.
1. Run the updater daemon:
```
cargo run --release -- run-updater
```
1. Start the HTTP API server:
```
cargo run --release -- serve
```
1. Ensure connectivity:
```
curl "http://localhost:5059/v1/slots/highest"
```
> Functionality on macOS has not been tested. Windows is not supported.
### Configuration
beacon.watch can be configured through the use of a config file.
Available options can be seen in `config.yaml.default`.
You can specify a config file during runtime:
```
cargo run -- run-updater --config path/to/config.yaml
cargo run -- serve --config path/to/config.yaml
```
You can specify only the parts of the config file which you need changed.
Missing values will remain as their defaults.
For example, if you wish to run with default settings but only wish to alter `log_level`
your config file would be:
```yaml
# config.yaml
log_level: "info"
```
### Available Endpoints
As beacon.watch continues to develop, more endpoints will be added.
> In these examples any data containing information from blockprint has either been redacted or fabricated.
#### `/v1/slots/{slot}`
```bash
curl "http://localhost:5059/v1/slots/4635296"
```
```json
{
"slot": "4635296",
"root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
"skipped": false,
"beacon_block": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62"
}
```
#### `/v1/slots?start_slot={}&end_slot={}`
```bash
curl "http://localhost:5059/v1/slots?start_slot=4635296&end_slot=4635297"
```
```json
[
{
"slot": "4635297",
"root": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182",
"skipped": false,
"beacon_block": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182"
},
{
"slot": "4635296",
"root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
"skipped": false,
"beacon_block": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62"
}
]
```
#### `/v1/slots/lowest`
```bash
curl "http://localhost:5059/v1/slots/lowest"
```
```json
{
"slot": "4635296",
"root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
"skipped": false,
"beacon_block": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62"
}
```
#### `/v1/slots/highest`
```bash
curl "http://localhost:5059/v1/slots/highest"
```
```json
{
"slot": "4635358",
"root": "0xe9eff13560688f1bf15cf07b60c84963d4d04a4a885ed0eb19ceb8450011894b",
"skipped": false,
"beacon_block": "0xe9eff13560688f1bf15cf07b60c84963d4d04a4a885ed0eb19ceb8450011894b"
}
```
#### `/v1/slots/{slot}/block`
```bash
curl "http://localhost:5059/v1/slots/4635296/block"
```
```json
{
"slot": "4635296",
"root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
"parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b"
}
```
#### `/v1/blocks/{block_id}`
```bash
curl "http://localhost:5059/v1/blocks/4635296"
# OR
curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62"
```
```json
{
"slot": "4635296",
"root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
"parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b"
}
```
#### `/v1/blocks?start_slot={}&end_slot={}`
```bash
curl "http://localhost:5059/v1/blocks?start_slot=4635296&end_slot=4635297"
```
```json
[
{
"slot": "4635297",
"root": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182",
"parent_root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62"
},
{
"slot": "4635296",
"root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
"parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b"
}
]
```
#### `/v1/blocks/{block_id}/previous`
```bash
curl "http://localhost:5059/v1/blocks/4635297/previous"
# OR
curl "http://localhost:5059/v1/blocks/0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182/previous"
```
```json
{
"slot": "4635296",
"root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
"parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b"
}
```
#### `/v1/blocks/{block_id}/next`
```bash
curl "http://localhost:5059/v1/blocks/4635296/next"
# OR
curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/next"
```
```json
{
"slot": "4635297",
"root": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182",
"parent_root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62"
}
```
#### `/v1/blocks/lowest`
```bash
curl "http://localhost:5059/v1/blocks/lowest"
```
```json
{
"slot": "4635296",
"root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
"parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b"
}
```
#### `/v1/blocks/highest`
```bash
curl "http://localhost:5059/v1/blocks/highest"
```
```json
{
"slot": "4635358",
"root": "0xe9eff13560688f1bf15cf07b60c84963d4d04a4a885ed0eb19ceb8450011894b",
"parent_root": "0xb66e05418bb5b1d4a965c994e1f0e5b5f0d7b780e0df12f3f6321510654fa1d2"
}
```
#### `/v1/blocks/{block_id}/proposer`
```bash
curl "http://localhost:5059/v1/blocks/4635296/proposer"
# OR
curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/proposer"
```
```json
{
"slot": "4635296",
"proposer_index": 223126,
"graffiti": ""
}
```
#### `/v1/blocks/{block_id}/rewards`
```bash
curl "http://localhost:5059/v1/blocks/4635296/rewards"
# OR
curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/rewards"
```
```json
{
"slot": "4635296",
"total": 25380059,
"attestation_reward": 24351867,
"sync_committee_reward": 1028192
}
```
#### `/v1/blocks/{block_id}/packing`
```bash
curl "http://localhost:5059/v1/blocks/4635296/packing"
# OR
curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/packing"
```
```json
{
"slot": "4635296",
"available": 16152,
"included": 13101,
"prior_skip_slots": 0
}
```
#### `/v1/validators/{validator}`
```bash
curl "http://localhost:5059/v1/validators/1"
# OR
curl "http://localhost:5059/v1/validators/0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c"
```
```json
{
"index": 1,
"public_key": "0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c",
"status": "active_ongoing",
"client": null,
"activation_epoch": 0,
"exit_epoch": null
}
```
#### `/v1/validators/{validator}/attestation/{epoch}`
```bash
curl "http://localhost:5059/v1/validators/1/attestation/144853"
# OR
curl "http://localhost:5059/v1/validators/0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c/attestation/144853"
```
```json
{
"index": 1,
"epoch": "144853",
"source": true,
"head": true,
"target": true
}
```
#### `/v1/validators/missed/{vote}/{epoch}`
```bash
curl "http://localhost:5059/v1/validators/missed/head/144853"
```
```json
[
63,
67,
98,
...
]
```
#### `/v1/validators/missed/{vote}/{epoch}/graffiti`
```bash
curl "http://localhost:5059/v1/validators/missed/head/144853/graffiti"
```
```json
{
"Mr F was here": 3,
"Lighthouse/v3.1.0-aa022f4": 5,
...
}
```
#### `/v1/clients/missed/{vote}/{epoch}`
```bash
curl "http://localhost:5059/v1/clients/missed/source/144853"
```
```json
{
"Lighthouse": 100,
"Lodestar": 100,
"Nimbus": 100,
"Prysm": 100,
"Teku": 100,
"Unknown": 100
}
```
#### `/v1/clients/missed/{vote}/{epoch}/percentages`
Note that this endpoint expresses the following:
```
What percentage of each client implementation missed this vote?
```
```bash
curl "http://localhost:5059/v1/clients/missed/target/144853/percentages"
```
```json
{
"Lighthouse": 0.51234567890,
"Lodestar": 0.51234567890,
"Nimbus": 0.51234567890,
"Prysm": 0.09876543210,
"Teku": 0.09876543210,
"Unknown": 0.05647382910
}
```
#### `/v1/clients/missed/{vote}/{epoch}/percentages/relative`
Note that this endpoint expresses the following:
```
For the validators which did miss this vote, what percentage of them were from each client implementation?
```
You can check these values against the output of `/v1/clients/percentages` to see any discrepancies.
```bash
curl "http://localhost:5059/v1/clients/missed/target/144853/percentages/relative"
```
```json
{
"Lighthouse": 11.11111111111111,
"Lodestar": 11.11111111111111,
"Nimbus": 11.11111111111111,
"Prysm": 16.66666666666667,
"Teku": 16.66666666666667,
"Unknown": 33.33333333333333
}
```
#### `/v1/clients`
```bash
curl "http://localhost:5059/v1/clients"
```
```json
{
"Lighthouse": 5000,
"Lodestar": 5000,
"Nimbus": 5000,
"Prysm": 5000,
"Teku": 5000,
"Unknown": 5000
}
```
#### `/v1/clients/percentages`
```bash
curl "http://localhost:5059/v1/clients/percentages"
```
```json
{
"Lighthouse": 16.66666666666667,
"Lodestar": 16.66666666666667,
"Nimbus": 16.66666666666667,
"Prysm": 16.66666666666667,
"Teku": 16.66666666666667,
"Unknown": 16.66666666666667
}
```
### Future work
- New tables
- `skip_slots`?
- More API endpoints
- `/v1/proposers?start_epoch={}&end_epoch={}` and similar
- `/v1/validators/{status}/count`
- Concurrently backfill and forwards fill, so forwards fill is not bottlenecked by large backfills.
- Better/prettier (async?) logging.
- Connect to a range of beacon_nodes to sync different components concurrently.
Generally, processing certain api queries such as `block_packing` and `attestation_performance` take the longest to sync.
### Architecture
Connection Pooling:
- 1 Pool for Updater (read and write)
- 1 Pool for HTTP Server (should be read only, although not sure if we can enforce this)

49
watch/config.yaml.default Normal file
View File

@ -0,0 +1,49 @@
---
database:
user: "postgres"
password: "postgres"
dbname: "dev"
default_dbname: "postgres"
host: "localhost"
port: 5432
connect_timeout_millis: 2000
server:
listen_addr: "127.0.0.1"
listen_port: 5059
updater:
# The URL of the Beacon Node to perform sync tasks with.
# Cannot yet accept multiple beacon nodes.
beacon_node_url: "http://localhost:5052"
# The number of epochs to backfill. Must be below 100.
max_backfill_size_epochs: 2
# The epoch at which to stop backfilling.
backfill_stop_epoch: 0
# Whether to sync the attestations table.
attestations: true
# Whether to sync the proposer_info table.
proposer_info: true
# Whether to sync the block_rewards table.
block_rewards: true
# Whether to sync the block_packing table.
block_packing: true
blockprint:
# Whether to sync client information from blockprint.
enabled: false
# The URL of the blockprint server.
url: ""
# The username used to authenticate to the blockprint server.
username: ""
# The password used to authenticate to the blockprint server.
password: ""
# Log level.
# Valid options are:
# - "trace"
# - "debug"
# - "info"
# - "warn"
# - "error"
log_level: "debug"

5
watch/diesel.toml Normal file
View File

@ -0,0 +1,5 @@
# For documentation on how to configure this file,
# see diesel.rs/guides/configuring-diesel-cli
[print_schema]
file = "src/database/schema.rs"

View File

View File

@ -0,0 +1,6 @@
-- This file was automatically created by Diesel to setup helper functions
-- and other internal bookkeeping. This file is safe to edit, any future
-- changes will be added to existing projects as new migrations.
DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass);
DROP FUNCTION IF EXISTS diesel_set_updated_at();

View File

@ -0,0 +1,36 @@
-- This file was automatically created by Diesel to setup helper functions
-- and other internal bookkeeping. This file is safe to edit, any future
-- changes will be added to existing projects as new migrations.
-- Sets up a trigger for the given table to automatically set a column called
-- `updated_at` whenever the row is modified (unless `updated_at` was included
-- in the modified columns)
--
-- # Example
--
-- ```sql
-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW());
--
-- SELECT diesel_manage_updated_at('users');
-- ```
CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$
BEGIN
EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s
FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl);
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$
BEGIN
IF (
NEW IS DISTINCT FROM OLD AND
NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at
) THEN
NEW.updated_at := current_timestamp;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;

View File

@ -0,0 +1 @@
DROP TABLE canonical_slots

View File

@ -0,0 +1,6 @@
-- One row per slot of the canonical chain, whether or not a block was
-- produced for that slot.
CREATE TABLE canonical_slots (
slot integer PRIMARY KEY,
-- Root associated with this slot.
root bytea NOT NULL,
-- True when no block was produced for this slot (a "skip slot").
skipped boolean NOT NULL,
-- Block root at this slot. Nullable (e.g. for skip slots); UNIQUE so it can
-- be referenced by `beacon_blocks.root`.
beacon_block bytea UNIQUE
)

View File

@ -0,0 +1 @@
DROP TABLE beacon_blocks

View File

@ -0,0 +1,7 @@
-- One row per beacon block known to the database.
CREATE TABLE beacon_blocks (
-- Cascade so deleting a canonical slot also removes its block row.
slot integer PRIMARY KEY REFERENCES canonical_slots(slot) ON DELETE CASCADE,
root bytea REFERENCES canonical_slots(beacon_block) NOT NULL,
parent_root bytea NOT NULL,
attestation_count integer NOT NULL,
-- Nullable. NOTE(review): presumably NULL for blocks without an execution
-- payload — confirm against the updater.
transaction_count integer
)

View File

@ -0,0 +1 @@
DROP TABLE validators

View File

@ -0,0 +1,7 @@
-- One row per known validator, keyed by validator index.
CREATE TABLE validators (
index integer PRIMARY KEY,
public_key bytea NOT NULL,
status text NOT NULL,
-- Nullable: not every validator has been activated or exited yet.
activation_epoch integer,
exit_epoch integer
)

View File

@ -0,0 +1 @@
DROP TABLE proposer_info

View File

@ -0,0 +1,5 @@
-- Proposer and graffiti for each stored beacon block.
CREATE TABLE proposer_info (
-- Cascade so removing a block also removes its proposer info.
slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE,
proposer_index integer REFERENCES validators(index) ON DELETE CASCADE NOT NULL,
graffiti text NOT NULL
)

View File

@ -0,0 +1 @@
DROP TABLE active_config

View File

@ -0,0 +1,5 @@
-- Singleton table (the CHECK constraint forces id = 1) recording which chain
-- configuration the database was populated against.
CREATE TABLE active_config (
id integer PRIMARY KEY CHECK (id=1),
config_name text NOT NULL,
slots_per_epoch integer NOT NULL
)

View File

@ -0,0 +1 @@
DROP TABLE blockprint

View File

@ -0,0 +1,4 @@
-- Per-block client-fingerprinting result. NOTE(review): populated from the
-- external blockprint service when enabled in config — confirm against the
-- updater.
CREATE TABLE blockprint (
slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE,
best_guess text NOT NULL
)

View File

@ -0,0 +1 @@
DROP TABLE block_rewards

View File

@ -0,0 +1,6 @@
-- Reward breakdown for each stored beacon block.
CREATE TABLE block_rewards (
slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE,
-- Total reward; the attestation and sync-committee components follow.
total integer NOT NULL,
attestation_reward integer NOT NULL,
sync_committee_reward integer NOT NULL
)

View File

@ -0,0 +1 @@
DROP TABLE block_packing

View File

@ -0,0 +1,6 @@
-- Attestation-packing efficiency for each stored beacon block: how many
-- attestations were available vs. actually included.
CREATE TABLE block_packing (
slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE,
available integer NOT NULL,
included integer NOT NULL,
prior_skip_slots integer NOT NULL
)

View File

@ -0,0 +1 @@
DROP TABLE suboptimal_attestations

View File

@ -0,0 +1,8 @@
-- Per-validator, per-epoch attestation vote quality (source/head/target).
-- Keyed by the first slot of the epoch plus the validator index.
CREATE TABLE suboptimal_attestations (
-- NOTE(review): the `% 32` CHECK hard-codes mainnet slots-per-epoch even
-- though `active_config.slots_per_epoch` is configurable — confirm.
epoch_start_slot integer CHECK (epoch_start_slot % 32 = 0) REFERENCES canonical_slots(slot) ON DELETE CASCADE,
index integer NOT NULL REFERENCES validators(index) ON DELETE CASCADE,
source boolean NOT NULL,
head boolean NOT NULL,
target boolean NOT NULL,
PRIMARY KEY(epoch_start_slot, index)
)

View File

@ -0,0 +1,2 @@
ALTER TABLE beacon_blocks
DROP COLUMN withdrawal_count;

View File

@ -0,0 +1,3 @@
ALTER TABLE beacon_blocks
ADD COLUMN withdrawal_count integer;

View File

@ -0,0 +1,16 @@
version: "3"
services:
postgres:
image: postgres:12.3-alpine
restart: always
environment:
POSTGRES_PASSWORD: postgres
POSTGRES_USER: postgres
volumes:
- postgres:/var/lib/postgresql/data
ports:
- 127.0.0.1:5432:5432
volumes:
postgres:

View File

@ -0,0 +1,140 @@
use crate::database::{
schema::{beacon_blocks, block_packing},
watch_types::{WatchHash, WatchSlot},
Error, PgConn, MAX_SIZE_BATCH_INSERT,
};
use diesel::prelude::*;
use diesel::{Insertable, Queryable};
use log::debug;
use serde::{Deserialize, Serialize};
use std::time::Instant;
/// Row type for the `block_packing` table.
///
/// Field order must match the table's column order, since diesel's derived
/// `Queryable` maps columns to fields by position.
#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)]
#[diesel(table_name = block_packing)]
pub struct WatchBlockPacking {
    pub slot: WatchSlot,
    // Attestations available for inclusion in this block.
    pub available: i32,
    // Attestations actually included in this block.
    pub included: i32,
    pub prior_skip_slots: i32,
}
/// Insert a batch of values into the `block_packing` table.
///
/// On a conflict, it will do nothing, leaving the old value.
pub fn insert_batch_block_packing(
    conn: &mut PgConn,
    packing: Vec<WatchBlockPacking>,
) -> Result<(), Error> {
    use self::block_packing::dsl::*;

    let start = Instant::now();
    let mut count = 0;

    // Insert in bounded chunks to keep each statement's bind-parameter count
    // within limits.
    for batch in packing.chunks(MAX_SIZE_BATCH_INSERT) {
        let inserted = diesel::insert_into(block_packing)
            .values(batch)
            .on_conflict_do_nothing()
            .execute(conn)?;
        count += inserted;
    }

    let time_taken = start.elapsed();
    debug!("Block packing inserted, count: {count}, time taken: {time_taken:?}");
    Ok(())
}
/// Selects the row from the `block_packing` table where `slot` is minimum.
pub fn get_lowest_block_packing(conn: &mut PgConn) -> Result<Option<WatchBlockPacking>, Error> {
    use self::block_packing::dsl::*;
    let start = Instant::now();

    // Ascending slot order: the first row is the lowest slot present.
    // `optional` converts a NotFound error into Ok(None).
    let row = block_packing
        .order_by(slot.asc())
        .limit(1)
        .first::<WatchBlockPacking>(conn)
        .optional()?;

    let time_taken = start.elapsed();
    debug!("Block packing requested: lowest, time_taken: {time_taken:?}");
    Ok(row)
}
/// Selects the row from the `block_packing` table where `slot` is maximum.
pub fn get_highest_block_packing(conn: &mut PgConn) -> Result<Option<WatchBlockPacking>, Error> {
    use self::block_packing::dsl::*;
    let start = Instant::now();

    // Descending slot order: the first row is the highest slot present.
    // `optional` converts a NotFound error into Ok(None).
    let row = block_packing
        .order_by(slot.desc())
        .limit(1)
        .first::<WatchBlockPacking>(conn)
        .optional()?;

    let time_taken = start.elapsed();
    debug!("Block packing requested: highest, time_taken: {time_taken:?}");
    Ok(row)
}
/// Selects a single row of the `block_packing` table corresponding to a given `root_query`.
pub fn get_block_packing_by_root(
    conn: &mut PgConn,
    root_query: WatchHash,
) -> Result<Option<WatchBlockPacking>, Error> {
    use self::beacon_blocks::dsl::{beacon_blocks, root};
    use self::block_packing::dsl::*;
    let start = Instant::now();

    // Join through `beacon_blocks` so the packing row can be located by block
    // root rather than slot. `optional` converts NotFound into Ok(None).
    let row = beacon_blocks
        .inner_join(block_packing)
        .select((slot, available, included, prior_skip_slots))
        .filter(root.eq(root_query))
        .first::<WatchBlockPacking>(conn)
        .optional()?;

    let time_taken = start.elapsed();
    debug!("Block packing requested: {root_query}, time_taken: {time_taken:?}");
    Ok(row)
}
/// Selects a single row of the `block_packing` table corresponding to a given `slot_query`.
pub fn get_block_packing_by_slot(
    conn: &mut PgConn,
    slot_query: WatchSlot,
) -> Result<Option<WatchBlockPacking>, Error> {
    use self::block_packing::dsl::*;
    let start = Instant::now();

    // `optional` converts a NotFound error into Ok(None).
    let row = block_packing
        .filter(slot.eq(slot_query))
        .first::<WatchBlockPacking>(conn)
        .optional()?;

    let time_taken = start.elapsed();
    debug!("Block packing requested: {slot_query}, time_taken: {time_taken:?}");
    Ok(row)
}
/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding
/// row in `block_packing`.
#[allow(dead_code)]
pub fn get_unknown_block_packing(
    conn: &mut PgConn,
    slots_per_epoch: u64,
) -> Result<Vec<Option<WatchSlot>>, Error> {
    use self::beacon_blocks::dsl::{beacon_blocks, root, slot};
    use self::block_packing::dsl::block_packing;

    // Left join so blocks without a packing row still appear (with NULLs on
    // the packing side).
    let join = beacon_blocks.left_join(block_packing);

    let result = join
        .select(slot)
        // NOTE(review): `root` is imported from the `beacon_blocks` dsl, which
        // is declared NOT NULL — confirm diesel resolves this null-check
        // against the joined `block_packing` side as intended.
        .filter(root.is_null())
        // Block packing cannot be retrieved for epoch 0 so we need to exclude them.
        // NOTE(review): `slots_per_epoch as i32` truncates for values above
        // i32::MAX — presumably unreachable in practice, but confirm.
        .filter(slot.ge(slots_per_epoch as i32))
        .order_by(slot.desc())
        .nullable()
        .load::<Option<WatchSlot>>(conn)?;

    Ok(result)
}

View File

@ -0,0 +1,38 @@
pub mod database;
pub mod server;
pub mod updater;
use crate::database::watch_types::WatchSlot;
use crate::updater::error::Error;
pub use database::{
get_block_packing_by_root, get_block_packing_by_slot, get_highest_block_packing,
get_lowest_block_packing, get_unknown_block_packing, insert_batch_block_packing,
WatchBlockPacking,
};
pub use server::block_packing_routes;
use eth2::BeaconNodeHttpClient;
use types::Epoch;
/// Sends a request to `lighthouse/analysis/block_packing`.
/// Formats the response into a vector of `WatchBlockPacking`.
///
/// Will fail if `start_epoch == 0`.
pub async fn get_block_packing(
    bn: &BeaconNodeHttpClient,
    start_epoch: Epoch,
    end_epoch: Epoch,
) -> Result<Vec<WatchBlockPacking>, Error> {
    let packing = bn
        .get_lighthouse_analysis_block_packing(start_epoch, end_epoch)
        .await?;
    // Convert the API rows into database rows, narrowing counts to the i32 columns.
    let result = packing
        .into_iter()
        .map(|data| WatchBlockPacking {
            slot: WatchSlot::from_slot(data.slot),
            available: data.available_attestations as i32,
            included: data.included_attestations as i32,
            prior_skip_slots: data.prior_skip_slots as i32,
        })
        .collect();
    Ok(result)
}

View File

@ -0,0 +1,31 @@
use crate::block_packing::database::{
get_block_packing_by_root, get_block_packing_by_slot, WatchBlockPacking,
};
use crate::database::{get_connection, PgPool, WatchHash, WatchSlot};
use crate::server::Error;
use axum::{extract::Path, routing::get, Extension, Json, Router};
use eth2::types::BlockId;
use std::str::FromStr;
pub async fn get_block_packing(
Path(block_query): Path<String>,
Extension(pool): Extension<PgPool>,
) -> Result<Json<Option<WatchBlockPacking>>, Error> {
let mut conn = get_connection(&pool).map_err(Error::Database)?;
match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? {
BlockId::Root(root) => Ok(Json(get_block_packing_by_root(
&mut conn,
WatchHash::from_hash(root),
)?)),
BlockId::Slot(slot) => Ok(Json(get_block_packing_by_slot(
&mut conn,
WatchSlot::from_slot(slot),
)?)),
_ => Err(Error::BadRequest),
}
}
/// Router exposing the block packing endpoint.
pub fn block_packing_routes() -> Router {
    let router = Router::new();
    router.route("/v1/blocks/:block/packing", get(get_block_packing))
}

View File

@ -0,0 +1,211 @@
use crate::database::{self, Error as DbError};
use crate::updater::{Error, UpdateHandler};
use crate::block_packing::get_block_packing;
use eth2::types::{Epoch, EthSpec};
use log::{debug, error, warn};
/// Maximum number of epochs of block packing data requested in a single API call.
const MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING: u64 = 50;
impl<T: EthSpec> UpdateHandler<T> {
    /// Forward fills the `block_packing` table starting from the entry with the
    /// highest slot.
    ///
    /// It constructs a request to the `get_block_packing` API with:
    /// `start_epoch` -> highest completely filled epoch + 1 (or epoch of lowest beacon block)
    /// `end_epoch` -> epoch of highest beacon block
    ///
    /// It will resync the latest epoch if it is not fully filled.
    /// That is, `if highest_filled_slot % slots_per_epoch != 31`
    /// This means that if the last slot of an epoch is a skip slot, the whole epoch will be
    /// resynced during the next head update.
    ///
    /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`.
    pub async fn fill_block_packing(&mut self) -> Result<(), Error> {
        let mut conn = database::get_connection(&self.pool)?;

        // Get the slot of the highest entry in the `block_packing` table,
        // erroring out early if block packing tracking is disabled in config.
        let highest_filled_slot_opt = if self.config.block_packing {
            database::get_highest_block_packing(&mut conn)?.map(|packing| packing.slot)
        } else {
            return Err(Error::NotEnabled("block_packing".to_string()));
        };

        let mut start_epoch = if let Some(highest_filled_slot) = highest_filled_slot_opt {
            if highest_filled_slot.as_slot() % self.slots_per_epoch
                == self.slots_per_epoch.saturating_sub(1)
            {
                // The whole epoch is filled so we can begin syncing the next one.
                highest_filled_slot.as_slot().epoch(self.slots_per_epoch) + 1
            } else {
                // The epoch is only partially synced. Try to sync it fully.
                highest_filled_slot.as_slot().epoch(self.slots_per_epoch)
            }
        } else {
            // No entries in the `block_packing` table. Use `beacon_blocks` instead.
            if let Some(lowest_beacon_block) = database::get_lowest_beacon_block(&mut conn)? {
                lowest_beacon_block
                    .slot
                    .as_slot()
                    .epoch(self.slots_per_epoch)
            } else {
                // There are no blocks in the database, do not fill the `block_packing` table.
                warn!("Refusing to fill block packing as there are no blocks in the database");
                return Ok(());
            }
        };

        // The `get_block_packing` API endpoint cannot accept `start_epoch == 0`.
        if start_epoch == 0 {
            start_epoch += 1
        }

        if let Some(highest_block_slot) =
            database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot())
        {
            let mut end_epoch = highest_block_slot.epoch(self.slots_per_epoch);

            if start_epoch > end_epoch {
                debug!("Block packing is up to date with the head of the database");
                return Ok(());
            }

            // Ensure the size of the request does not exceed the maximum allowed value.
            if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING) {
                end_epoch = start_epoch + MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING
            }

            if let Some(lowest_block_slot) =
                database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot())
            {
                let mut packing = get_block_packing(&self.bn, start_epoch, end_epoch).await?;

                // Since we pull a full epoch of data but are not guaranteed to have all blocks of
                // that epoch available, only insert blocks with corresponding `beacon_block`s.
                packing.retain(|packing| {
                    packing.slot.as_slot() >= lowest_block_slot
                        && packing.slot.as_slot() <= highest_block_slot
                });
                database::insert_batch_block_packing(&mut conn, packing)?;
            } else {
                // `get_highest_beacon_block` returned `Some` above, so a lowest block must exist.
                return Err(Error::Database(DbError::Other(
                    "Database did not return a lowest block when one exists".to_string(),
                )));
            }
        } else {
            // There are no blocks in the `beacon_blocks` database, but there are entries in the
            // `block_packing` table. This is a critical failure. It usually means someone has
            // manually tampered with the database tables and should not occur during normal
            // operation.
            error!("Database is corrupted. Please re-sync the database");
            return Err(Error::Database(DbError::DatabaseCorrupted));
        }

        Ok(())
    }

    /// Backfill the `block_packing` table starting from the entry with the lowest slot.
    ///
    /// It constructs a request to the `get_block_packing` function with:
    /// `start_epoch` -> epoch of lowest_beacon_block
    /// `end_epoch` -> epoch of lowest filled `block_packing` - 1 (or epoch of highest beacon block)
    ///
    /// It will resync the lowest epoch if it is not fully filled.
    /// That is, `if lowest_filled_slot % slots_per_epoch != 0`
    /// This means that if the last slot of an epoch is a skip slot, the whole epoch will be
    /// resynced during the next head update.
    ///
    /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING` or the configured
    /// `max_backfill_size_epochs`.
    pub async fn backfill_block_packing(&mut self) -> Result<(), Error> {
        let mut conn = database::get_connection(&self.pool)?;
        let max_block_packing_backfill = self.config.max_backfill_size_epochs;

        // Get the slot of the lowest entry in the `block_packing` table.
        let lowest_filled_slot_opt = if self.config.block_packing {
            database::get_lowest_block_packing(&mut conn)?.map(|packing| packing.slot)
        } else {
            return Err(Error::NotEnabled("block_packing".to_string()));
        };

        let end_epoch = if let Some(lowest_filled_slot) = lowest_filled_slot_opt {
            if lowest_filled_slot.as_slot() % self.slots_per_epoch == 0 {
                // The lowest filled epoch is complete; backfill ends at the previous epoch.
                lowest_filled_slot
                    .as_slot()
                    .epoch(self.slots_per_epoch)
                    .saturating_sub(Epoch::new(1))
            } else {
                // The epoch is only partially synced. Try to sync it fully.
                lowest_filled_slot.as_slot().epoch(self.slots_per_epoch)
            }
        } else {
            // No entries in the `block_packing` table. Use `beacon_blocks` instead.
            if let Some(highest_beacon_block) =
                database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot)
            {
                highest_beacon_block.as_slot().epoch(self.slots_per_epoch)
            } else {
                // There are no blocks in the database, do not backfill the `block_packing` table.
                warn!("Refusing to backfill block packing as there are no blocks in the database");
                return Ok(());
            }
        };

        // Packing cannot be requested for epoch 0, so epoch 1 is the end of the line.
        if end_epoch <= 1 {
            debug!("Block packing backfill is complete");
            return Ok(());
        }

        if let Some(lowest_block_slot) =
            database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot())
        {
            let mut start_epoch = lowest_block_slot.epoch(self.slots_per_epoch);

            if start_epoch >= end_epoch {
                debug!("Block packing is up to date with the base of the database");
                return Ok(());
            }

            // Ensure that the request range does not exceed `max_block_packing_backfill` or
            // `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`.
            if start_epoch < end_epoch.saturating_sub(max_block_packing_backfill) {
                start_epoch = end_epoch.saturating_sub(max_block_packing_backfill)
            }
            if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING) {
                start_epoch = end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING)
            }

            // The `block_packing` API cannot accept `start_epoch == 0`.
            if start_epoch == 0 {
                start_epoch += 1
            }

            if let Some(highest_block_slot) =
                database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot())
            {
                let mut packing = get_block_packing(&self.bn, start_epoch, end_epoch).await?;

                // Only insert blocks with corresponding `beacon_block`s.
                packing.retain(|packing| {
                    packing.slot.as_slot() >= lowest_block_slot
                        && packing.slot.as_slot() <= highest_block_slot
                });

                database::insert_batch_block_packing(&mut conn, packing)?;
            } else {
                // `get_lowest_beacon_block` returned `Some` above, so a highest block must exist.
                return Err(Error::Database(DbError::Other(
                    "Database did not return a lowest block when one exists".to_string(),
                )));
            }
        } else {
            // There are no blocks in the `beacon_blocks` database, but there are entries in the
            // `block_packing` table. This is a critical failure. It usually means someone has
            // manually tampered with the database tables and should not occur during normal
            // operation.
            error!("Database is corrupted. Please re-sync the database");
            return Err(Error::Database(DbError::DatabaseCorrupted));
        }

        Ok(())
    }
}

View File

@ -0,0 +1,137 @@
use crate::database::{
schema::{beacon_blocks, block_rewards},
watch_types::{WatchHash, WatchSlot},
Error, PgConn, MAX_SIZE_BATCH_INSERT,
};
use diesel::prelude::*;
use diesel::{Insertable, Queryable};
use log::debug;
use serde::{Deserialize, Serialize};
use std::time::Instant;
/// Row type for the `block_rewards` table, keyed by `slot`.
/// Reward values are stored as `i32` to match the table's integer columns.
#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)]
#[diesel(table_name = block_rewards)]
pub struct WatchBlockRewards {
    /// Slot of the block these rewards correspond to.
    pub slot: WatchSlot,
    /// Total block reward.
    pub total: i32,
    /// Reward attributable to included attestations.
    pub attestation_reward: i32,
    /// Reward attributable to sync committee contributions.
    pub sync_committee_reward: i32,
}
/// Insert a batch of values into the `block_rewards` table.
///
/// On a conflict, it will do nothing, leaving the old value.
pub fn insert_batch_block_rewards(
    conn: &mut PgConn,
    rewards: Vec<WatchBlockRewards>,
) -> Result<(), Error> {
    use self::block_rewards::dsl::*;
    let start = Instant::now();
    // Insert in bounded chunks, summing the rows actually written.
    let mut count = 0;
    for chunk in rewards.chunks(MAX_SIZE_BATCH_INSERT) {
        count += diesel::insert_into(block_rewards)
            .values(chunk)
            .on_conflict_do_nothing()
            .execute(conn)?;
    }
    let time_taken = start.elapsed();
    debug!("Block rewards inserted, count: {count}, time_taken: {time_taken:?}");
    Ok(())
}
/// Selects the row from the `block_rewards` table where `slot` is minimum.
pub fn get_lowest_block_rewards(conn: &mut PgConn) -> Result<Option<WatchBlockRewards>, Error> {
    use self::block_rewards::dsl::*;
    let start = Instant::now();
    // Lowest slot first; `None` when the table is empty.
    let lowest = block_rewards
        .order_by(slot.asc())
        .limit(1)
        .first::<WatchBlockRewards>(conn)
        .optional()?;
    let time_taken = start.elapsed();
    debug!("Block rewards requested: lowest, time_taken: {time_taken:?}");
    Ok(lowest)
}
/// Selects the row from the `block_rewards` table where `slot` is maximum.
pub fn get_highest_block_rewards(conn: &mut PgConn) -> Result<Option<WatchBlockRewards>, Error> {
    use self::block_rewards::dsl::*;
    let start = Instant::now();
    // Highest slot first; `None` when the table is empty.
    let highest = block_rewards
        .order_by(slot.desc())
        .limit(1)
        .first::<WatchBlockRewards>(conn)
        .optional()?;
    let time_taken = start.elapsed();
    debug!("Block rewards requested: highest, time_taken: {time_taken:?}");
    Ok(highest)
}
/// Selects a single row of the `block_rewards` table corresponding to a given `root_query`.
pub fn get_block_rewards_by_root(
    conn: &mut PgConn,
    root_query: WatchHash,
) -> Result<Option<WatchBlockRewards>, Error> {
    use self::beacon_blocks::dsl::{beacon_blocks, root};
    use self::block_rewards::dsl::*;
    let start = Instant::now();
    // Join against `beacon_blocks` so the rewards row can be located by block root.
    let row = beacon_blocks
        .inner_join(block_rewards)
        .select((slot, total, attestation_reward, sync_committee_reward))
        .filter(root.eq(root_query))
        .first::<WatchBlockRewards>(conn)
        .optional()?;
    let time_taken = start.elapsed();
    debug!("Block rewards requested: {root_query}, time_taken: {time_taken:?}");
    Ok(row)
}
/// Selects a single row of the `block_rewards` table corresponding to a given `slot_query`.
pub fn get_block_rewards_by_slot(
    conn: &mut PgConn,
    slot_query: WatchSlot,
) -> Result<Option<WatchBlockRewards>, Error> {
    use self::block_rewards::dsl::*;
    let start = Instant::now();
    let row = block_rewards
        .filter(slot.eq(slot_query))
        .first::<WatchBlockRewards>(conn)
        .optional()?;
    let time_taken = start.elapsed();
    debug!("Block rewards requested: {slot_query}, time_taken: {time_taken:?}");
    Ok(row)
}
/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding
/// row in `block_rewards`.
#[allow(dead_code)]
pub fn get_unknown_block_rewards(conn: &mut PgConn) -> Result<Vec<Option<WatchSlot>>, Error> {
    use self::beacon_blocks::dsl::{beacon_blocks, root, slot};
    use self::block_rewards::dsl::block_rewards;
    // Left join keeps every beacon block, even those without a `block_rewards` row.
    let join = beacon_blocks.left_join(block_rewards);
    let result = join
        .select(slot)
        // NOTE(review): `root` is imported from `beacon_blocks::dsl`, i.e. the left side of
        // the join, which is never NULL — verify this shouldn't test a `block_rewards`
        // column for NULL instead.
        .filter(root.is_null())
        // Block rewards cannot be retrieved for `slot == 0` so we need to exclude it.
        .filter(slot.ne(0))
        .order_by(slot.desc())
        .nullable()
        .load::<Option<WatchSlot>>(conn)?;
    Ok(result)
}

View File

@ -0,0 +1,38 @@
pub mod database;
mod server;
mod updater;
use crate::database::watch_types::WatchSlot;
use crate::updater::error::Error;
pub use database::{
get_block_rewards_by_root, get_block_rewards_by_slot, get_highest_block_rewards,
get_lowest_block_rewards, get_unknown_block_rewards, insert_batch_block_rewards,
WatchBlockRewards,
};
pub use server::block_rewards_routes;
use eth2::BeaconNodeHttpClient;
use types::Slot;
/// Sends a request to `lighthouse/analysis/block_rewards`.
/// Formats the response into a vector of `WatchBlockRewards`.
///
/// Will fail if `start_slot == 0`.
pub async fn get_block_rewards(
    bn: &BeaconNodeHttpClient,
    start_slot: Slot,
    end_slot: Slot,
) -> Result<Vec<WatchBlockRewards>, Error> {
    let rewards = bn
        .get_lighthouse_analysis_block_rewards(start_slot, end_slot)
        .await?;
    // Convert the API rows into database rows, narrowing values to the i32 columns.
    let result = rewards
        .into_iter()
        .map(|data| WatchBlockRewards {
            slot: WatchSlot::from_slot(data.meta.slot),
            total: data.total as i32,
            attestation_reward: data.attestation_rewards.total as i32,
            sync_committee_reward: data.sync_committee_rewards as i32,
        })
        .collect();
    Ok(result)
}

View File

@ -0,0 +1,31 @@
use crate::block_rewards::database::{
get_block_rewards_by_root, get_block_rewards_by_slot, WatchBlockRewards,
};
use crate::database::{get_connection, PgPool, WatchHash, WatchSlot};
use crate::server::Error;
use axum::{extract::Path, routing::get, Extension, Json, Router};
use eth2::types::BlockId;
use std::str::FromStr;
pub async fn get_block_rewards(
Path(block_query): Path<String>,
Extension(pool): Extension<PgPool>,
) -> Result<Json<Option<WatchBlockRewards>>, Error> {
let mut conn = get_connection(&pool).map_err(Error::Database)?;
match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? {
BlockId::Root(root) => Ok(Json(get_block_rewards_by_root(
&mut conn,
WatchHash::from_hash(root),
)?)),
BlockId::Slot(slot) => Ok(Json(get_block_rewards_by_slot(
&mut conn,
WatchSlot::from_slot(slot),
)?)),
_ => Err(Error::BadRequest),
}
}
/// Router exposing the block rewards endpoint.
pub fn block_rewards_routes() -> Router {
    let router = Router::new();
    router.route("/v1/blocks/:block/rewards", get(get_block_rewards))
}

View File

@ -0,0 +1,157 @@
use crate::database::{self, Error as DbError};
use crate::updater::{Error, UpdateHandler};
use crate::block_rewards::get_block_rewards;
use eth2::types::EthSpec;
use log::{debug, error, warn};
/// Maximum number of slots of block reward data requested in a single API call.
const MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS: u64 = 1600;
impl<T: EthSpec> UpdateHandler<T> {
    /// Forward fills the `block_rewards` table starting from the entry with the
    /// highest slot.
    ///
    /// It constructs a request to the `get_block_rewards` API with:
    /// `start_slot` -> highest filled `block_rewards` + 1 (or lowest beacon block)
    /// `end_slot` -> highest beacon block
    ///
    /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS`.
    pub async fn fill_block_rewards(&mut self) -> Result<(), Error> {
        let mut conn = database::get_connection(&self.pool)?;

        // Get the slot of the highest entry in the `block_rewards` table,
        // erroring out early if block rewards tracking is disabled in config.
        let highest_filled_slot_opt = if self.config.block_rewards {
            database::get_highest_block_rewards(&mut conn)?.map(|reward| reward.slot)
        } else {
            return Err(Error::NotEnabled("block_rewards".to_string()));
        };

        let mut start_slot = if let Some(highest_filled_slot) = highest_filled_slot_opt {
            highest_filled_slot.as_slot() + 1
        } else {
            // No entries in the `block_rewards` table. Use `beacon_blocks` instead.
            if let Some(lowest_beacon_block) =
                database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot)
            {
                lowest_beacon_block.as_slot()
            } else {
                // There are no blocks in the database, do not fill the `block_rewards` table.
                warn!("Refusing to fill block rewards as there are no blocks in the database");
                return Ok(());
            }
        };

        // The `block_rewards` API cannot accept `start_slot == 0`.
        if start_slot == 0 {
            start_slot += 1;
        }

        if let Some(highest_beacon_block) =
            database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot)
        {
            let mut end_slot = highest_beacon_block.as_slot();

            if start_slot > end_slot {
                debug!("Block rewards are up to date with the head of the database");
                return Ok(());
            }

            // Ensure the size of the request does not exceed the maximum allowed value.
            if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS) {
                end_slot = start_slot + MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS
            }

            let rewards = get_block_rewards(&self.bn, start_slot, end_slot).await?;
            database::insert_batch_block_rewards(&mut conn, rewards)?;
        } else {
            // There are no blocks in the `beacon_blocks` database, but there are entries in the
            // `block_rewards` table. This is a critical failure. It usually means someone has
            // manually tampered with the database tables and should not occur during normal
            // operation.
            error!("Database is corrupted. Please re-sync the database");
            return Err(Error::Database(DbError::DatabaseCorrupted));
        }

        Ok(())
    }

    /// Backfill the `block_rewards` tables starting from the entry with the
    /// lowest slot.
    ///
    /// It constructs a request to the `get_block_rewards` API with:
    /// `start_slot` -> lowest_beacon_block
    /// `end_slot` -> lowest filled `block_rewards` - 1 (or highest beacon block)
    ///
    /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS` or the
    /// configured `max_backfill_size_epochs` (converted to slots).
    pub async fn backfill_block_rewards(&mut self) -> Result<(), Error> {
        let mut conn = database::get_connection(&self.pool)?;
        let max_block_reward_backfill = self.config.max_backfill_size_epochs * self.slots_per_epoch;

        // Get the slot of the lowest entry in the `block_rewards` table.
        let lowest_filled_slot_opt = if self.config.block_rewards {
            database::get_lowest_block_rewards(&mut conn)?.map(|reward| reward.slot)
        } else {
            return Err(Error::NotEnabled("block_rewards".to_string()));
        };

        let end_slot = if let Some(lowest_filled_slot) = lowest_filled_slot_opt {
            lowest_filled_slot.as_slot().saturating_sub(1_u64)
        } else {
            // No entries in the `block_rewards` table. Use `beacon_blocks` instead.
            if let Some(highest_beacon_block) =
                database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot)
            {
                highest_beacon_block.as_slot()
            } else {
                // There are no blocks in the database, do not backfill the `block_rewards` table.
                warn!("Refusing to backfill block rewards as there are no blocks in the database");
                return Ok(());
            }
        };

        // Rewards cannot be requested for slot 0, so slot 1 is the end of the line.
        if end_slot <= 1 {
            debug!("Block rewards backfill is complete");
            return Ok(());
        }

        if let Some(lowest_block_slot) = database::get_lowest_beacon_block(&mut conn)? {
            let mut start_slot = lowest_block_slot.slot.as_slot();

            if start_slot >= end_slot {
                debug!("Block rewards are up to date with the base of the database");
                return Ok(());
            }

            // Ensure that the request range does not exceed `max_block_reward_backfill` or
            // `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS`.
            if start_slot < end_slot.saturating_sub(max_block_reward_backfill) {
                start_slot = end_slot.saturating_sub(max_block_reward_backfill)
            }
            if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS) {
                start_slot = end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS)
            }

            // The `block_rewards` API cannot accept `start_slot == 0`.
            if start_slot == 0 {
                start_slot += 1
            }

            let rewards = get_block_rewards(&self.bn, start_slot, end_slot).await?;

            // NOTE(review): this condition is always true here — the function has already
            // returned `Error::NotEnabled` above when `config.block_rewards` is false.
            // The sibling packing backfill inserts unconditionally; consider removing
            // this check for consistency.
            if self.config.block_rewards {
                database::insert_batch_block_rewards(&mut conn, rewards)?;
            }
        } else {
            // There are no blocks in the `beacon_blocks` database, but there are entries in the
            // `block_rewards` table. This is a critical failure. It usually means someone has
            // manually tampered with the database tables and should not occur during normal
            // operation.
            error!("Database is corrupted. Please re-sync the database");
            return Err(Error::Database(DbError::DatabaseCorrupted));
        }

        Ok(())
    }
}

View File

@ -0,0 +1,40 @@
use serde::{Deserialize, Serialize};
/// Serde default for `Config::enabled`: the blockprint integration is off by default.
pub const fn enabled() -> bool {
    false
}

/// Serde default for `Config::url`.
pub const fn url() -> Option<String> {
    None
}

/// Serde default for `Config::username`.
pub const fn username() -> Option<String> {
    None
}

/// Serde default for `Config::password`.
pub const fn password() -> Option<String> {
    None
}
/// Configuration for the optional blockprint integration.
///
/// Every field falls back to the module-level default function of the same name
/// when absent from the configuration file.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    /// Whether the blockprint updater is enabled.
    #[serde(default = "enabled")]
    pub enabled: bool,
    /// Base URL of the blockprint server.
    #[serde(default = "url")]
    pub url: Option<String>,
    /// Optional HTTP basic-auth username for the blockprint server.
    #[serde(default = "username")]
    pub username: Option<String>,
    /// Optional HTTP basic-auth password for the blockprint server.
    #[serde(default = "password")]
    pub password: Option<String>,
}
impl Default for Config {
    /// Build a config from the same default functions used during deserialization.
    fn default() -> Self {
        Self {
            enabled: enabled(),
            url: url(),
            username: username(),
            password: password(),
        }
    }
}

View File

@ -0,0 +1,224 @@
use crate::database::{
self,
schema::{beacon_blocks, blockprint},
watch_types::{WatchHash, WatchSlot},
Error, PgConn, MAX_SIZE_BATCH_INSERT,
};
use diesel::prelude::*;
use diesel::sql_types::{Integer, Text};
use diesel::{Insertable, Queryable};
use log::debug;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::time::Instant;
/// Best-guess consensus-client name as reported by blockprint (see `list_consensus_clients`).
type WatchConsensusClient = String;
/// All client labels blockprint may return, including the "Unknown" fallback.
pub fn list_consensus_clients() -> Vec<WatchConsensusClient> {
    ["Lighthouse", "Lodestar", "Nimbus", "Prysm", "Teku", "Unknown"]
        .iter()
        .map(|client| client.to_string())
        .collect()
}
/// Row type for the `blockprint` table: the best-guess client for the block at `slot`.
#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)]
#[diesel(table_name = blockprint)]
pub struct WatchBlockprint {
    pub slot: WatchSlot,
    pub best_guess: WatchConsensusClient,
}
/// Result row for raw SQL queries relating a proposer to its blockprint guess at a slot.
#[derive(Debug, QueryableByName, diesel::FromSqlRow)]
pub struct WatchValidatorBlockprint {
    /// Validator index of the block proposer.
    #[diesel(sql_type = Integer)]
    pub proposer_index: i32,
    /// Best-guess client for the proposer's block.
    #[diesel(sql_type = Text)]
    pub best_guess: WatchConsensusClient,
    /// Slot of the proposal the guess was derived from.
    #[diesel(sql_type = Integer)]
    pub slot: WatchSlot,
}
/// Insert a batch of values into the `blockprint` table.
///
/// On a conflict, it will do nothing, leaving the old value.
pub fn insert_batch_blockprint(
    conn: &mut PgConn,
    prints: Vec<WatchBlockprint>,
) -> Result<(), Error> {
    use self::blockprint::dsl::*;
    let start = Instant::now();
    // Insert in bounded chunks, summing the rows actually written.
    let mut count = 0;
    for chunk in prints.chunks(MAX_SIZE_BATCH_INSERT) {
        count += diesel::insert_into(blockprint)
            .values(chunk)
            .on_conflict_do_nothing()
            .execute(conn)?;
    }
    let time_taken = start.elapsed();
    debug!("Blockprint inserted, count: {count}, time_taken: {time_taken:?}");
    Ok(())
}
/// Selects the row from the `blockprint` table where `slot` is minimum.
pub fn get_lowest_blockprint(conn: &mut PgConn) -> Result<Option<WatchBlockprint>, Error> {
    use self::blockprint::dsl::*;
    let start = Instant::now();
    // Lowest slot first; `None` when the table is empty.
    let lowest = blockprint
        .order_by(slot.asc())
        .limit(1)
        .first::<WatchBlockprint>(conn)
        .optional()?;
    let time_taken = start.elapsed();
    debug!("Blockprint requested: lowest, time_taken: {time_taken:?}");
    Ok(lowest)
}
/// Selects the row from the `blockprint` table where `slot` is maximum.
pub fn get_highest_blockprint(conn: &mut PgConn) -> Result<Option<WatchBlockprint>, Error> {
    use self::blockprint::dsl::*;
    let start = Instant::now();
    // Highest slot first; `None` when the table is empty.
    let highest = blockprint
        .order_by(slot.desc())
        .limit(1)
        .first::<WatchBlockprint>(conn)
        .optional()?;
    let time_taken = start.elapsed();
    debug!("Blockprint requested: highest, time_taken: {time_taken:?}");
    Ok(highest)
}
/// Selects a single row of the `blockprint` table corresponding to a given `root_query`.
pub fn get_blockprint_by_root(
    conn: &mut PgConn,
    root_query: WatchHash,
) -> Result<Option<WatchBlockprint>, Error> {
    use self::beacon_blocks::dsl::{beacon_blocks, root};
    use self::blockprint::dsl::*;
    let start = Instant::now();
    // Join against `beacon_blocks` so the blockprint row can be located by block root.
    let row = beacon_blocks
        .inner_join(blockprint)
        .select((slot, best_guess))
        .filter(root.eq(root_query))
        .first::<WatchBlockprint>(conn)
        .optional()?;
    let time_taken = start.elapsed();
    debug!("Blockprint requested: {root_query}, time_taken: {time_taken:?}");
    Ok(row)
}
/// Selects a single row of the `blockprint` table corresponding to a given `slot_query`.
pub fn get_blockprint_by_slot(
    conn: &mut PgConn,
    slot_query: WatchSlot,
) -> Result<Option<WatchBlockprint>, Error> {
    use self::blockprint::dsl::*;
    let start = Instant::now();
    let row = blockprint
        .filter(slot.eq(slot_query))
        .first::<WatchBlockprint>(conn)
        .optional()?;
    let time_taken = start.elapsed();
    debug!("Blockprint requested: {slot_query}, time_taken: {time_taken:?}");
    Ok(row)
}
/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding
/// row in `blockprint`.
#[allow(dead_code)]
pub fn get_unknown_blockprint(conn: &mut PgConn) -> Result<Vec<Option<WatchSlot>>, Error> {
    use self::beacon_blocks::dsl::{beacon_blocks, root, slot};
    use self::blockprint::dsl::blockprint;
    // Left join keeps every beacon block, even those without a `blockprint` row.
    let join = beacon_blocks.left_join(blockprint);
    let result = join
        .select(slot)
        // NOTE(review): `root` is imported from `beacon_blocks::dsl`, i.e. the left side of
        // the join, which is never NULL — verify this shouldn't test a `blockprint` column
        // for NULL instead.
        .filter(root.is_null())
        .order_by(slot.desc())
        .nullable()
        .load::<Option<WatchSlot>>(conn)?;
    Ok(result)
}
/// Constructs a `HashMap` of `index` -> `best_guess` for each validator's latest proposal at or
/// before `target_slot`.
/// Inserts `"Unknown"` if no prior proposals exist.
pub fn construct_validator_blockprints_at_slot(
    conn: &mut PgConn,
    target_slot: WatchSlot,
    slots_per_epoch: u64,
) -> Result<HashMap<i32, WatchConsensusClient>, Error> {
    use self::blockprint::dsl::{blockprint, slot};

    // Every validator activated at or before `target_slot` gets an entry in the result.
    let total_validators =
        database::count_validators_activated_before_slot(conn, target_slot, slots_per_epoch)?
            as usize;

    let mut blockprint_map = HashMap::with_capacity(total_validators);

    // Map of proposal `slot` -> proposer index for each validator's most recent proposal.
    let latest_proposals =
        database::get_all_validators_latest_proposer_info_at_slot(conn, target_slot)?;

    // Clone only the keys instead of the whole map (`clone().into_keys()` would copy the
    // values too); `latest_proposals` is still needed below to look up proposers.
    let latest_proposal_slots: Vec<WatchSlot> = latest_proposals.keys().cloned().collect();

    let result = blockprint
        .filter(slot.eq_any(latest_proposal_slots))
        .load::<WatchBlockprint>(conn)?;

    // Insert the validators which have available blockprints.
    for print in result {
        if let Some(proposer) = latest_proposals.get(&print.slot) {
            blockprint_map.insert(*proposer, print.best_guess);
        }
    }

    // Insert the rest of the unknown validators.
    for validator_index in 0..total_validators {
        blockprint_map
            .entry(validator_index as i32)
            .or_insert_with(|| "Unknown".to_string());
    }

    Ok(blockprint_map)
}
/// Counts the number of occurrences of each `client` present in the `validators` table at or
/// before some `target_slot`.
///
/// Every client from `list_consensus_clients()` appears in the result, even with a count of
/// zero; guesses outside that list are ignored (matching the previous behavior).
pub fn get_validators_clients_at_slot(
    conn: &mut PgConn,
    target_slot: WatchSlot,
    slots_per_epoch: u64,
) -> Result<HashMap<WatchConsensusClient, usize>, Error> {
    // Pre-seed the map so every known client is present with a zero count.
    let mut client_map: HashMap<WatchConsensusClient, usize> = list_consensus_clients()
        .into_iter()
        .map(|client| (client, 0))
        .collect();

    // This includes all validators which were activated at or before `target_slot`.
    let validator_blockprints =
        construct_validator_blockprints_at_slot(conn, target_slot, slots_per_epoch)?;

    // Single pass over the blockprints rather than one filtering pass per client.
    for client in validator_blockprints.values() {
        if let Some(count) = client_map.get_mut(client) {
            *count += 1;
        }
    }

    Ok(client_map)
}

149
watch/src/blockprint/mod.rs Normal file
View File

@ -0,0 +1,149 @@
pub mod database;
pub mod server;
pub mod updater;
mod config;
use crate::database::WatchSlot;
use eth2::SensitiveUrl;
use reqwest::{Client, Response, Url};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::time::Duration;
use types::Slot;
pub use config::Config;
pub use database::{
get_blockprint_by_root, get_blockprint_by_slot, get_highest_blockprint, get_lowest_blockprint,
get_unknown_blockprint, get_validators_clients_at_slot, insert_batch_blockprint,
list_consensus_clients, WatchBlockprint,
};
pub use server::blockprint_routes;
/// Timeout applied to each HTTP request made to the blockprint server.
const TIMEOUT: Duration = Duration::from_secs(50);
/// Errors returned by the blockprint HTTP client.
#[derive(Debug)]
pub enum Error {
    /// Underlying HTTP request failure.
    Reqwest(reqwest::Error),
    /// Failed to construct a request URL.
    Url(url::ParseError),
    /// The blockprint server reported that it is not fully synced.
    BlockprintNotSynced,
    /// Non-success HTTP status; carries the server's response body.
    Other(String),
}
impl From<reqwest::Error> for Error {
    fn from(e: reqwest::Error) -> Self {
        Self::Reqwest(e)
    }
}
impl From<url::ParseError> for Error {
    fn from(e: url::ParseError) -> Self {
        Self::Url(e)
    }
}
/// HTTP client for a blockprint server, with optional basic-auth credentials.
pub struct WatchBlockprintClient {
    pub client: Client,
    /// Base URL of the blockprint server.
    pub server: SensitiveUrl,
    /// Basic-auth username; the password is only sent when this is set.
    pub username: Option<String>,
    pub password: Option<String>,
}
/// Response body of the blockprint `sync/status` endpoint.
#[derive(Debug, Deserialize, Serialize)]
pub struct BlockprintSyncingResponse {
    /// Highest slot the blockprint server has processed.
    pub greatest_block_slot: Slot,
    /// Whether the blockprint server considers itself synced.
    pub synced: bool,
}
/// A single per-block client guess returned by the blockprint server.
#[derive(Debug, Deserialize, Serialize)]
pub struct BlockprintResponse {
    pub proposer_index: i32,
    pub slot: Slot,
    /// The server's single best-guess client name for this block.
    pub best_guess_single: String,
}
impl WatchBlockprintClient {
    /// Perform a GET request with the configured timeout and (optional) basic auth.
    /// Any non-2xx status is converted into `Error::Other` carrying the response body.
    async fn get(&self, url: Url) -> Result<Response, Error> {
        let mut builder = self.client.get(url).timeout(TIMEOUT);
        if let Some(username) = &self.username {
            builder = builder.basic_auth(username, self.password.as_ref());
        }
        let response = builder.send().await.map_err(Error::Reqwest)?;
        if !response.status().is_success() {
            return Err(Error::Other(response.text().await?));
        }
        Ok(response)
    }

    // Returns the `greatest_block_slot` as reported by the Blockprint server.
    // Will error if the Blockprint server is not synced.
    #[allow(dead_code)]
    pub async fn ensure_synced(&self) -> Result<Slot, Error> {
        // Trailing slashes matter to `Url::join`: "sync/" + "status" -> ".../sync/status".
        let url = self.server.full.join("sync/")?.join("status")?;
        let response = self.get(url).await?;
        let result = response.json::<BlockprintSyncingResponse>().await?;
        if !result.synced {
            return Err(Error::BlockprintNotSynced);
        }
        Ok(result.greatest_block_slot)
    }

    // Pulls the latest blockprint for all validators.
    // Guesses for validators above `highest_validator` are discarded.
    #[allow(dead_code)]
    pub async fn blockprint_all_validators(
        &self,
        highest_validator: i32,
    ) -> Result<HashMap<i32, String>, Error> {
        let url = self
            .server
            .full
            .join("validator/")?
            .join("blocks/")?
            .join("latest")?;
        let response = self.get(url).await?;
        let mut result = response.json::<Vec<BlockprintResponse>>().await?;
        result.retain(|print| print.proposer_index <= highest_validator);

        // Index the surviving guesses by proposer; later entries overwrite earlier ones.
        let mut map: HashMap<i32, String> = HashMap::with_capacity(result.len());
        for print in result {
            map.insert(print.proposer_index, print.best_guess_single);
        }
        Ok(map)
    }

    // Construct a request to the Blockprint server for a range of slots between `start_slot` and
    // `end_slot`.
    pub async fn get_blockprint(
        &self,
        start_slot: Slot,
        end_slot: Slot,
    ) -> Result<Vec<WatchBlockprint>, Error> {
        let url = self
            .server
            .full
            .join("blocks/")?
            .join(&format!("{start_slot}/{end_slot}"))?;
        let response = self.get(url).await?;
        // Convert the server's rows into `blockprint` table rows.
        let result = response
            .json::<Vec<BlockprintResponse>>()
            .await?
            .iter()
            .map(|response| WatchBlockprint {
                slot: WatchSlot::from_slot(response.slot),
                best_guess: response.best_guess_single.clone(),
            })
            .collect();
        Ok(result)
    }
}

View File

@ -0,0 +1,31 @@
use crate::blockprint::database::{
get_blockprint_by_root, get_blockprint_by_slot, WatchBlockprint,
};
use crate::database::{get_connection, PgPool, WatchHash, WatchSlot};
use crate::server::Error;
use axum::{extract::Path, routing::get, Extension, Json, Router};
use eth2::types::BlockId;
use std::str::FromStr;
pub async fn get_blockprint(
Path(block_query): Path<String>,
Extension(pool): Extension<PgPool>,
) -> Result<Json<Option<WatchBlockprint>>, Error> {
let mut conn = get_connection(&pool).map_err(Error::Database)?;
match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? {
BlockId::Root(root) => Ok(Json(get_blockprint_by_root(
&mut conn,
WatchHash::from_hash(root),
)?)),
BlockId::Slot(slot) => Ok(Json(get_blockprint_by_slot(
&mut conn,
WatchSlot::from_slot(slot),
)?)),
_ => Err(Error::BadRequest),
}
}
/// Router exposing blockprint lookups keyed by block root or slot.
pub fn blockprint_routes() -> Router {
    let routes = Router::new();
    routes.route("/v1/blocks/:block/blockprint", get(get_blockprint))
}

View File

@ -0,0 +1,172 @@
use crate::database::{self, Error as DbError};
use crate::updater::{Error, UpdateHandler};
use eth2::types::EthSpec;
use log::{debug, error, warn};
const MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT: u64 = 1600;
impl<T: EthSpec> UpdateHandler<T> {
    /// Forward fills the `blockprint` table starting from the entry with the
    /// highest slot.
    ///
    /// It constructs a request to the `get_blockprint` API with:
    /// `start_slot` -> highest filled `blockprint` + 1 (or lowest beacon block)
    /// `end_slot` -> highest beacon block
    ///
    /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT`.
    pub async fn fill_blockprint(&mut self) -> Result<(), Error> {
        // Ensure blockprint is enabled; this is a no-op otherwise.
        if let Some(blockprint_client) = &self.blockprint {
            let mut conn = database::get_connection(&self.pool)?;

            // Get the slot of the highest entry in the `blockprint` table.
            let mut start_slot = if let Some(highest_filled_slot) =
                database::get_highest_blockprint(&mut conn)?.map(|print| print.slot)
            {
                highest_filled_slot.as_slot() + 1
            } else {
                // No entries in the `blockprint` table. Use `beacon_blocks` instead.
                if let Some(lowest_beacon_block) =
                    database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot)
                {
                    lowest_beacon_block.as_slot()
                } else {
                    // There are no blocks in the database, do not fill the `blockprint` table.
                    warn!("Refusing to fill blockprint as there are no blocks in the database");
                    return Ok(());
                }
            };

            // The `blockprint` API cannot accept `start_slot == 0`.
            if start_slot == 0 {
                start_slot += 1;
            }

            if let Some(highest_beacon_block) =
                database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot)
            {
                let mut end_slot = highest_beacon_block.as_slot();

                if start_slot > end_slot {
                    debug!("Blockprint is up to date with the head of the database");
                    return Ok(());
                }

                // Ensure the size of the request does not exceed the maximum allowed value.
                if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT) {
                    end_slot = start_slot + MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT
                }

                let mut prints = blockprint_client
                    .get_blockprint(start_slot, end_slot)
                    .await?;

                // Ensure the prints returned from blockprint are for slots which exist in the
                // `beacon_blocks` table.
                prints.retain(|print| {
                    database::get_beacon_block_by_slot(&mut conn, print.slot)
                        .ok()
                        .flatten()
                        .is_some()
                });

                database::insert_batch_blockprint(&mut conn, prints)?;
            } else {
                // There are no blocks in the `beacon_blocks` table, but there are entries in the
                // `blockprint` table. This is a critical failure. It usually means
                // someone has manually tampered with the database tables and should not occur
                // during normal operation.
                error!("Database is corrupted. Please re-sync the database");
                return Err(Error::Database(DbError::DatabaseCorrupted));
            }
        }
        Ok(())
    }

    /// Backfill the `blockprint` table starting from the entry with the lowest slot.
    ///
    /// It constructs a request to the `get_blockprint` API with:
    /// `start_slot` -> lowest_beacon_block
    /// `end_slot` -> lowest filled `blockprint` - 1 (or highest beacon block)
    ///
    /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT`.
    pub async fn backfill_blockprint(&mut self) -> Result<(), Error> {
        // Ensure blockprint is enabled; this is a no-op otherwise.
        if let Some(blockprint_client) = &self.blockprint {
            let mut conn = database::get_connection(&self.pool)?;
            // Cap the backfill span by the configured epoch budget (in slots).
            let max_blockprint_backfill =
                self.config.max_backfill_size_epochs * self.slots_per_epoch;

            // Get the slot of the lowest entry in the `blockprint` table.
            let end_slot = if let Some(lowest_filled_slot) =
                database::get_lowest_blockprint(&mut conn)?.map(|print| print.slot)
            {
                lowest_filled_slot.as_slot().saturating_sub(1_u64)
            } else {
                // No entries in the `blockprint` table. Use `beacon_blocks` instead.
                if let Some(highest_beacon_block) =
                    database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot)
                {
                    highest_beacon_block.as_slot()
                } else {
                    // There are no blocks in the database, do not backfill the `blockprint` table.
                    warn!("Refusing to backfill blockprint as there are no blocks in the database");
                    return Ok(());
                }
            };

            // Slot 0/1 reached: nothing left to backfill.
            if end_slot <= 1 {
                debug!("Blockprint backfill is complete");
                return Ok(());
            }

            if let Some(lowest_block_slot) = database::get_lowest_beacon_block(&mut conn)? {
                let mut start_slot = lowest_block_slot.slot.as_slot();
                if start_slot >= end_slot {
                    debug!("Blockprint are up to date with the base of the database");
                    return Ok(());
                }

                // Ensure that the request range does not exceed `max_blockprint_backfill` or
                // `MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT`.
                if start_slot < end_slot.saturating_sub(max_blockprint_backfill) {
                    start_slot = end_slot.saturating_sub(max_blockprint_backfill)
                }
                if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT) {
                    start_slot = end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCKPRINT)
                }

                // The `blockprint` API cannot accept `start_slot == 0`.
                if start_slot == 0 {
                    start_slot += 1
                }

                let mut prints = blockprint_client
                    .get_blockprint(start_slot, end_slot)
                    .await?;

                // Ensure the prints returned from blockprint are for slots which exist in the
                // `beacon_blocks` table.
                prints.retain(|print| {
                    database::get_beacon_block_by_slot(&mut conn, print.slot)
                        .ok()
                        .flatten()
                        .is_some()
                });

                database::insert_batch_blockprint(&mut conn, prints)?;
            } else {
                // There are no blocks in the `beacon_blocks` table, but there are entries in the
                // `blockprint` table. This is a critical failure. It usually means someone has
                // manually tampered with the database tables and should not occur during normal
                // operation.
                error!("Database is corrupted. Please re-sync the database");
                return Err(Error::Database(DbError::DatabaseCorrupted));
            }
        }
        Ok(())
    }
}

55
watch/src/cli.rs Normal file
View File

@ -0,0 +1,55 @@
use crate::{config::Config, logger, server, updater};
use clap::{App, Arg};
use tokio::sync::oneshot;
// Subcommand and flag names used by the CLI definitions below.
pub const SERVE: &str = "serve";
pub const RUN_UPDATER: &str = "run-updater";
pub const CONFIG: &str = "config";
/// `run-updater` subcommand definition.
fn run_updater<'a, 'b>() -> App<'a, 'b> {
    let cmd = App::new(RUN_UPDATER);
    cmd.setting(clap::AppSettings::ColoredHelp)
}
/// `serve` subcommand definition.
fn serve<'a, 'b>() -> App<'a, 'b> {
    let cmd = App::new(SERVE);
    cmd.setting(clap::AppSettings::ColoredHelp)
}
/// Top-level CLI definition for the beacon watch daemon.
pub fn app<'a, 'b>() -> App<'a, 'b> {
    // The config flag is global so it can appear before or after a subcommand.
    let config_arg = Arg::with_name(CONFIG)
        .long(CONFIG)
        .value_name("PATH_TO_CONFIG")
        .help("Path to configuration file")
        .takes_value(true)
        .global(true);
    App::new("beacon_watch_daemon")
        .author("Sigma Prime <contact@sigmaprime.io>")
        .setting(clap::AppSettings::ColoredHelp)
        .arg(config_arg)
        .subcommand(run_updater())
        .subcommand(serve())
}
/// Parse CLI arguments, load the configuration (or fall back to defaults),
/// initialise logging, then dispatch to the requested subcommand.
pub async fn run() -> Result<(), String> {
    let matches = app().get_matches();

    let config = if let Some(path) = matches.value_of(CONFIG) {
        Config::load_from_file(path.to_string())?
    } else {
        Config::default()
    };

    logger::init_logger(&config.log_level);

    match matches.subcommand() {
        (RUN_UPDATER, Some(_)) => updater::run_updater(config)
            .await
            .map_err(|e| format!("Failure: {:?}", e)),
        (SERVE, Some(_)) => {
            // The sender is kept alive for the lifetime of the server so the
            // shutdown channel never fires.
            let (_shutdown_tx, shutdown_rx) = oneshot::channel();
            server::serve(config, shutdown_rx)
                .await
                .map_err(|e| format!("Failure: {:?}", e))
        }
        _ => Err("Unsupported subcommand. See --help".into()),
    }
}

178
watch/src/client.rs Normal file
View File

@ -0,0 +1,178 @@
use crate::block_packing::WatchBlockPacking;
use crate::block_rewards::WatchBlockRewards;
use crate::database::models::{
WatchBeaconBlock, WatchCanonicalSlot, WatchProposerInfo, WatchValidator,
};
use crate::suboptimal_attestations::WatchAttestation;
use eth2::types::BlockId;
use reqwest::Client;
use serde::de::DeserializeOwned;
use types::Hash256;
use url::Url;
/// Errors produced by `WatchHttpClient` requests.
#[derive(Debug)]
pub enum Error {
    /// The underlying HTTP request failed.
    Reqwest(reqwest::Error),
    /// A request URL could not be constructed.
    Url(url::ParseError),
}
impl From<reqwest::Error> for Error {
    fn from(e: reqwest::Error) -> Self {
        Self::Reqwest(e)
    }
}
impl From<url::ParseError> for Error {
    fn from(e: url::ParseError) -> Self {
        Self::Url(e)
    }
}
/// Thin HTTP client for querying a watch server's REST API.
pub struct WatchHttpClient {
    // Underlying reqwest client used for all requests.
    pub client: Client,
    // Base URL of the watch server.
    pub server: Url,
}
impl WatchHttpClient {
    // Perform a GET request and deserialize the JSON response.
    // An HTTP 404 maps to `Ok(None)`; any other error status becomes `Err`.
    async fn get_opt<T: DeserializeOwned>(&self, url: Url) -> Result<Option<T>, Error> {
        let response = self.client.get(url).send().await?;

        if response.status() == 404 {
            Ok(None)
        } else {
            response
                .error_for_status()?
                .json()
                .await
                .map_err(Into::into)
        }
    }

    // NOTE: the trailing slashes in the path segments below matter — `Url::join`
    // replaces the final path segment unless the base path ends with '/'.
    pub async fn get_beacon_blocks(
        &self,
        block_id: BlockId,
    ) -> Result<Option<WatchBeaconBlock>, Error> {
        let url = self
            .server
            .join("v1/")?
            .join("blocks/")?
            .join(&block_id.to_string())?;

        self.get_opt(url).await
    }

    // Fetch the canonical slot with the lowest slot number.
    pub async fn get_lowest_canonical_slot(&self) -> Result<Option<WatchCanonicalSlot>, Error> {
        let url = self.server.join("v1/")?.join("slots/")?.join("lowest")?;

        self.get_opt(url).await
    }

    // Fetch the canonical slot with the highest slot number.
    pub async fn get_highest_canonical_slot(&self) -> Result<Option<WatchCanonicalSlot>, Error> {
        let url = self.server.join("v1/")?.join("slots/")?.join("highest")?;

        self.get_opt(url).await
    }

    // Fetch the beacon block with the lowest slot number.
    pub async fn get_lowest_beacon_block(&self) -> Result<Option<WatchBeaconBlock>, Error> {
        let url = self.server.join("v1/")?.join("blocks/")?.join("lowest")?;

        self.get_opt(url).await
    }

    // Fetch the beacon block with the highest slot number.
    pub async fn get_highest_beacon_block(&self) -> Result<Option<WatchBeaconBlock>, Error> {
        let url = self.server.join("v1/")?.join("blocks/")?.join("highest")?;

        self.get_opt(url).await
    }

    // Fetch the child block of the block with root `parent`.
    pub async fn get_next_beacon_block(
        &self,
        parent: Hash256,
    ) -> Result<Option<WatchBeaconBlock>, Error> {
        let url = self
            .server
            .join("v1/")?
            .join("blocks/")?
            .join(&format!("{parent:?}/"))?
            .join("next")?;

        self.get_opt(url).await
    }

    // Fetch a validator record by its validator index.
    pub async fn get_validator_by_index(
        &self,
        index: i32,
    ) -> Result<Option<WatchValidator>, Error> {
        let url = self
            .server
            .join("v1/")?
            .join("validators/")?
            .join(&format!("{index}"))?;

        self.get_opt(url).await
    }

    // Fetch proposer info for the block identified by `block_id`.
    pub async fn get_proposer_info(
        &self,
        block_id: BlockId,
    ) -> Result<Option<WatchProposerInfo>, Error> {
        let url = self
            .server
            .join("v1/")?
            .join("blocks/")?
            .join(&format!("{block_id}/"))?
            .join("proposer")?;

        self.get_opt(url).await
    }

    // Fetch reward info for the block identified by `block_id`.
    pub async fn get_block_reward(
        &self,
        block_id: BlockId,
    ) -> Result<Option<WatchBlockRewards>, Error> {
        let url = self
            .server
            .join("v1/")?
            .join("blocks/")?
            .join(&format!("{block_id}/"))?
            .join("rewards")?;

        self.get_opt(url).await
    }

    // Fetch attestation-packing info for the block identified by `block_id`.
    pub async fn get_block_packing(
        &self,
        block_id: BlockId,
    ) -> Result<Option<WatchBlockPacking>, Error> {
        let url = self
            .server
            .join("v1/")?
            .join("blocks/")?
            .join(&format!("{block_id}/"))?
            .join("packing")?;

        self.get_opt(url).await
    }

    // Fetch all validator records known to the server.
    pub async fn get_all_validators(&self) -> Result<Option<Vec<WatchValidator>>, Error> {
        let url = self.server.join("v1/")?.join("validators/")?.join("all")?;

        self.get_opt(url).await
    }

    // Fetch the attestations of all validators for the given epoch.
    pub async fn get_attestations(
        &self,
        epoch: i32,
    ) -> Result<Option<Vec<WatchAttestation>>, Error> {
        let url = self
            .server
            .join("v1/")?
            .join("validators/")?
            .join("all/")?
            .join("attestation/")?
            .join(&format!("{epoch}"))?;

        self.get_opt(url).await
    }
}

50
watch/src/config.rs Normal file
View File

@ -0,0 +1,50 @@
use crate::blockprint::Config as BlockprintConfig;
use crate::database::Config as DatabaseConfig;
use crate::server::Config as ServerConfig;
use crate::updater::Config as UpdaterConfig;
use serde::{Deserialize, Serialize};
use std::fs::File;
/// Default minimum severity for log messages.
pub const LOG_LEVEL: &str = "debug";

/// Serde default helper returning the default log level as an owned `String`.
fn log_level() -> String {
    String::from(LOG_LEVEL)
}
/// Top-level configuration for the watch daemon, deserialized from YAML.
/// Every section falls back to its `Default` when absent from the file.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    // Blockprint client settings (optional feature).
    #[serde(default)]
    pub blockprint: BlockprintConfig,
    // Postgres connection settings.
    #[serde(default)]
    pub database: DatabaseConfig,
    // HTTP server settings.
    #[serde(default)]
    pub server: ServerConfig,
    // Updater task settings.
    #[serde(default)]
    pub updater: UpdaterConfig,
    /// The minimum severity for log messages.
    #[serde(default = "log_level")]
    pub log_level: String,
}
impl Default for Config {
fn default() -> Self {
Self {
blockprint: BlockprintConfig::default(),
database: DatabaseConfig::default(),
server: ServerConfig::default(),
updater: UpdaterConfig::default(),
log_level: log_level(),
}
}
}
impl Config {
    /// Load a `Config` from the YAML file at `path_to_file`.
    pub fn load_from_file(path_to_file: String) -> Result<Config, String> {
        let file =
            File::open(path_to_file).map_err(|e| format!("Error reading config file: {:?}", e))?;
        serde_yaml::from_reader::<_, Config>(file)
            .map_err(|e| format!("Error parsing config file: {:?}", e))
    }
}

View File

@ -0,0 +1,49 @@
//! Implementations of PostgreSQL compatibility traits.
use crate::database::watch_types::{WatchHash, WatchPK, WatchSlot};
use diesel::deserialize::{self, FromSql};
use diesel::pg::{Pg, PgValue};
use diesel::serialize::{self, Output, ToSql};
use diesel::sql_types::{Binary, Integer};
use std::convert::TryFrom;
// Implements `ToSql`/`FromSql` between a u64-backed newtype and a Postgres
// `INTEGER` column.
macro_rules! impl_to_from_sql_int {
    ($type:ty) => {
        impl ToSql<Integer, Pg> for $type
        where
            i32: ToSql<Integer, Pg>,
        {
            fn to_sql<'a>(&'a self, out: &mut Output<'a, '_, Pg>) -> serialize::Result {
                // Fails if the stored u64 does not fit in a signed 32-bit column.
                let v = i32::try_from(self.as_u64()).map_err(Box::new)?;
                <i32 as ToSql<Integer, Pg>>::to_sql(&v, &mut out.reborrow())
            }
        }

        impl FromSql<Integer, Pg> for $type {
            fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
                // NOTE(review): `as u64` sign-extends a negative i32 into a huge
                // u64; assumes stored values are always non-negative — confirm.
                Ok(Self::new(i32::from_sql(bytes)? as u64))
            }
        }
    };
}
// Implements `ToSql`/`FromSql` between a bytes-backed newtype and a Postgres
// `BYTEA` column.
macro_rules! impl_to_from_sql_binary {
    ($type:ty) => {
        impl ToSql<Binary, Pg> for $type {
            fn to_sql<'a>(&'a self, out: &mut Output<'a, '_, Pg>) -> serialize::Result {
                let b = self.as_bytes();
                <&[u8] as ToSql<Binary, Pg>>::to_sql(&b, &mut out.reborrow())
            }
        }

        impl FromSql<Binary, Pg> for $type {
            fn from_sql(bytes: PgValue<'_>) -> deserialize::Result<Self> {
                Self::from_bytes(bytes.as_bytes()).map_err(|e| e.to_string().into())
            }
        }
    };
}

// Wire the watch newtypes up to their SQL representations.
impl_to_from_sql_int!(WatchSlot);
impl_to_from_sql_binary!(WatchHash);
impl_to_from_sql_binary!(WatchPK);

View File

@ -0,0 +1,74 @@
use serde::{Deserialize, Serialize};
// Default connection parameters for the Postgres database.
pub const USER: &str = "postgres";
pub const PASSWORD: &str = "postgres";
pub const DBNAME: &str = "dev";
pub const DEFAULT_DBNAME: &str = "postgres";
pub const HOST: &str = "localhost";

/// Default Postgres port.
pub const fn port() -> u16 {
    5432
}

/// Default connection timeout in milliseconds (2s).
pub const fn connect_timeout_millis() -> u64 {
    2_000
}

// Serde default helpers returning owned copies of the constants above.
fn user() -> String {
    USER.to_owned()
}

fn password() -> String {
    PASSWORD.to_owned()
}

fn dbname() -> String {
    DBNAME.to_owned()
}

fn default_dbname() -> String {
    DEFAULT_DBNAME.to_owned()
}

fn host() -> String {
    HOST.to_owned()
}
/// Postgres connection configuration, deserialized from YAML.
/// Each field falls back to the module-level default helper when absent.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    // Database user name.
    #[serde(default = "user")]
    pub user: String,
    // Database password.
    #[serde(default = "password")]
    pub password: String,
    // Name of the watch database.
    #[serde(default = "dbname")]
    pub dbname: String,
    // Name of the maintenance database to connect to initially.
    #[serde(default = "default_dbname")]
    pub default_dbname: String,
    // Database host.
    #[serde(default = "host")]
    pub host: String,
    // Database port.
    #[serde(default = "port")]
    pub port: u16,
    // Connection timeout in milliseconds.
    #[serde(default = "connect_timeout_millis")]
    pub connect_timeout_millis: u64,
}
impl Default for Config {
fn default() -> Self {
Self {
user: user(),
password: password(),
dbname: dbname(),
default_dbname: default_dbname(),
host: host(),
port: port(),
connect_timeout_millis: connect_timeout_millis(),
}
}
}
impl Config {
pub fn build_database_url(&self) -> String {
format!(
"postgres://{}:{}@{}:{}/{}",
self.user, self.password, self.host, self.port, self.dbname
)
}
}

View File

@ -0,0 +1,55 @@
use bls::Error as BlsError;
use diesel::result::{ConnectionError, Error as PgError};
use eth2::SensitiveError;
use r2d2::Error as PoolError;
use std::fmt;
use types::BeaconStateError;
/// Errors produced by the watch database layer.
#[derive(Debug)]
pub enum Error {
    /// Error from beacon state processing.
    BeaconState(BeaconStateError),
    /// Postgres query error.
    Database(PgError),
    /// The database tables are in an inconsistent state.
    DatabaseCorrupted,
    /// BLS decoding/validation error.
    InvalidSig(BlsError),
    /// Failed to establish a Postgres connection.
    PostgresConnection(ConnectionError),
    /// r2d2 connection pool error.
    Pool(PoolError),
    /// Failed to parse a sensitive URL.
    SensitiveUrl(SensitiveError),
    /// Invalid block root.
    InvalidRoot,
    /// Catch-all error carrying a message.
    Other(String),
}
impl fmt::Display for Error {
    // Display simply reuses the derived Debug representation.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:?}", self)
    }
}
impl From<BeaconStateError> for Error {
fn from(e: BeaconStateError) -> Self {
Error::BeaconState(e)
}
}
impl From<ConnectionError> for Error {
fn from(e: ConnectionError) -> Self {
Error::PostgresConnection(e)
}
}
impl From<PgError> for Error {
fn from(e: PgError) -> Self {
Error::Database(e)
}
}
impl From<PoolError> for Error {
fn from(e: PoolError) -> Self {
Error::Pool(e)
}
}
impl From<BlsError> for Error {
fn from(e: BlsError) -> Self {
Error::InvalidSig(e)
}
}

782
watch/src/database/mod.rs Normal file
View File

@ -0,0 +1,782 @@
mod config;
mod error;
pub mod compat;
pub mod models;
pub mod schema;
pub mod utils;
pub mod watch_types;
use self::schema::{
active_config, beacon_blocks, canonical_slots, proposer_info, suboptimal_attestations,
validators,
};
use diesel::dsl::max;
use diesel::pg::PgConnection;
use diesel::prelude::*;
use diesel::r2d2::{Builder, ConnectionManager, Pool, PooledConnection};
use diesel::upsert::excluded;
use log::{debug, info};
use std::collections::HashMap;
use std::time::Instant;
use types::{EthSpec, SignedBeaconBlock};
pub use self::error::Error;
pub use self::models::{WatchBeaconBlock, WatchCanonicalSlot, WatchProposerInfo, WatchValidator};
pub use self::watch_types::{WatchHash, WatchPK, WatchSlot};
pub use crate::block_rewards::{
get_block_rewards_by_root, get_block_rewards_by_slot, get_highest_block_rewards,
get_lowest_block_rewards, get_unknown_block_rewards, insert_batch_block_rewards,
WatchBlockRewards,
};
pub use crate::block_packing::{
get_block_packing_by_root, get_block_packing_by_slot, get_highest_block_packing,
get_lowest_block_packing, get_unknown_block_packing, insert_batch_block_packing,
WatchBlockPacking,
};
pub use crate::suboptimal_attestations::{
get_all_suboptimal_attestations_for_epoch, get_attestation_by_index, get_attestation_by_pubkey,
get_highest_attestation, get_lowest_attestation, insert_batch_suboptimal_attestations,
WatchAttestation, WatchSuboptimalAttestation,
};
pub use crate::blockprint::{
get_blockprint_by_root, get_blockprint_by_slot, get_highest_blockprint, get_lowest_blockprint,
get_unknown_blockprint, get_validators_clients_at_slot, insert_batch_blockprint,
WatchBlockprint,
};
pub use config::Config;
/// Batch inserts cannot exceed a certain size.
/// See https://github.com/diesel-rs/diesel/issues/2414.
/// For some reason, this seems to translate to 65535 / 5 (13107) records.
pub const MAX_SIZE_BATCH_INSERT: usize = 13107;
pub type PgPool = Pool<ConnectionManager<PgConnection>>;
pub type PgConn = PooledConnection<ConnectionManager<PgConnection>>;
/// Connect to a Postgresql database and build a connection pool.
pub fn build_connection_pool(config: &Config) -> Result<PgPool, Error> {
    // `build_database_url` only borrows `&self`, so cloning the whole config
    // here was redundant.
    let database_url = config.build_database_url();
    info!("Building connection pool at: {database_url}");
    let pg = ConnectionManager::<PgConnection>::new(&database_url);
    Builder::new().build(pg).map_err(Error::Pool)
}
/// Retrieve an idle connection from the pool.
pub fn get_connection(pool: &PgPool) -> Result<PgConn, Error> {
pool.get().map_err(Error::Pool)
}
/// Insert the active config into the database. This is used to check if the connected beacon node
/// is compatible with the database. These values will not change (except
/// `current_blockprint_checkpoint`).
pub fn insert_active_config(
    conn: &mut PgConn,
    new_config_name: String,
    new_slots_per_epoch: u64,
) -> Result<(), Error> {
    use self::active_config::dsl::*;

    // Single-row insert: no need to heap-allocate a `Vec` wrapper around the
    // row tuple (clippy `useless_vec`).
    diesel::insert_into(active_config)
        .values((
            id.eq(1),
            config_name.eq(new_config_name),
            slots_per_epoch.eq(new_slots_per_epoch as i32),
        ))
        .on_conflict_do_nothing()
        .execute(conn)?;

    Ok(())
}
/// Get the active config from the database.
pub fn get_active_config(conn: &mut PgConn) -> Result<Option<(String, i32)>, Error> {
    use self::active_config::dsl::*;
    // The table holds a single row with `id == 1`.
    let row = active_config
        .select((config_name, slots_per_epoch))
        .filter(id.eq(1))
        .first::<(String, i32)>(conn)
        .optional()?;
    Ok(row)
}
///
/// INSERT statements
///
/// Inserts a single row into the `canonical_slots` table.
/// If `new_slot.beacon_block` is `None`, the value in the row will be `null`.
///
/// On a conflict, it will do nothing, leaving the old value.
pub fn insert_canonical_slot(conn: &mut PgConn, new_slot: WatchCanonicalSlot) -> Result<(), Error> {
    diesel::insert_into(canonical_slots::table)
        .values(&new_slot)
        // Re-inserting an existing slot is a silent no-op.
        .on_conflict_do_nothing()
        .execute(conn)?;

    debug!("Canonical slot inserted: {}", new_slot.slot);
    Ok(())
}
/// Insert `block` into the `beacon_blocks` and `proposer_info` tables and link
/// it into the `canonical_slots` table.
pub fn insert_beacon_block<T: EthSpec>(
    conn: &mut PgConn,
    block: SignedBeaconBlock<T>,
    root: WatchHash,
) -> Result<(), Error> {
    use self::canonical_slots::dsl::{beacon_block, slot as canonical_slot};

    let block_message = block.message();

    // Pull out relevant values from the block.
    let slot = WatchSlot::from_slot(block.slot());
    let parent_root = WatchHash::from_hash(block.parent_root());
    let proposer_index = block_message.proposer_index() as i32;
    let graffiti = block_message.body().graffiti().as_utf8_lossy();
    let attestation_count = block_message.body().attestations().len() as i32;

    // `None` for pre-Bellatrix blocks which carry no execution payload.
    let full_payload = block_message.execution_payload().ok();

    // NOTE(review): only Merge and Capella payload variants are inspected here;
    // payloads from later forks would yield `None` counts — confirm intended.
    let transaction_count: Option<i32> = if let Some(bellatrix_payload) =
        full_payload.and_then(|payload| payload.execution_payload_merge().ok())
    {
        Some(bellatrix_payload.transactions.len() as i32)
    } else {
        full_payload
            .and_then(|payload| payload.execution_payload_capella().ok())
            .map(|payload| payload.transactions.len() as i32)
    };

    // Withdrawals only exist from Capella onwards.
    let withdrawal_count: Option<i32> = full_payload
        .and_then(|payload| payload.execution_payload_capella().ok())
        .map(|payload| payload.withdrawals.len() as i32);

    let block_to_add = WatchBeaconBlock {
        slot,
        root,
        parent_root,
        attestation_count,
        transaction_count,
        withdrawal_count,
    };

    let proposer_info_to_add = WatchProposerInfo {
        slot,
        proposer_index,
        graffiti,
    };

    // Update the canonical slots table.
    diesel::update(canonical_slots::table)
        .set(beacon_block.eq(root))
        .filter(canonical_slot.eq(slot))
        // Do not overwrite the value if it already exists.
        .filter(beacon_block.is_null())
        .execute(conn)?;

    diesel::insert_into(beacon_blocks::table)
        .values(block_to_add)
        .on_conflict_do_nothing()
        .execute(conn)?;

    diesel::insert_into(proposer_info::table)
        .values(proposer_info_to_add)
        .on_conflict_do_nothing()
        .execute(conn)?;

    debug!("Beacon block inserted at slot: {slot}, root: {root}, parent: {parent_root}");
    Ok(())
}
/// Insert a validator into the `validators` table
///
/// On a conflict, it will only overwrite `status`, `activation_epoch` and `exit_epoch`.
pub fn insert_validator(conn: &mut PgConn, validator: WatchValidator) -> Result<(), Error> {
    use self::validators::dsl::*;
    // Copy these out before `validator` is moved into the insert below, so
    // they can be logged afterwards.
    let new_index = validator.index;
    let new_public_key = validator.public_key;

    diesel::insert_into(validators)
        .values(validator)
        .on_conflict(index)
        .do_update()
        .set((
            status.eq(excluded(status)),
            activation_epoch.eq(excluded(activation_epoch)),
            exit_epoch.eq(excluded(exit_epoch)),
        ))
        .execute(conn)?;

    debug!("Validator inserted, index: {new_index}, public_key: {new_public_key}");
    Ok(())
}
/// Insert a batch of values into the `validators` table.
///
/// On a conflict, it will do nothing.
///
/// Should not be used when updating validators.
/// Validators should be updated through the `insert_validator` function which contains the correct
/// `on_conflict` clauses.
pub fn insert_batch_validators(
    conn: &mut PgConn,
    all_validators: Vec<WatchValidator>,
) -> Result<(), Error> {
    use self::validators::dsl::*;

    // Insert in chunks of 1000 rows, accumulating the number of rows written.
    let count: usize = all_validators.chunks(1000).try_fold(0, |total, chunk| {
        diesel::insert_into(validators)
            .values(chunk)
            .on_conflict_do_nothing()
            .execute(conn)
            .map(|inserted| total + inserted)
    })?;

    debug!("Validators inserted, count: {count}");
    Ok(())
}
///
/// SELECT statements
///
/// Selects a single row of the `canonical_slots` table corresponding to a given `slot_query`.
pub fn get_canonical_slot(
    conn: &mut PgConn,
    slot_query: WatchSlot,
) -> Result<Option<WatchCanonicalSlot>, Error> {
    use self::canonical_slots::dsl::*;
    // Time the query so slow lookups are visible in the debug logs.
    let timer = Instant::now();

    let result = canonical_slots
        .filter(slot.eq(slot_query))
        .first::<WatchCanonicalSlot>(conn)
        .optional()?;

    let time_taken = timer.elapsed();
    debug!("Canonical slot requested: {slot_query}, time taken: {time_taken:?}");
    Ok(result)
}

/// Selects a single row of the `canonical_slots` table corresponding to a given `root_query`.
/// Only returns the non-skipped slot which matches `root`.
pub fn get_canonical_slot_by_root(
    conn: &mut PgConn,
    root_query: WatchHash,
) -> Result<Option<WatchCanonicalSlot>, Error> {
    use self::canonical_slots::dsl::*;
    let timer = Instant::now();

    let result = canonical_slots
        .filter(root.eq(root_query))
        .filter(skipped.eq(false))
        .first::<WatchCanonicalSlot>(conn)
        .optional()?;

    let time_taken = timer.elapsed();
    debug!("Canonical root requested: {root_query}, time taken: {time_taken:?}");
    Ok(result)
}
/// Selects `root` from a single row of the `canonical_slots` table corresponding to a given
/// `slot_query`.
#[allow(dead_code)]
pub fn get_root_at_slot(
    conn: &mut PgConn,
    slot_query: WatchSlot,
) -> Result<Option<WatchHash>, Error> {
    use self::canonical_slots::dsl::*;
    let timer = Instant::now();

    let result = canonical_slots
        .select(root)
        .filter(slot.eq(slot_query))
        .first::<WatchHash>(conn)
        .optional()?;

    let time_taken = timer.elapsed();
    debug!("Canonical slot requested: {slot_query}, time taken: {time_taken:?}");
    Ok(result)
}

/// Selects `slot` from the row of the `canonical_slots` table corresponding to the minimum value
/// of `slot`.
pub fn get_lowest_canonical_slot(conn: &mut PgConn) -> Result<Option<WatchCanonicalSlot>, Error> {
    use self::canonical_slots::dsl::*;
    let timer = Instant::now();

    let result = canonical_slots
        .order_by(slot.asc())
        .limit(1)
        .first::<WatchCanonicalSlot>(conn)
        .optional()?;

    let time_taken = timer.elapsed();
    debug!("Canonical slot requested: lowest, time taken: {time_taken:?}");
    Ok(result)
}
/// Selects `slot` from the row of the `canonical_slots` table corresponding to the minimum value
/// of `slot` and where `skipped == false`.
pub fn get_lowest_non_skipped_canonical_slot(
    conn: &mut PgConn,
) -> Result<Option<WatchCanonicalSlot>, Error> {
    use self::canonical_slots::dsl::*;
    let timer = Instant::now();

    let result = canonical_slots
        .filter(skipped.eq(false))
        .order_by(slot.asc())
        .limit(1)
        .first::<WatchCanonicalSlot>(conn)
        .optional()?;

    let time_taken = timer.elapsed();
    // Fixed: the log message previously ended with a stray `)`.
    debug!("Canonical slot requested: lowest_non_skipped, time taken: {time_taken:?}");
    Ok(result)
}
/// Select 'slot' from the row of the `canonical_slots` table corresponding to the maximum value
/// of `slot`.
pub fn get_highest_canonical_slot(conn: &mut PgConn) -> Result<Option<WatchCanonicalSlot>, Error> {
    use self::canonical_slots::dsl::*;
    // Time the query so slow lookups are visible in the debug logs.
    let timer = Instant::now();

    let result = canonical_slots
        .order_by(slot.desc())
        .limit(1)
        .first::<WatchCanonicalSlot>(conn)
        .optional()?;

    let time_taken = timer.elapsed();
    debug!("Canonical slot requested: highest, time taken: {time_taken:?}");
    Ok(result)
}

/// Select 'slot' from the row of the `canonical_slots` table corresponding to the maximum value
/// of `slot` and where `skipped == false`.
pub fn get_highest_non_skipped_canonical_slot(
    conn: &mut PgConn,
) -> Result<Option<WatchCanonicalSlot>, Error> {
    use self::canonical_slots::dsl::*;
    let timer = Instant::now();

    let result = canonical_slots
        .filter(skipped.eq(false))
        .order_by(slot.desc())
        .limit(1)
        .first::<WatchCanonicalSlot>(conn)
        .optional()?;

    let time_taken = timer.elapsed();
    debug!("Canonical slot requested: highest_non_skipped, time taken: {time_taken:?}");
    Ok(result)
}
/// Select all rows of the `canonical_slots` table where `slot >= `start_slot && slot <=
/// `end_slot` (inclusive bounds).
pub fn get_canonical_slots_by_range(
    conn: &mut PgConn,
    start_slot: WatchSlot,
    end_slot: WatchSlot,
) -> Result<Option<Vec<WatchCanonicalSlot>>, Error> {
    use self::canonical_slots::dsl::*;
    let timer = Instant::now();

    // NOTE(review): `.load(..)` returns an empty Vec rather than a not-found
    // error, so this `Option` is presumably always `Some` — confirm intent.
    let result = canonical_slots
        .filter(slot.ge(start_slot))
        .filter(slot.le(end_slot))
        .load::<WatchCanonicalSlot>(conn)
        .optional()?;

    let time_taken = timer.elapsed();
    debug!(
        "Canonical slots by range requested, start_slot: {}, end_slot: {}, time_taken: {:?}",
        start_slot.as_u64(),
        end_slot.as_u64(),
        time_taken
    );
    Ok(result)
}

/// Selects `root` from all rows of the `canonical_slots` table which have `beacon_block == null`
/// and `skipped == false`
pub fn get_unknown_canonical_blocks(conn: &mut PgConn) -> Result<Vec<WatchHash>, Error> {
    use self::canonical_slots::dsl::*;

    let result = canonical_slots
        .select(root)
        .filter(beacon_block.is_null())
        .filter(skipped.eq(false))
        .order_by(slot.desc())
        .load::<WatchHash>(conn)?;

    Ok(result)
}
/// Selects the row from the `beacon_blocks` table where `slot` is minimum.
pub fn get_lowest_beacon_block(conn: &mut PgConn) -> Result<Option<WatchBeaconBlock>, Error> {
    use self::beacon_blocks::dsl::*;
    // Time the query so slow lookups are visible in the debug logs.
    let timer = Instant::now();

    let result = beacon_blocks
        .order_by(slot.asc())
        .limit(1)
        .first::<WatchBeaconBlock>(conn)
        .optional()?;

    let time_taken = timer.elapsed();
    debug!("Beacon block requested: lowest, time taken: {time_taken:?}");
    Ok(result)
}

/// Selects the row from the `beacon_blocks` table where `slot` is maximum.
pub fn get_highest_beacon_block(conn: &mut PgConn) -> Result<Option<WatchBeaconBlock>, Error> {
    use self::beacon_blocks::dsl::*;
    let timer = Instant::now();

    let result = beacon_blocks
        .order_by(slot.desc())
        .limit(1)
        .first::<WatchBeaconBlock>(conn)
        .optional()?;

    let time_taken = timer.elapsed();
    debug!("Beacon block requested: highest, time taken: {time_taken:?}");
    Ok(result)
}

/// Selects a single row from the `beacon_blocks` table corresponding to a given `root_query`.
pub fn get_beacon_block_by_root(
    conn: &mut PgConn,
    root_query: WatchHash,
) -> Result<Option<WatchBeaconBlock>, Error> {
    use self::beacon_blocks::dsl::*;
    let timer = Instant::now();

    let result = beacon_blocks
        .filter(root.eq(root_query))
        .first::<WatchBeaconBlock>(conn)
        .optional()?;
    let time_taken = timer.elapsed();
    debug!("Beacon block requested: {root_query}, time taken: {time_taken:?}");
    Ok(result)
}

/// Selects a single row from the `beacon_blocks` table corresponding to a given `slot_query`.
pub fn get_beacon_block_by_slot(
    conn: &mut PgConn,
    slot_query: WatchSlot,
) -> Result<Option<WatchBeaconBlock>, Error> {
    use self::beacon_blocks::dsl::*;
    let timer = Instant::now();

    let result = beacon_blocks
        .filter(slot.eq(slot_query))
        .first::<WatchBeaconBlock>(conn)
        .optional()?;
    let time_taken = timer.elapsed();
    debug!("Beacon block requested: {slot_query}, time taken: {time_taken:?}");
    Ok(result)
}
/// Selects the row from the `beacon_blocks` table where `parent_root` equals the given `parent`.
/// This fetches the next block in the database.
///
/// Will return `Ok(None)` if there are no matching blocks (e.g. the tip of the chain).
pub fn get_beacon_block_with_parent(
    conn: &mut PgConn,
    parent: WatchHash,
) -> Result<Option<WatchBeaconBlock>, Error> {
    use self::beacon_blocks::dsl::*;
    // Time the query so slow lookups are visible in the debug logs.
    let timer = Instant::now();

    let result = beacon_blocks
        .filter(parent_root.eq(parent))
        .first::<WatchBeaconBlock>(conn)
        .optional()?;

    let time_taken = timer.elapsed();
    debug!("Next beacon block requested: {parent}, time taken: {time_taken:?}");
    Ok(result)
}

/// Select all rows of the `beacon_blocks` table where `slot >= `start_slot && slot <=
/// `end_slot` (inclusive bounds).
pub fn get_beacon_blocks_by_range(
    conn: &mut PgConn,
    start_slot: WatchSlot,
    end_slot: WatchSlot,
) -> Result<Option<Vec<WatchBeaconBlock>>, Error> {
    use self::beacon_blocks::dsl::*;
    let timer = Instant::now();

    let result = beacon_blocks
        .filter(slot.ge(start_slot))
        .filter(slot.le(end_slot))
        .load::<WatchBeaconBlock>(conn)
        .optional()?;

    let time_taken = timer.elapsed();
    debug!("Beacon blocks by range requested, start_slot: {start_slot}, end_slot: {end_slot}, time_taken: {time_taken:?}");
    Ok(result)
}
/// Selects a single row of the `proposer_info` table corresponding to a given `root_query`.
///
/// Joins `beacon_blocks` onto `proposer_info` (they are joinable on `slot`, see the schema)
/// so the proposer can be found from a block root rather than a slot.
pub fn get_proposer_info_by_root(
    conn: &mut PgConn,
    root_query: WatchHash,
) -> Result<Option<WatchProposerInfo>, Error> {
    use self::beacon_blocks::dsl::{beacon_blocks, root};
    use self::proposer_info::dsl::*;
    let timer = Instant::now();
    let join = beacon_blocks.inner_join(proposer_info);
    // Select only the `proposer_info` columns so the row maps onto `WatchProposerInfo`.
    let result = join
        .select((slot, proposer_index, graffiti))
        .filter(root.eq(root_query))
        .first::<WatchProposerInfo>(conn)
        .optional()?;
    let time_taken = timer.elapsed();
    debug!("Proposer info requested for block: {root_query}, time taken: {time_taken:?}");
    Ok(result)
}
/// Selects a single row of the `proposer_info` table corresponding to a given `slot_query`.
pub fn get_proposer_info_by_slot(
    conn: &mut PgConn,
    slot_query: WatchSlot,
) -> Result<Option<WatchProposerInfo>, Error> {
    use self::proposer_info::dsl::*;
    let start = Instant::now();
    let info = proposer_info
        .filter(slot.eq(slot_query))
        .first::<WatchProposerInfo>(conn)
        .optional()?;
    let time_taken = start.elapsed();
    debug!("Proposer info requested for slot: {slot_query}, time taken: {time_taken:?}");
    Ok(info)
}
/// Selects multiple rows of the `proposer_info` table between `start_slot` and `end_slot`
/// (both inclusive).
#[allow(dead_code)]
pub fn get_proposer_info_by_range(
    conn: &mut PgConn,
    start_slot: WatchSlot,
    end_slot: WatchSlot,
) -> Result<Option<Vec<WatchProposerInfo>>, Error> {
    use self::proposer_info::dsl::*;
    let timer = Instant::now();
    let result = proposer_info
        .filter(slot.ge(start_slot))
        .filter(slot.le(end_slot))
        .load::<WatchProposerInfo>(conn)
        .optional()?;
    let time_taken = timer.elapsed();
    debug!(
        "Proposer info requested for range: {start_slot} to {end_slot}, time taken: {time_taken:?}"
    );
    Ok(result)
}
pub fn get_validators_latest_proposer_info(
conn: &mut PgConn,
indices_query: Vec<i32>,
) -> Result<HashMap<i32, WatchProposerInfo>, Error> {
use self::proposer_info::dsl::*;
let proposers = proposer_info
.filter(proposer_index.eq_any(indices_query))
.load::<WatchProposerInfo>(conn)?;
let mut result = HashMap::new();
for proposer in proposers {
result
.entry(proposer.proposer_index)
.or_insert_with(|| proposer.clone());
let entry = result
.get_mut(&proposer.proposer_index)
.ok_or_else(|| Error::Other("An internal error occured".to_string()))?;
if proposer.slot > entry.slot {
entry.slot = proposer.slot
}
}
Ok(result)
}
/// Selects the max(`slot`) and `proposer_index` of each unique index in the
/// `proposer_info` table and returns them formatted as a `HashMap` keyed by that max slot.
/// Only returns rows which have `slot <= target_slot`.
///
/// Keying by slot is safe from collisions because each slot has exactly one proposer row
/// (`slot` is the primary key of `proposer_info` in the schema).
///
/// Ideally, this would return the full row, but I have not found a way to do that without using
/// a much more expensive SQL query.
pub fn get_all_validators_latest_proposer_info_at_slot(
    conn: &mut PgConn,
    target_slot: WatchSlot,
) -> Result<HashMap<WatchSlot, i32>, Error> {
    use self::proposer_info::dsl::*;
    // `max(slot)` is `Nullable` in SQL, hence the `Option<WatchSlot>` in the tuple.
    let latest_proposals: Vec<(i32, Option<WatchSlot>)> = proposer_info
        .group_by(proposer_index)
        .select((proposer_index, max(slot)))
        .filter(slot.le(target_slot))
        .load::<(i32, Option<WatchSlot>)>(conn)?;
    let mut result = HashMap::new();
    for proposal in latest_proposals {
        if let Some(latest_slot) = proposal.1 {
            result.insert(latest_slot, proposal.0);
        }
    }
    Ok(result)
}
/// Selects a single row from the `validators` table corresponding to a given
/// `validator_index_query`.
pub fn get_validator_by_index(
    conn: &mut PgConn,
    validator_index_query: i32,
) -> Result<Option<WatchValidator>, Error> {
    use self::validators::dsl::*;
    let start = Instant::now();
    let validator = validators
        .filter(index.eq(validator_index_query))
        .first::<WatchValidator>(conn)
        .optional()?;
    let time_taken = start.elapsed();
    debug!("Validator requested: {validator_index_query}, time taken: {time_taken:?}");
    Ok(validator)
}
/// Selects a single row from the `validators` table corresponding to a given
/// `public_key_query`.
pub fn get_validator_by_public_key(
    conn: &mut PgConn,
    public_key_query: WatchPK,
) -> Result<Option<WatchValidator>, Error> {
    use self::validators::dsl::*;
    let start = Instant::now();
    let validator = validators
        .filter(public_key.eq(public_key_query))
        .first::<WatchValidator>(conn)
        .optional()?;
    let time_taken = start.elapsed();
    debug!("Validator requested: {public_key_query}, time taken: {time_taken:?}");
    Ok(validator)
}
/// Selects all rows from the `validators` table whose `index` is contained in
/// `indices_query`. Indices with no matching row are silently omitted.
#[allow(dead_code)]
pub fn get_validators_by_indices(
    conn: &mut PgConn,
    indices_query: Vec<i32>,
) -> Result<Vec<WatchValidator>, Error> {
    use self::validators::dsl::*;
    let start = Instant::now();
    // Record the request size before `indices_query` is moved into the filter.
    let query_len = indices_query.len();
    let matching = validators
        .filter(index.eq_any(indices_query))
        .load::<WatchValidator>(conn)?;
    let time_taken = start.elapsed();
    debug!("{query_len} validators requested, time taken: {time_taken:?}");
    Ok(matching)
}
/// Selects every row from the `validators` table.
pub fn get_all_validators(conn: &mut PgConn) -> Result<Vec<WatchValidator>, Error> {
    use self::validators::dsl::*;
    let start = Instant::now();
    let all_validators = validators.load::<WatchValidator>(conn)?;
    let time_taken = start.elapsed();
    debug!("All validators requested, time taken: {time_taken:?}");
    Ok(all_validators)
}
/// Counts the number of rows in the `validators` table.
///
/// Returns the count as an `i64` (Postgres `BIGINT`).
#[allow(dead_code)]
pub fn count_validators(conn: &mut PgConn) -> Result<i64, Error> {
    use self::validators::dsl::*;
    validators.count().get_result(conn).map_err(Error::Database)
}
/// Counts the number of rows in the `validators` table where
/// `activation_epoch <= target_slot.epoch()`.
///
/// Rows with a NULL `activation_epoch` are excluded by the SQL comparison.
///
/// NOTE(review): `target_epoch.as_u64() as i32` silently truncates for epochs beyond
/// `i32::MAX`. The schema stores `activation_epoch` as `Int4`, so this assumes realistic
/// epoch values fit in an `i32` — confirm.
pub fn count_validators_activated_before_slot(
    conn: &mut PgConn,
    target_slot: WatchSlot,
    slots_per_epoch: u64,
) -> Result<i64, Error> {
    use self::validators::dsl::*;
    let target_epoch = target_slot.epoch(slots_per_epoch);
    validators
        .count()
        .filter(activation_epoch.le(target_epoch.as_u64() as i32))
        .get_result(conn)
        .map_err(Error::Database)
}
///
/// DELETE statements.
///
/// Deletes all rows of the `canonical_slots` table which have `slot` greater than `slot_query`.
///
/// Due to the ON DELETE CASCADE clause present in the database migration SQL, deleting rows from
/// `canonical_slots` will delete all corresponding rows in `beacon_blocks`, `block_rewards`,
/// `block_packing` and `proposer_info`.
///
/// Returns the number of rows deleted from `canonical_slots` itself (cascaded deletions are
/// presumably not included in this count — confirm against Postgres behavior).
pub fn delete_canonical_slots_above(
    conn: &mut PgConn,
    slot_query: WatchSlot,
) -> Result<usize, Error> {
    use self::canonical_slots::dsl::*;
    let result = diesel::delete(canonical_slots)
        .filter(slot.gt(slot_query))
        .execute(conn)?;
    debug!("Deleted canonical slots above {slot_query}: {result} rows deleted");
    Ok(result)
}
/// Deletes all rows of the `suboptimal_attestations` table which have `epoch_start_slot`
/// greater than `epoch_start_slot_query`.
///
/// Returns the number of rows deleted.
pub fn delete_suboptimal_attestations_above(
    conn: &mut PgConn,
    epoch_start_slot_query: WatchSlot,
) -> Result<usize, Error> {
    use self::suboptimal_attestations::dsl::*;
    let result = diesel::delete(suboptimal_attestations.filter(epoch_start_slot.gt(epoch_start_slot_query)))
        .execute(conn)?;
    debug!("Deleted attestations above: {epoch_start_slot_query}, rows deleted: {result}");
    Ok(result)
}

View File

@ -0,0 +1,67 @@
use crate::database::{
schema::{beacon_blocks, canonical_slots, proposer_info, validators},
watch_types::{WatchHash, WatchPK, WatchSlot},
};
use diesel::{Insertable, Queryable};
use serde::{Deserialize, Serialize};
use std::hash::{Hash, Hasher};
/// Epochs are stored in the database as `Int4` (`i32`) values.
pub type WatchEpoch = i32;

/// Row of the `canonical_slots` table: one entry per slot of the canonical chain.
#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)]
#[diesel(table_name = canonical_slots)]
pub struct WatchCanonicalSlot {
    pub slot: WatchSlot,
    pub root: WatchHash,
    // Presumably true when no block was applied at this slot (a "skipped" slot) — confirm
    // against the updater logic.
    pub skipped: bool,
    pub beacon_block: Option<WatchHash>,
}
/// Row of the `beacon_blocks` table.
#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)]
#[diesel(table_name = beacon_blocks)]
pub struct WatchBeaconBlock {
    pub slot: WatchSlot,
    pub root: WatchHash,
    pub parent_root: WatchHash,
    pub attestation_count: i32,
    // Nullable in the schema; presumably `None` for blocks whose fork does not carry the
    // corresponding payload fields — TODO confirm.
    pub transaction_count: Option<i32>,
    pub withdrawal_count: Option<i32>,
}
/// Row of the `validators` table.
#[derive(Clone, Debug, Queryable, Insertable, Serialize, Deserialize)]
#[diesel(table_name = validators)]
pub struct WatchValidator {
    pub index: i32,
    pub public_key: WatchPK,
    pub status: String,
    // Nullable: `None` presumably means the validator has not (yet) been assigned the
    // corresponding epoch — confirm against the updater logic.
    pub activation_epoch: Option<WatchEpoch>,
    pub exit_epoch: Option<WatchEpoch>,
}
// Implement a minimal version of `Hash` and `Eq` so that we know if a validator status has changed.
// Both impls deliberately skip `public_key`: equality/hashing only consider `index` plus the
// fields that can change over time (`status`, `activation_epoch`, `exit_epoch`).
// `Hash` and `PartialEq` use the same field set, preserving the required invariant that
// `a == b` implies `hash(a) == hash(b)`.
impl Hash for WatchValidator {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.index.hash(state);
        self.status.hash(state);
        self.activation_epoch.hash(state);
        self.exit_epoch.hash(state);
    }
}
impl PartialEq for WatchValidator {
    fn eq(&self, other: &Self) -> bool {
        self.index == other.index
            && self.status == other.status
            && self.activation_epoch == other.activation_epoch
            && self.exit_epoch == other.exit_epoch
    }
}
impl Eq for WatchValidator {}
/// Row of the `proposer_info` table: the proposer index and graffiti of the block at `slot`.
#[derive(Clone, Debug, Queryable, Insertable, Serialize, Deserialize)]
#[diesel(table_name = proposer_info)]
pub struct WatchProposerInfo {
    pub slot: WatchSlot,
    pub proposer_index: i32,
    pub graffiti: String,
}

View File

@ -0,0 +1,102 @@
// @generated automatically by Diesel CLI.
// NOTE: this file is machine-generated from the SQL migrations. Do not edit by hand;
// regenerate it (e.g. `diesel print-schema`) after changing the migrations.
diesel::table! {
    active_config (id) {
        id -> Int4,
        config_name -> Text,
        slots_per_epoch -> Int4,
    }
}
diesel::table! {
    beacon_blocks (slot) {
        slot -> Int4,
        root -> Bytea,
        parent_root -> Bytea,
        attestation_count -> Int4,
        transaction_count -> Nullable<Int4>,
        withdrawal_count -> Nullable<Int4>,
    }
}
diesel::table! {
    block_packing (slot) {
        slot -> Int4,
        available -> Int4,
        included -> Int4,
        prior_skip_slots -> Int4,
    }
}
diesel::table! {
    block_rewards (slot) {
        slot -> Int4,
        total -> Int4,
        attestation_reward -> Int4,
        sync_committee_reward -> Int4,
    }
}
diesel::table! {
    blockprint (slot) {
        slot -> Int4,
        best_guess -> Text,
    }
}
diesel::table! {
    canonical_slots (slot) {
        slot -> Int4,
        root -> Bytea,
        skipped -> Bool,
        beacon_block -> Nullable<Bytea>,
    }
}
diesel::table! {
    proposer_info (slot) {
        slot -> Int4,
        proposer_index -> Int4,
        graffiti -> Text,
    }
}
diesel::table! {
    suboptimal_attestations (epoch_start_slot, index) {
        epoch_start_slot -> Int4,
        index -> Int4,
        source -> Bool,
        head -> Bool,
        target -> Bool,
    }
}
diesel::table! {
    validators (index) {
        index -> Int4,
        public_key -> Bytea,
        status -> Text,
        activation_epoch -> Nullable<Int4>,
        exit_epoch -> Nullable<Int4>,
    }
}
// Foreign-key relationships used by `inner_join` in the query modules.
diesel::joinable!(block_packing -> beacon_blocks (slot));
diesel::joinable!(block_rewards -> beacon_blocks (slot));
diesel::joinable!(blockprint -> beacon_blocks (slot));
diesel::joinable!(proposer_info -> beacon_blocks (slot));
diesel::joinable!(proposer_info -> validators (proposer_index));
diesel::joinable!(suboptimal_attestations -> canonical_slots (epoch_start_slot));
diesel::joinable!(suboptimal_attestations -> validators (index));
diesel::allow_tables_to_appear_in_same_query!(
    active_config,
    beacon_blocks,
    block_packing,
    block_rewards,
    blockprint,
    canonical_slots,
    proposer_info,
    suboptimal_attestations,
    validators,
);

View File

@ -0,0 +1,29 @@
#![allow(dead_code)]
use crate::database::config::Config;
use diesel::pg::PgConnection;
use diesel::prelude::*;
use diesel_migrations::{FileBasedMigrations, MigrationHarness};
/// Sets `config.dbname` to `config.default_dbname` and returns `(new_config, old_dbname)`.
///
/// This is useful for creating or dropping databases, since these actions must be done by
/// logging into another database.
pub fn get_config_using_default_db(config: &Config) -> (Config, String) {
    let mut new_config = config.clone();
    let default_dbname = new_config.default_dbname.clone();
    // `mem::replace` returns the value being replaced, i.e. the *old* dbname. The previous
    // binding was misleadingly named `new_dbname`.
    let old_dbname = std::mem::replace(&mut new_config.dbname, default_dbname);
    (new_config, old_dbname)
}
/// Runs the set of migrations as detected in the local directory.
/// Equivalent to `diesel migration run`.
///
/// The returned connection is left inside a test transaction
/// (`begin_test_transaction`), so changes made through it are never committed.
///
/// Contains `unwrap`s so is only suitable for test code.
/// TODO(mac) refactor to return Result<PgConnection, Error>
pub fn run_migrations(config: &Config) -> PgConnection {
    let database_url = config.clone().build_database_url();
    let mut conn = PgConnection::establish(&database_url).unwrap();
    let migrations = FileBasedMigrations::find_migrations_directory().unwrap();
    conn.run_pending_migrations(migrations).unwrap();
    conn.begin_test_transaction().unwrap();
    conn
}

View File

@ -0,0 +1,119 @@
use crate::database::error::Error;
use diesel::{
sql_types::{Binary, Integer},
AsExpression, FromSqlRow,
};
use serde::{Deserialize, Serialize};
use std::fmt;
use std::str::FromStr;
use types::{Epoch, Hash256, PublicKeyBytes, Slot};
/// A wrapper around a beacon chain `Slot` which maps to the SQL `Integer` (`i32`) type.
#[derive(
    Clone,
    Copy,
    Debug,
    AsExpression,
    FromSqlRow,
    Deserialize,
    Serialize,
    Hash,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
)]
#[diesel(sql_type = Integer)]
pub struct WatchSlot(Slot);
/// Displays the inner slot number, delegating to `Slot`'s `Display` impl.
impl fmt::Display for WatchSlot {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&self.0, f)
    }
}
impl WatchSlot {
    /// Constructs a `WatchSlot` from a raw slot number.
    pub fn new(slot: u64) -> Self {
        Self(Slot::new(slot))
    }
    /// Wraps an existing `Slot`.
    pub fn from_slot(slot: Slot) -> Self {
        Self(slot)
    }
    /// Returns the inner `Slot`.
    pub fn as_slot(self) -> Slot {
        self.0
    }
    /// Returns the slot as a raw `u64`.
    pub fn as_u64(self) -> u64 {
        self.0.as_u64()
    }
    /// Returns the epoch this slot belongs to.
    pub fn epoch(self, slots_per_epoch: u64) -> Epoch {
        self.as_slot().epoch(slots_per_epoch)
    }
}
/// A wrapper around a 32-byte `Hash256` which maps to the SQL `Binary` (`BYTEA`) type.
#[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, Deserialize, Serialize)]
#[diesel(sql_type = Binary)]
pub struct WatchHash(Hash256);
/// Displays the hash using `Hash256`'s `Debug` formatting (`0x`-prefixed hex).
impl fmt::Display for WatchHash {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:?}", self.0)
    }
}
impl WatchHash {
    /// Returns the inner `Hash256`.
    pub fn as_hash(&self) -> Hash256 {
        self.0
    }
    /// Wraps an existing `Hash256`.
    pub fn from_hash(hash: Hash256) -> Self {
        WatchHash(hash)
    }
    /// Returns the raw bytes of the hash.
    pub fn as_bytes(&self) -> &[u8] {
        self.0.as_bytes()
    }
    /// Constructs a `WatchHash` from a byte slice.
    ///
    /// Returns `Error::InvalidRoot` unless `src` is exactly 32 bytes long.
    pub fn from_bytes(src: &[u8]) -> Result<WatchHash, Error> {
        if src.len() == 32 {
            Ok(WatchHash(Hash256::from_slice(src)))
        } else {
            Err(Error::InvalidRoot)
        }
    }
}
/// A wrapper around a validator public key which maps to the SQL `Binary` (`BYTEA`) type.
#[derive(Clone, Copy, Debug, Eq, PartialEq, AsExpression, FromSqlRow, Serialize, Deserialize)]
#[diesel(sql_type = Binary)]
pub struct WatchPK(PublicKeyBytes);
/// Displays the key using `PublicKeyBytes`'s `Debug` formatting.
impl fmt::Display for WatchPK {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:?}", self.0)
    }
}
impl WatchPK {
    /// Returns the serialized (compressed) bytes of the key.
    pub fn as_bytes(&self) -> &[u8] {
        self.0.as_serialized()
    }
    /// Deserializes a `WatchPK` from raw bytes; the error from
    /// `PublicKeyBytes::deserialize` is converted via `From`.
    pub fn from_bytes(src: &[u8]) -> Result<WatchPK, Error> {
        Ok(WatchPK(PublicKeyBytes::deserialize(src)?))
    }
    /// Wraps an existing `PublicKeyBytes`.
    pub fn from_pubkey(key: PublicKeyBytes) -> Self {
        WatchPK(key)
    }
}
/// Parses a `WatchPK` from its hex string form, reporting parse failures as `String`s.
impl FromStr for WatchPK {
    type Err = String;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let pubkey = PublicKeyBytes::from_str(s).map_err(|e| format!("Cannot be parsed: {}", e))?;
        Ok(WatchPK(pubkey))
    }
}

12
watch/src/lib.rs Normal file
View File

@ -0,0 +1,12 @@
#![cfg(unix)]
//! Library root for the `watch` service. The crate is Unix-only; `main.rs` provides a stub
//! entry point for Windows.
pub mod block_packing;
pub mod block_rewards;
pub mod blockprint;
pub mod cli;
pub mod client;
pub mod config;
pub mod database;
pub mod logger;
pub mod server;
pub mod suboptimal_attestations;
pub mod updater;

24
watch/src/logger.rs Normal file
View File

@ -0,0 +1,24 @@
use env_logger::Builder;
use log::{info, LevelFilter};
use std::process;
/// Initialises the global `env_logger` for the `watch` crate at the given level.
///
/// Accepts one of `trace`, `debug`, `info`, `warn` or `error` (case-insensitive).
/// Any other value prints an error to stderr and terminates the process with status 1.
pub fn init_logger(log_level: &str) {
    // Shadow the string argument with the parsed filter; the `info!` below prints the
    // `LevelFilter`, matching the original behavior.
    let log_level = match log_level.to_lowercase().as_str() {
        "trace" => LevelFilter::Trace,
        "debug" => LevelFilter::Debug,
        "info" => LevelFilter::Info,
        "warn" => LevelFilter::Warn,
        "error" => LevelFilter::Error,
        _ => {
            eprintln!("Unsupported log level");
            process::exit(1)
        }
    };
    Builder::new().filter(Some("watch"), log_level).init();
    info!("Logger initialized with log-level: {log_level}");
}

41
watch/src/main.rs Normal file
View File

@ -0,0 +1,41 @@
#[cfg(unix)]
use std::process;
#[cfg(unix)]
mod block_packing;
#[cfg(unix)]
mod block_rewards;
#[cfg(unix)]
mod blockprint;
#[cfg(unix)]
mod cli;
#[cfg(unix)]
mod config;
#[cfg(unix)]
mod database;
#[cfg(unix)]
mod logger;
#[cfg(unix)]
mod server;
#[cfg(unix)]
mod suboptimal_attestations;
#[cfg(unix)]
mod updater;
// Entry point on Unix: run the CLI and convert its `Result` into a process exit code.
#[cfg(unix)]
#[tokio::main]
async fn main() {
    match cli::run().await {
        Ok(()) => process::exit(0),
        Err(e) => {
            eprintln!("Command failed with: {}", e);
            // Drop the error explicitly: `process::exit` does not run destructors.
            drop(e);
            process::exit(1)
        }
    }
}
/// Entry point on Windows, where the watch service is unsupported.
#[cfg(windows)]
fn main() {
    eprintln!("Windows is not supported. Exiting.");
    // Exit with a non-zero status so scripts and supervisors can detect the failure;
    // previously the process reported success despite doing nothing.
    std::process::exit(1);
}

View File

@ -0,0 +1,28 @@
use serde::{Deserialize, Serialize};
use std::net::IpAddr;
pub const LISTEN_ADDR: &str = "127.0.0.1";
/// Default TCP port for the HTTP API server.
pub const fn listen_port() -> u16 {
    5059
}
/// Parses `LISTEN_ADDR` into an `IpAddr` for use as the serde default.
///
/// Panics only if the `LISTEN_ADDR` constant is not a valid IP address, which would be a
/// programming error.
fn listen_addr() -> IpAddr {
    LISTEN_ADDR.parse().expect("Server address is not valid")
}
/// Configuration for the HTTP API server.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    /// Address to bind to; defaults to `127.0.0.1` when absent from the config file.
    #[serde(default = "listen_addr")]
    pub listen_addr: IpAddr,
    /// Port to bind to; defaults to `5059` when absent from the config file.
    #[serde(default = "listen_port")]
    pub listen_port: u16,
}
// Mirror the serde defaults so programmatic construction matches deserialization.
impl Default for Config {
    fn default() -> Self {
        Self {
            listen_addr: listen_addr(),
            listen_port: listen_port(),
        }
    }
}

50
watch/src/server/error.rs Normal file
View File

@ -0,0 +1,50 @@
use crate::database::Error as DbError;
use axum::Error as AxumError;
use axum::{http::StatusCode, response::IntoResponse, Json};
use hyper::Error as HyperError;
use serde_json::json;
/// Top-level error type for the HTTP server.
#[derive(Debug)]
pub enum Error {
    /// Error raised by the `axum` web framework (HTTP 500).
    Axum(AxumError),
    /// Error raised by the underlying `hyper` HTTP stack (HTTP 500).
    Hyper(HyperError),
    /// Error returned from the database layer (HTTP 500).
    Database(DbError),
    /// The request was malformed (HTTP 400).
    BadRequest,
    /// The requested resource was not found (HTTP 404).
    NotFound,
    /// Catch-all for string-typed errors (HTTP 500).
    Other(String),
}
impl IntoResponse for Error {
    /// Converts the error into an HTTP response with a JSON body of the form
    /// `{"error": "<message>"}`.
    fn into_response(self) -> axum::response::Response {
        let (status, error_message) = match self {
            Self::BadRequest => (StatusCode::BAD_REQUEST, "Bad Request"),
            Self::NotFound => (StatusCode::NOT_FOUND, "Not Found"),
            // All remaining variants are reported as opaque 500s so internal details
            // (e.g. database errors) are not leaked to clients.
            _ => (StatusCode::INTERNAL_SERVER_ERROR, "Internal Server Error"),
        };
        (status, Json(json!({ "error": error_message }))).into_response()
    }
}
impl From<HyperError> for Error {
fn from(e: HyperError) -> Self {
Error::Hyper(e)
}
}
impl From<AxumError> for Error {
fn from(e: AxumError) -> Self {
Error::Axum(e)
}
}
impl From<DbError> for Error {
fn from(e: DbError) -> Self {
Error::Database(e)
}
}
impl From<String> for Error {
fn from(e: String) -> Self {
Error::Other(e)
}
}

266
watch/src/server/handler.rs Normal file
View File

@ -0,0 +1,266 @@
use crate::database::{
self, Error as DbError, PgPool, WatchBeaconBlock, WatchCanonicalSlot, WatchHash, WatchPK,
WatchProposerInfo, WatchSlot, WatchValidator,
};
use crate::server::Error;
use axum::{
extract::{Path, Query},
Extension, Json,
};
use eth2::types::BlockId;
use std::collections::HashMap;
use std::str::FromStr;
/// Handler for `GET /v1/slots/:slot`: returns the canonical slot row, if any.
pub async fn get_slot(
    Path(slot): Path<u64>,
    Extension(pool): Extension<PgPool>,
) -> Result<Json<Option<WatchCanonicalSlot>>, Error> {
    let mut conn = database::get_connection(&pool).map_err(Error::Database)?;
    let canonical_slot = database::get_canonical_slot(&mut conn, WatchSlot::new(slot))?;
    Ok(Json(canonical_slot))
}
/// Handler for `GET /v1/slots/lowest`: the lowest canonical slot in the database.
pub async fn get_slot_lowest(
    Extension(pool): Extension<PgPool>,
) -> Result<Json<Option<WatchCanonicalSlot>>, Error> {
    let mut conn = database::get_connection(&pool).map_err(Error::Database)?;
    let lowest = database::get_lowest_canonical_slot(&mut conn)?;
    Ok(Json(lowest))
}
/// Handler for `GET /v1/slots/highest`: the highest canonical slot in the database.
pub async fn get_slot_highest(
    Extension(pool): Extension<PgPool>,
) -> Result<Json<Option<WatchCanonicalSlot>>, Error> {
    let mut conn = database::get_connection(&pool).map_err(Error::Database)?;
    let highest = database::get_highest_canonical_slot(&mut conn)?;
    Ok(Json(highest))
}
pub async fn get_slots_by_range(
Query(query): Query<HashMap<String, u64>>,
Extension(pool): Extension<PgPool>,
) -> Result<Json<Option<Vec<WatchCanonicalSlot>>>, Error> {
let mut conn = database::get_connection(&pool).map_err(Error::Database)?;
if let Some(start_slot) = query.get("start_slot") {
if let Some(end_slot) = query.get("end_slot") {
if start_slot > end_slot {
Err(Error::BadRequest)
} else {
Ok(Json(database::get_canonical_slots_by_range(
&mut conn,
WatchSlot::new(*start_slot),
WatchSlot::new(*end_slot),
)?))
}
} else {
Err(Error::BadRequest)
}
} else {
Err(Error::BadRequest)
}
}
/// Handler for `GET /v1/blocks/:block` and `GET /v1/slots/:slot/block`.
///
/// `block_query` is parsed as an `eth2` `BlockId`: a slot number or a block root. Any other
/// `BlockId` variant is rejected with `400 Bad Request`.
pub async fn get_block(
    Path(block_query): Path<String>,
    Extension(pool): Extension<PgPool>,
) -> Result<Json<Option<WatchBeaconBlock>>, Error> {
    let mut conn = database::get_connection(&pool).map_err(Error::Database)?;
    let block_id: BlockId = BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)?;
    match block_id {
        BlockId::Slot(slot) => Ok(Json(database::get_beacon_block_by_slot(
            &mut conn,
            WatchSlot::from_slot(slot),
        )?)),
        BlockId::Root(root) => Ok(Json(database::get_beacon_block_by_root(
            &mut conn,
            WatchHash::from_hash(root),
        )?)),
        _ => Err(Error::BadRequest),
    }
}
/// Handler for `GET /v1/blocks/lowest`: the lowest beacon block in the database.
pub async fn get_block_lowest(
    Extension(pool): Extension<PgPool>,
) -> Result<Json<Option<WatchBeaconBlock>>, Error> {
    let mut conn = database::get_connection(&pool).map_err(Error::Database)?;
    let lowest = database::get_lowest_beacon_block(&mut conn)?;
    Ok(Json(lowest))
}
/// Handler for `GET /v1/blocks/highest`: the highest beacon block in the database.
pub async fn get_block_highest(
    Extension(pool): Extension<PgPool>,
) -> Result<Json<Option<WatchBeaconBlock>>, Error> {
    let mut conn = database::get_connection(&pool).map_err(Error::Database)?;
    let highest = database::get_highest_beacon_block(&mut conn)?;
    Ok(Json(highest))
}
/// Handler for `GET /v1/blocks/:block/previous`.
///
/// For a root query, looks up the block and then fetches its parent via `parent_root`
/// (404 if the queried block itself is unknown). For a slot query, returns the block at
/// `slot - 1` (404 at slot 0 via the `checked_sub`).
pub async fn get_block_previous(
    Path(block_query): Path<String>,
    Extension(pool): Extension<PgPool>,
) -> Result<Json<Option<WatchBeaconBlock>>, Error> {
    let mut conn = database::get_connection(&pool).map_err(Error::Database)?;
    match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? {
        BlockId::Root(root) => {
            if let Some(block) =
                database::get_beacon_block_by_root(&mut conn, WatchHash::from_hash(root))?
                    .map(|block| block.parent_root)
            {
                Ok(Json(database::get_beacon_block_by_root(&mut conn, block)?))
            } else {
                Err(Error::NotFound)
            }
        }
        BlockId::Slot(slot) => Ok(Json(database::get_beacon_block_by_slot(
            &mut conn,
            WatchSlot::new(slot.as_u64().checked_sub(1_u64).ok_or(Error::NotFound)?),
        )?)),
        _ => Err(Error::BadRequest),
    }
}
/// Handler for `GET /v1/blocks/:block/next`.
///
/// For a root query, returns the block whose `parent_root` equals the given root; for a slot
/// query, returns the block at `slot + 1`. Other `BlockId` variants are rejected with 400.
pub async fn get_block_next(
    Path(block_query): Path<String>,
    Extension(pool): Extension<PgPool>,
) -> Result<Json<Option<WatchBeaconBlock>>, Error> {
    let mut conn = database::get_connection(&pool).map_err(Error::Database)?;
    match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? {
        BlockId::Root(root) => Ok(Json(database::get_beacon_block_with_parent(
            &mut conn,
            WatchHash::from_hash(root),
        )?)),
        BlockId::Slot(slot) => Ok(Json(database::get_beacon_block_by_slot(
            &mut conn,
            WatchSlot::from_slot(slot + 1_u64),
        )?)),
        _ => Err(Error::BadRequest),
    }
}
pub async fn get_blocks_by_range(
Query(query): Query<HashMap<String, u64>>,
Extension(pool): Extension<PgPool>,
) -> Result<Json<Option<Vec<WatchBeaconBlock>>>, Error> {
let mut conn = database::get_connection(&pool).map_err(Error::Database)?;
if let Some(start_slot) = query.get("start_slot") {
if let Some(end_slot) = query.get("end_slot") {
if start_slot > end_slot {
Err(Error::BadRequest)
} else {
Ok(Json(database::get_beacon_blocks_by_range(
&mut conn,
WatchSlot::new(*start_slot),
WatchSlot::new(*end_slot),
)?))
}
} else {
Err(Error::BadRequest)
}
} else {
Err(Error::BadRequest)
}
}
/// Handler for `GET /v1/blocks/:block/proposer`.
///
/// Accepts a slot or a block root; other `BlockId` variants are rejected with 400.
pub async fn get_block_proposer(
    Path(block_query): Path<String>,
    Extension(pool): Extension<PgPool>,
) -> Result<Json<Option<WatchProposerInfo>>, Error> {
    let mut conn = database::get_connection(&pool).map_err(Error::Database)?;
    match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? {
        BlockId::Root(root) => Ok(Json(database::get_proposer_info_by_root(
            &mut conn,
            WatchHash::from_hash(root),
        )?)),
        BlockId::Slot(slot) => Ok(Json(database::get_proposer_info_by_slot(
            &mut conn,
            WatchSlot::from_slot(slot),
        )?)),
        _ => Err(Error::BadRequest),
    }
}
/// Handler for `GET /v1/validators/:validator`.
///
/// A query starting with `0x` is treated as a public key; anything else must parse as an
/// `i32` validator index, otherwise `400 Bad Request` is returned.
pub async fn get_validator(
    Path(validator_query): Path<String>,
    Extension(pool): Extension<PgPool>,
) -> Result<Json<Option<WatchValidator>>, Error> {
    let mut conn = database::get_connection(&pool).map_err(Error::Database)?;
    if validator_query.starts_with("0x") {
        let pubkey = WatchPK::from_str(&validator_query).map_err(|_| Error::BadRequest)?;
        Ok(Json(database::get_validator_by_public_key(
            &mut conn, pubkey,
        )?))
    } else {
        let index = i32::from_str(&validator_query).map_err(|_| Error::BadRequest)?;
        Ok(Json(database::get_validator_by_index(&mut conn, index)?))
    }
}
/// Handler for `GET /v1/validators/all`: every row of the `validators` table.
pub async fn get_all_validators(
    Extension(pool): Extension<PgPool>,
) -> Result<Json<Vec<WatchValidator>>, Error> {
    let mut conn = database::get_connection(&pool).map_err(Error::Database)?;
    let all_validators = database::get_all_validators(&mut conn)?;
    Ok(Json(all_validators))
}
/// Handler for `GET /v1/validators/:validator/latest_proposal`.
///
/// A query starting with `0x` is resolved to an index via the `validators` table first
/// (404 if the key is unknown); otherwise the query must parse as an `i32` index.
pub async fn get_validator_latest_proposal(
    Path(validator_query): Path<String>,
    Extension(pool): Extension<PgPool>,
) -> Result<Json<HashMap<i32, WatchProposerInfo>>, Error> {
    let mut conn = database::get_connection(&pool).map_err(Error::Database)?;
    if validator_query.starts_with("0x") {
        let pubkey = WatchPK::from_str(&validator_query).map_err(|_| Error::BadRequest)?;
        let validator =
            database::get_validator_by_public_key(&mut conn, pubkey)?.ok_or(Error::NotFound)?;
        Ok(Json(database::get_validators_latest_proposer_info(
            &mut conn,
            vec![validator.index],
        )?))
    } else {
        let index = i32::from_str(&validator_query).map_err(|_| Error::BadRequest)?;
        Ok(Json(database::get_validators_latest_proposer_info(
            &mut conn,
            vec![index],
        )?))
    }
}
/// Handler for `GET /v1/clients`: a count of validators per client, evaluated at the highest
/// canonical slot in the database.
///
/// Returns a 500 (via `Error::Database`) if the database contains no slots yet.
pub async fn get_client_breakdown(
    Extension(pool): Extension<PgPool>,
    Extension(slots_per_epoch): Extension<u64>,
) -> Result<Json<HashMap<String, usize>>, Error> {
    let mut conn = database::get_connection(&pool).map_err(Error::Database)?;
    if let Some(target_slot) = database::get_highest_canonical_slot(&mut conn)? {
        Ok(Json(database::get_validators_clients_at_slot(
            &mut conn,
            target_slot.slot,
            slots_per_epoch,
        )?))
    } else {
        Err(Error::Database(DbError::Other(
            "No slots found in database.".to_string(),
        )))
    }
}
/// Handler for `GET /v1/clients/percentages`: the percentage of active validators per client,
/// evaluated at the highest canonical slot in the database.
///
/// Returns an empty map when the database has no slots or no activated validators.
pub async fn get_client_breakdown_percentages(
    Extension(pool): Extension<PgPool>,
    Extension(slots_per_epoch): Extension<u64>,
) -> Result<Json<HashMap<String, f64>>, Error> {
    let mut conn = database::get_connection(&pool).map_err(Error::Database)?;
    let mut result = HashMap::new();
    if let Some(target_slot) = database::get_highest_canonical_slot(&mut conn)? {
        let total = database::count_validators_activated_before_slot(
            &mut conn,
            target_slot.slot,
            slots_per_epoch,
        )?;
        // Guard against dividing by zero: with no activated validators the previous code
        // produced `inf`/NaN percentages. An empty map is returned instead.
        if total > 0 {
            let clients = database::get_validators_clients_at_slot(
                &mut conn,
                target_slot.slot,
                slots_per_epoch,
            )?;
            for (client, number) in clients.iter() {
                let percentage: f64 = *number as f64 / total as f64 * 100.0;
                result.insert(client.to_string(), percentage);
            }
        }
    }
    Ok(Json(result))
}

134
watch/src/server/mod.rs Normal file
View File

@ -0,0 +1,134 @@
use crate::block_packing::block_packing_routes;
use crate::block_rewards::block_rewards_routes;
use crate::blockprint::blockprint_routes;
use crate::config::Config as FullConfig;
use crate::database::{self, PgPool};
use crate::suboptimal_attestations::{attestation_routes, blockprint_attestation_routes};
use axum::{
handler::Handler,
http::{StatusCode, Uri},
routing::get,
Extension, Json, Router,
};
use eth2::types::ErrorMessage;
use log::info;
use std::future::Future;
use std::net::SocketAddr;
use tokio::sync::oneshot;
pub use config::Config;
pub use error::Error;
mod config;
mod error;
mod handler;
/// Runs the HTTP API server until the `shutdown` receiver resolves (or its sender is dropped).
///
/// Reads `slots_per_epoch` from the database's active config, so the updater must have run at
/// least once before the server starts.
pub async fn serve(config: FullConfig, shutdown: oneshot::Receiver<()>) -> Result<(), Error> {
    let db = database::build_connection_pool(&config.database)?;
    let (_, slots_per_epoch) = database::get_active_config(&mut database::get_connection(&db)?)?
        .ok_or_else(|| {
            Error::Other(
                "Database not found. Please run the updater prior to starting the server"
                    .to_string(),
            )
        })?;
    // `let _ =` treats a dropped sender the same as an explicit shutdown signal.
    let server = start_server(&config, slots_per_epoch as u64, db, async {
        let _ = shutdown.await;
    })?;
    server.await?;
    Ok(())
}
/// Creates a server that will serve requests using information from `config`.
///
/// The server will create its own connection pool to serve connections to the database.
/// This is separate to the connection pool that is used for the `updater`.
///
/// The server will shut down gracefully when the `shutdown` future resolves.
///
/// ## Returns
///
/// This function will bind the server to the address specified in the config and then return a
/// Future representing the actual server that will need to be awaited.
///
/// ## Errors
///
/// Returns an error if the server is unable to bind or there is another error during
/// configuration.
pub fn start_server(
    config: &FullConfig,
    slots_per_epoch: u64,
    pool: PgPool,
    shutdown: impl Future<Output = ()> + Send + Sync + 'static,
) -> Result<impl Future<Output = Result<(), hyper::Error>> + 'static, Error> {
    let mut routes = Router::new()
        .route("/v1/slots", get(handler::get_slots_by_range))
        .route("/v1/slots/:slot", get(handler::get_slot))
        .route("/v1/slots/lowest", get(handler::get_slot_lowest))
        .route("/v1/slots/highest", get(handler::get_slot_highest))
        .route("/v1/slots/:slot/block", get(handler::get_block))
        .route("/v1/blocks", get(handler::get_blocks_by_range))
        .route("/v1/blocks/:block", get(handler::get_block))
        .route("/v1/blocks/lowest", get(handler::get_block_lowest))
        .route("/v1/blocks/highest", get(handler::get_block_highest))
        .route(
            "/v1/blocks/:block/previous",
            get(handler::get_block_previous),
        )
        .route("/v1/blocks/:block/next", get(handler::get_block_next))
        .route(
            "/v1/blocks/:block/proposer",
            get(handler::get_block_proposer),
        )
        .route("/v1/validators/:validator", get(handler::get_validator))
        .route("/v1/validators/all", get(handler::get_all_validators))
        .route(
            "/v1/validators/:validator/latest_proposal",
            get(handler::get_validator_latest_proposal),
        )
        .route("/v1/clients", get(handler::get_client_breakdown))
        .route(
            "/v1/clients/percentages",
            get(handler::get_client_breakdown_percentages),
        )
        .merge(attestation_routes())
        .merge(blockprint_routes())
        .merge(block_packing_routes())
        .merge(block_rewards_routes());
    // Routes correlating attestations with blockprint are only added when both data sources
    // are enabled in the config.
    if config.blockprint.enabled && config.updater.attestations {
        routes = routes.merge(blockprint_attestation_routes())
    }
    let app = routes
        .fallback(route_not_found.into_service())
        // Make the connection pool and `slots_per_epoch` available to every handler via
        // axum `Extension` extractors.
        .layer(Extension(pool))
        .layer(Extension(slots_per_epoch));
    let addr = SocketAddr::new(config.server.listen_addr, config.server.listen_port);
    // Fail fast if the address cannot be bound; the returned future drives the server itself.
    let server = axum::Server::try_bind(&addr)?.serve(app.into_make_service());
    let server = server.with_graceful_shutdown(async {
        shutdown.await;
    });
    info!("HTTP server listening on {}", addr);
    Ok(server)
}
/// The default (fallback) route for requests which match no registered route.
///
/// NOTE(review): this responds with `405 Method Not Allowed` rather than `404 Not Found` —
/// confirm this mirrors the intended HTTP API convention.
async fn route_not_found(uri: Uri) -> (StatusCode, Json<ErrorMessage>) {
    (
        StatusCode::METHOD_NOT_ALLOWED,
        Json(ErrorMessage {
            code: StatusCode::METHOD_NOT_ALLOWED.as_u16(),
            message: format!("No route for {uri}"),
            stacktraces: vec![],
        }),
    )
}

View File

@ -0,0 +1,224 @@
use crate::database::{
schema::{suboptimal_attestations, validators},
watch_types::{WatchPK, WatchSlot},
Error, PgConn, MAX_SIZE_BATCH_INSERT,
};
use diesel::prelude::*;
use diesel::{Insertable, Queryable};
use log::debug;
use serde::{Deserialize, Serialize};
use std::time::Instant;
use types::Epoch;
/// A per-epoch attestation performance summary for one validator, as returned by the API.
/// Unlike `WatchSuboptimalAttestation`, it carries the `Epoch` rather than the epoch's start
/// slot.
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub struct WatchAttestation {
    pub index: i32,
    pub epoch: Epoch,
    pub source: bool,
    pub head: bool,
    pub target: bool,
}
impl WatchAttestation {
    /// Returns an attestation with all votes (`source`, `head`, `target`) marked correct.
    pub fn optimal(index: i32, epoch: Epoch) -> WatchAttestation {
        WatchAttestation {
            index,
            epoch,
            source: true,
            head: true,
            target: true,
        }
    }
}
/// Row of the `suboptimal_attestations` table, keyed by (`epoch_start_slot`, `index`).
#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)]
#[diesel(table_name = suboptimal_attestations)]
pub struct WatchSuboptimalAttestation {
    pub epoch_start_slot: WatchSlot,
    pub index: i32,
    pub source: bool,
    pub head: bool,
    pub target: bool,
}
impl WatchSuboptimalAttestation {
    /// Converts the database row into the API representation by resolving
    /// `epoch_start_slot` into its `Epoch`.
    pub fn to_attestation(&self, slots_per_epoch: u64) -> WatchAttestation {
        WatchAttestation {
            index: self.index,
            epoch: self.epoch_start_slot.epoch(slots_per_epoch),
            source: self.source,
            head: self.head,
            target: self.target,
        }
    }
}
/// Insert a batch of values into the `suboptimal_attestations` table
///
/// Since attestations technically occur per-slot but we only store them per-epoch (via its
/// `start_slot`) so if any slot in the epoch changes, we need to resync the whole epoch as a
/// 'suboptimal' attestation could now be 'optimal'.
///
/// This is handled in the update code, where in the case of a re-org, the affected epoch is
/// deleted completely.
///
/// On a conflict, it will do nothing. This means the logged `count` can be smaller than the
/// number of rows passed in.
pub fn insert_batch_suboptimal_attestations(
    conn: &mut PgConn,
    attestations: Vec<WatchSuboptimalAttestation>,
) -> Result<(), Error> {
    use self::suboptimal_attestations::dsl::*;
    let mut count = 0;
    let timer = Instant::now();
    // Insert in chunks to stay below the Postgres bind-parameter limit.
    for chunk in attestations.chunks(MAX_SIZE_BATCH_INSERT) {
        count += diesel::insert_into(suboptimal_attestations)
            .values(chunk)
            .on_conflict_do_nothing()
            .execute(conn)?;
    }
    let time_taken = timer.elapsed();
    debug!("Attestations inserted, count: {count}, time taken: {time_taken:?}");
    Ok(())
}
/// Selects the row from the `suboptimal_attestations` table where `epoch_start_slot` is minimum.
///
/// Returns `Ok(None)` when the table is empty.
pub fn get_lowest_attestation(
    conn: &mut PgConn,
) -> Result<Option<WatchSuboptimalAttestation>, Error> {
    use self::suboptimal_attestations::dsl::*;
    // `first` already applies `LIMIT 1`, so an explicit `.limit(1)` is redundant.
    Ok(suboptimal_attestations
        .order_by(epoch_start_slot.asc())
        .first::<WatchSuboptimalAttestation>(conn)
        .optional()?)
}
/// Selects the row from the `suboptimal_attestations` table where `epoch_start_slot` is maximum.
///
/// Returns `Ok(None)` when the table is empty.
pub fn get_highest_attestation(
    conn: &mut PgConn,
) -> Result<Option<WatchSuboptimalAttestation>, Error> {
    use self::suboptimal_attestations::dsl::*;
    // `first` already applies `LIMIT 1`, so an explicit `.limit(1)` is redundant.
    Ok(suboptimal_attestations
        .order_by(epoch_start_slot.desc())
        .first::<WatchSuboptimalAttestation>(conn)
        .optional()?)
}
/// Selects a single row from the `suboptimal_attestations` table corresponding to a given
/// `index_query` and `epoch_query`.
///
/// Returns `Ok(None)` when no suboptimal attestation exists for that validator/epoch pair.
pub fn get_attestation_by_index(
    conn: &mut PgConn,
    index_query: i32,
    epoch_query: Epoch,
    slots_per_epoch: u64,
) -> Result<Option<WatchSuboptimalAttestation>, Error> {
    use self::suboptimal_attestations::dsl::*;
    let timer = Instant::now();
    // The table is keyed by the epoch's start slot, so translate the epoch first.
    let result = suboptimal_attestations
        .filter(epoch_start_slot.eq(WatchSlot::from_slot(
            epoch_query.start_slot(slots_per_epoch),
        )))
        .filter(index.eq(index_query))
        .first::<WatchSuboptimalAttestation>(conn)
        .optional()?;
    let time_taken = timer.elapsed();
    debug!("Attestation requested for validator: {index_query}, epoch: {epoch_query}, time taken: {time_taken:?}");
    Ok(result)
}
/// Selects a single row from the `suboptimal_attestations` table corresponding
/// to a given `pubkey_query` and `epoch_query`.
///
/// Resolves the public key to a validator index via a join on the `validators` table.
#[allow(dead_code)]
pub fn get_attestation_by_pubkey(
    conn: &mut PgConn,
    pubkey_query: WatchPK,
    epoch_query: Epoch,
    slots_per_epoch: u64,
) -> Result<Option<WatchSuboptimalAttestation>, Error> {
    use self::suboptimal_attestations::dsl::*;
    use self::validators::dsl::{public_key, validators};
    let timer = Instant::now();
    let join = validators.inner_join(suboptimal_attestations);
    // Select only the attestation columns so the row maps onto `WatchSuboptimalAttestation`.
    let result = join
        .select((epoch_start_slot, index, source, head, target))
        .filter(epoch_start_slot.eq(WatchSlot::from_slot(
            epoch_query.start_slot(slots_per_epoch),
        )))
        .filter(public_key.eq(pubkey_query))
        .first::<WatchSuboptimalAttestation>(conn)
        .optional()?;
    let time_taken = timer.elapsed();
    debug!("Attestation requested for validator: {pubkey_query}, epoch: {epoch_query}, time taken: {time_taken:?}");
    Ok(result)
}
/// Selects `index` for all validators in the suboptimal_attestations table
/// that have `source == false` for the corresponding `epoch_start_slot_query`.
pub fn get_validators_missed_source(
    conn: &mut PgConn,
    epoch_start_slot_query: WatchSlot,
) -> Result<Vec<i32>, Error> {
    use self::suboptimal_attestations::dsl::*;
    let indices = suboptimal_attestations
        .filter(source.eq(false))
        .filter(epoch_start_slot.eq(epoch_start_slot_query))
        .select(index)
        .load::<i32>(conn)?;
    Ok(indices)
}
/// Selects `index` for all validators in the suboptimal_attestations table
/// that have `head == false` for the corresponding `epoch_start_slot_query`.
pub fn get_validators_missed_head(
    conn: &mut PgConn,
    epoch_start_slot_query: WatchSlot,
) -> Result<Vec<i32>, Error> {
    use self::suboptimal_attestations::dsl::*;
    let indices = suboptimal_attestations
        .filter(head.eq(false))
        .filter(epoch_start_slot.eq(epoch_start_slot_query))
        .select(index)
        .load::<i32>(conn)?;
    Ok(indices)
}
/// Selects `index` for all validators in the suboptimal_attestations table
/// that have `target == false` for the corresponding `epoch_start_slot_query`.
pub fn get_validators_missed_target(
    conn: &mut PgConn,
    epoch_start_slot_query: WatchSlot,
) -> Result<Vec<i32>, Error> {
    use self::suboptimal_attestations::dsl::*;
    let indices = suboptimal_attestations
        .filter(target.eq(false))
        .filter(epoch_start_slot.eq(epoch_start_slot_query))
        .select(index)
        .load::<i32>(conn)?;
    Ok(indices)
}
/// Selects all rows from the `suboptimal_attestations` table for the given
/// `epoch_start_slot_query`.
pub fn get_all_suboptimal_attestations_for_epoch(
    conn: &mut PgConn,
    epoch_start_slot_query: WatchSlot,
) -> Result<Vec<WatchSuboptimalAttestation>, Error> {
    use self::suboptimal_attestations::dsl::*;
    let rows = suboptimal_attestations
        .filter(epoch_start_slot.eq(epoch_start_slot_query))
        .load::<WatchSuboptimalAttestation>(conn)?;
    Ok(rows)
}

View File

@ -0,0 +1,56 @@
pub mod database;
pub mod server;
pub mod updater;
use crate::database::watch_types::WatchSlot;
use crate::updater::error::Error;
pub use database::{
get_all_suboptimal_attestations_for_epoch, get_attestation_by_index, get_attestation_by_pubkey,
get_highest_attestation, get_lowest_attestation, insert_batch_suboptimal_attestations,
WatchAttestation, WatchSuboptimalAttestation,
};
pub use server::{attestation_routes, blockprint_attestation_routes};
use eth2::BeaconNodeHttpClient;
use types::Epoch;
/// Sends a request to `lighthouse/analysis/attestation_performance`.
/// Formats the response into a vector of `WatchSuboptimalAttestation`.
///
/// Any attestations with `source == true && head == true && target == true` are ignored.
pub async fn get_attestation_performances(
    bn: &BeaconNodeHttpClient,
    start_epoch: Epoch,
    end_epoch: Epoch,
    slots_per_epoch: u64,
) -> Result<Vec<WatchSuboptimalAttestation>, Error> {
    let response = bn
        .get_lighthouse_analysis_attestation_performance(
            start_epoch,
            end_epoch,
            "global".to_string(),
        )
        .await?;
    let mut suboptimal = Vec::new();
    for validator in response {
        let validator_index = validator.index as i32;
        for (epoch, performance) in validator.epochs {
            // Skip inactive validators and fully optimal attestations.
            if !performance.active
                || (performance.source && performance.head && performance.target)
            {
                continue;
            }
            suboptimal.push(WatchSuboptimalAttestation {
                epoch_start_slot: WatchSlot::from_slot(
                    Epoch::new(epoch).start_slot(slots_per_epoch),
                ),
                index: validator_index,
                source: performance.source,
                head: performance.head,
                target: performance.target,
            })
        }
    }
    Ok(suboptimal)
}

View File

@ -0,0 +1,299 @@
use crate::database::{
get_canonical_slot, get_connection, get_validator_by_index, get_validator_by_public_key,
get_validators_clients_at_slot, get_validators_latest_proposer_info, PgPool, WatchPK,
WatchSlot,
};
use crate::blockprint::database::construct_validator_blockprints_at_slot;
use crate::server::Error;
use crate::suboptimal_attestations::database::{
get_all_suboptimal_attestations_for_epoch, get_attestation_by_index,
get_validators_missed_head, get_validators_missed_source, get_validators_missed_target,
WatchAttestation, WatchSuboptimalAttestation,
};
use axum::{extract::Path, routing::get, Extension, Json, Router};
use std::collections::{HashMap, HashSet};
use std::str::FromStr;
use types::Epoch;
// Will return Ok(None) if the epoch is not synced or if the validator does not exist.
// In the future it might be worth differentiating these events.
pub async fn get_validator_attestation(
    Path((validator_query, epoch_query)): Path<(String, u64)>,
    Extension(pool): Extension<PgPool>,
    Extension(slots_per_epoch): Extension<u64>,
) -> Result<Json<Option<WatchAttestation>>, Error> {
    let mut conn = get_connection(&pool).map_err(Error::Database)?;
    let epoch = Epoch::new(epoch_query);
    // Ensure the database has synced the target epoch.
    if get_canonical_slot(
        &mut conn,
        WatchSlot::from_slot(epoch.end_slot(slots_per_epoch)),
    )?
    .is_none()
    {
        // Epoch is not fully synced.
        return Ok(Json(None));
    }
    // `validator_query` is either a "0x"-prefixed public key or a numeric validator index.
    let index = if validator_query.starts_with("0x") {
        let pubkey = WatchPK::from_str(&validator_query).map_err(|_| Error::BadRequest)?;
        get_validator_by_public_key(&mut conn, pubkey)?
            .ok_or(Error::NotFound)?
            .index
    } else {
        i32::from_str(&validator_query).map_err(|_| Error::BadRequest)?
    };
    // A stored row means at least one vote flag was `false`; the absence of a row for an
    // active validator means the attestation was optimal.
    let attestation = if let Some(suboptimal_attestation) =
        get_attestation_by_index(&mut conn, index, epoch, slots_per_epoch)?
    {
        Some(suboptimal_attestation.to_attestation(slots_per_epoch))
    } else {
        // Attestation was not in database. Check if the validator was active.
        match get_validator_by_index(&mut conn, index)? {
            Some(validator) => {
                if let Some(activation_epoch) = validator.activation_epoch {
                    if activation_epoch <= epoch.as_u64() as i32 {
                        if let Some(exit_epoch) = validator.exit_epoch {
                            if exit_epoch > epoch.as_u64() as i32 {
                                // Validator is active and has not yet exited.
                                Some(WatchAttestation::optimal(index, epoch))
                            } else {
                                // Validator has exited.
                                None
                            }
                        } else {
                            // Validator is active and has not yet exited.
                            Some(WatchAttestation::optimal(index, epoch))
                        }
                    } else {
                        // Validator is not yet active.
                        None
                    }
                } else {
                    // Validator is not yet active.
                    None
                }
            }
            None => return Err(Error::Other("Validator index does not exist".to_string())),
        }
    };
    Ok(Json(attestation))
}
/// Returns every suboptimal attestation stored for the requested epoch.
pub async fn get_all_validators_attestations(
    Path(epoch): Path<u64>,
    Extension(pool): Extension<PgPool>,
    Extension(slots_per_epoch): Extension<u64>,
) -> Result<Json<Vec<WatchSuboptimalAttestation>>, Error> {
    let mut conn = get_connection(&pool).map_err(Error::Database)?;
    // The table is keyed by the epoch's start slot.
    let epoch_start_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch));
    let attestations = get_all_suboptimal_attestations_for_epoch(&mut conn, epoch_start_slot)?;
    Ok(Json(attestations))
}
pub async fn get_validators_missed_vote(
Path((vote, epoch)): Path<(String, u64)>,
Extension(pool): Extension<PgPool>,
Extension(slots_per_epoch): Extension<u64>,
) -> Result<Json<Vec<i32>>, Error> {
let mut conn = get_connection(&pool).map_err(Error::Database)?;
let epoch_start_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch));
match vote.to_lowercase().as_str() {
"source" => Ok(Json(get_validators_missed_source(
&mut conn,
epoch_start_slot,
)?)),
"head" => Ok(Json(get_validators_missed_head(
&mut conn,
epoch_start_slot,
)?)),
"target" => Ok(Json(get_validators_missed_target(
&mut conn,
epoch_start_slot,
)?)),
_ => Err(Error::BadRequest),
}
}
/// Counts, per graffiti string, how many validators missed the given vote in the epoch.
///
/// The graffiti used is each validator's latest proposer info.
pub async fn get_validators_missed_vote_graffiti(
    Path((vote, epoch)): Path<(String, u64)>,
    Extension(pool): Extension<PgPool>,
    Extension(slots_per_epoch): Extension<u64>,
) -> Result<Json<HashMap<String, u64>>, Error> {
    let mut conn = get_connection(&pool).map_err(Error::Database)?;
    let Json(indices) = get_validators_missed_vote(
        Path((vote, epoch)),
        Extension(pool),
        Extension(slots_per_epoch),
    )
    .await?;
    let graffitis = get_validators_latest_proposer_info(&mut conn, indices)?
        .values()
        .map(|info| info.graffiti.clone())
        .collect::<Vec<String>>();
    // Tally occurrences with the entry API: a single map lookup per key, and no
    // unreachable error branch from a `get_mut` that cannot fail.
    let mut result = HashMap::new();
    for graffiti in graffitis {
        *result.entry(graffiti).or_insert(0u64) += 1;
    }
    Ok(Json(result))
}
/// Builds the router for the attestation-related HTTP endpoints.
pub fn attestation_routes() -> Router {
    Router::new()
        .route(
            "/v1/validators/:validator/attestation/:epoch",
            get(get_validator_attestation),
        )
        .route(
            "/v1/validators/all/attestation/:epoch",
            get(get_all_validators_attestations),
        )
        .route(
            "/v1/validators/missed/:vote/:epoch",
            get(get_validators_missed_vote),
        )
        .route(
            "/v1/validators/missed/:vote/:epoch/graffiti",
            get(get_validators_missed_vote_graffiti),
        )
}
/// The functions below are dependent on Blockprint and if it is disabled, the endpoints will be
/// disabled.
///
/// Counts, per client (as classified by blockprint), how many validators missed the given vote.
pub async fn get_clients_missed_vote(
    Path((vote, epoch)): Path<(String, u64)>,
    Extension(pool): Extension<PgPool>,
    Extension(slots_per_epoch): Extension<u64>,
) -> Result<Json<HashMap<String, u64>>, Error> {
    let mut conn = get_connection(&pool).map_err(Error::Database)?;
    let Json(indices) = get_validators_missed_vote(
        Path((vote, epoch)),
        Extension(pool),
        Extension(slots_per_epoch),
    )
    .await?;
    // All validators which missed the vote.
    let indices_map = indices.into_iter().collect::<HashSet<i32>>();
    let target_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch));
    // All validators.
    let client_map =
        construct_validator_blockprints_at_slot(&mut conn, target_slot, slots_per_epoch)?;
    // Tally per-client counts with the entry API: a single lookup per key, and no
    // unreachable error branch from a `get_mut` that cannot fail.
    let mut result = HashMap::new();
    for index in indices_map {
        if let Some(print) = client_map.get(&index) {
            *result.entry(print.clone()).or_insert(0u64) += 1;
        }
    }
    Ok(Json(result))
}
/// For each client, the percentage of that client's validators which missed the given vote.
pub async fn get_clients_missed_vote_percentages(
    Path((vote, epoch)): Path<(String, u64)>,
    Extension(pool): Extension<PgPool>,
    Extension(slots_per_epoch): Extension<u64>,
) -> Result<Json<HashMap<String, f64>>, Error> {
    let Json(clients_counts) = get_clients_missed_vote(
        Path((vote, epoch)),
        Extension(pool.clone()),
        Extension(slots_per_epoch),
    )
    .await?;
    let target_slot = WatchSlot::from_slot(Epoch::new(epoch).start_slot(slots_per_epoch));
    let mut conn = get_connection(&pool)?;
    // Per-client totals across all validators (the denominator).
    let totals = get_validators_clients_at_slot(&mut conn, target_slot, slots_per_epoch)?;
    let mut result = HashMap::new();
    for (client, count) in clients_counts.iter() {
        let client_total: f64 = *totals
            .get(client)
            .ok_or_else(|| Error::Other("Client type mismatch".to_string()))?
            as f64;
        // `client_total` should never be `0`, but if it is, return `0` instead of `inf`.
        if client_total == 0.0 {
            result.insert(client.to_string(), 0.0);
        } else {
            let percentage: f64 = *count as f64 / client_total * 100.0;
            result.insert(client.to_string(), percentage);
        }
    }
    Ok(Json(result))
}
/// For each client, that client's share (as a percentage) of all missed votes in the epoch.
pub async fn get_clients_missed_vote_percentages_relative(
    Path((vote, epoch)): Path<(String, u64)>,
    Extension(pool): Extension<PgPool>,
    Extension(slots_per_epoch): Extension<u64>,
) -> Result<Json<HashMap<String, f64>>, Error> {
    let Json(clients_counts) = get_clients_missed_vote(
        Path((vote, epoch)),
        Extension(pool),
        Extension(slots_per_epoch),
    )
    .await?;
    // Total missed votes across all clients (the denominator).
    let total: u64 = clients_counts.values().sum();
    let mut result = HashMap::new();
    for (client, count) in clients_counts.iter() {
        // `total` should never be 0, but if it is, return `0.0` instead of `inf`.
        if total == 0 {
            result.insert(client.to_string(), 0.0);
        } else {
            let percentage: f64 = *count as f64 / total as f64 * 100.0;
            result.insert(client.to_string(), percentage);
        }
    }
    Ok(Json(result))
}
/// Builds the router for the attestation endpoints which depend on Blockprint.
pub fn blockprint_attestation_routes() -> Router {
    Router::new()
        .route(
            "/v1/clients/missed/:vote/:epoch",
            get(get_clients_missed_vote),
        )
        .route(
            "/v1/clients/missed/:vote/:epoch/percentages",
            get(get_clients_missed_vote_percentages),
        )
        .route(
            "/v1/clients/missed/:vote/:epoch/percentages/relative",
            get(get_clients_missed_vote_percentages_relative),
        )
}

View File

@ -0,0 +1,236 @@
use crate::database::{self, Error as DbError};
use crate::updater::{Error, UpdateHandler};
use crate::suboptimal_attestations::get_attestation_performances;
use eth2::types::EthSpec;
use log::{debug, error, warn};
const MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS: u64 = 50;
impl<T: EthSpec> UpdateHandler<T> {
    /// Forward fills the `suboptimal_attestations` table starting from the entry with the highest
    /// slot.
    ///
    /// It constructs a request to the `attestation_performance` API endpoint with:
    /// `start_epoch` -> highest completely filled epoch + 1 (or epoch of lowest canonical slot)
    /// `end_epoch` -> epoch of highest canonical slot
    ///
    /// It will resync the latest epoch if it is not fully filled but will not overwrite existing
    /// values unless there is a re-org.
    /// That is, `if highest_filled_slot % slots_per_epoch != 31`.
    ///
    /// In the event the most recent epoch has no suboptimal attestations, it will attempt to
    /// resync that epoch. The odds of this occurring on mainnet are vanishingly small so it is not
    /// accounted for.
    ///
    /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS`.
    pub async fn fill_suboptimal_attestations(&mut self) -> Result<(), Error> {
        let mut conn = database::get_connection(&self.pool)?;
        // Bail out if attestation syncing is disabled in the config.
        let highest_filled_slot_opt = if self.config.attestations {
            database::get_highest_attestation(&mut conn)?
                .map(|attestation| attestation.epoch_start_slot.as_slot())
        } else {
            return Err(Error::NotEnabled("attestations".to_string()));
        };
        let start_epoch = if let Some(highest_filled_slot) = highest_filled_slot_opt {
            if highest_filled_slot % self.slots_per_epoch == self.slots_per_epoch.saturating_sub(1)
            {
                // The whole epoch is filled so we can begin syncing the next one.
                highest_filled_slot.epoch(self.slots_per_epoch) + 1
            } else {
                // The epoch is only partially synced. Try to sync it fully.
                highest_filled_slot.epoch(self.slots_per_epoch)
            }
        } else {
            // No rows present in the `suboptimal_attestations` table. Use `canonical_slots`
            // instead.
            if let Some(lowest_canonical_slot) = database::get_lowest_canonical_slot(&mut conn)? {
                lowest_canonical_slot
                    .slot
                    .as_slot()
                    .epoch(self.slots_per_epoch)
            } else {
                // There are no slots in the database, do not fill the `suboptimal_attestations`
                // table.
                warn!("Refusing to fill the `suboptimal_attestations` table as there are no slots in the database");
                return Ok(());
            }
        };
        if let Some(highest_canonical_slot) =
            database::get_highest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot())
        {
            let mut end_epoch = highest_canonical_slot.epoch(self.slots_per_epoch);
            // The `lighthouse/analysis/attestation_performance` endpoint can only retrieve attestations
            // which are more than 1 epoch old.
            // We assume that `highest_canonical_slot` is near the head of the chain.
            end_epoch = end_epoch.saturating_sub(2_u64);
            // If end_epoch == 0 then the chain just started so we need to wait until
            // `current_epoch >= 2`.
            if end_epoch == 0 {
                debug!("Chain just begun, refusing to sync attestations");
                return Ok(());
            }
            if start_epoch > end_epoch {
                debug!("Attestations are up to date with the head of the database");
                return Ok(());
            }
            // Ensure the size of the request does not exceed the maximum allowed value.
            if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS) {
                end_epoch = start_epoch + MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS
            }
            if let Some(lowest_canonical_slot) =
                database::get_lowest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot())
            {
                let mut attestations = get_attestation_performances(
                    &self.bn,
                    start_epoch,
                    end_epoch,
                    self.slots_per_epoch,
                )
                .await?;
                // Only insert attestations with corresponding `canonical_slot`s.
                attestations.retain(|attestation| {
                    attestation.epoch_start_slot.as_slot() >= lowest_canonical_slot
                        && attestation.epoch_start_slot.as_slot() <= highest_canonical_slot
                });
                database::insert_batch_suboptimal_attestations(&mut conn, attestations)?;
            } else {
                return Err(Error::Database(DbError::Other(
                    "Database did not return a lowest canonical slot when one exists".to_string(),
                )));
            }
        } else {
            // There are no slots in the `canonical_slots` table, but there are entries in the
            // `suboptimal_attestations` table. This is a critical failure. It usually means
            // someone has manually tampered with the database tables and should not occur during
            // normal operation.
            error!("Database is corrupted. Please re-sync the database");
            return Err(Error::Database(DbError::DatabaseCorrupted));
        }
        Ok(())
    }
    /// Backfill the `suboptimal_attestations` table starting from the entry with the lowest slot.
    ///
    /// It constructs a request to the `attestation_performance` API endpoint with:
    /// `start_epoch` -> epoch of the lowest `canonical_slot`.
    /// `end_epoch` -> epoch of the lowest filled `suboptimal_attestation` - 1 (or epoch of highest
    /// canonical slot)
    ///
    /// It will resync the lowest epoch if it is not fully filled.
    /// That is, `if lowest_filled_slot % slots_per_epoch != 0`
    ///
    /// In the event there are no suboptimal attestations present in the lowest epoch, it will attempt to
    /// resync the epoch. The odds of this occurring on mainnet are vanishingly small so it is not
    /// accounted for.
    ///
    /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS`.
    pub async fn backfill_suboptimal_attestations(&mut self) -> Result<(), Error> {
        let mut conn = database::get_connection(&self.pool)?;
        let max_attestation_backfill = self.config.max_backfill_size_epochs;
        // Get the slot of the lowest entry in the `suboptimal_attestations` table.
        let lowest_filled_slot_opt = if self.config.attestations {
            database::get_lowest_attestation(&mut conn)?
                .map(|attestation| attestation.epoch_start_slot.as_slot())
        } else {
            return Err(Error::NotEnabled("attestations".to_string()));
        };
        let end_epoch = if let Some(lowest_filled_slot) = lowest_filled_slot_opt {
            if lowest_filled_slot % self.slots_per_epoch == 0 {
                // The lowest epoch is fully filled; backfill ends at the epoch before it.
                lowest_filled_slot
                    .epoch(self.slots_per_epoch)
                    .saturating_sub(1_u64)
            } else {
                // The epoch is only partially synced. Try to sync it fully.
                lowest_filled_slot.epoch(self.slots_per_epoch)
            }
        } else {
            // No entries in the `suboptimal_attestations` table. Use `canonical_slots` instead.
            if let Some(highest_canonical_slot) =
                database::get_highest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot())
            {
                // Subtract 2 since `end_epoch` must be less than the current epoch - 1.
                // We assume that `highest_canonical_slot` is near the head of the chain.
                highest_canonical_slot
                    .epoch(self.slots_per_epoch)
                    .saturating_sub(2_u64)
            } else {
                // There are no slots in the database, do not backfill the
                // `suboptimal_attestations` table.
                warn!("Refusing to backfill attestations as there are no slots in the database");
                return Ok(());
            }
        };
        if end_epoch == 0 {
            debug!("Attestations backfill is complete");
            return Ok(());
        }
        if let Some(lowest_canonical_slot) =
            database::get_lowest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot())
        {
            let mut start_epoch = lowest_canonical_slot.epoch(self.slots_per_epoch);
            if start_epoch > end_epoch {
                debug!("Attestations are up to date with the base of the database");
                return Ok(());
            }
            // Ensure the request range does not exceed `max_attestation_backfill` or
            // `MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS`.
            if start_epoch < end_epoch.saturating_sub(max_attestation_backfill) {
                start_epoch = end_epoch.saturating_sub(max_attestation_backfill)
            }
            if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS) {
                start_epoch = end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_ATTESTATIONS)
            }
            if let Some(highest_canonical_slot) =
                database::get_highest_canonical_slot(&mut conn)?.map(|slot| slot.slot.as_slot())
            {
                let mut attestations = get_attestation_performances(
                    &self.bn,
                    start_epoch,
                    end_epoch,
                    self.slots_per_epoch,
                )
                .await?;
                // Only insert `suboptimal_attestations` with corresponding `canonical_slots`.
                attestations.retain(|attestation| {
                    attestation.epoch_start_slot.as_slot() >= lowest_canonical_slot
                        && attestation.epoch_start_slot.as_slot() <= highest_canonical_slot
                });
                database::insert_batch_suboptimal_attestations(&mut conn, attestations)?;
            } else {
                return Err(Error::Database(DbError::Other(
                    "Database did not return a lowest slot when one exists".to_string(),
                )));
            }
        } else {
            // There are no slots in the `canonical_slot` table, but there are entries in the
            // `suboptimal_attestations` table. This is a critical failure. It usually means
            // someone has manually tampered with the database tables and should not occur during
            // normal operation.
            error!("Database is corrupted. Please re-sync the database");
            return Err(Error::Database(DbError::DatabaseCorrupted));
        }
        Ok(())
    }
}

View File

@ -0,0 +1,65 @@
use serde::{Deserialize, Serialize};
pub const BEACON_NODE_URL: &str = "http://127.0.0.1:5052";
/// Serde default for `Config::max_backfill_size_epochs`.
pub const fn max_backfill_size_epochs() -> u64 {
    2
}
/// Serde default for `Config::backfill_stop_epoch`.
pub const fn backfill_stop_epoch() -> u64 {
    0
}
/// Serde default for `Config::attestations`.
pub const fn attestations() -> bool {
    true
}
/// Serde default for `Config::proposer_info`.
pub const fn proposer_info() -> bool {
    true
}
/// Serde default for `Config::block_rewards`.
pub const fn block_rewards() -> bool {
    true
}
/// Serde default for `Config::block_packing`.
pub const fn block_packing() -> bool {
    true
}
/// Serde default for `Config::beacon_node_url`.
fn beacon_node_url() -> String {
    String::from(BEACON_NODE_URL)
}
/// Configuration for the updater; every field has a serde default so any subset
/// may be provided in the config document.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    /// The URL of the beacon you wish to sync from.
    /// Default: `BEACON_NODE_URL`.
    #[serde(default = "beacon_node_url")]
    pub beacon_node_url: String,
    /// The maximum size each backfill iteration will allow per request (in epochs).
    /// Default: 2.
    #[serde(default = "max_backfill_size_epochs")]
    pub max_backfill_size_epochs: u64,
    /// The epoch at which to never backfill past.
    /// Default: 0 (backfill to genesis).
    #[serde(default = "backfill_stop_epoch")]
    pub backfill_stop_epoch: u64,
    /// Whether to sync the suboptimal_attestations table.
    /// Default: true.
    #[serde(default = "attestations")]
    pub attestations: bool,
    /// Whether to sync the proposer_info table.
    /// Default: true.
    #[serde(default = "proposer_info")]
    pub proposer_info: bool,
    /// Whether to sync the block_rewards table.
    /// Default: true.
    #[serde(default = "block_rewards")]
    pub block_rewards: bool,
    /// Whether to sync the block_packing table.
    /// Default: true.
    #[serde(default = "block_packing")]
    pub block_packing: bool,
}
impl Default for Config {
    /// Mirrors the serde `default = "..."` functions so a programmatically-constructed
    /// `Config` matches one deserialized from an empty document.
    fn default() -> Self {
        Self {
            beacon_node_url: beacon_node_url(),
            max_backfill_size_epochs: max_backfill_size_epochs(),
            backfill_stop_epoch: backfill_stop_epoch(),
            attestations: attestations(),
            proposer_info: proposer_info(),
            block_rewards: block_rewards(),
            block_packing: block_packing(),
        }
    }
}

View File

@ -0,0 +1,56 @@
use crate::blockprint::Error as BlockprintError;
use crate::database::Error as DbError;
use beacon_node::beacon_chain::BeaconChainError;
use eth2::{Error as Eth2Error, SensitiveError};
use std::fmt;
/// Top-level error type for the watch updater.
#[derive(Debug)]
pub enum Error {
    /// Error surfaced from the beacon chain library.
    BeaconChain(BeaconChainError),
    /// Error returned by the beacon node HTTP client.
    Eth2(Eth2Error),
    /// Failed to parse a sensitive (credential-bearing) URL.
    SensitiveUrl(SensitiveError),
    /// Database error.
    Database(DbError),
    /// Blockprint client error.
    Blockprint(BlockprintError),
    /// The beacon node did not return a head block header.
    UnableToGetRemoteHead,
    BeaconNodeSyncing,
    /// The named feature is disabled in the config.
    NotEnabled(String),
    NoValidatorsFound,
    BeaconNodeNotCompatible(String),
    /// The config stored in the database conflicts with the connected beacon node.
    InvalidConfig(String),
}
impl fmt::Display for Error {
    /// Reuses the `Debug` representation for user-facing output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(self, f)
    }
}
impl From<BeaconChainError> for Error {
fn from(e: BeaconChainError) -> Self {
Error::BeaconChain(e)
}
}
impl From<Eth2Error> for Error {
fn from(e: Eth2Error) -> Self {
Error::Eth2(e)
}
}
impl From<SensitiveError> for Error {
fn from(e: SensitiveError) -> Self {
Error::SensitiveUrl(e)
}
}
impl From<DbError> for Error {
fn from(e: DbError) -> Self {
Error::Database(e)
}
}
impl From<BlockprintError> for Error {
fn from(e: BlockprintError) -> Self {
Error::Blockprint(e)
}
}

View File

@ -0,0 +1,471 @@
use crate::blockprint::WatchBlockprintClient;
use crate::config::Config as FullConfig;
use crate::database::{self, PgPool, WatchCanonicalSlot, WatchHash, WatchSlot};
use crate::updater::{Config, Error, WatchSpec};
use beacon_node::beacon_chain::BeaconChainError;
use eth2::{
types::{BlockId, SyncingData},
BeaconNodeHttpClient, SensitiveUrl,
};
use log::{debug, error, info, warn};
use std::collections::HashSet;
use std::iter::FromIterator;
use types::{BeaconBlockHeader, EthSpec, Hash256, SignedBeaconBlock, Slot};
use crate::updater::{get_beacon_block, get_header, get_validators};
const MAX_EXPECTED_REORG_LENGTH: u64 = 32;
/// Ensure the existing database is valid for this run.
///
/// The network name and slots-per-epoch stored in the database must match the connected
/// beacon node; a fresh database has the beacon node's config persisted into it.
pub async fn ensure_valid_database<T: EthSpec>(
    spec: &WatchSpec<T>,
    pool: &mut PgPool,
) -> Result<(), Error> {
    let mut conn = database::get_connection(pool)?;
    let bn_slots_per_epoch = spec.slots_per_epoch();
    let bn_config_name = spec.network.clone();
    match database::get_active_config(&mut conn)? {
        Some((db_config_name, db_slots_per_epoch)) => {
            if db_config_name == bn_config_name
                && db_slots_per_epoch == bn_slots_per_epoch as i32
            {
                // Configs match.
                Ok(())
            } else {
                Err(Error::InvalidConfig(
                    "The config stored in the database does not match the beacon node.".to_string(),
                ))
            }
        }
        None => {
            // No config exists in the DB.
            database::insert_active_config(&mut conn, bn_config_name, bn_slots_per_epoch)?;
            Ok(())
        }
    }
}
/// Holds the state required to run the watch updater against a single beacon node.
pub struct UpdateHandler<T: EthSpec> {
    /// Connection pool for the Postgres database.
    pub pool: PgPool,
    /// HTTP client for the beacon node being synced from.
    pub bn: BeaconNodeHttpClient,
    /// Blockprint client; `None` when blockprint is disabled in the config.
    pub blockprint: Option<WatchBlockprintClient>,
    /// The `updater` section of the full config.
    pub config: Config,
    /// Cached from `spec` for convenience.
    pub slots_per_epoch: u64,
    pub spec: WatchSpec<T>,
}
impl<T: EthSpec> UpdateHandler<T> {
    /// Constructs a new `UpdateHandler`: builds the blockprint client (if enabled), the
    /// database connection pool, and validates the stored config against the beacon node.
    pub async fn new(
        bn: BeaconNodeHttpClient,
        spec: WatchSpec<T>,
        config: FullConfig,
    ) -> Result<UpdateHandler<T>, Error> {
        let blockprint = if config.blockprint.enabled {
            if let Some(server) = config.blockprint.url {
                let blockprint_url = SensitiveUrl::parse(&server).map_err(Error::SensitiveUrl)?;
                Some(WatchBlockprintClient {
                    client: reqwest::Client::new(),
                    server: blockprint_url,
                    username: config.blockprint.username,
                    password: config.blockprint.password,
                })
            } else {
                // Blockprint enabled without a URL is a config error rather than a silent no-op.
                return Err(Error::NotEnabled(
                    "blockprint was enabled but url was not set".to_string(),
                ));
            }
        } else {
            None
        };
        let mut pool = database::build_connection_pool(&config.database)?;
        // Refuse to run against a database created for a different network/config.
        ensure_valid_database(&spec, &mut pool).await?;
        Ok(Self {
            pool,
            bn,
            blockprint,
            config: config.updater,
            slots_per_epoch: spec.slots_per_epoch(),
            spec,
        })
    }
/// Gets the syncing status of the connected beacon node.
pub async fn get_bn_syncing_status(&mut self) -> Result<SyncingData, Error> {
Ok(self.bn.get_node_syncing().await?.data)
}
/// Gets a list of block roots from the database which do not yet contain a corresponding
/// entry in the `beacon_blocks` table and inserts them.
pub async fn update_unknown_blocks(&mut self) -> Result<(), Error> {
let mut conn = database::get_connection(&self.pool)?;
let roots = database::get_unknown_canonical_blocks(&mut conn)?;
for root in roots {
let block_opt: Option<SignedBeaconBlock<T>> =
get_beacon_block(&self.bn, BlockId::Root(root.as_hash())).await?;
if let Some(block) = block_opt {
database::insert_beacon_block(&mut conn, block, root)?;
}
}
Ok(())
}
    /// Performs a head update with the following steps:
    /// 1. Pull the latest header from the beacon node and the latest canonical slot from the
    /// database.
    /// 2. Loop back through the beacon node and database to find the first matching slot -> root
    /// pair.
    /// 3. Go back `MAX_EXPECTED_REORG_LENGTH` slots through the database ensuring it is
    /// consistent with the beacon node. If a re-org occurs beyond this range, we cannot recover.
    /// 4. Remove any invalid slots from the database.
    /// 5. Sync all blocks between the first valid block of the database and the head of the beacon
    /// chain.
    ///
    /// In the event there are no slots present in the database, it will sync from the head block
    /// back to the first slot of the epoch.
    /// This will ensure backfills are always done in full epochs (which helps keep certain syncing
    /// tasks efficient).
    pub async fn perform_head_update(&mut self) -> Result<(), Error> {
        let mut conn = database::get_connection(&self.pool)?;
        // Load the head from the beacon node.
        let bn_header = get_header(&self.bn, BlockId::Head)
            .await?
            .ok_or(Error::UnableToGetRemoteHead)?;
        let header_root = bn_header.canonical_root();
        if let Some(latest_matching_canonical_slot) =
            self.get_first_matching_block(bn_header.clone()).await?
        {
            // Check for reorgs.
            let latest_db_slot = self.check_for_reorg(latest_matching_canonical_slot).await?;
            // Remove all slots above `latest_db_slot` from the database.
            let result = database::delete_canonical_slots_above(
                &mut conn,
                WatchSlot::from_slot(latest_db_slot),
            )?;
            info!("{result} old records removed during head update");
            if result > 0 {
                // If slots were removed, we need to resync the suboptimal_attestations table for
                // the epoch since they will have changed and cannot be fixed by a simple update.
                let epoch = latest_db_slot
                    .epoch(self.slots_per_epoch)
                    .saturating_sub(1_u64);
                debug!("Preparing to resync attestations above epoch {epoch}");
                database::delete_suboptimal_attestations_above(
                    &mut conn,
                    WatchSlot::from_slot(epoch.start_slot(self.slots_per_epoch)),
                )?;
            }
            // Since we are syncing backwards, `start_slot > `end_slot`.
            let start_slot = bn_header.slot;
            let end_slot = latest_db_slot + 1;
            self.reverse_fill_canonical_slots(bn_header, header_root, false, start_slot, end_slot)
                .await?;
            info!("Reverse sync begun at slot {start_slot} and stopped at slot {end_slot}");
            // Attempt to sync new blocks with blockprint.
            //self.sync_blockprint_until(start_slot).await?;
        } else {
            // There are no matching parent blocks. Sync from the head block back until the first
            // block of the epoch.
            let start_slot = bn_header.slot;
            let end_slot = start_slot.saturating_sub(start_slot % self.slots_per_epoch);
            self.reverse_fill_canonical_slots(bn_header, header_root, false, start_slot, end_slot)
                .await?;
            info!("Reverse sync begun at slot {start_slot} and stopped at slot {end_slot}");
        }
        Ok(())
    }
    /// Attempt to find a row in the `canonical_slots` table which matches the `canonical_root` of
    /// the block header as reported by the beacon node.
    ///
    /// Any blocks above this value are not canonical according to the beacon node.
    ///
    /// Note: In the event that there are skip slots above the slot returned by the function,
    /// they will not be returned, so may be pruned or re-synced by other code despite being
    /// canonical.
    pub async fn get_first_matching_block(
        &mut self,
        mut bn_header: BeaconBlockHeader,
    ) -> Result<Option<WatchCanonicalSlot>, Error> {
        let mut conn = database::get_connection(&self.pool)?;
        // Load latest non-skipped canonical slot from database.
        if let Some(db_canonical_slot) =
            database::get_highest_non_skipped_canonical_slot(&mut conn)?
        {
            // Check if the header or parent root matches the entry in the database.
            if bn_header.parent_root == db_canonical_slot.root.as_hash()
                || bn_header.canonical_root() == db_canonical_slot.root.as_hash()
            {
                Ok(Some(db_canonical_slot))
            } else {
                // Header is not the child of the highest entry in the database.
                // From here we need to iterate backwards through the database until we find
                // a slot -> root pair that matches the beacon node.
                loop {
                    // Store working `parent_root`.
                    let parent_root = bn_header.parent_root;
                    // Try the next header.
                    let next_header = get_header(&self.bn, BlockId::Root(parent_root)).await?;
                    if let Some(header) = next_header {
                        // Walk one block further back on the beacon node side.
                        bn_header = header.clone();
                        if let Some(db_canonical_slot) = database::get_canonical_slot_by_root(
                            &mut conn,
                            WatchHash::from_hash(header.parent_root),
                        )? {
                            // Check if the entry in the database matches the parent of
                            // the header.
                            if header.parent_root == db_canonical_slot.root.as_hash() {
                                return Ok(Some(db_canonical_slot));
                            } else {
                                // Move on to the next header.
                                continue;
                            }
                        } else {
                            // Database does not have the referenced root. Try the next header.
                            continue;
                        }
                    } else {
                        // If we get this error it means that the `parent_root` of the header
                        // did not reference a canonical block.
                        return Err(Error::BeaconChain(BeaconChainError::MissingBeaconBlock(
                            parent_root,
                        )));
                    }
                }
            }
        } else {
            // There are no non-skipped blocks present in the database.
            Ok(None)
        }
    }
/// Given the latest slot in the database which matches a root in the beacon node,
/// traverse back through the database for `MAX_EXPECTED_REORG_LENGTH` slots to ensure the tip
/// of the database is consistent with the beacon node (in the case that reorgs have occurred).
///
/// Returns the slot before the oldest canonical_slot which has an invalid child.
pub async fn check_for_reorg(
    &mut self,
    latest_canonical_slot: WatchCanonicalSlot,
) -> Result<Slot, Error> {
    let mut conn = database::get_connection(&self.pool)?;

    let end_slot = latest_canonical_slot.slot.as_u64();
    let start_slot = end_slot.saturating_sub(MAX_EXPECTED_REORG_LENGTH);

    // Scan forwards from the oldest slot in the window so that the first divergence found
    // is the oldest one, yielding the earliest safe resync point.
    for i in start_slot..end_slot {
        let slot = Slot::new(i);
        let db_canonical_slot_opt =
            database::get_canonical_slot(&mut conn, WatchSlot::from_slot(slot))?;
        if let Some(db_canonical_slot) = db_canonical_slot_opt {
            let header_opt = get_header(&self.bn, BlockId::Slot(slot)).await?;
            if let Some(header) = header_opt {
                if header.canonical_root() == db_canonical_slot.root.as_hash() {
                    // The roots match (or are both skip slots).
                    continue;
                } else {
                    // The block roots do not match. We need to re-sync from here.
                    warn!("Block {slot} does not match the beacon node. Resyncing");
                    return Ok(slot.saturating_sub(1_u64));
                }
            } else if !db_canonical_slot.skipped {
                // The block exists in the database, but does not exist on the beacon node.
                // We need to re-sync from here.
                warn!("Block {slot} does not exist on the beacon node. Resyncing");
                return Ok(slot.saturating_sub(1_u64));
            }
            // Otherwise the database recorded a skip slot and the beacon node also has no
            // header for it — consistent, so keep scanning.
        } else {
            // This slot does not exist in the database.
            let lowest_slot = database::get_lowest_canonical_slot(&mut conn)?
                .map(|canonical_slot| canonical_slot.slot.as_slot());
            // NOTE: `None` compares less than `Some(_)`, so an entirely empty table falls
            // through to the resync branch below.
            if lowest_slot > Some(slot) {
                // The database has not back-filled this slot yet, so skip it.
                continue;
            } else {
                // The database does not contain this block, but has back-filled past it.
                // We need to resync from here.
                warn!("Slot {slot} missing from database. Resyncing");
                return Ok(slot.saturating_sub(1_u64));
            }
        }
    }

    // The database is consistent with the beacon node, so return the head of the database.
    Ok(latest_canonical_slot.slot.as_slot())
}
/// Fills the canonical slots table beginning from `start_slot` and ending at `end_slot`.
/// It fills in reverse order, that is, `start_slot` is higher than `end_slot`.
///
/// Skip slots set `root` to the root of the previous non-skipped slot and also sets
/// `skipped == true`.
///
/// Since it uses `insert_canonical_slot` to interact with the database, it WILL NOT overwrite
/// existing rows. This means that any part of the chain within `end_slot..=start_slot` that
/// needs to be resynced, must first be deleted from the database.
///
/// `header`, `header_root` and `skipped` describe the block at `start_slot`; the loop
/// mutates them as it descends. Returns the number of rows inserted.
pub async fn reverse_fill_canonical_slots(
    &mut self,
    mut header: BeaconBlockHeader,
    mut header_root: Hash256,
    mut skipped: bool,
    start_slot: Slot,
    end_slot: Slot,
) -> Result<usize, Error> {
    let mut count = 0;

    let mut conn = database::get_connection(&self.pool)?;

    // Iterate, descending from `start_slot` (higher) to `end_slot` (lower).
    for slot in (end_slot.as_u64()..=start_slot.as_u64()).rev() {
        // Insert the row describing the current slot before fetching the next header.
        database::insert_canonical_slot(
            &mut conn,
            WatchCanonicalSlot {
                slot: WatchSlot::new(slot),
                root: WatchHash::from_hash(header_root),
                skipped,
                beacon_block: None,
            },
        )?;
        count += 1;

        // Load the next (lower) header:
        // We must use BlockId::Slot since we want to include skip slots.
        header = if let Some(new_header) = get_header(
            &self.bn,
            BlockId::Slot(Slot::new(slot.saturating_sub(1_u64))),
        )
        .await?
        {
            header_root = new_header.canonical_root();
            skipped = false;
            new_header
        } else {
            // Stop at genesis: there is no block below slot 0.
            if header.slot == 0 {
                info!("Reverse fill exhausted at slot 0");
                break;
            }
            // Slot was skipped, so use the parent_root (most recent non-skipped block)
            // and carry the previous header forward unchanged.
            skipped = true;
            header_root = header.parent_root;
            header
        };
    }
    Ok(count)
}
/// Backfills the `canonical_slots` table starting from the lowest non-skipped slot and
/// stopping after `max_backfill_size_epochs` epochs (or at `backfill_stop_epoch`,
/// whichever comes first).
///
/// Does nothing if the table is empty (forward sync must happen first) or if backfill has
/// already reached the stop slot.
pub async fn backfill_canonical_slots(&mut self) -> Result<(), Error> {
    let mut conn = database::get_connection(&self.pool)?;
    let backfill_stop_slot = self.config.backfill_stop_epoch * self.slots_per_epoch;

    // Check to see if we have finished backfilling.
    if let Some(lowest_slot) = database::get_lowest_canonical_slot(&mut conn)? {
        if lowest_slot.slot.as_slot() == backfill_stop_slot {
            debug!("Backfill sync complete, all slots filled");
            return Ok(());
        }
    }

    let backfill_slot_count = self.config.max_backfill_size_epochs * self.slots_per_epoch;

    if let Some(lowest_non_skipped_canonical_slot) =
        database::get_lowest_non_skipped_canonical_slot(&mut conn)?
    {
        // Set `start_slot` equal to the lowest non-skipped slot in the database.
        // While this will attempt to resync some parts of the bottom of the chain, it reduces
        // complexity when dealing with skip slots.
        let start_slot = lowest_non_skipped_canonical_slot.slot.as_slot();
        // Derive `end_slot` from `start_slot` directly rather than re-reading the same
        // value out of the database row.
        let mut end_slot = start_slot.saturating_sub(backfill_slot_count);

        // Ensure end_slot doesn't go below `backfill_stop_epoch`.
        if end_slot <= backfill_stop_slot {
            end_slot = Slot::new(backfill_stop_slot);
        }

        let header_opt = get_header(&self.bn, BlockId::Slot(start_slot)).await?;

        if let Some(header) = header_opt {
            let header_root = header.canonical_root();
            let count = self
                .reverse_fill_canonical_slots(header, header_root, false, start_slot, end_slot)
                .await?;
            info!("Backfill completed to slot: {end_slot}, records added: {count}");
        } else {
            // The lowest slot of the database is inconsistent with the beacon node.
            // Currently we have no way to recover from this. The entire database will need to
            // be re-synced.
            error!(
                "Database is inconsistent with the beacon node. \
                Please ensure your beacon node is set to the right network, \
                otherwise you may need to resync"
            );
        }
    } else {
        // There are no blocks in the database. Forward sync needs to happen first.
        info!("Backfill was not performed since there are no blocks in the database");
    }

    Ok(())
}
// Attempt to update the validator set.
// This downloads the latest validator set from the beacon node, and pulls the known validator
// set from the database.
// We then take any new or updated validators and insert them into the database (overwriting
// exiting validators).
//
// In the event there are no validators in the database, it will initialize the validator set.
pub async fn update_validator_set(&mut self) -> Result<(), Error> {
let mut conn = database::get_connection(&self.pool)?;
let current_validators = database::get_all_validators(&mut conn)?;
if !current_validators.is_empty() {
let old_validators = HashSet::from_iter(current_validators);
// Pull the new validator set from the beacon node.
let new_validators = get_validators(&self.bn).await?;
// The difference should only contain validators that contain either a new `exit_epoch` (implying an
// exit) or a new `index` (implying a validator activation).
let val_diff = new_validators.difference(&old_validators);
for diff in val_diff {
database::insert_validator(&mut conn, diff.clone())?;
}
} else {
info!("No validators present in database. Initializing the validator set");
self.initialize_validator_set().await?;
}
Ok(())
}
/// Initialize the validator set by downloading it from the beacon node and writing it to
/// the database in a single batch.
pub async fn initialize_validator_set(&mut self) -> Result<(), Error> {
    let mut conn = database::get_connection(&self.pool)?;

    // Pull all validators from the beacon node and collect them for the batch insert.
    let validators: Vec<_> = get_validators(&self.bn).await?.into_iter().collect();
    database::insert_batch_validators(&mut conn, validators)?;

    Ok(())
}
}

234
watch/src/updater/mod.rs Normal file
View File

@ -0,0 +1,234 @@
use crate::config::Config as FullConfig;
use crate::database::{WatchPK, WatchValidator};
use eth2::{
types::{BlockId, StateId},
BeaconNodeHttpClient, SensitiveUrl, Timeouts,
};
use log::{debug, error, info};
use std::collections::{HashMap, HashSet};
use std::marker::PhantomData;
use std::time::{Duration, Instant};
use types::{BeaconBlockHeader, EthSpec, GnosisEthSpec, MainnetEthSpec, SignedBeaconBlock};
pub use config::Config;
pub use error::Error;
pub use handler::UpdateHandler;
mod config;
pub mod error;
pub mod handler;
/// Sentinel epoch value meaning "not yet set"; used below to map unset
/// activation/exit epochs to `None`.
const FAR_FUTURE_EPOCH: u64 = u64::MAX;
/// Timeout applied to all beacon node HTTP requests.
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5);
/// `PRESET_BASE` value identifying mainnet-preset networks.
const MAINNET: &str = "mainnet";
/// `PRESET_BASE` value identifying gnosis-preset networks.
const GNOSIS: &str = "gnosis";
/// Ties a network name to the `EthSpec` preset the updater runs with.
pub struct WatchSpec<T: EthSpec> {
    // Network name, e.g. as reported by the beacon node's `CONFIG_NAME`.
    network: String,
    // Zero-sized marker carrying the preset type parameter.
    spec: PhantomData<T>,
}
impl<T: EthSpec> WatchSpec<T> {
    /// Number of slots per epoch for the underlying preset `T`.
    fn slots_per_epoch(&self) -> u64 {
        T::slots_per_epoch()
    }
}
impl WatchSpec<MainnetEthSpec> {
    /// Constructs a spec for networks using the mainnet preset.
    pub fn mainnet(network: String) -> Self {
        Self {
            network,
            spec: PhantomData,
        }
    }
}
impl WatchSpec<GnosisEthSpec> {
    /// Constructs a spec for networks using the gnosis preset.
    ///
    /// Made `pub` for consistency with `WatchSpec::mainnet`.
    pub fn gnosis(network: String) -> Self {
        Self {
            network,
            spec: PhantomData,
        }
    }
}
pub async fn run_updater(config: FullConfig) -> Result<(), Error> {
let beacon_node_url =
SensitiveUrl::parse(&config.updater.beacon_node_url).map_err(Error::SensitiveUrl)?;
let bn = BeaconNodeHttpClient::new(beacon_node_url, Timeouts::set_all(DEFAULT_TIMEOUT));
let config_map = bn.get_config_spec::<HashMap<String, String>>().await?.data;
let config_name = config_map
.get("CONFIG_NAME")
.ok_or_else(|| {
Error::BeaconNodeNotCompatible("No field CONFIG_NAME on beacon node spec".to_string())
})?
.clone();
match config_map
.get("PRESET_BASE")
.ok_or_else(|| {
Error::BeaconNodeNotCompatible("No field PRESET_BASE on beacon node spec".to_string())
})?
.to_lowercase()
.as_str()
{
MAINNET => {
let spec = WatchSpec::mainnet(config_name);
run_once(bn, spec, config).await
}
GNOSIS => {
let spec = WatchSpec::gnosis(config_name);
run_once(bn, spec, config).await
}
_ => unimplemented!("unsupported PRESET_BASE"),
}
}
/// Runs a single pass of all enabled updater tasks against the connected beacon node.
///
/// Tasks run sequentially: head update, block backfill, validator set update, then the
/// unknown-blocks update (which requires `proposer_index` to already exist in the
/// `validators` table), followed by the optional modules (attestations, block rewards,
/// block packing, blockprint), each gated by its config flag.
///
/// Returns `Error::BeaconNodeSyncing` without performing any updates if the beacon node
/// reports that it is still syncing.
pub async fn run_once<T: EthSpec>(
    bn: BeaconNodeHttpClient,
    spec: WatchSpec<T>,
    config: FullConfig,
) -> Result<(), Error> {
    let mut watch = UpdateHandler::new(bn, spec, config.clone()).await?;

    // Refuse to run against a syncing node: its head data would be incomplete.
    let sync_data = watch.get_bn_syncing_status().await?;
    if sync_data.is_syncing {
        error!(
            "Connected beacon node is still syncing: head_slot => {:?}, distance => {}",
            sync_data.head_slot, sync_data.sync_distance
        );
        return Err(Error::BeaconNodeSyncing);
    }

    info!("Performing head update");
    let head_timer = Instant::now();
    watch.perform_head_update().await?;
    let head_timer_elapsed = head_timer.elapsed();
    debug!("Head update complete, time taken: {head_timer_elapsed:?}");

    info!("Performing block backfill");
    let block_backfill_timer = Instant::now();
    watch.backfill_canonical_slots().await?;
    let block_backfill_timer_elapsed = block_backfill_timer.elapsed();
    debug!("Block backfill complete, time taken: {block_backfill_timer_elapsed:?}");

    info!("Updating validator set");
    let validator_timer = Instant::now();
    watch.update_validator_set().await?;
    let validator_timer_elapsed = validator_timer.elapsed();
    debug!("Validator update complete, time taken: {validator_timer_elapsed:?}");

    // Update blocks after updating the validator set since the `proposer_index` must exist in the
    // `validators` table.
    info!("Updating unknown blocks");
    let unknown_block_timer = Instant::now();
    watch.update_unknown_blocks().await?;
    let unknown_block_timer_elapsed = unknown_block_timer.elapsed();
    debug!("Unknown block update complete, time taken: {unknown_block_timer_elapsed:?}");

    // Run additional modules, each enabled independently by config.
    if config.updater.attestations {
        info!("Updating suboptimal attestations");
        let attestation_timer = Instant::now();
        watch.fill_suboptimal_attestations().await?;
        watch.backfill_suboptimal_attestations().await?;
        let attestation_timer_elapsed = attestation_timer.elapsed();
        debug!("Attestation update complete, time taken: {attestation_timer_elapsed:?}");
    }

    if config.updater.block_rewards {
        info!("Updating block rewards");
        let rewards_timer = Instant::now();
        watch.fill_block_rewards().await?;
        watch.backfill_block_rewards().await?;
        let rewards_timer_elapsed = rewards_timer.elapsed();
        debug!("Block Rewards update complete, time taken: {rewards_timer_elapsed:?}");
    }

    if config.updater.block_packing {
        info!("Updating block packing statistics");
        let packing_timer = Instant::now();
        watch.fill_block_packing().await?;
        watch.backfill_block_packing().await?;
        let packing_timer_elapsed = packing_timer.elapsed();
        debug!("Block packing update complete, time taken: {packing_timer_elapsed:?}");
    }

    if config.blockprint.enabled {
        info!("Updating blockprint");
        let blockprint_timer = Instant::now();
        watch.fill_blockprint().await?;
        watch.backfill_blockprint().await?;
        let blockprint_timer_elapsed = blockprint_timer.elapsed();
        debug!("Blockprint update complete, time taken: {blockprint_timer_elapsed:?}");
    }

    Ok(())
}
/// Queries the beacon node for a given `BlockId` and returns the `BeaconBlockHeader` if it
/// exists.
pub async fn get_header(
    bn: &BeaconNodeHttpClient,
    block_id: BlockId,
) -> Result<Option<BeaconBlockHeader>, Error> {
    let resp = bn
        .get_beacon_headers_block_id(block_id)
        .await?
        .map(|resp| (resp.data.root, resp.data.header.message));

    // When querying with root == 0x000..., slot 0 is returned with
    // parent_root == 0x0000..., which would let callers walking parent roots loop forever.
    // Treat such a self-referential header as "not found".
    match resp {
        Some((root, header)) if root == header.parent_root => Ok(None),
        Some((_, header)) => Ok(Some(header)),
        None => Ok(None),
    }
}
/// Queries the beacon node for a given `BlockId` and returns the `SignedBeaconBlock` if it
/// exists.
pub async fn get_beacon_block<T: EthSpec>(
    bn: &BeaconNodeHttpClient,
    block_id: BlockId,
) -> Result<Option<SignedBeaconBlock<T>>, Error> {
    Ok(bn.get_beacon_blocks(block_id).await?.map(|resp| resp.data))
}
/// Queries the beacon node for the current validator set.
pub async fn get_validators(bn: &BeaconNodeHttpClient) -> Result<HashSet<WatchValidator>, Error> {
    // Map an epoch to its database representation: `None` when it is the
    // `FAR_FUTURE_EPOCH` sentinel (i.e. not yet set), otherwise the epoch as `i32`.
    let epoch_to_db = |epoch: u64| (epoch != FAR_FUTURE_EPOCH).then(|| epoch as i32);

    let validators = bn
        .get_beacon_states_validators(StateId::Head, None, None)
        .await?
        .ok_or(Error::NoValidatorsFound)?
        .data;

    let mut validator_map = HashSet::new();
    for val in validators {
        validator_map.insert(WatchValidator {
            index: val.index as i32,
            public_key: WatchPK::from_pubkey(val.validator.pubkey),
            status: val.status.to_string(),
            activation_epoch: epoch_to_db(val.validator.activation_epoch.as_u64()),
            exit_epoch: epoch_to_db(val.validator.exit_epoch.as_u64()),
        });
    }
    Ok(validator_map)
}

1254
watch/tests/tests.rs Normal file

File diff suppressed because it is too large Load Diff