Skip to content

Commit

Permalink
feat: added collector to collect users statements metrics
Browse files Browse the repository at this point in the history
  • Loading branch information
paulobressan committed Dec 10, 2023
1 parent b067e54 commit f9c572c
Show file tree
Hide file tree
Showing 7 changed files with 166 additions and 61 deletions.
3 changes: 0 additions & 3 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -24,16 +24,13 @@ prometheus = "0.13.3"
actix-web = "4.4.0"

[[bin]]
doc = false
name = "controller"
path = "src/main.rs"

[[bin]]
doc = false
name = "crdgen"
path = "src/crdgen.rs"

[lib]
name = "controller"
path = "src/lib.rs"

24 changes: 8 additions & 16 deletions src/controller.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
use crate::{postgres::Postgres, Config, Error, Metrics, Result};
use futures::StreamExt;
use kube::{
api::{Patch, PatchParams},
Expand All @@ -17,6 +16,8 @@ use serde_json::json;
use std::{sync::Arc, time::Duration};
use tracing::error;

use crate::{postgres::Postgres, Config, Error, Metrics, State};

pub static DB_SYNC_PORT_FINALIZER: &str = "dbsyncports.demeter.run";

struct Context {
Expand All @@ -33,15 +34,6 @@ impl Context {
}
}
}
// NOTE(review): this is the pre-refactor `State` that lived in controller.rs;
// this commit removes it in favor of the extended `State` in lib.rs.
#[derive(Clone, Default)]
pub struct State {
    // Prometheus registry that controller metrics are registered against.
    registry: prometheus::Registry,
}
impl State {
    /// Gathers every metric family currently in the registry
    /// (used to serve the /metrics endpoint).
    pub fn metrics(&self) -> Vec<prometheus::proto::MetricFamily> {
        self.registry.gather()
    }
}

#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema)]
pub enum Network {
Expand Down Expand Up @@ -72,7 +64,7 @@ impl DbSyncPort {
.unwrap_or(false)
}

async fn reconcile(&self, ctx: Arc<Context>, pg: &mut Postgres) -> Result<Action> {
async fn reconcile(&self, ctx: Arc<Context>, pg: &mut Postgres) -> Result<Action, Error> {
let client = ctx.client.clone();
let ns = self.namespace().unwrap();
let name = self.name_any();
Expand Down Expand Up @@ -107,15 +99,15 @@ impl DbSyncPort {
Ok(Action::requeue(Duration::from_secs(5 * 60)))
}

async fn cleanup(&self, ctx: Arc<Context>, pg: &mut Postgres) -> Result<Action> {
/// Finalizer path: deactivate the port's database user and record the event.
///
/// NOTE(review): `self.status.as_ref().unwrap()` panics if cleanup runs on a
/// resource that never got a status (i.e. was never successfully reconciled)
/// — confirm the finalizer cannot fire before the first reconcile, or handle
/// the `None` case explicitly.
async fn cleanup(&self, ctx: Arc<Context>, pg: &mut Postgres) -> Result<Action, Error> {
    let username = self.status.as_ref().unwrap().username.clone();
    pg.user_disable(&username).await?;
    ctx.metrics.count_user_deactivated(&username);
    // Nothing left to do periodically; wait for the next change event.
    Ok(Action::await_change())
}
}

async fn reconcile(crd: Arc<DbSyncPort>, ctx: Arc<Context>) -> Result<Action> {
async fn reconcile(crd: Arc<DbSyncPort>, ctx: Arc<Context>) -> Result<Action, Error> {
let url = match crd.spec.network {
Network::Mainnet => &ctx.config.db_url_mainnet,
Network::Preprod => &ctx.config.db_url_preprod,
Expand Down Expand Up @@ -143,11 +135,11 @@ fn error_policy(crd: Arc<DbSyncPort>, err: &Error, ctx: Arc<Context>) -> Action
Action::requeue(Duration::from_secs(5))
}

pub async fn run(state: State, config: Config) -> Result<(), Error> {
pub async fn run(state: Arc<State>, config: Config) -> Result<(), Error> {
let client = Client::try_default().await?;
let crds = Api::<DbSyncPort>::all(client.clone());
let metrics = Metrics::default().register(&state.registry).unwrap();
let ctx = Context::new(client, metrics, config);

let ctx = Context::new(client, state.metrics.clone(), config);

Controller::new(crds, WatcherConfig::default().any_semantic())
.shutdown_on_signal()
Expand Down
1 change: 1 addition & 0 deletions src/crdgen.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
use ext_cardano_dbsync::controller;
use kube::CustomResourceExt;

fn main() {
Expand Down
82 changes: 64 additions & 18 deletions src/lib.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
use std::time::Duration;

use prometheus::Registry;
use thiserror::Error;

#[derive(Error, Debug)]
Expand All @@ -10,51 +13,94 @@ pub enum Error {

#[error("Finalizer Error: {0}")]
FinalizerError(#[source] Box<kube::runtime::finalizer::Error<Error>>),

#[error("Env Error: {0}")]
EnvError(#[source] std::env::VarError),

#[error("Prometheus Error: {0}")]
PrometheusError(#[source] prometheus::Error),

#[error("Parse Int Error: {0}")]
ParseIntError(#[source] std::num::ParseIntError),
}

impl Error {
    /// Label used when recording this error in Prometheus metrics:
    /// the variant's `Debug` rendering, lowercased.
    pub fn metric_label(&self) -> String {
        let debug_repr = format!("{:?}", self);
        debug_repr.to_lowercase()
    }
}

pub type Result<T, E = Error> = std::result::Result<T, E>;

// Conversions from underlying library errors into the crate-wide `Error`,
// so call sites can use the `?` operator across postgres, kube, env and
// integer-parsing calls.
//
// NOTE(review): since the enum already derives `thiserror::Error`, these
// hand-written impls could be replaced by `#[from]` attributes on the
// corresponding variants.
impl From<tokio_postgres::Error> for Error {
    fn from(value: tokio_postgres::Error) -> Self {
        Error::PgError(value)
    }
}

impl From<kube::Error> for Error {
    fn from(value: kube::Error) -> Self {
        Error::KubeError(value)
    }
}
impl From<std::env::VarError> for Error {
    fn from(value: std::env::VarError) -> Self {
        Error::EnvError(value)
    }
}
impl From<prometheus::Error> for Error {
    fn from(value: prometheus::Error) -> Self {
        Error::PrometheusError(value)
    }
}
impl From<std::num::ParseIntError> for Error {
    fn from(value: std::num::ParseIntError) -> Self {
        Error::ParseIntError(value)
    }
}

// Shared application state: the Prometheus registry plus the metric handles
// recorded by the controller and the metrics collector.
//
// NOTE(review): `State::default()` (from the derive) builds `metrics` that are
// NOT registered on `registry`, so anything recorded through a defaulted State
// never shows up at /metrics. Prefer `State::new()` everywhere, or drop the
// `Default` derive to remove the foot-gun.
#[derive(Clone, Default)]
pub struct State {
    registry: Registry,
    pub metrics: Metrics,
}
impl State {
    /// Builds a `State` whose `metrics` are registered on its own `registry`.
    pub fn new() -> Self {
        let registry = Registry::default();
        // unwrap: registration on a freshly-created registry is assumed not to
        // fail (no duplicate collectors possible here) — TODO confirm.
        let metrics = Metrics::default().register(&registry).unwrap();
        Self { registry, metrics }
    }

    /// Gathers all registered metric families for text exposition at /metrics.
    pub fn metrics_collected(&self) -> Vec<prometheus::proto::MetricFamily> {
        self.registry.gather()
    }
}

// Runtime configuration, loaded from environment variables (see `try_new`).
#[derive(Clone)]
pub struct Config {
    // Connection URLs for the per-network dbsync Postgres databases.
    pub db_url_mainnet: String,
    pub db_url_preprod: String,
    pub db_url_preview: String,
    // Pause between metrics-collection passes in `run_metrics_collector`.
    pub metrics_delay: Duration,
    // Presumably the base rate for DCU accounting — consumption is not yet
    // implemented (see the `TODO: calculate dcu` in metrics.rs); confirm units.
    pub dcu_base: u64,
}
impl Config {
pub fn new() -> Self {
Self {
db_url_mainnet: std::env::var("DB_URL_MAINNET").expect("DB_URL_MAINNET must be set"),
db_url_preprod: std::env::var("DB_URL_PREPROD").expect("DB_URL_PREPROD must be set"),
db_url_preview: std::env::var("DB_URL_PREVIEW").expect("DB_URL_PREVIEW must be set"),
}
}
}
impl Default for Config {
fn default() -> Self {
Self::new()
/// Loads configuration from the environment.
///
/// Required variables: `DB_URL_MAINNET`, `DB_URL_PREPROD`, `DB_URL_PREVIEW`,
/// `METRICS_DELAY` (whole seconds), `DCU_BASE` (integer).
///
/// Returns `Error::EnvError` for a missing variable and
/// `Error::ParseIntError` for a non-numeric value.
pub fn try_new() -> Result<Self, Error> {
    Ok(Self {
        db_url_mainnet: std::env::var("DB_URL_MAINNET")?,
        db_url_preprod: std::env::var("DB_URL_PREPROD")?,
        db_url_preview: std::env::var("DB_URL_PREVIEW")?,
        metrics_delay: Duration::from_secs(std::env::var("METRICS_DELAY")?.parse()?),
        dcu_base: std::env::var("DCU_BASE")?.parse()?,
    })
}
}

pub mod controller;
pub mod metrics;
pub mod postgres;
pub use crate::controller::*;

mod metrics;
pub use metrics::Metrics;
pub use controller::*;
pub use metrics::*;
24 changes: 15 additions & 9 deletions src/main.rs
Original file line number Diff line number Diff line change
@@ -1,15 +1,16 @@
use std::io;

use actix_web::{
get, middleware, web::Data, App, HttpRequest, HttpResponse, HttpServer, Responder,
};
use controller::{Config, State};
use dotenv::dotenv;
use prometheus::{Encoder, TextEncoder};
use std::{io, sync::Arc};
use tracing::error;

use ext_cardano_dbsync::{controller, metrics as metrics_collector, Config, State};

#[get("/metrics")]
async fn metrics(c: Data<State>, _req: HttpRequest) -> impl Responder {
let metrics = c.metrics();
async fn metrics(c: Data<Arc<State>>, _req: HttpRequest) -> impl Responder {
let metrics = c.metrics_collected();
let encoder = TextEncoder::new();
let mut buffer = vec![];
encoder.encode(&metrics, &mut buffer).unwrap();
Expand All @@ -25,10 +26,11 @@ async fn health(_: HttpRequest) -> impl Responder {
async fn main() -> io::Result<()> {
dotenv().ok();

let state = State::default();
let config = Config::default();
let state = Arc::new(State::new());
let config = Config::try_new().unwrap();

let controller = controller::run(state.clone(), config);
let controller = controller::run(state.clone(), config.clone());
let metrics_collector = metrics_collector::run_metrics_collector(state.clone(), config.clone());

let addr = std::env::var("ADDR").unwrap_or("0.0.0.0:8080".into());

Expand All @@ -41,7 +43,11 @@ async fn main() -> io::Result<()> {
})
.bind(addr)?;

tokio::join!(controller, server.run()).1?;
let result = tokio::join!(controller, metrics_collector, server.run()).1;
if let Err(err) = result {
error!("{err}");
std::process::exit(1)
}

Ok(())
}
44 changes: 43 additions & 1 deletion src/metrics.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,12 @@
use crate::{DbSyncPort, Error};
use kube::ResourceExt;
use prometheus::{opts, IntCounterVec, Registry};
use std::{sync::Arc, thread::sleep};
use tracing::error;

use crate::{
postgres::Postgres,
Config, DbSyncPort, Error, State,
};

#[derive(Clone)]
pub struct Metrics {
Expand Down Expand Up @@ -68,3 +74,39 @@ impl Metrics {
self.users_deactivated.with_label_values(&[username]).inc();
}
}

pub async fn run_metrics_collector(state: Arc<State>, config: Config) -> Result<(), Error> {
let db_urls = &vec![
config.db_url_mainnet,
config.db_url_preprod,
config.db_url_preview,
];

loop {
for url in db_urls {
let postgres_result = Postgres::new(url).await;
if let Err(err) = postgres_result {
error!("Error to connect postgres: {err}");
continue;
}
let postgres = postgres_result.unwrap();

let user_statements_result = postgres.user_metrics().await;
if let Err(err) = user_statements_result {
error!("Error get user statements: {err}");
continue;
}

let user_statements = user_statements_result.unwrap();
if user_statements.is_none() {
continue;
}

let user_statements = user_statements.unwrap();

// TODO: calculate dcu
}

sleep(config.metrics_delay)
}
}
49 changes: 35 additions & 14 deletions src/postgres.rs
Original file line number Diff line number Diff line change
@@ -1,19 +1,7 @@
use tokio_postgres::{Client, NoTls};
use tokio_postgres::{Client, NoTls, Row};

use crate::Error;

// const QUERY_GET_METRICS: &str = "
// SELECT
// usename,
// SUM(calls) AS total_queries,
// SUM(total_exec_time) AS total_exec_time
// FROM
// pg_stat_statements
// inner join
// pg_catalog.pg_user on pg_catalog.pg_user.usesysid = userid
// GROUP BY
// usename;";

// Wrapper owning a tokio-postgres `Client` for one dbsync database.
pub struct Postgres {
    client: Client,
}
Expand Down Expand Up @@ -97,6 +85,39 @@ impl Postgres {

Ok(result.is_some())
}

pub async fn user_metrics(&self) -> Result<Option<Vec<UserStatements>>, Error> {
let query_metrics = "SELECT
usename,
SUM(calls) AS total_queries,
SUM(total_exec_time) AS total_exec_time
FROM
pg_stat_statements
inner join
pg_catalog.pg_user on pg_catalog.pg_user.usesysid = userid
GROUP BY
usename;";

let stmt = self.client.prepare(&query_metrics).await?;
let result = self.client.query(&stmt, &[]).await?;

Ok(result
.is_empty()
.then_some(result.iter().map(|r| r.into()).collect()))
}
}

// NOTE(review): dead placeholder — empty body and no visible callers;
// consider deleting it.
pub async fn get_metrics() {}

// One row of the aggregated pg_stat_statements query in `user_metrics`.
pub struct UserStatements {
    pub usename: String,
    // NOTE(review): in Postgres, `calls` is bigint and `total_exec_time` is
    // double precision, so `SUM(...)` does not yield a 4-byte unsigned int;
    // decoding these columns into `u32` via `Row::get` will most likely panic
    // at runtime with a type mismatch — verify the column types and consider
    // i64 / f64 for these fields.
    pub total_queries: u32,
    pub total_exec_time: u32,
}
impl From<&Row> for UserStatements {
    fn from(row: &Row) -> Self {
        Self {
            usename: row.get("usename"),
            // `Row::get` panics on NULL or on a Rust/SQL type mismatch.
            total_queries: row.get("total_queries"),
            total_exec_time: row.get("total_exec_time"),
        }
    }
}

0 comments on commit f9c572c

Please sign in to comment.