Compare commits


No commits in common. "master" and "feature/1117" have entirely different histories.

16 changed files with 184 additions and 1237 deletions


@ -1,4 +0,0 @@
/target
Cargo.lock
*.sock
.env


@ -1,47 +1,15 @@
# Template .env for API grabber # Template .env for API grabber
# PostgreSQL connection [DEPRECATED] # PostgreSQL connection [DEPRECATED]
# -------------------------------
DB_HOST = "ip.addr.postgresql.server" DB_HOST = "ip.addr.postgresql.server"
DB_USER = "db_user" DB_USER = "db_user"
DB_PASSWORD = "db_user_password" DB_PASSWORD = "db_user_password"
DB_DBNAME = "db_name"1 DB_DBNAME = "db_name"1
# Prometheus-Exporter info # Prometheus-Exporter info
# -------------------------------
EXPORTER_URL = "http(s)://ip.ip.ip.ip:port" EXPORTER_URL = "http(s)://ip.ip.ip.ip:port"
# VINTEO Jitter puller (needed to init Jitter native grab)
# -------------------------------
VINTEO_URL_BASE = "http(s)://ip.ip.ip.ip:port"
VINTEO_ENDPOINT_CONFERENCES = "/api/v1/to/something"
VINTEO_ENDPOINT_PARTICIPANTS = "/api/v1/to/something"
VINTEO_API_KEY = "6fe8b0db-62b4-4065-9c1e-441ec4228341.9acec20bd17d7178f332896f8c006452877a22b8627d089105ed39c5baef9711"
# Status Model API support
# > if exists, ignore `EXPORTER_URL` var
STATUS_SYSTEM_URL = "http://192.168.2.39:9999/api/input"
# eNODE.Monitoring configuration # eNODE.Monitoring configuration
# -------------------------------
# eNODE.Monitoring server IP
ENODE_MONITORING_IP = "ip.ip.ip.ip" ENODE_MONITORING_IP = "ip.ip.ip.ip"
# eNODE.Monitoring credentials
ENODE_MONITORING_LOGIN = "admin_user_enode_monitoring" # admin user is required ENODE_MONITORING_LOGIN = "admin_user_enode_monitoring" # admin user is required
ENODE_MONITORING_PASSWORD = "admin_password_enode_monitoring" # # admin password is required ENODE_MONITORING_PASSWORD = "admin_password_enode_monitoring" # # admin password is required
# List of target devices
ENODE_TARGET_DEVICES = "18, 19"
# to work with unlimit API-Token
ENODE_API_TOKEN = "sssswwwwaaaaffff"
# OPTIONAL SETTINGS
# -------------------------------
# IM configuration for max level of logging info
# for example DEBUG, INFO, WARN, ERROR, TRACE
IM_LOG_INFO = "INFO"
# IM configuration for setting up API connection
# timeout (in secs). Default value - 10
IM_CONNECTION_TIMEOUT = "10"
# IM configuration for delay of requests
# delay (in secs). Default value - 5
IM_REQUEST_DELAY = "20"


@ -5,11 +5,7 @@ RUN apt update && apt install -y musl-tools
RUN rustup target add x86_64-unknown-linux-musl RUN rustup target add x86_64-unknown-linux-musl
COPY . . COPY . .
RUN cargo test
ENV CARGO_HTTP_DEBUG=true
ENV CARGO_HTTP_TIMEOUT=100
RUN cargo test --verbose
RUN cargo build --release --target=x86_64-unknown-linux-musl RUN cargo build --release --target=x86_64-unknown-linux-musl
FROM alpine:latest FROM alpine:latest

Jenkinsfile vendored

@ -1,24 +1,3 @@
def notify(
String context,
String giteaUser,
String giteaPass,
String repositoryUrl,
String repositoryName,
String commitHash,
String buildStatus
) {
def status = buildStatus == 'success' ? 'success' : 'failure'
def description = buildStatus == 'success' ? 'Build succeeded' : 'Build failed'
sh """
curl -X POST \
-u "${giteaUser}:${giteaPass}" \
-H "Content-Type: application/json" \
-d '{"context":"${context}","state": "${status}", "description": "${description}"}' \
${repositoryUrl}deployer3000/${repositoryName}/statuses/${commitHash}
"""
}
pipeline { pipeline {
agent any agent any
environment { environment {
@ -34,22 +13,7 @@ pipeline {
} }
steps { steps {
script { script {
def hasTags = sh(script: "git tag -l | wc -l", returnStdout: true).trim().toInteger() > 0 env.IMAGE_TAG = sh(script: "git describe --tags --abbrev=0", returnStdout: true).trim()
def lastVersion = "0.0.0"
if (hasTags) {
lastVersion = sh(script: "git describe --tags --abbrev=0", returnStdout: true).trim()
}
echo "Last version: ${lastVersion}"
def (major, minor, patch) = lastVersion.tokenize('.')
def newVersion = "${major}.${minor}.${patch.toInteger() + 1}"
echo "New version: ${newVersion}"
env.IMAGE_TAG = newVersion
env.NEW_VERSION = newVersion
} }
} }
} }
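
For reference, the removed tagging step computed the next release by bumping the patch component of the last git tag (defaulting to `0.0.0` when the repository had no tags). A rough Rust sketch shown only to illustrate that removed logic; the function name is hypothetical:

``` rust
// Bump the patch component of a `major.minor.patch` version string,
// mirroring the removed `newVersion` computation in the pipeline.
fn bump_patch(last_version: &str) -> Option<String> {
    let mut parts = last_version.split('.');
    let major = parts.next()?;
    let minor = parts.next()?;
    let patch: u64 = parts.next()?.parse().ok()?;
    Some(format!("{major}.{minor}.{}", patch + 1))
}

// e.g. bump_patch("0.0.0") == Some("0.0.1".to_string())
```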
@ -94,30 +58,14 @@ pipeline {
echo "Attempting to merge PR ${env.CHANGE_ID} into master..." echo "Attempting to merge PR ${env.CHANGE_ID} into master..."
withCredentials([usernamePassword(credentialsId: 'gitea_creds', usernameVariable: 'GITEA_USER', passwordVariable: 'GITEA_PASS')]) { withCredentials([usernamePassword(credentialsId: 'gitea_creds', usernameVariable: 'GITEA_USER', passwordVariable: 'GITEA_PASS')]) {
def prId = env.CHANGE_ID def prId = env.CHANGE_ID
sh """ sh """
curl -X POST \ curl -X POST \
-u "${GITEA_USER}:${GITEA_PASS}" \ -u "${GITEA_USER}:${GITEA_PASS}" \
-H "Content-Type: application/json" \ -H "Content-Type: application/json" \
-d '{"do":"merge"}' \ -d '{"do":"merge"}' \
http://git.entcor/api/v1/repos/deployer3000/${env.IMAGE_NAME}/pulls/${prId}/merge http://git.entcor/api/v1/repos/deployer3000/integration-module/pulls/${prId}/merge
""" """
def commitHash = sh(script: "git rev-parse HEAD~1", returnStdout: true).trim() // required for correct display of the statuses
echo "PR ${prId} merged successfully into master!" echo "PR ${prId} merged successfully into master!"
sleep(time: 15, unit: 'SECONDS')
sh "git checkout master && git pull origin master"
sh """
curl -v -X POST -u "${GITEA_USER}:${GITEA_PASS}" \
-H "Content-Type: application/json" \
-d '{"tag_name": "${env.NEW_VERSION}", "name": "Release ${env.NEW_VERSION}", "target_commitish": "master"}' \
"${env.GITEA_REPOSITORY_URL}deployer3000/${env.IMAGE_NAME}/releases"
"""
echo "New release succeeded!"
def context = "test-org/${env.IMAGE_NAME}/pipeline/pr-${env.CHANGE_TARGET}"
notify(context, GITEA_USER, GITEA_PASS, env.GITEA_REPOSITORY_URL, env.IMAGE_NAME, commitHash, "success")
} }
} }
} }

README.md

@ -1,173 +1,39 @@
# Integration module for the "Буревестник ВКС" project
## Description
`Integration Module (IM)` is a Rust package providing the integration-module functionality for the "Буревестник ВКС" project and consisting of binary crates for: `integr_mod` is a Rust package providing the integration-module functionality for the "Буревестник ВКС" project and consisting of binary crates for:
- retrieving data via the VCS API
- storing, validating and keeping its own configurations up to date
- preprocessing the received data and saving it to the DB - preprocessing the received data and ~~sending it to the `Monitoring System`~~ saving it to the DB
- integration with `eNODE.Monitoring`
## Operating specifics
The following operating modes are currently provided:
1) **Native** - a mode that directly polls the `Vinteo` video-conferencing service and runs the corresponding `ETL` process
2) **Static** - an *"intermediary"* mode in which all `Vinteo` VCS metric data is obtained via the `RESTful API` of `eNODE.Monitoring`
3) **System** - analogous to the **static** mode, but the metric data (pre-enriched with a zero status) is sent not directly to the `Prometheus exporter` module but to the `Status Model`
4) **Vinteo** - a special mode that collects a specific set of metrics directly from the `Vinteo` VCS via a multi-stage `API request` mechanism
> **Note**
By default the `IM` works in **NATIVE** mode and expects a configuration in `.json` format; however, the **STATIC** mode is considered the preferred one. Detailed setup information is in the `Guide` section
## Guide
This section describes the procedure for configuring, building and running the `IM` software module 1. Fill in the .env file or set the environment variables according to the example in the `.env.example` file
### Pre-configuration
1. Choose the module's operating mode, which determines how it is configured:
| Operating mode | .env | config-api.json | $STATUS_SYSTEM_URL | $EXPORTER_URL |
|---|---|---|---|---|
| Native | ❌ | ✅ | ❌ | ❌ |
| Static | ✅ | ❌ | ❌❌❌ | ✅ |
| System | ✅ | ❌ | ✅ | ❌ |
where:
✅ -- should be configured (set up)
❌ -- ignored by the system, no need to configure
❌❌❌ -- must **NOT** be configured; doing so may cause malfunctions
> The `Vinteo` operating mode is *deliberately* **not listed** in the table
### Configuring the "Native" mode
To configure this mode, place the `config_api.json` configuration file in the **working** directory:
``` json
{
"config": [
{
"id":"zvks",
"login" : "",
"pass" : "",
"api_key" : "6fe8b0db-62b4-4065-9c1e-441ec4228341.9acec20bd17d7178f332896f8c006452877a22b8627d089105ed39c5baef9711",
"period" : "",
"timeout" : "5",
"metrics" : [
{
"name": "conferences",
"url": "https://demo.vcs.vinteo.dev/api/v1/conferences",
"measure": [
{ "id":"number", "type": "text", "addr": "data.conferences[].number" },
{ "id":"total", "type": "integer", "addr": "data.total" },
{ "id":"participants_total", "type": "integer", "addr": "data.conferences[].participants.total" },
{ "id":"parts_total_in_each", "type": "integer", "addr": "data.conferences[description].participants.total" },
{ "id":"participants_online", "type": "integer", "addr": "data.conferences[].participants.online" }
]
},
{
"name": "abonents",
"url": "https://demo.vcs.vinteo.dev/api/v1/accounts",
"measure": [
{ "id":"total", "type": "integer", "addr": "data.total" }
]
}
]
}
]
}
```
> **Note**
The configuration file must be named exactly as in the example - `config_api.json`
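
A minimal sketch of how such a file can be read at startup, in the spirit of the module's `pull_local_config`; the simplified `Config` type here is an assumption, the real one lives in `integr_structs`:

``` rust
use std::{fs, path::Path};
use anyhow::Result;
use serde::Deserialize;

// Simplified stand-in for the real `integr_structs::api::v3::Config` (assumption).
#[derive(Debug, Deserialize)]
struct Config {
    config: serde_json::Value,
}

const CONFIG_PATH: &str = "config_api.json";

fn pull_local_config() -> Result<Config> {
    let path = Path::new(CONFIG_PATH);
    anyhow::ensure!(path.is_file(), "{} not found in the working directory", CONFIG_PATH);
    Ok(serde_json::from_str(&fs::read_to_string(path)?)?)
}
```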
### Configuring the "Static" mode
To configure this mode, add the server details to the `.env` file, following the example:
``` toml ``` toml
... # Template .env for API grabber
EXPORTER_URL = "http(s)://ip.ip.ip.ip:port" # <--- экспорт данных (обязательно)
# Prometheus-Exporter info
EXPORTER_URL = "http(s)://ip.ip.ip.ip:port"
# eNODE.Monitoring configuration # eNODE.Monitoring configuration
ENODE_MONITORING_IP = "ip.ip.ip.ip" ENODE_MONITORING_IP = "ip.ip.ip.ip"
# admin user is required # admin user is required
ENODE_MONITORING_LOGIN = "admin_user_enode_monitoring"# ---> получение данных ENODE_MONITORING_LOGIN = "admin_user_enode_monitoring"
# admin password is required # admin password is required
ENODE_MONITORING_PASSWORD = "admin_password_enode_monitoring" ENODE_MONITORING_PASSWORD = "admin_password_enode_monitoring"
...
``` ```
### Configuring the "System" mode
To configure this mode, add the server details to the `.env` file, following the example:
``` toml
...
STATUS_SYSTEM_URL = "http(s)://{ip}:{port}/api/input"# <--- экспорт данных
# eNODE.Monitoring configuration
ENODE_MONITORING_IP = "{ip}.{ip}.{ip}.{ip}"
# admin user is required
ENODE_MONITORING_LOGIN = "admin_user_enode_monitoring"# ---> получение данных
# admin password is required
ENODE_MONITORING_PASSWORD = "admin_password_enode_monitoring"
...
```
### Configuring the "Vinteo" mode
To work in this mode, set the environment variables according to the list below
``` toml
...
VINTEO_URL_BASE = "https://demo.vcs.vinteo.dev"
VINTEO_ENDPOINT_CONFERENCES = "/api/v1/conferences"
VINTEO_ENDPOINT_PARTICIPANTS = "/api/v1/participants/"
VINTEO_API_KEY = "00000000000111111111.aaaaaaaaaaaaaaabbbbbbbbbbbbb"
...
```
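
With all four modes described, the mode is effectively selected by which environment variables are present. A small sketch of that dispatch, mirroring the checks in the module's `main.rs`; the returned labels are illustrative:

``` rust
use std::env;

// Mode dispatch: eNODE.Monitoring settings take precedence, then the Vinteo
// API key, otherwise the native `config_api.json` mode is assumed.
fn select_mode() -> &'static str {
    if env::var("ENODE_MONITORING_IP").is_ok() {
        "static / system (eNODE.Monitoring)"
    } else if env::var("VINTEO_API_KEY").is_ok() {
        "vinteo (native jitter grab)"
    } else {
        "native (config_api.json)"
    }
}
```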
### Configuring export of the received and processed data
The *output endpoint* for the received and processed metrics is determined by the parameters set in the environment variables; there are two options:
1) **Export to the Status Model**, within the end-to-end data flow mechanism of the `Буревестник ВКС` project
``` toml
...
STATUS_SYSTEM_URL = "{BASE_URL}/{ROUTE}"
...
```
2) **Export to the exporter or another data consumer**
``` toml
...
EXPORTER_URL = "{BASE_URL}/{ROUTE}"
...
```
> ### **VERY IMPORTANT NOTE**
> ---
> Using `$STATUS_SYSTEM_URL` and `$EXPORTER_URL` at the same time is **NOT ALLOWED**!! The **Status Model** URL is _by default_ the **HIGHER-PRIORITY** option and the other one will be overridden; use only one of them
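
The priority rule above amounts to a simple fallback lookup. A sketch of how the export URL can be resolved, mirroring the `STATUS_SYSTEM_URL` → `EXPORTER_URL` fallback used in the exporter code further down in this diff:

``` rust
use std::env;

// `STATUS_SYSTEM_URL` wins; only if it is absent is `EXPORTER_URL` used.
fn resolve_export_url() -> Result<String, env::VarError> {
    env::var("STATUS_SYSTEM_URL").or_else(|_| env::var("EXPORTER_URL"))
}
```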
2. Build the project with:
``` bash
cargo build --release
```
3. Run
> **Debug** version > Debug version
``` bash
cargo run --bin api-grub
```
or
> **Release** version > Release version
``` bash
cargo run --release --bin api-grub
```
@ -176,6 +42,6 @@ cargo run --release --bin api-grub
| Crate (submodule) | Progress |
|---|---|
|`api-grub` | ✅✅✅✅✅✅✅✅✅🛠️ |
|`config-delivery` [migrated] | ❌❌❌❌❌❌❌❌❌❌ | |`config-delivery [migrated]` | ❌❌❌❌❌❌❌❌❌❌ |
|`integrs-structs` | ✅✅✅✅✅✅✅✅✅✅ |
|`preproc` [temp-deprecated] | ❌❌❌❌❌❌❌❌❌❌ | (development temporarily halted)


@ -1,20 +1,15 @@
[package] [package]
name = "api-grub" name = "api-grub"
version = "1.0.15" version = "1.0.1"
edition = "2021" edition = "2021"
authors = ["Vladislav Drozdov <maseeeeeeeed@gmail.com>"]
description = "API poller for ZVKS project"
homepage = "http://git.enode/deployer3000/integration-module/src/branch/master/crates/api-grub"
repository = "http://git.enode/deployer3000/integration-module/src/branch/master/crates/api-grub"
license = "MIT OR Apache-2.0"
keywords = ["api", "grub", "zvks"]
publish = ["kellnr"]
[dependencies] [dependencies]
serde = { version = "1.0.217", features = ["derive"] } serde = { version = "1.0.217", features = ["derive"] }
serde_json = "1.0.135" serde_json = "1.0.135"
tokio = { version = "1.43.0", features = ["full"] } tokio = { version = "1.43.0", features = ["full"] }
integr-structs = { version = ">=0.1.0", path="../integr-structs"} integr-structs = {path = "../integr-structs"}
env_logger = "0.11.6"
log = "0.4.25"
anyhow = "1.0.95" anyhow = "1.0.95"
chrono = "0.4.39" chrono = "0.4.39"
reqwest = { version = "0.12.12", features = ["rustls-tls", "json"] } reqwest = { version = "0.12.12", features = ["rustls-tls", "json"] }
@ -25,8 +20,3 @@ md5 = "0.7.0"
rand = "0.9.0" rand = "0.9.0"
sysinfo = "0.33.1" sysinfo = "0.33.1"
openssl = { version = "0.10", features = ["vendored"] } openssl = { version = "0.10", features = ["vendored"] }
tracing-subscriber = "0.3.19"
tracing = "0.1.41"
lazy_static = "1.5.0"
futures = "0.3.31"
async-stream = "0.3.6"


@ -2,7 +2,7 @@
// 1) check changes in unix-socket // 1) check changes in unix-socket
// 2) save changes in local config file // 2) save changes in local config file
use anyhow::{Error, Ok, Result}; use anyhow::{Error, Ok, Result};
use tracing::{info, warn, error}; use log::{info, warn, error};
use std::{fs, path::Path}; use std::{fs, path::Path};
use serde_json::from_str; use serde_json::from_str;
use tokio::{io::AsyncReadExt, net::UnixListener}; use tokio::{io::AsyncReadExt, net::UnixListener};
@ -15,20 +15,6 @@ const CONFIG_PATH: &str = "config_api.json";
const SOCKET_PATH: &str = "api-grub.sock"; const SOCKET_PATH: &str = "api-grub.sock";
// TODO: rewrite to use current_exe // TODO: rewrite to use current_exe
/// # Fn `pull_local_config`
///
/// ## function for one-time pulling of the local config by direct reading
///
/// ### Dev-Info :
///
/// *input* : -
///
/// *output* : `anyhow::Result<integr_structs::api::v3::Config>`
///
/// *initiator* : fn `main`
///
/// *managing* : -
///
pub async fn pull_local_config() -> Result<Config> { pub async fn pull_local_config() -> Result<Config> {
let path = Path::new(CONFIG_PATH); let path = Path::new(CONFIG_PATH);
if path.exists() && path.is_file() { if path.exists() && path.is_file() {
@ -42,22 +28,6 @@ pub async fn pull_local_config() -> Result<Config> {
} }
// for config pulling // for config pulling
/// # Fn `init_config_grub_mechanism`
///
/// ## function to init Unix-Socket listening for grabbing new configs
///
/// ### Dev-Info :
///
/// *input* : `&tokio::sync::mpsc::Sender<integr_structs::api::v3::Config>`
///
/// *output* : `anyhow::Result<()>`
///
/// *initiator* : fn `main`
///
/// *managing* : -
///
/// *depends on* : `const SOCKET_PATH`
///
pub async fn init_config_grub_mechanism(tx: &Sender<Config>) -> Result<()> { pub async fn init_config_grub_mechanism(tx: &Sender<Config>) -> Result<()> {
info!("Initializing Unix-Socket listening for pulling new configs..."); info!("Initializing Unix-Socket listening for pulling new configs...");
let server = init_unix_listener().await?; let server = init_unix_listener().await?;
@ -87,43 +57,12 @@ pub async fn init_config_grub_mechanism(tx: &Sender<Config>) -> Result<()> {
} }
// saving new config locally // saving new config locally
/// # Fn `save_new_config`
///
/// ## function for saving new config locally
///
/// ### Dev-Info :
///
/// *input* : `&String | &str`
///
/// *output* : `anyhow::Result<()>`
///
/// *initiator* : fn `main`
///
/// *managing* : -
///
/// *depends on* : `const CONFIG_PATH`
///
async fn save_new_config(config: &String) -> Result<()> { async fn save_new_config(config: &String) -> Result<()> {
fs::write(CONFIG_PATH, config)?; fs::write(CONFIG_PATH, config)?;
Ok(()) Ok(())
} }
/// # Fn `init_unix_listener`
///
/// ## function to create the Unix-Socket listener
///
/// ### Dev-Info :
///
/// *input* : `&tokio::sync::mpsc::Sender<Config>`
///
/// *output* : `anyhow::Result<tokio::net::UnixListener>`
///
/// *initiator* : fn `init_config_grub_mechanism`
///
/// *managing* : -
///
/// *depends on* : `const SOCKET_PATH`
///
async fn init_unix_listener() -> Result<UnixListener> { async fn init_unix_listener() -> Result<UnixListener> {
let _ = fs::remove_file(SOCKET_PATH); let _ = fs::remove_file(SOCKET_PATH);
Ok(UnixListener::bind(SOCKET_PATH)?) Ok(UnixListener::bind(SOCKET_PATH)?)
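
The listener above implies that a new configuration is delivered by writing JSON to the `api-grub.sock` Unix socket. A hypothetical client-side sketch (not part of this diff) could look like this:

``` rust
use tokio::{io::AsyncWriteExt, net::UnixStream};

// Push a new config_api.json payload to the module's Unix socket.
async fn push_new_config(json: &str) -> anyhow::Result<()> {
    let mut stream = UnixStream::connect("api-grub.sock").await?;
    stream.write_all(json.as_bytes()).await?;
    stream.shutdown().await?;
    Ok(())
}
```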


@ -4,45 +4,13 @@ use reqwest::Client;
use tokio_postgres::NoTls; use tokio_postgres::NoTls;
use std::env; use std::env;
use anyhow::Result; use anyhow::Result;
use tracing::{debug, error, info, trace}; use log::{info, error};
use std::ops::Drop;
/// An entity which handles DB connections.
///
/// This struct serves as a layer between the API-grab workers
/// and the `pool` of DB connections used to save grabbing process statuses
/// (now `PostgreSQL`, slowly migrating to `ClickHouse` support)
///
/// # Examples
///
/// ```
/// use api_grub::export::Exporter;
///
/// let exporter = Exporter::init();
///
/// assert!(exporter.get_connection_from_pool().await.is_some());
/// ```
///
/// # Hint:
///
/// - use `export_data` method to export metrics to DB
///
/// - use `export_metrics` method to export metrics (`PrometheusMetrics`
/// type) to the Prometheus exporter (`$EXPORTER_URL`)
///
/// - use `export_extended_metrics` method to export metrics (
/// `PrometheusMetricsExtended` type with `desc` field) to the
/// Prometheus exporter (`$EXPORTER_URL`)
pub struct Exporter { pub struct Exporter {
pool : Option<Pool>, pool : Option<Pool>,
} }
impl Exporter { impl Exporter {
/// Fills `deadpool_postgres::Config` object with values from ENV VARS:
/// - `DB_HOST`
/// - `DB_DBNAME`
/// - `DB_USER`
/// - `DB_PASSWORD`
fn config_construct() -> Result<Config> { fn config_construct() -> Result<Config> {
let mut cfg = Config::new(); let mut cfg = Config::new();
cfg.host = Some(env::var("DB_HOST")?); cfg.host = Some(env::var("DB_HOST")?);
@ -51,9 +19,6 @@ impl Exporter {
cfg.password = Some(env::var("DB_PASSWORD")?); cfg.password = Some(env::var("DB_PASSWORD")?);
Ok(cfg) Ok(cfg)
} }
/// Uses `deadpool_postgres::Config` object to create DB connections
/// pool to share between async tasks and to restrict a count of parallel
/// connections
fn pool_construct() -> Option<Pool> { fn pool_construct() -> Option<Pool> {
return match Self::config_construct() { return match Self::config_construct() {
Ok(config) => { Ok(config) => {
@ -69,7 +34,6 @@ impl Exporter {
}, },
} }
} }
/// Checks if DB connections pool is empty
#[allow(unused)] #[allow(unused)]
pub fn is_no_connection(&self) -> bool { self.pool.is_none() } pub fn is_no_connection(&self) -> bool { self.pool.is_none() }
pub fn init() -> Self { pub fn init() -> Self {
@ -77,9 +41,6 @@ impl Exporter {
pool : Self::pool_construct(), pool : Self::pool_construct(),
} }
} }
/// Shares a connection `deadpool_postgres::Client as PgClient`
///
/// Function awaits til the moment it can return `Option<deadpool_postgres::Client as PgClient>`
#[allow(unused)] #[allow(unused)]
pub async fn get_connection_from_pool(&self) -> Option<PgClient> { pub async fn get_connection_from_pool(&self) -> Option<PgClient> {
if let Some(pool) = &self.pool { if let Some(pool) = &self.pool {
@ -87,22 +48,15 @@ impl Exporter {
} }
None None
} }
/// Exports data in `&str` jsonb format to DB using connection from the pool
#[allow(unused)] #[allow(unused)]
#[tracing::instrument(name = "PostgreSQL export")]
pub async fn export_data(client: PgClient, metrics: &str) -> Result<()> { pub async fn export_data(client: PgClient, metrics: &str) -> Result<()> {
let query = client.prepare_cached("INSERT INTO metrics (body) VALUES ($1);").await?; let query = client.prepare_cached("INSERT INTO metrics (body) VALUES ($1);").await?;
let _ = client.query(&query, &[&metrics]).await?; let _ = client.query(&query, &[&metrics]).await?;
Ok(()) Ok(())
} }
/// Exports metrics in `PrometheusMetrics` format to Exporter defined
/// as env var $EXPORTER_URL
#[tracing::instrument(name = "Prometheus export")]
pub async fn export_metrics(metrics: PrometheusMetrics) -> Result<usize> { pub async fn export_metrics(metrics: PrometheusMetrics) -> Result<usize> {
let url = env::var("EXPORTER_URL")?; let url = env::var("EXPORTER_URL")?;
debug!("Exporting: {:?}", &metrics);
let req = Client::new() let req = Client::new()
.post(url) .post(url)
.json(&metrics) .json(&metrics)
@ -111,31 +65,14 @@ impl Exporter {
req?; req?;
Ok(metrics.get_bytes_len()) Ok(metrics.get_bytes_len())
} }
/// Exports metrics in `PrometheusMetricsExtended` format to Exporter defined
/// as env var $EXPORTER_URL
#[tracing::instrument(name = "Prometheus/Status System export")]
pub async fn export_extended_metrics(metrics: PrometheusMetricsExtended) -> Result<usize> { pub async fn export_extended_metrics(metrics: PrometheusMetricsExtended) -> Result<usize> {
// let url = env::var("EXPORTER_URL")?; let url = env::var("EXPORTER_URL")?;
let url = env::var("STATUS_SYSTEM_URL").or_else(|err| {
trace!("cannot fetch $STATUS_SYSTEM_URL var due to {}. working only with Prometheus exporter link", err);
env::var("EXPORTER_URL")
})?;
debug!("Exporting: {:?}", &metrics);
let req = Client::new() let req = Client::new()
.post(&url) .post(url)
.json(&metrics) .json(&metrics)
.send().await; .send().await;
req?; req?;
Ok(metrics.get_bytes_len()) Ok(metrics.get_bytes_len())
} }
}
impl Drop for Exporter {
// Custom destructor to log deinitializing of the `Exporter`
fn drop(&mut self) {
info!("Deinitializng Exporter and DB connection pool ...")
}
} }


@ -1,319 +0,0 @@
use std::collections::{HashMap, HashSet};
use integr_structs::api::v3::{PrometheusMetricsExtended, MetricOutputExtended};
use std::sync::Arc;
use reqwest::Client;
use lazy_static::lazy_static;
use tracing::{error, info, trace, warn};
use std::pin::Pin;
use serde_json::{Value, from_str};
use crate::export::Exporter;
use async_stream::stream;
use futures::{future, pin_mut, stream::Stream, StreamExt};
lazy_static! {
static ref VINTEO_BASE: String = std::env::var("VINTEO_URL_BASE").unwrap_or_else(|_| String::from("https://demo.vcs.vinteo.dev"));
static ref CONFERENCES_ENDPOINT: String = std::env::var("VINTEO_ENDPOINT_CONFERENCES").unwrap_or_else(|_| String::from("/api/v1/conferences"));
static ref USERS_ENDPOINT: String = std::env::var("VINTEO_ENDPOINT_PARTICIPANTS").unwrap_or_else(|_| String::from("/api/v1/participants/"));
}
// conferences ids
type Conferences = HashSet<(String, String)>;
type OutputConferences = HashMap<(String, String), Value>;
#[derive(Debug, Clone)]
struct ConferencesDigital(OutputConferences);
impl ConferencesDigital {
pub fn new(item: OutputConferences) -> Self {
Self(item)
}
pub fn go_across(self) -> impl Stream<Item = ((String, String), Arc<Value>)> {
stream! {
for (key, value) in self.0 {
info!("Conference {} ({}) is processing ...", key.0, key.1);
yield (key, Arc::new(value));
tokio::time::sleep(std::time::Duration::from_millis(10)).await;
}
}
}
}
// to handle async http requests
#[derive(Debug)]
struct Requester {
base: String,
api_key: String,
client: Client,
}
impl Requester {
pub fn new() -> anyhow::Result<Self> {
Ok(Self {
base: VINTEO_BASE.clone().to_owned(),
api_key: std::env::var("VINTEO_API_KEY")?,
client: Client::builder().user_agent("api-grub").build()?,
})
}
pub async fn get_conferences(&self) -> anyhow::Result<Value> {
trace!("getting conferences list from API");
let url = format!("{}{}", self.base, *CONFERENCES_ENDPOINT);
let req = self.client.get(url)
.timeout(tokio::time::Duration::from_secs(10))
.header("accept", "application/json")
.header("x-api-key", &self.api_key);
let resp = req.send().await;
Ok(from_str(&resp?.text().await?)?)
}
pub async fn get_conferences_from_value(conferences: Value) -> anyhow::Result<Conferences> {
trace!("extracting conferences list");
let mut hashset = Conferences::new();
match conferences.get("data") {
None => return Err(anyhow::Error::msg("Invalid JSON format: no `data` field")),
Some(data) => {
match data.get("conferences") {
None => return Err(anyhow::Error::msg("Invalid JSON format: no `conferences` field")),
Some(confs) => {
if let Some(confs) = confs.as_array() {
confs.iter()
.for_each(|conf_obj| {
if let Some(id) = conf_obj.get("number") {
if let Some(id) = id.as_str() {
let id = {
if let Some(desc) = conf_obj.get("description") {
match desc.as_str() {
Some(desc) => (desc.to_string(), id.to_string()),
None => (String::new(), id.to_string())
}
} else {
(String::new(), id.to_string())
}
};
hashset.insert(id);
}
}
});
} else {
return Err(anyhow::Error::msg("Invalid JSON format: `conferences` is not an array"))
}
},
}
},
}
Ok(hashset)
}
pub async fn get_users_jitter_by_conference(&self, conferences : Conferences) -> anyhow::Result<OutputConferences> {
trace!("getting users list for each conference from API ...");
let mut output = OutputConferences::new();
for conf in conferences {
let url = format!("{}{}{}", self.base, *USERS_ENDPOINT, &conf.1);
let req = self.client.get(url)
.timeout(tokio::time::Duration::from_secs(10))
.header("accept", "application/json")
.header("x-api-key", &self.api_key);
match req.send().await {
Err(er) => error!("Cannot GET data about conference - {:?}: {}", conf, er),
Ok(resp) => {
match resp.text().await {
Err(er) => error!("Cannot convert response into text: {}", er),
Ok(resp) => {
let resp: Value = from_str(&resp)?;
output.entry(conf).or_insert(resp);
}
}
},
}
}
Ok(output)
}
}
fn across_participants(conf: &Value) -> impl Stream<Item = Arc<Value>> + '_ {
trace!("fetching participants for conf and going across ...");
let participants_iter = conf
.get("data")
.and_then(|data| data.get("participants"))
.and_then(|participants| participants.as_array())
.into_iter()
.flatten()
.cloned();
stream! {
for participant in participants_iter {
yield Arc::new(participant);
}
}
}
async fn gather_futures(parts: Arc<Value>, conf_id: Arc<str>, conf_desc: Arc<str>, targets: Vec<(&str, &str)>) -> Vec<Option<MetricOutputExtended>> {
trace!("extracting {} measures for current participant in current conference {} ...", targets.len(), conf_id);
let mut futures: Vec<Pin<Box<dyn futures::Future<Output = Option<MetricOutputExtended>> + Send>>> = Vec::new();
for (target, description) in targets {
futures.push(Box::pin(extract_from_participant(
parts.clone(),
target,
conf_id.clone(),
conf_desc.clone(),
description
)));
}
future::join_all(futures).await
}
// GET participants/{conference_id} data.total
async fn extract_total_participants(conferences : Arc<Value>) -> Value {
if let Some(total) = conferences
.get("data")
.and_then(|data| data.get("total")) {
return total.clone()
}
Value::Null
}
async fn extract_total_anon_participants(conferences : Arc<Value>) -> Value {
let sum = conferences
.get("data")
.and_then(|data| data.get("participants"))
.and_then(|parts| parts.as_array())
.iter()
.flat_map(|&val| val.iter())
.filter_map(|part| part.get("isAnonymous"))
.filter_map(|is_anon| is_anon.as_bool())
.filter(|is_anon| *is_anon == true)
.count();
Value::Number(sum.into())
}
// GET participants/{conference_id} data.participants[].isAnonymous
async fn extract_from_participant(participant : Arc<Value>, target: &str, conf_id: Arc<str>, conf_desc: Arc<str>, description: &str) -> Option<MetricOutputExtended> {
trace!("extracting `{}` measure for current participant ...", target);
if let Some(value) = participant.get("params")
.and_then(|params| params.get(target)) {
let name = participant.get("watermark").unwrap_or_else(|| &Value::Null).as_str().unwrap_or_else(|| "unknown");
let id = participant.get("id").unwrap_or_else(|| &Value::Null).as_str().unwrap_or_else(|| "unknown");
let metric_name = format!("{}_{}_{}", target, conf_id, id);
return Some(
MetricOutputExtended::new_with_slices(
&metric_name,
&metric_name,
"type",
"Vinteo native",
format!(
"{} для пользователя {} ({}) в конференции {} ({})",
description,
name,
id,
conf_desc,
conf_id
).as_ref(),
Some(18),
Some("module$12".to_string()),
value.clone()
)
)
}
None
}
pub async fn init_grubbing_jitter() -> anyhow::Result<()> {
// create requester
let requester = Requester::new()?;
// get conference id's
loop {
info!("Getting conferences array ...");
let conferences = requester.get_conferences().await?;
let conferences = Requester::get_conferences_from_value(conferences).await?;
info!("Getting jitter metrics by user in each conference ...");
let confs = requester.get_users_jitter_by_conference(conferences).await?;
info!("Initializing extraction mechanism ...");
let conferences = ConferencesDigital::new(confs);
let extracted_conference = conferences.go_across();
let mut buffer: Vec<MetricOutputExtended> = Vec::new();
pin_mut!(extracted_conference);
while let Some(((desc, id), item)) = extracted_conference.next().await {
let parts_stream = across_participants(&item);
pin_mut!(parts_stream);
let total_participants = MetricOutputExtended::new_with_slices(
format!("TotalParticipants_{}", id).as_ref(),
format!("TotalParticipants_{}", id).as_ref(),
"type",
"Vinteo Native",
format!("Общее количество участников в конференции {}", &desc).as_ref(),
Some(18),
Some(String::from("module$12")),
extract_total_participants(item.clone()).await
);
// anon NON_STABLE
let total_anon_participants = MetricOutputExtended::new_with_slices(
format!("TotalAnonymousParticipants_{}", id).as_ref(),
format!("TotalAnonymousParticipants_{}", id).as_ref(),
"type",
"Vinteo Native",
format!("Общее количество анонимных участников в конференции {}", &desc).as_ref(),
Some(18),
Some(String::from("module$12")),
extract_total_anon_participants(item.clone()).await
);
// description
buffer.push(total_participants);
buffer.push(total_anon_participants);
while let Some(participant) = parts_stream.next().await {
buffer.append(
&mut gather_futures(
participant,
Arc::from(id.as_ref()),
Arc::from(desc.as_ref()),
vec![
("txBitrate", "Скорость отправки данных"),
("rxBitrate", "Скорость приёма данных"),
("txFPS", "Количество отправляемых кадров в секунду"),
("rxFPS", "Количество получаемых кадров в секунду"),
("rxLost", "Количество потерянных пакетов на приёме"),
("txLost", "Количество потерянных пакетов на отправке"),
("jBLen", "Фазовое отклонение на цифровом канале"),
("txLostPercent", "Процент потерянных пакетов на отправке"),
("rxLostPercent", "Процент потерянных пакетов на приёме"),
("txH239Bitrate", "Скорость передачи данных в дополнительном видео-потоке"),
("rxH239Bitrate", "Скорость приёма данных в дополнительном видео-потоке"),
("txVideoCodec", "Используемый видеокодек на отправке"),
("rxVideoCodec", "Используемый видеокодек на приёме"),
("txAudioCodec", "Используемый аудиокодек на отправке"),
("rxAudioCodec", "Используемый аудиокодек на приёме"),
("txResolution", "Разрешение передаваемого видео"),
("rxResolution", "Разрешение принимаемого видео"),
("txH239Resolution", "Разрешение дополнительного видео-потока"),
("rxH239Resolution", "Разрешение дополнительного видео-потока на принимающей стороне"),
("duration", "Длительсность сеанса (в минутах)"),
],
).await
.into_iter()
.filter(|metric| metric.is_some())
.map(|metric| metric.unwrap() )
.collect::<Vec<MetricOutputExtended>>()
);
}
}
// let metrics = Requester::get_metrics_from_jitter(jitter).await;
if !buffer.is_empty() {
let metrics = PrometheusMetricsExtended::new_zvks(buffer).await;
match Exporter::export_extended_metrics(metrics).await {
Ok(bytes) => info!("Successfully transmitted metrics ({} bytes)", bytes),
Err(er) => error!("Cannot export data: {}", er),
}
} else {
warn!("Metrics array is empty. Ignoring exporting ...");
}
tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
}
// get users' jitter info
}


@ -2,21 +2,6 @@
use serde_json::{json, Value}; use serde_json::{json, Value};
use integr_structs::api::v3::{Metric, MetricOutput}; use integr_structs::api::v3::{Metric, MetricOutput};
/// A JSON-parser struct
///
/// Using in metric extracting from Server Response
/// with metrics mechanism
///
/// # Example
///
/// ```
/// use api_grub::json::JsonParser;
/// use integr_structs::api::v3::Metric;
///
/// let json = br#"{ "flat1" : { "room1" : { "rt_tempo" : "+16" }}}"#.to_vec();
///
/// assert!(!JsonParser::parse(vec![Metric::template()], json).is_empty());
/// ```
pub struct JsonParser; pub struct JsonParser;
impl JsonParser { impl JsonParser {


@ -1,38 +1,25 @@
use std::str::FromStr; use chrono::Local;
use env_logger::Builder;
use log::LevelFilter;
use std::io::Write;
use anyhow::Result; use anyhow::Result;
use tracing::info; use log::info;
/// # Fn `setup_logger`
///
/// ## function to init terminal logger
///
/// ### Dev-Info :
///
/// *input* : -
///
/// *output* : `anyhow::Result<()>`
///
/// *initiator* : fn `main`
///
/// *managing* : -
///
/// *depends on* : -
///
pub async fn setup_logger() -> Result<()> { pub async fn setup_logger() -> Result<()> {
let log_level = std::env::var("IM_LOG_INFO").unwrap_or_else(|_| String::from("INFO")); Builder::new()
.format(move |buf, record| {
tracing_subscriber::fmt() writeln!(
.with_max_level( buf,
tracing::Level::from_str(&log_level) "|{}| {} [{}] - {}",
.unwrap_or_else(|_| tracing::Level::INFO)) "api-grubber",
.with_writer(std::io::stdout) Local::now().format("%d-%m-%Y %H:%M:%S"),
.with_span_events(tracing_subscriber::fmt::format::FmtSpan::NEW) record.level(),
// .with_timer(Local::now().format("%d-%m-%Y %H:%M:%S")) record.args(),
.with_line_number(false) )
.with_target(false) })
.with_file(false) .filter(None, LevelFilter::Info)
.compact() .target(env_logger::Target::Stdout)
.init(); .init();
info!("Logger configured"); info!("Logger configured");
Ok(()) Ok(())
@ -42,17 +29,22 @@ pub async fn setup_logger() -> Result<()> {
#[cfg(test)] #[cfg(test)]
mod logger_unittests { mod logger_unittests {
use tokio::test; use tokio::test;
use super::*;
#[test] #[test]
async fn check_logger_builder() { async fn check_logger_builder() {
tracing_subscriber::fmt() Builder::new()
.with_max_level(tracing::Level::INFO) .format(move |buf, record| {
.with_test_writer() writeln!(
.with_span_events(tracing_subscriber::fmt::format::FmtSpan::NEW) buf,
// .with_timer(Local::now().format("%d-%m-%Y %H:%M:%S")) "|{}| {} [{}] - {}",
.with_line_number(false) "api-grubber",
.with_target(false) Local::now().format("%d-%m-%Y %H:%M:%S"),
.with_file(false) record.level(),
.compact() record.args(),
)
})
.filter(None, LevelFilter::Info)
.target(env_logger::Target::Stdout)
.init(); .init();
} }
} }


@ -4,19 +4,17 @@ mod logger;
mod json; mod json;
mod export; mod export;
mod monitoring; mod monitoring;
mod jitter;
use anyhow::Result; use anyhow::Result;
// use integr_structs::api::ApiConfigV2;
use integr_structs::api::v3::Config; use integr_structs::api::v3::Config;
use logger::setup_logger; use logger::setup_logger;
// use log::{info, warn};
use config::{pull_local_config, init_config_grub_mechanism}; use config::{pull_local_config, init_config_grub_mechanism};
use net::init_api_grub_mechanism; use net::init_api_grub_mechanism;
use tokio::sync::mpsc; use tokio::sync::mpsc;
use tracing::{error, info, warn}; use log::{error, info, warn};
// ENODE_MONITORING_IP
use monitoring::get_metrics_from_monitoring; use monitoring::get_metrics_from_monitoring;
// VINTEO_API_KEY
use jitter::init_grubbing_jitter;
#[tokio::main(flavor = "multi_thread")] #[tokio::main(flavor = "multi_thread")]
async fn main() -> Result<()>{ async fn main() -> Result<()>{
@ -25,6 +23,10 @@ async fn main() -> Result<()>{
let config = get_config().await; let config = get_config().await;
// config update channel // config update channel
let (tx, mut rx) = mpsc::channel::<Config>(1); let (tx, mut rx) = mpsc::channel::<Config>(1);
// futures
// todo : rewrite with spawn
// let config_fut = init_config_grub_mechanism(&tx);
// let grub_fut = init_api_grub_mechanism(config, &mut rx);
let event_config = tokio::spawn(async move { let event_config = tokio::spawn(async move {
match init_config_grub_mechanism(&tx).await { match init_config_grub_mechanism(&tx).await {
@ -36,30 +38,24 @@ async fn main() -> Result<()>{
}, },
} }
}); });
let request_delay = std::env::var("IM_REQUEST_DELAY")
.unwrap_or_else(|_| String::from("5"))
.parse::<u32>()
.unwrap_or_else(|_| {
warn!("No delay was set, setting up as 5 secs ..."); 5
});
let event_grub = tokio::spawn(async move { let event_grub = tokio::spawn(async move {
// GRAB USING eNODE.MONITORING API GATEWAY
if std::env::var("ENODE_MONITORING_IP").is_ok() { if std::env::var("ENODE_MONITORING_IP").is_ok() {
match get_metrics_from_monitoring(0, request_delay as usize).await { match get_metrics_from_monitoring(0, 5).await {
Ok(_) => info!("Grabing (eNODE.Monitoring) task de-initialized"), Ok(_) => {
Err(er) => error!("Grabing task returned an error : {}", er), info!("Grabing (eNODE.Monitoring) task deinitialized");
},
Err(er) => {
error!("Grabing task returned an error : {}", er);
},
} }
// JITTER NATIVE GRAB TASK
} else if std::env::var("VINTEO_API_KEY").is_ok() {
match init_grubbing_jitter().await {
Ok(_) => info!("Grabing (Vinteo - Jitter native) task de-initialized"),
Err(er) => error!("Jitter grabing mechanism crushed : {}", er),
}
// NATIVE GRAB TASK USING `config_api.json`
} else { } else {
match init_api_grub_mechanism(config, &mut rx).await { match init_api_grub_mechanism(config, &mut rx).await {
Ok(_) => info!("Grabing task de-initialized"), Ok(_) => {
Err(er) => error!("Grabing task returned an error : {}", er), info!("Grabing task deinitialized");
},
Err(er) => {
error!("Grabing task returned an error : {}", er);
},
} }
} }
}); });
@ -67,7 +63,8 @@ async fn main() -> Result<()>{
for event in events_handler { for event in events_handler {
let _ = event.await; let _ = event.await;
} }
// let _ = tokio::join!(config_fut, grub_fut);
Ok(()) Ok(())
} }


@ -4,360 +4,108 @@ use serde_json::{Map, Value};
use reqwest::Client; use reqwest::Client;
use tokio::sync::Semaphore; use tokio::sync::Semaphore;
use std::sync::Arc; use std::sync::Arc;
use integr_structs::api::enode_monitoring::{AuthResponse, ForTokenCredentials, get_chunk_size}; use integr_structs::api::enode_monitoring::{AuthResponse, ForTokenCredentials, GenericUrl, LazyUnzip, get_chunk_size};
use integr_structs::api::enode_monitoring::cmdb::Query; use integr_structs::api::enode_monitoring::cmdb::Query;
use tokio::task::JoinHandle; use tokio::task::JoinHandle;
use std::pin::Pin; use std::pin::Pin;
use std::future::Future; use std::future::Future;
use integr_structs::api::v3::{MetricOutputExtended, PrometheusMetricsExtended}; use integr_structs::api::v3::{MetricOutputExtended, PrometheusMetricsExtended};
use tracing::{error, info, warn}; use log::{error, info, warn};
use std::collections::HashMap; use std::collections::HashMap;
trait AsDeviceRequest {
fn as_devices(self) -> Vec<String>;
}
trait IntoEnodeRequset {
fn into_enode_request(self) -> String;
}
impl AsDeviceRequest for Vec<String> {
fn as_devices(mut self) -> Vec<String> {
self.iter_mut()
.for_each(|dev| *dev = format!("/measures/device${}", dev));
self
}
}
#[derive(Debug)]
struct MetricInstance {
dola_id : String,
name : String,
desc : String,
device : String,
source : String,
}
impl IntoEnodeRequset for &[MetricInstance] {
fn into_enode_request(self) -> String {
let mut vec: Vec<String> = Vec::new();
vec.push("%5B".to_owned());
self.iter()
.enumerate()
.for_each(|(id, val)| {
if id > 0 {
vec.push(",".to_owned());
}
vec.push(format!("%22{}%22", val.dola_id));
});
vec.push("%5D".to_owned());
vec.concat()
}
}
#[derive(Debug)]
struct MetricMeta {
name : String,
desc : String,
device : String,
source : String,
}
impl Default for MetricMeta {
fn default() -> Self {
Self {
name : String::new(),
desc : String::new(),
device : String::new(),
source : String::new(),
}
}
}
#[allow(private_interfaces)]
pub trait LazyUnzipInstance {
fn lazy_unzip(&self) -> HashMap<String, MetricMeta>;
}
impl LazyUnzipInstance for &[MetricInstance] {
fn lazy_unzip(&self) -> HashMap<String, MetricMeta> {
self.iter().map(
|obj|
(
obj.dola_id.to_string(),
MetricMeta::new(
&obj.name,
&obj.desc,
&obj.device,
&obj.source
)
)
).collect()
}
}
impl MetricInstance {
fn new(id : &str, name : &str, desc : &str, device : &str, source : &str) -> Self {
Self {
dola_id : id.to_owned(),
name : name.to_owned(),
desc : desc.to_owned(),
device : device.to_owned(),
source : source.to_owned(),
}
}
}
impl MetricMeta {
fn new(name : &str, desc : &str, device : &str, source : &str) -> Self {
Self {
name : name.to_owned(),
desc : desc.to_owned(),
device : device.to_owned(),
source : source.to_owned(),
}
}
}
/// # Fn `get_metrics_from_monitoring`
///
/// A function to init pulling and exporting metrics mechanism
/// from CM to Exporter. It spawns async tasks to get measures
/// and their values and then extract needed info to export
///
/// ### Dev-Info :
///
/// *input* : duration and delay as `usize` (in secs)
///
/// *output* : `anyhow::Result<()>`
///
/// *initiator* : fn `main`
///
/// *managing* : runtime of N async tasks (N - count of chunks)
///
/// # Example
///
/// ```
/// use api_grub::monitoring::get_metrics_from_monitoring;
///
/// // exec func without time restriction but with delay in 5 secs
/// assert_eq!(get_metrics_from_monitoring(0, 5).await, Ok(()));
/// ```
///
#[tracing::instrument(name = "cm_fn_initiator", skip_all)]
pub async fn get_metrics_from_monitoring(duration: usize, delay: usize) -> anyhow::Result<()> { pub async fn get_metrics_from_monitoring(duration: usize, delay: usize) -> anyhow::Result<()> {
let timer = tokio::time::Instant::now(); let timer = tokio::time::Instant::now();
let mut a = MonitoringImporter::new().await;
'outer: loop { 'outer: loop {
// let mut a = MonitoringImporter::new().await; let mut a = MonitoringImporter::new().await;
a.start_session().await?; a.start_session().await?;
tracing::debug!("CM creds struct - {:#?}", a);
let vec = Arc::new(a.get_metrics_list().await.unwrap_or_else(|_| vec![])); let vec = Arc::new(a.get_metrics_list().await.unwrap_or_else(|_| vec![]));
tracing::debug!("Measures Vec - {:#?}", vec);
'inner: loop { 'inner: loop {
if duration != 0 && timer.elapsed() >= tokio::time::Duration::from_secs(duration as u64) { if duration != 0 && timer.elapsed() >= tokio::time::Duration::from_secs(duration as u64) {
break 'outer; break 'outer;
} }
if vec.len() == 0 || a.get_measure_info(vec.clone()).await.is_err() { if let Err(_) = a.get_measure_info(vec.clone()).await {
warn!("Session dropped, creating new ..."); warn!("Session dropped, creating new ...");
break 'inner; break 'inner;
} }
tokio::time::sleep(tokio::time::Duration::from_secs(delay as u64)).await tokio::time::sleep(tokio::time::Duration::from_secs(delay as u64)).await
} }
} }
Ok(()) Ok(())
} }
/// An entity which handles CM creds #[derive(Debug, Clone)]
///
/// Used to capture measures and their values and to preprocess all measures into
/// the relevant Exporter structure
///
/// # Example:
///
/// ```
/// use api_grub::monitoring::MonitoringImporter;
///
/// let mut a = MonitoringImporter::new().await;
/// a.start_session().await?;
/// let vec = Arc::new(a.get_metrics_list().await.unwrap_or_else(|_| vec![]));
///
/// assert_eq!(a.get_measure_info(vec.clone()).await, Ok(()));
/// ```
///
#[derive(Clone)]
pub struct MonitoringImporter { pub struct MonitoringImporter {
ip : String, ip : String,
login : String, login : String,
password : String, password : String,
access_token : String, access_token : String,
api_token : String,
ts : String, ts : String,
timeout : usize,
} }
impl MonitoringImporter { impl MonitoringImporter {
/// The most simple constructor for `MonitoringImporter`
///
/// Returns `Self` object that is constructing according to
/// env vars:
/// - `ENODE_MONITORING_IP`
/// - `ENODE_MONITORING_LOGIN`
/// - `ENODE_MONITORING_PASSWORD`
///
/// If the env vars are not set, it returns `Self` with
/// empty fields
///
pub async fn new() -> Self { pub async fn new() -> Self {
MonitoringImporter { MonitoringImporter {
ip : env::var("ENODE_MONITORING_IP").unwrap_or_else(|_| String::new()), ip : env::var("ENODE_MONITORING_IP").unwrap_or_else(|_| String::new()),
login : env::var("ENODE_MONITORING_LOGIN").unwrap_or_else(|_| String::new()), login : env::var("ENODE_MONITORING_LOGIN").unwrap_or_else(|_| String::new()),
password : env::var("ENODE_MONITORING_PASSWORD").unwrap_or_else(|_| String::new()), password : env::var("ENODE_MONITORING_PASSWORD").unwrap_or_else(|_| String::new()),
access_token : String::new(), access_token : String::new(),
api_token : env::var("ENODE_API_TOKEN").unwrap_or_else(|_| String::new()),
ts : String::new(), ts : String::new(),
timeout : std::env::var("IM_CONNECTION_TIMEOUT").unwrap_or_else(|_| "10".to_string()).parse().unwrap_or_else(|_| 10)
} }
} }
/// Function that checks whether the current `MonitoringImporter` is valid
/// and can be used to pull and push info to and from the CM
///
async fn is_valid(&self) -> bool { async fn is_valid(&self) -> bool {
!self.ip.is_empty() && ((!self.login.is_empty() && !self.password.is_empty() ) || !self.api_token.is_empty()) !self.ip.is_empty() && !self.login.is_empty() && !self.password.is_empty()
} }
/// Function that checks whether the current `MonitoringImporter` carries only
/// an API token and lacks full login/password credentials
///
async fn is_minimal(&self) -> bool {
(self.login.is_empty() || self.password.is_empty()) && !self.api_token.is_empty()
}
/// A setter of `timestamp`
///
/// This function is needed to set a `timestamp` after
/// CM session creation.
///
/// This `timestamp` is a date of creation a session
/// on the CM Server
async fn set_ts(&mut self, ts: &str) { async fn set_ts(&mut self, ts: &str) {
self.ts = ts.to_owned(); self.ts = ts.to_owned();
} }
/// A function for creation CM session
///
/// Returns Ok(()) if the session was created and there were
/// no errors (neither internal nor external)
///
/// *Also* it saves the ts and access-key in its runtime environment;
/// there's no way to get the access-key of the session
#[tracing::instrument(name = "cm_fn_session_start", skip_all)]
pub async fn start_session(&mut self) -> anyhow::Result<()> { pub async fn start_session(&mut self) -> anyhow::Result<()> {
if !self.is_valid().await { if !self.is_valid().await {
if self.is_minimal().await {
return Err(Error::msg(format!("Given API-Token is no more actual now ({})", &self.access_token)));
}
return Err(Error::msg("Invalid eNODE-Monitoring configuration")); return Err(Error::msg("Invalid eNODE-Monitoring configuration"));
} }
if !self.api_token.is_empty() { let client = Client::new();
std::mem::swap(&mut self.access_token, &mut self.api_token); let url = format!("http://{}/e-data-front/auth/login", self.ip);
info!("API-Token that was in the ENODE configuration was set as access-token"); let fortoken = ForTokenCredentials::new(&self.login, &self.password);
} else {
let client = Client::new(); let client = client
let url = format!("http://{}/e-data-front/auth/login", self.ip); .post(url)
let fortoken = ForTokenCredentials::new(&self.login, &self.password); .header("Content-Type", "application/json")
let mut delay = 1; .json(&fortoken);
let resp = client.send().await?;
let auth = resp.json::<AuthResponse>().await?;
self.set_ts(&fortoken.ts).await;
self.access_token = auth.access_token.to_owned();
loop {
let client = client
.post(&url)
.timeout(tokio::time::Duration::from_secs(self.timeout as u64))
.header("Content-Type", "application/json")
.json(&fortoken);
if let Ok(resp) = client.send().await {
match resp.json::<AuthResponse>().await {
Ok(auth) => {
self.set_ts(&fortoken.ts).await;
self.access_token = auth.access_token.to_owned();
tracing::trace!("Access key was changed");
break;
},
Err(er) => error!("Error with extracting access-key from CM response due to {}", er),
}
}
error!("Error while trying to create a new session, waiting {} secs and retrying ...", delay);
tokio::time::sleep(tokio::time::Duration::from_secs(delay)).await;
delay = delay * 2;
}
info!("Started a new CM session");
}
Ok(()) Ok(())
} }
/// A function for pulling measures list pub async fn get_metrics_list(&self) -> anyhow::Result<Vec<(String, String)>> {
///
/// Used with actual credentials for current CM session
/// and returning measures in format of `Ok(Vec<(String, String)>)`
/// , where `(String, String)` is a tuple of measure `id` and `description`
/// (`name`)
#[tracing::instrument(name = "CM get metrics list mechanism", skip_all)]
pub async fn get_metrics_list(&self) -> anyhow::Result<Vec<MetricInstance>> {
tracing::trace!("Trying ti get measures list from CM ...");
let client = Client::new(); let client = Client::new();
let mut vec: Vec<MetricInstance> = Vec::new(); let mut vec: Vec<(String, String)> = Vec::new();
let url = format!("http://{}/e-cmdb/api/query", self.ip); let url = format!("http://{}/e-cmdb/api/query", self.ip);
let id_list = {
match std::env::var("ENODE_TARGET_DEVICES") {
Err(_) => vec![String::from("18"), String::from("19")],
Ok(var) => var.split(',').into_iter().map(|st| st.trim().to_string()).collect::<Vec<String>>(),
}
};
let list_of_devices = id_list.clone().as_devices();
let client = client let client = client
.post(url) .post(url)
.timeout(tokio::time::Duration::from_secs(self.timeout as u64))
.header("Content-Type", "application/json") .header("Content-Type", "application/json")
.bearer_auth(&self.access_token) .header("access-token", &self.access_token)
.json(&Query::device_oriented(list_of_devices)); .json(&Query::default());
let resp = client.send().await?.text().await?; let resp = client.send().await?.text().await?;
let resp: Value = serde_json::from_str(&resp)?; let resp: Value = serde_json::from_str(&resp)?;
if let Some(arr) = resp.as_array() { if let Some(arr) = resp.as_array() {
for device in arr { for measure in arr {
let device_id = { let id = measure.get("id");
match device.get("name") { let cls = measure.get("cls");
Some(name) => { let name = measure.get("name");
match serde_json::to_string(name) { if id.is_some() && cls.is_some() {
Ok(name) => { // todo: later wait for Vaitaliy call of classification
name.split('$').last().unwrap_or_else(|| "undefined-device").to_owned() let id = id.unwrap().as_i64().unwrap_or_default();
}, let cls = cls.unwrap().as_str().unwrap_or_else(|| "");
Err(_) => "undefined-device".to_string(), let name = name.unwrap_or_else(|| &Value::Null).as_str().unwrap_or_else(|| "null");
} if cls.is_empty() {
}, return Err(Error::msg("Invalid JSON in response. Wrong types of fields (`id` or `cls`)"));
None => "undefined-device".to_string(),
}
};
let device_id = device_id.trim_end_matches('"');
if let Some(links) = device.get("links") {
if let Some(measures) = links.as_array() {
for measure in measures.iter() {
let dola_id = measure.get("id");
let id = measure.get("measure_id");
let source = measure.get("source_id");
let desc = measure.get("name");
if id.is_some() && source.is_some() && dola_id.is_some() {
let dola_id = format!("measure${}", dola_id.unwrap().as_i64().unwrap_or_else(|| 0));
let id = id.unwrap().as_str().unwrap_or_else(|| "no-name");
let source = source.unwrap().as_str().unwrap_or_else(|| "no-source");
let desc = desc.unwrap_or_else(|| &Value::Null).as_str().unwrap_or_else(|| "no description");
if source.is_empty() {
return Err(Error::msg("Invalid JSON in response. Wrong types of fields (`measure_id` or `source_id`)"));
}
vec.push(MetricInstance::new(&dola_id, id, desc, device_id.as_ref(), source));
}
}
} }
vec.push((format!("{}${}", cls, id), name.to_string()));
} }
} }
} else { } else {
@ -366,25 +114,7 @@ impl MonitoringImporter {
info!("List of measures was pulled, total - {}", &vec.len()); info!("List of measures was pulled, total - {}", &vec.len());
Ok(vec) Ok(vec)
} }
pub async fn get_measure_info(&self, measures: Arc<Vec<(String, String)>>) -> anyhow::Result<()> {
/// A function to get realtime data
///
/// It pulls info about one measure or a slice of measures and
/// exports all data to the Prometheus exporter
///
/// # How it works
/// 1) creates a restriction for max count of async
/// tasks (`tokio::sync::Semaphore`)
///
/// 2) divides the vec of measures into chunks of optimal size
/// to balance its own and the server's load
///
/// 3) spawns async grabber tasks that fetch the measures' info and
/// export all data by themselves
///
#[tracing::instrument(name = "CM get measures info mechanism", skip_all)]
pub async fn get_measure_info(&self, measures: Arc<Vec<MetricInstance>>) -> anyhow::Result<()> {
tracing::trace!("Trying to get info about each measure ...");
let mut sys = sysinfo::System::new(); let mut sys = sysinfo::System::new();
sys.refresh_cpu_all(); sys.refresh_cpu_all();
// adaptive permission on task spawn to prevent system overload // adaptive permission on task spawn to prevent system overload
@ -401,16 +131,11 @@ impl MonitoringImporter {
let arc = arc.clone(); let arc = arc.clone();
let client = client.clone(); let client = client.clone();
let hm = measure.lazy_unzip(); let hm = measure.lazy_unzip();
let measure = Arc::new(measure.into_enode_request()); let measure = Arc::new(measure.display());
let _permit = permit.acquire().await.unwrap(); let _permit = permit.acquire().await.unwrap();
let jh: JoinHandle<anyhow::Result<PrometheusMetricsExtended>> = tokio::spawn(async move { let jh: JoinHandle<anyhow::Result<PrometheusMetricsExtended>> = tokio::spawn(async move {
Self::process_endpoint( Self::process_endpoint(measure.clone(), client.clone(), arc.clone(), &hm).await
measure.clone(),
client.clone(),
arc.clone(),
&hm,
).await
}); });
jh_vec.push(jh); jh_vec.push(jh);
@ -419,42 +144,23 @@ impl MonitoringImporter {
for event in jh_vec { for event in jh_vec {
match event.await { match event.await {
Ok(val) => { Ok(val) => {
match crate::export::Exporter::export_extended_metrics(val?).await { if let Ok(val) = val {
Ok(bytes) => {info!("Successfully transmitted {} bytes", bytes)}, match crate::export::Exporter::export_extended_metrics(val).await {
Err(er) => error!("Cannot export data due to : `{}`", er), Ok(bytes) => {info!("Successfully transmitted {} bytes to the Prometehus exporter", bytes)},
Err(er) => error!("Cannot export data to the Prometehus exporter due to : `{}`", er),
}
} }
}, },
Err(er) => { Err(er) => println!("Fatal error on async task: {}", er),
println!("Fatal error on async task: {}", er);
return Err(anyhow::Error::msg(format!("Fatal error on async task: {}", er)))
},
} }
} }
Ok(()) Ok(())
} }
async fn process_endpoint(measure: Arc<String>, client: Arc<Client>, arc: Arc<Self>, hm: &HashMap<String, String>) -> anyhow::Result<PrometheusMetricsExtended> {
/// An async task-grabber
///
/// Used to create request to the CM server and
/// get all measure(s) data
///
/// # Also
/// An argument `measure: Arc<String>` can be a single measure like `measure$1` or
/// a slice of measures in special format `%5B%22measure$1%22,%20%22measure$2%22%5D`.
/// This is a necessary measure to handle two types of requests and URL restrictions
///
async fn process_endpoint(
measure: Arc<String>,
client: Arc<Client>,
arc: Arc<Self>,
hm: &HashMap<String, MetricMeta>,
) -> anyhow::Result<PrometheusMetricsExtended> {
tracing::trace!("Processing CM endpoint with one or more measure names");
let resp = client let resp = client
.get(format!("http://{}/e-nms/mirror/measure/{}", arc.ip, &measure)) .get(format!("http://{}/e-nms/mirror/measure/{}", arc.ip, &measure))
.timeout(tokio::time::Duration::from_secs(arc.timeout as u64))
.header("Content-Type", "application/json") .header("Content-Type", "application/json")
.bearer_auth(&arc.access_token) .header("access-token", &arc.access_token)
.send().await? .send().await?
.text().await?; .text().await?;
tokio::task::yield_now().await; tokio::task::yield_now().await;
@ -464,22 +170,7 @@ impl MonitoringImporter {
PrometheusMetricsExtended::new_zvks(Self::extract_metric_data(resp, hm).await?).await PrometheusMetricsExtended::new_zvks(Self::extract_metric_data(resp, hm).await?).await
) )
} }
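A hedged illustration of the request described above; the host, token and timeout are placeholder values, not the real CM endpoint, and the slice-encoding helper is hypothetical (it merely mirrors the format quoted in the doc comment). One branch authenticates with `bearer_auth`, the other with an `access-token` header:

```rust
use std::sync::Arc;
use reqwest::Client;

// Hypothetical helper mirroring the URL-encoded slice format from the doc
// comment: %5B%22measure$1%22,%20%22measure$2%22%5D
fn encode_measure_slice(measures: &[&str]) -> String {
    let joined = measures
        .iter()
        .map(|m| format!("%22{m}%22"))
        .collect::<Vec<_>>()
        .join(",%20");
    format!("%5B{joined}%5D")
}

async fn fetch_measure(client: Arc<Client>, ip: &str, token: &str, measure: &str) -> anyhow::Result<String> {
    let body = client
        .get(format!("http://{ip}/e-nms/mirror/measure/{measure}"))
        .timeout(std::time::Duration::from_secs(10)) // placeholder timeout
        .header("Content-Type", "application/json")
        .bearer_auth(token) // or .header("access-token", token) in the other branch
        .send()
        .await?
        .error_for_status()? // surface HTTP errors early instead of parsing an error page
        .text()
        .await?;
    Ok(body)
}
```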
fn extract_metric_data(json: Value, hm: &HashMap<String, String>) -> Pin<Box<dyn Future<Output = anyhow::Result<Vec<MetricOutputExtended>>> + Send + '_>> {
/// A recursive data extractor
///
/// Takes the CM-server JSON response as a `Value` and a `HashMap` that maps
/// measure `id`s to their corresponding descriptions
///
/// # How it works
/// 1) if `Value` is an `Object` -> runs `Self::process_value` on it and
/// returns the function's result as a `Vec`
///
/// 2) if `Value` is an `Array` -> calls itself for each part of the array
/// and aggregates all data into the `Vec` using `.append(&mut Vec<...>)`
///
/// 3) if `Value` is anything else -> returns the error **Invalid JSON format**
/// (a simplified sketch of this recursion follows the function below)
///
///
fn extract_metric_data(json: Value, hm: &HashMap<String, MetricMeta>) -> Pin<Box<dyn Future<Output = anyhow::Result<Vec<MetricOutputExtended>>> + Send + '_>> {
Box::pin(async move { Box::pin(async move {
return match json { return match json {
Value::Object(obj) => { Value::Object(obj) => {
@ -499,64 +190,50 @@ impl MonitoringImporter {
} }
}) })
} }
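A simplified sketch of that recursion, assuming the boxed-future signature shown above; it collects placeholder strings instead of building `MetricOutputExtended`:

```rust
use std::future::Future;
use std::pin::Pin;
use anyhow::Error;
use serde_json::Value;

fn walk(json: Value) -> Pin<Box<dyn Future<Output = anyhow::Result<Vec<String>>> + Send>> {
    Box::pin(async move {
        match json {
            // 1) a single object: process it and return a one-element Vec
            Value::Object(obj) => Ok(vec![format!("object with {} fields", obj.len())]),
            // 2) an array: recurse into every element and append the results
            Value::Array(arr) => {
                let mut out = Vec::new();
                for part in arr {
                    out.append(&mut walk(part).await?);
                }
                Ok(out)
            }
            // 3) anything else is rejected
            _ => Err(Error::msg("Invalid JSON format")),
        }
    })
}
```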
async fn process_value(obj : &Map<String, Value>, hm: &HashMap<String, String>) -> anyhow::Result<MetricOutputExtended> {
/// An extractor for a single measure object
///
/// Searches for the relevant fields and aggregates them into a
/// `MetricOutputExtended` object
async fn process_value(obj : &Map<String, Value>, hm: &HashMap<String, MetricMeta>) -> anyhow::Result<MetricOutputExtended> {
tracing::trace!("Processing atomic Object value in CM JSON-response");
let id = obj.get("$id"); let id = obj.get("$id");
let val = obj.get("value"); let val = obj.get("value");
let description = {
let dola_ip = obj.get("$id").unwrap_or_else(|| &Value::Null);
let zero = String::new();
if dola_ip.is_null() {
zero
} else {
hm.get(
dola_ip.as_str().unwrap_or_else(|| "")
)
.unwrap_or_else(|| &zero)
.to_owned()
}
};
if id.is_none() || val.is_none() { if id.is_none() || val.is_none() {
return Err(Error::msg("Cannot get values of fields `id` and `value` from JSON Response")) return Err(Error::msg("Cannot get values of fields `id` and `value` from JSON Response"))
} }
let id = id.unwrap().as_str().unwrap_or_else(|| "").replace("$", "_");
let id = id.unwrap().as_str().unwrap_or_else(|| "");
let default_meta = MetricMeta::default();
let meta = hm.get(id).unwrap_or_else(|| &default_meta);
let id = id.replace("$", "_");
let val = val.unwrap(); let val = val.unwrap();
let device = meta.device.parse::<usize>().unwrap_or_else(|_| 0);
if id.is_empty() { if id.is_empty() {
return Err(Error::msg("Empty `id` field. Invalid JSON response")) return Err(Error::msg("Empty `id` field. Invalid JSON response"))
} }
Ok(MetricOutputExtended::new_with_slices(
id.as_ref(), Ok(MetricOutputExtended {
&meta.name, id : id.to_owned(),
{ json_type : match val {
match val {
Value::Number(val) => { Value::Number(val) => {
if val.is_i64() { if val.is_i64() {
"i64" "i64".to_owned()
} else if val.is_u64() { } else if val.is_u64() {
"u64" "u64".to_owned()
} else { } else {
"f64" "f64".to_owned()
} }
}, },
_ => "unknown", _ => "unknown".to_owned(),
}
}, },
"enode.monitoring.api", addr : "enode.monitoring.api".to_owned(),
&meta.desc, desc : description,
Some(device), value : val.clone()
Some(meta.source.clone()), })
val.clone(),
))
}
}
impl std::fmt::Debug for MonitoringImporter {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("MonitoringImporter")
.field("ip", &self.ip)
.field("login", &self.login)
.field("password", &"****")
.field("access_key", &"HIDDEN")
.field("ts", &self.ts)
.finish()
} }
} }
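The number classification in `process_value` boils down to the checks below. A standalone sketch, not the project's types: `is_i64` is tested first, then `is_u64`, and the remaining numbers fall back to `f64`.

```rust
use serde_json::Value;

// Tag a serde_json value the way process_value does.
fn json_type(val: &Value) -> &'static str {
    match val {
        Value::Number(n) if n.is_i64() => "i64",
        Value::Number(n) if n.is_u64() => "u64",
        Value::Number(_) => "f64",
        _ => "unknown",
    }
}

fn main() {
    assert_eq!(json_type(&Value::from(-3)), "i64");
    assert_eq!(json_type(&Value::from(u64::MAX)), "u64");
    assert_eq!(json_type(&Value::from(3.5_f64)), "f64");
    assert_eq!(json_type(&Value::from("text")), "unknown");
}
```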

View File

@ -1,7 +1,7 @@
// module to handle unix-socket connection + pulling info from api // module to handle unix-socket connection + pulling info from api
use anyhow::Result; use anyhow::Result;
use tracing::{error, info}; use log::{error, info};
use rand::random; use rand::random;
use tokio::sync::mpsc::Receiver; use tokio::sync::mpsc::Receiver;
use tokio::time::{sleep, Duration}; use tokio::time::{sleep, Duration};
@ -97,8 +97,9 @@ impl<'a> ApiPoll<'a> {
error!("Bad JSON in response. Error: {}", er); error!("Bad JSON in response. Error: {}", er);
}, },
Ok(_) => { Ok(_) => {
let endpoint_name = &metrics.name;
let preproc = JsonParser::parse(&metrics.measure, &response); let preproc = JsonParser::parse(&metrics.measure, &response);
let preproc = PrometheusMetrics::new(&service_id, preproc); let preproc = PrometheusMetrics::new(&service_id, endpoint_name, preproc);
match Exporter::export_metrics(preproc).await { match Exporter::export_metrics(preproc).await {
Ok(bytes) => { Ok(bytes) => {
info!("Successfully exported {} bytes of metrics data to Prometheus", bytes); info!("Successfully exported {} bytes of metrics data to Prometheus", bytes);

View File

@ -2,16 +2,9 @@
name = "integr-structs" name = "integr-structs"
version = "0.1.0" version = "0.1.0"
edition = "2021" edition = "2021"
description = "Structs for API poller in ZVKS project"
homepage = "http://git.enode/deployer3000/integration-module/src/branch/master/crates/integr-structs"
repository = "http://git.enode/deployer3000/integration-module/src/branch/master/crates/integr-structs"
license = "MIT OR Apache-2.0"
keywords = ["api", "grub", "zvks", "structs", "contracts"]
publish = ["kellnr"]
[dependencies] [dependencies]
anyhow = "1.0.95" anyhow = "1.0.95"
chrono = "0.4.40" chrono = "0.4.40"
dotenv = "0.15.0"
serde = { version = "1.0.217", features = ["derive"] } serde = { version = "1.0.217", features = ["derive"] }
serde_json = "1.0.135" serde_json = "1.0.135"

View File

@ -160,16 +160,6 @@ pub mod v3 {
pub json_type : String, pub json_type : String,
pub addr : String, pub addr : String,
} }
impl Metric {
pub fn template() -> Self {
Self {
id : "room_tempo".to_string(),
json_type : "String".to_string(),
addr : "flat1.room1.rt_tempo".to_string(),
}
}
}
#[derive(Serialize, Deserialize, Clone, Debug)] #[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Metrics { pub struct Metrics {
pub name : String, pub name : String,
@ -265,29 +255,21 @@ pub mod v3 {
#[derive(Serialize, Deserialize, Debug)] #[derive(Serialize, Deserialize, Debug)]
pub struct MetricOutputExtended { pub struct MetricOutputExtended {
pub id : String, pub id : String,
pub name : String,
#[serde(rename = "type")] #[serde(rename = "type")]
pub json_type : String, pub json_type : String,
pub addr : String, pub addr : String,
pub value : Value, pub value : Value,
#[serde(rename = "description")] #[serde(rename = "description")]
pub desc : String, pub desc : String,
pub status: usize,
pub device: Option<usize>,
pub source: Option<String>,
} }
impl MetricOutputExtended { impl MetricOutputExtended {
pub fn new_with_slices(id : &str, name: &str, json_type : &str, addr: &str, desc : &str, device: Option<usize>, source: Option<String>, value : Value) -> Self { pub fn new_with_slices(id : &str, json_type : &str, addr: &str, desc : &str, value : Value) -> Self {
MetricOutputExtended { MetricOutputExtended {
id : id.to_string(), id : id.to_string(),
name : name.to_string(),
json_type : json_type.to_string(), json_type : json_type.to_string(),
addr : addr.to_string(), addr : addr.to_string(),
value : value, value : value,
desc : desc.to_string(), desc : desc.to_string(),
device,
source,
status: 0,
} }
} }
} }
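The `#[serde(rename = ...)]` attributes above mean the wire format uses `type` and `description` while the Rust fields stay `json_type` and `desc`. A minimal sketch with a throwaway struct (not the project's `MetricOutputExtended`):

```rust
use serde::{Deserialize, Serialize};
use serde_json::json;

#[derive(Serialize, Deserialize, Debug)]
struct Sample {
    id: String,
    #[serde(rename = "type")]
    json_type: String,
    #[serde(rename = "description")]
    desc: String,
}

fn main() -> anyhow::Result<()> {
    let sample = Sample {
        id: "device_18".to_owned(),
        json_type: "f64".to_owned(),
        desc: "placeholder description".to_owned(),
    };
    // The serialized JSON carries the renamed keys.
    let wire = serde_json::to_value(&sample)?;
    assert_eq!(
        wire,
        json!({ "id": "device_18", "type": "f64", "description": "placeholder description" })
    );
    Ok(())
}
```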
@ -295,21 +277,21 @@ pub mod v3 {
#[derive(Serialize, Deserialize, Debug)] #[derive(Serialize, Deserialize, Debug)]
pub struct PrometheusMetrics { pub struct PrometheusMetrics {
pub service_name: String, pub service_name: String,
// pub endpoint_name: String, pub endpoint_name: String,
pub metrics: Vec<MetricOutput>, pub metrics: Vec<MetricOutput>,
} }
impl PrometheusMetrics { impl PrometheusMetrics {
pub fn new(service: &str, metrics: Vec<MetricOutput>) -> Self { pub fn new(service: &str, endpoint: &str, metrics: Vec<MetricOutput>) -> Self {
Self { Self {
service_name: service.to_string(), service_name: service.to_string(),
// endpoint_name: endpoint.to_string(), endpoint_name: endpoint.to_string(),
metrics: metrics metrics: metrics
} }
} }
pub async fn new_zvks(metrics: Vec<MetricOutput>) -> Self { pub async fn new_zvks(metrics: Vec<MetricOutput>) -> Self {
Self { Self {
service_name : "zvks".to_owned(), service_name : "zvks".to_owned(),
// endpoint_name : "apiforsnmp".to_owned(), endpoint_name : "apiforsnmp".to_owned(),
metrics : metrics, metrics : metrics,
} }
} }
@ -323,21 +305,14 @@ pub mod v3 {
#[derive(Serialize, Deserialize, Debug)] #[derive(Serialize, Deserialize, Debug)]
pub struct PrometheusMetricsExtended { pub struct PrometheusMetricsExtended {
pub service_name: String, pub service_name: String,
pub endpoint_name: String,
pub metrics: Vec<MetricOutputExtended>, pub metrics: Vec<MetricOutputExtended>,
} }
impl PrometheusMetricsExtended { impl PrometheusMetricsExtended {
pub fn new_empty_jitter() -> Self {
Self {
service_name : "zvks".to_owned(),
metrics : Vec::new(),
}
}
pub fn add(&mut self, metric: MetricOutputExtended) {
self.metrics.push(metric);
}
pub async fn new_zvks(metrics: Vec<MetricOutputExtended>) -> Self { pub async fn new_zvks(metrics: Vec<MetricOutputExtended>) -> Self {
Self { Self {
service_name : "zvks".to_owned(), service_name : "zvks".to_owned(),
endpoint_name : "apiforsnmp".to_owned(),
metrics : metrics, metrics : metrics,
} }
} }
@ -380,18 +355,18 @@ pub mod enode_monitoring {
pub struct Query { pub struct Query {
id : Vec<String>, id : Vec<String>,
data : Data, data : Data,
// #[serde(rename = "postQuery")] #[serde(rename = "postQuery")]
// post_query : String, post_query : String,
#[serde(rename = "enableActions")] #[serde(rename = "enableActions")]
enable_actions : bool, enable_actions : bool,
ts : usize ts : usize
} }
impl Query { impl Default for Query {
pub fn device_oriented(devices: Vec<String>) -> Self { fn default() -> Self {
Self { Self {
id : devices, id : vec!["/measures/device$18".to_owned()],
data : Data::default(), data : Data::default(),
// post_query : "links".to_owned(), post_query : "links".to_owned(),
enable_actions : false, enable_actions : false,
ts : 1740060679399 ts : 1740060679399
} }
@ -419,7 +394,7 @@ pub mod enode_monitoring {
fn default() -> Self { fn default() -> Self {
Self { Self {
flatten : true, flatten : true,
filter : Filter::default(), filter : Filter::default()
} }
} }
} }
@ -436,6 +411,7 @@ pub mod enode_monitoring {
} }
} }
// "{\"access_token\":\"5BNQsmiGFQRNA651HeQxZekYgYUAWZ4e\",\"role\":\"administrator\",\"startRT\":\"2025-02-25T09:03:27.581Z\",\"login\":\"admin\",\"push_active\":null,\"$id\":\"systemuser$1\"}",
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
pub struct AuthResponse { pub struct AuthResponse {
pub access_token : String, pub access_token : String,
@ -489,9 +465,14 @@ pub mod enode_monitoring {
pub fn get_chunk_size(total_measures: usize) -> usize { pub fn get_chunk_size(total_measures: usize) -> usize {
match total_measures { match total_measures {
0..=432 => total_measures, 0..=144 => total_measures,
433..=1008 => total_measures / 2, 145..=288 => total_measures / 4,
_ => total_measures / 4, 289..=432 => total_measures / 5,
433..=576 => total_measures / 6,
577..=720 => total_measures / 7,
721..=864 => total_measures / 8,
865..=1008 => total_measures / 9,
_ => total_measures / 10,
} }
} }
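A usage sketch of how a caller might apply `get_chunk_size`; the thresholds below are copied from one branch only (the other uses a finer split, as shown above), and the guard against a zero chunk size is an added assumption for an empty measure list:

```rust
// One branch's thresholds, copied from the diff above.
fn get_chunk_size(total_measures: usize) -> usize {
    match total_measures {
        0..=432 => total_measures,
        433..=1008 => total_measures / 2,
        _ => total_measures / 4,
    }
}

fn split_measures(measures: Vec<String>) -> Vec<Vec<String>> {
    // Guard against a zero chunk size (e.g. an empty measure list).
    let size = get_chunk_size(measures.len()).max(1);
    measures.chunks(size).map(|chunk| chunk.to_vec()).collect()
}

fn main() {
    let measures: Vec<String> = (0..500).map(|i| format!("measure${i}")).collect();
    // 500 measures -> chunk size 250 -> two chunks.
    assert_eq!(split_measures(measures).len(), 2);
}
```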